From 124f392f66bc4531d8558bc1f5305614917fce76 Mon Sep 17 00:00:00 2001 From: swcompiler Date: Thu, 8 May 2025 16:13:45 +0800 Subject: [PATCH 1/2] Add sw_64 ISA support --- 3000-sw_64.patch | 119758 +++++++++++++++++++++++++++++++++++++ java-1.8.0-openjdk.spec | 50 +- 2 files changed, 119794 insertions(+), 14 deletions(-) create mode 100644 3000-sw_64.patch diff --git a/3000-sw_64.patch b/3000-sw_64.patch new file mode 100644 index 0000000..31e876d --- /dev/null +++ b/3000-sw_64.patch @@ -0,0 +1,119758 @@ +diff -uNr openjdk/c4_cross_swcompile afu8u/c4_cross_swcompile +--- openjdk/c4_cross_swcompile 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/c4_cross_swcompile 2025-05-06 10:53:44.691633659 +0800 +@@ -0,0 +1,63 @@ ++#!/bin/bash ++ ++# JDK_DIR can not cross make, because our cross_compiler is not regular sys-root. ++# Some include files use x86_64's include files in jdk directory, so it leads to ++# some macros are not correct. ++# I suggest to compile images in sw_64 platform, while compile hotspot as cross model. 
++ ++ JDK_DIR=${PWD}/build/linux-sw64-normal-server-slowdebug/images/j2sdk-image ++ if [ $# -gt 0 ] ++ then ++ JDK_DIR="$2" ++ fi ++ JVMPATH_DST=${JDK_DIR}/jre/lib/sw64/server ++ ++# $1: debug level (release, fastdebug, slowdebug) ++case "$1" in ++ slowdebug) ++ MYCONF=linux-sw64-normal-server-slowdebug ++ JVMPATH_SRC=${PWD}/build/linux-sw64-normal-server-slowdebug/hotspot/dist/jre/lib/sw64/server ++ JVMPATH_DST=${PWD}/$2/jre/lib/sw64/server ++# JVMPATH_DST=${PWD}/build_native/linux-sw64-normal-server-slowdebug/images/j2sdk-image/jre/lib/sw64/server ++ ;; ++ release) ++ MYCONF=linux-sw64-normal-server-release ++ JVMPATH_SRC=${PWD}/build/linux-sw64-normal-server-release/hotspot/dist/jre/lib/sw64/server ++ JVMPATH_DST=${PWD}/$2/jre/lib/sw64/server ++# JVMPATH_DST=${PWD}/build_native/linux-sw64-normal-server-release/images/j2sdk-image/jre/lib/sw64/server ++ ;; ++ *) ++ echo " Usage:" ++ echo " bash c4_cross_swcompile {release|slowdebug} JDK_DIR" ++ exit 1 ++ ;; ++esac ++ ++# 1) make ++echo make STRIP_POLICY=no_strip POST_STRIP_CMD="" LOG="debug" CONF=${MYCONF} hotspot ++make LOG="debug" CONF=${MYCONF} hotspot ++ ++# 2) copy libjvm.so to JDK directory! (no libjvm.diz, because I have disable-zip-debug-info in cross_swconfigure) ++# JVMPATH_SRC=${PWD}/build/linux-sw64-normal-server-slowdebug/hotspot/dist/jre/lib/sw64/server ++# JVMPATH_DST=${PWD}/build_native/linux-sw64-normal-server-slowdebug/images/j2sdk-image/jre/lib/sw64/server ++ ++echo ++echo ++echo " * =========================================================================================================" ++echo " * Now copy libjvm.so ......" 
++echo " * dst is : $JVMPATH_DST " ++echo " * ---------------------------------------------------------------------------------------------------------" ++echo ++if [ -d ${JVMPATH_DST} ] ; then ++ echo " * cp -f ${JVMPATH_SRC}/libjvm.debuginfo ${JVMPATH_DST}" ++ cp -f ${JVMPATH_SRC}/libjvm.debuginfo ${JVMPATH_DST} ++ echo " * cp -f ${JVMPATH_SRC}/libjvm.so ${JVMPATH_DST}" ++ cp -f ${JVMPATH_SRC}/libjvm.so ${JVMPATH_DST} ++else ++ echo " Copy FAILED! No corresponding jdk images: ${JVMPATH_DST}" ++fi ++echo ++echo " * ---------------------------------------------------------------------------------------------------------" ++echo " * End of cross compiling hotspot for swjdk8 ." ++echo " * =========================================================================================================" ++echo +diff -uNr openjdk/c4_cross_swconfigure afu8u/c4_cross_swconfigure +--- openjdk/c4_cross_swconfigure 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/c4_cross_swconfigure 2025-05-06 10:53:44.691633659 +0800 +@@ -0,0 +1,39 @@ ++#!/bin/bash ++# $1: debug level (release, fastdebug, slowdebug) ++ ++#which is used for 9906 ++#crosscompiler=swgcc710-cross-6c-peak-1 ++ ++#which is used for 9916 ++crosscompiler=swgcc830_cross_tools ++ ++case "$1" in ++ slowdebug) ++ bash configure \ ++ --openjdk-target=sw_64-unknown-linux-gnu \ ++ --with-devkit=/usr/sw/$crosscompiler/usr/ \ ++ --x-includes=/usr/sw/$crosscompiler/usr/include \ ++ --x-libraries=/usr/sw/$crosscompiler/usr/lib \ ++ --with-freetype-include=/usr/sw/$crosscompiler/usr/include/freetype2 \ ++ --with-freetype-lib=/usr/sw/$crosscompiler/usr/lib/sw_64-linux-gnu \ ++ --disable-zip-debug-info \ ++ --with-debug-level=slowdebug \ ++ --disable-ccache ++ ;; ++ release) ++ bash configure \ ++ --openjdk-target=sw_64-unknown-linux-gnu \ ++ --with-devkit=/usr/sw/$crosscompiler/usr/ \ ++ --x-includes=/usr/sw/$crosscompiler/usr/include \ ++ --x-libraries=/usr/sw/$crosscompiler/usr/lib \ ++ 
--with-freetype-include=/usr/sw/$crosscompiler/usr/include/freetype2 \ ++ --with-freetype-lib=/usr/sw/$crosscompiler/usr/lib/sw_64-linux-gnu \ ++ --disable-zip-debug-info \ ++ --with-debug-level=release \ ++ --disable-ccache ++ ;; ++ *) ++ echo " Usage:" ++ echo " ./c4_cross_swconfigure {release|slowdebug}" ++ ;; ++esac +diff -uNr openjdk/Changelog.md afu8u/Changelog.md +--- openjdk/Changelog.md 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/Changelog.md 2025-05-06 10:53:44.687633659 +0800 +@@ -0,0 +1,26 @@ ++swjdk8u312-sw1.3.1(20240914)版主要修改 ++1.优化byte,short类型的arraycopy ++2.优化加负立即数 ++3.优化浮点比较 ++4.新增sun.misc.Unsafe.copyMemory的intrinsic实现 ++5.优化UseCRC32Intrinsics ++6.优化反序列化 ++7.修复slowdebug版本开启-XX:+VerifyOops报错问题 ++8.修复Truncate.java报SIGBUS问题 ++9.优化rsa ++10.优化MembarRelease ++ ++swjdk8u312-sw1.3.0(20240328)版主要修改 ++1.8A与6B代码同源,通过读取cpuinfo自动适配8A ++2.实现部分Core4新增指令优化 ++ ++swjdk8u312-sw1.2.0(20240304)版主要修改: ++1.按照6B平台本地方法栈帧结构,实现NMT及JFR; ++2.解决hadoop集群测试报出的偶发IllegalMonitorStateExeption错; ++3.解决jdk stram NullPointerException错,添加2处memb指令(UseNecessaryMembar,使得6B平台SPECjvm以及SPECjbb的Max-Jops和critical-Jops下降约4%); ++4.增加ReservedCodeCacheSize大小到240M,防止发生性能异常下降; ++ ++swjdk8u312-sw1.1.0(20230710)修改: ++1. 实现兼容8A的锁序列(commit id:afbe8497c741845149eb41ddb9a7cd6f0910eff8); ++2. lldx指令前删除无用的memb指令(commit id:57edcc5c6ad4e93255054e24ae8b5e3bf5e934c0). ++ +diff -uNr openjdk/common/autoconf/build-aux/autoconf-config.guess afu8u/common/autoconf/build-aux/autoconf-config.guess +--- openjdk/common/autoconf/build-aux/autoconf-config.guess 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/common/autoconf/build-aux/autoconf-config.guess 2025-05-06 10:53:44.691633659 +0800 +@@ -907,6 +907,9 @@ + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit ;; ++ sw_64:Linux:*:*) ++ echo ${UNAME_MACHINE}-unknown-linux-gnu ++ exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ +diff -uNr openjdk/common/autoconf/build-performance.m4 afu8u/common/autoconf/build-performance.m4 +--- openjdk/common/autoconf/build-performance.m4 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/common/autoconf/build-performance.m4 2025-05-06 10:53:44.691633659 +0800 +@@ -32,6 +32,9 @@ + if test -f /proc/cpuinfo; then + # Looks like a Linux (or cygwin) system + NUM_CORES=`cat /proc/cpuinfo | grep -c processor` ++ if test "$NUM_CORES" -eq "0"; then # for n version os ?? ++ NUM_CORES=`cat /proc/cpuinfo | grep "cpus active" | awk '{ print [$]4 }'` ++ fi + FOUND_CORES=yes + elif test -x /usr/sbin/psrinfo; then + # Looks like a Solaris system +@@ -71,6 +74,9 @@ + if test -f /proc/meminfo; then + # Looks like a Linux (or cygwin) system + MEMORY_SIZE=`cat /proc/meminfo | grep MemTotal | awk '{print [$]2}'` ++ if test "x$OPENJDK_TARGET_CPU_ARCH" = xsw64; then ++ MEMORY_SIZE=`cat /proc/meminfo | grep MemTotal | grep -v PhyMemTotal | awk '{print [$]2}'` ++ fi + MEMORY_SIZE=`expr $MEMORY_SIZE / 1024` + FOUND_MEM=yes + elif test -x /usr/sbin/prtconf; then +diff -uNr openjdk/common/autoconf/generated-configure.sh afu8u/common/autoconf/generated-configure.sh +--- openjdk/common/autoconf/generated-configure.sh 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/common/autoconf/generated-configure.sh 2025-05-06 11:13:08.087672949 +0800 +@@ -13684,6 +13684,12 @@ + VAR_CPU_BITS=64 + VAR_CPU_ENDIAN=little + ;; ++ sw_64) ++ VAR_CPU=sw64 ++ VAR_CPU_ARCH=sw64 ++ VAR_CPU_BITS=64 ++ VAR_CPU_ENDIAN=little ++ ;; + powerpc) + VAR_CPU=ppc + VAR_CPU_ARCH=ppc +@@ -13822,6 +13828,12 @@ + VAR_CPU_BITS=64 + VAR_CPU_ENDIAN=little + ;; ++ sw_64) ++ VAR_CPU=sw64 ++ VAR_CPU_ARCH=sw64 ++ VAR_CPU_BITS=64 ++ VAR_CPU_ENDIAN=little ++ ;; 
+ powerpc) + VAR_CPU=ppc + VAR_CPU_ARCH=ppc +@@ -42605,6 +42617,10 @@ + ppc ) + # on ppc we don't prevent gcc to omit frame pointer nor strict-aliasing + ;; ++ sw64 ) ++ COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -O2 -mieee -fno-omit-frame-pointer -fno-delete-null-pointer-checks -fno-lifetime-dse" ++ CFLAGS_JDK="${CFLAGS_JDK} -O2 -mieee -fno-strict-aliasing -fno-delete-null-pointer-checks -fno-lifetime-dse" ++ ;; + * ) + CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer" + CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing" +@@ -48928,7 +48944,11 @@ + if test "x$OPENJDK_TARGET_OS" = xwindows; then + FREETYPE_LIBS="$FREETYPE_LIB_PATH/freetype.lib" + else ++ if test "x$OPENJDK_TARGET_CPU_ARCH" = xsw64; then ++ FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype -lz -lpng" ++ else + FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype" ++ fi + fi + fi + +@@ -55135,6 +55155,9 @@ + if test -f /proc/meminfo; then + # Looks like a Linux (or cygwin) system + MEMORY_SIZE=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'` ++ if test "x$OPENJDK_TARGET_CPU_ARCH" = xsw64; then ++ MEMORY_SIZE=`cat /proc/meminfo | grep MemTotal | grep -v PhyMemTotal | awk '{print $2}'` ++ fi + MEMORY_SIZE=`expr $MEMORY_SIZE / 1024` + FOUND_MEM=yes + elif test -x /usr/sbin/prtconf; then +diff -uNr openjdk/common/autoconf/libraries.m4 afu8u/common/autoconf/libraries.m4 +--- openjdk/common/autoconf/libraries.m4 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/common/autoconf/libraries.m4 2025-05-06 10:53:44.707633659 +0800 +@@ -583,7 +583,11 @@ + if test "x$OPENJDK_TARGET_OS" = xwindows; then + FREETYPE_LIBS="$FREETYPE_LIB_PATH/freetype.lib" + else +- FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype" ++ if test "x$OPENJDK_TARGET_CPU_ARCH" = xsw64; then ++ FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype -lz -lpng" ++ else ++ FREETYPE_LIBS="-L$FREETYPE_LIB_PATH -lfreetype" ++ fi + fi + fi + +diff -uNr openjdk/common/autoconf/platform.m4 afu8u/common/autoconf/platform.m4 +--- openjdk/common/autoconf/platform.m4 
2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/common/autoconf/platform.m4 2025-05-06 10:53:44.707633659 +0800 +@@ -54,6 +54,12 @@ + VAR_CPU_BITS=64 + VAR_CPU_ENDIAN=little + ;; ++ sw_64) ++ VAR_CPU=sw64 ++ VAR_CPU_ARCH=sw64 ++ VAR_CPU_BITS=64 ++ VAR_CPU_ENDIAN=little ++ ;; + powerpc) + VAR_CPU=ppc + VAR_CPU_ARCH=ppc +diff -uNr openjdk/common/autoconf/spec.gmk.in afu8u/common/autoconf/spec.gmk.in +--- openjdk/common/autoconf/spec.gmk.in 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/common/autoconf/spec.gmk.in 2025-05-06 11:13:08.087672949 +0800 +@@ -213,7 +213,11 @@ + endif + + ifneq ($(USER_RELEASE_SUFFIX), ) +- FULL_VERSION=$(RELEASE)-$(USER_RELEASE_SUFFIX)-$(JDK_BUILD_NUMBER) ++ ifeq ($(OPENJDK_BUILD_CPU_ARCH), sw64) ++ FULL_VERSION=$(RELEASE)-$(JDK_BUILD_NUMBER)-$(USER_RELEASE_SUFFIX) ++ else ++ FULL_VERSION=$(RELEASE)-$(USER_RELEASE_SUFFIX)-$(JDK_BUILD_NUMBER) ++ endif + else + FULL_VERSION=$(RELEASE)-$(JDK_BUILD_NUMBER) + endif +diff -uNr openjdk/corba/THIRD_PARTY_README afu8u/corba/THIRD_PARTY_README +--- openjdk/corba/THIRD_PARTY_README 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/corba/THIRD_PARTY_README 2025-05-06 10:53:44.715633659 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. 
++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. 
The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. + + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/cross_compile afu8u/cross_compile +--- openjdk/cross_compile 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/cross_compile 2025-05-06 10:53:44.791633662 +0800 +@@ -0,0 +1,21 @@ ++#!/bin/bash ++level=${1?usage: $0 release/slowdebug images-jdk} ++dest=${2?usage: $0 release/slowdebug images-jdk} ++ ++#level=release ++#dest=images-jdk-release ++#level=slowdebug ++#dest=images-jdk ++ ++variant=custom ++remote_ip=172.16.130.191 ++#remote_ip=172.16.12.167 ++ ++#make LOG="debug" CONF=linux-sw64-normal-$variant-$level jdk &&\ ++make LOG="debug" CONF=linux-sw64-normal-$variant-$level-cross hotspot && \ ++echo -e "\n\n>>>>>build success<<<<<\n\n" &&\ ++cp build/linux-sw64-normal-$variant-$level-cross/hotspot/dist/jre/lib/sw64/server/libjvm.so $dest/jre/lib/sw64/server/ && \ ++cp build/linux-sw64-normal-$variant-$level-cross/hotspot/dist/jre/lib/sw64/server/libjvm.debuginfo $dest/jre/lib/sw64/server/ && \ ++echo -e "\n\n>>>>>copy success<<<<<\n\n" && \ ++ping -c 1 -W 1 $remote_ip && \ ++ ssh lsp@$remote_ip "$(pwd)/$dest/bin/java -XX:+PrintCompilation -Xcomp -version" +diff -uNr openjdk/cross_configure afu8u/cross_configure +--- openjdk/cross_configure 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/cross_configure 2025-05-06 10:53:44.791633662 +0800 +@@ -0,0 +1,24 @@ 
++#!/bin/bash ++level=${1?usage: $0 release/slowdebug} ++#crosscompiler=swgcc710-ali-cross ++crosscompiler=swgcc710-6a-cross ++# for c version (i.e. 9916) ++#patch=SP ++buildtag=sw1.3.1 ++updatever=312 ++level=${1?usage: $0 release/slowdebug} ++ bash configure \ ++ --openjdk-target=sw_64-unknown-linux-gnu \ ++ --with-devkit=/usr/sw/$crosscompiler/usr/ \ ++ --x-includes=/usr/sw/$crosscompiler/usr/include \ ++ --x-libraries=/usr/sw/$crosscompiler/usr/lib \ ++ --with-freetype-include=/usr/sw/$crosscompiler/usr/include/freetype2 \ ++ --with-freetype-lib=/usr/sw/$crosscompiler/usr/lib/sw_64-linux-gnu \ ++ --with-user-release-suffix=$buildtag \ ++ --with-update-version=$updatever \ ++ --disable-zip-debug-info \ ++ --with-debug-level=$level \ ++ --disable-ccache \ ++ --enable-hotspot-test-in-build \ ++ --with-conf-name=linux-sw64-normal-custom-$level-cross \ ++ --with-milestone=fcs +diff -uNr openjdk/cross_mk afu8u/cross_mk +--- openjdk/cross_mk 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/cross_mk 2025-05-06 10:53:44.791633662 +0800 +@@ -0,0 +1,14 @@ ++ip/n = 172.16.12.167 ++ip/c = 172.16.130.191 ++level = $(lvl) ++user = yj ++dest=j2sdk-image-sw-$@ ++ ++n c:FORCE ++ make CONF=sw64-$(level)-$@ hotspot && \ ++ cp build/sw64-$(level)-$@/hotspot/dist/jre/lib/sw64/server/libjvm.so $(dest)/jre/lib/sw64/server/ && \ ++ cp build/sw64-$(level)-$@/hotspot/dist/jre/lib/sw64/server/libjvm.debuginfo $(dest)/jre/lib/sw64/server/ && \ ++ ping -c 1 -W 1 ${ip/$@} && \ ++ ssh $(user)@${ip/$@} "$(shell pwd)/$(dest)/bin/java -Xcomp -XX:+PrintCompilation -version" ++ ++FORCE: +diff -uNr openjdk/cross_swcompile_clean afu8u/cross_swcompile_clean +--- openjdk/cross_swcompile_clean 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/cross_swcompile_clean 2025-05-06 10:53:44.791633662 +0800 +@@ -0,0 +1,27 @@ ++#!/bin/bash ++#levels=(release slowdebug fastdebug) ++#level=${1:-slowdebug} ++levels=(slowdebug release) ++#level=release ++cc=(swgcc710-ali-cross swgcc710-6a-cross) ++nc=("n" "c") ++ ++#for 
i in {0..0}; do ++for level in ${levels[@]}; do ++for i in {0..1}; do ++ nOrC=${nc[i]} ++ crosscompiler=${cc[i]} ++ rm -rf build/sw64-$level-$nOrC ++ bash configure \ ++ --openjdk-target=sw_64-unknown-linux-gnu \ ++ --with-devkit=/usr/sw/$crosscompiler/usr/ \ ++ --x-includes=/usr/sw/$crosscompiler/usr/include \ ++ --x-libraries=/usr/sw/$crosscompiler/usr/lib \ ++ --with-freetype-include=/usr/sw/$crosscompiler/usr/include/freetype2 \ ++ --with-freetype-lib=/usr/sw/$crosscompiler/usr/lib/sw_64-linux-gnu \ ++ --disable-zip-debug-info \ ++ --with-debug-level=$level \ ++ --with-conf-name=sw64-$level-$nOrC ++done ++done ++ +diff -uNr openjdk/.git/config afu8u/.git/config +--- openjdk/.git/config 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/config 2025-05-06 10:53:44.631633657 +0800 +@@ -0,0 +1,11 @@ ++[core] ++ repositoryformatversion = 0 ++ filemode = true ++ bare = false ++ logallrefupdates = true ++[remote "origin"] ++ url = git@172.16.130.122:openjdk/afu8u.git ++ fetch = +refs/heads/*:refs/remotes/origin/* ++[branch "sw-master"] ++ remote = origin ++ merge = refs/heads/sw-master +diff -uNr openjdk/.git/description afu8u/.git/description +--- openjdk/.git/description 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/description 2025-05-06 10:53:27.947633093 +0800 +@@ -0,0 +1 @@ ++Unnamed repository; edit this file 'description' to name the repository. +diff -uNr openjdk/.git/HEAD afu8u/.git/HEAD +--- openjdk/.git/HEAD 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/HEAD 2025-05-06 11:13:08.575672966 +0800 +@@ -0,0 +1 @@ ++ref: refs/heads/tag-swjdk8u372-ga +diff -uNr openjdk/.git/hooks/applypatch-msg.sample afu8u/.git/hooks/applypatch-msg.sample +--- openjdk/.git/hooks/applypatch-msg.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/applypatch-msg.sample 2025-05-06 10:53:27.939633093 +0800 +@@ -0,0 +1,15 @@ ++#!/bin/sh ++# ++# An example hook script to check the commit log message taken by ++# applypatch from an e-mail message. 
++# ++# The hook should exit with non-zero status after issuing an ++# appropriate message if it wants to stop the commit. The hook is ++# allowed to edit the commit message file. ++# ++# To enable this hook, rename this file to "applypatch-msg". ++ ++. git-sh-setup ++commitmsg="$(git rev-parse --git-path hooks/commit-msg)" ++test -x "$commitmsg" && exec "$commitmsg" ${1+"$@"} ++: +diff -uNr openjdk/.git/hooks/commit-msg.sample afu8u/.git/hooks/commit-msg.sample +--- openjdk/.git/hooks/commit-msg.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/commit-msg.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,24 @@ ++#!/bin/sh ++# ++# An example hook script to check the commit log message. ++# Called by "git commit" with one argument, the name of the file ++# that has the commit message. The hook should exit with non-zero ++# status after issuing an appropriate message if it wants to stop the ++# commit. The hook is allowed to edit the commit message file. ++# ++# To enable this hook, rename this file to "commit-msg". ++ ++# Uncomment the below to add a Signed-off-by line to the message. ++# Doing this in a hook is a bad idea in general, but the prepare-commit-msg ++# hook is more suited to it. ++# ++# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') ++# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" ++ ++# This example catches duplicate Signed-off-by lines. ++ ++test "" = "$(grep '^Signed-off-by: ' "$1" | ++ sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { ++ echo >&2 Duplicate Signed-off-by lines. 
++ exit 1 ++} +diff -uNr openjdk/.git/hooks/fsmonitor-watchman.sample afu8u/.git/hooks/fsmonitor-watchman.sample +--- openjdk/.git/hooks/fsmonitor-watchman.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/fsmonitor-watchman.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,114 @@ ++#!/usr/bin/perl ++ ++use strict; ++use warnings; ++use IPC::Open2; ++ ++# An example hook script to integrate Watchman ++# (https://facebook.github.io/watchman/) with git to speed up detecting ++# new and modified files. ++# ++# The hook is passed a version (currently 1) and a time in nanoseconds ++# formatted as a string and outputs to stdout all files that have been ++# modified since the given time. Paths must be relative to the root of ++# the working tree and separated by a single NUL. ++# ++# To enable this hook, rename this file to "query-watchman" and set ++# 'git config core.fsmonitor .git/hooks/query-watchman' ++# ++my ($version, $time) = @ARGV; ++ ++# Check the hook interface version ++ ++if ($version == 1) { ++ # convert nanoseconds to seconds ++ $time = int $time / 1000000000; ++} else { ++ die "Unsupported query-fsmonitor hook version '$version'.\n" . ++ "Falling back to scanning...\n"; ++} ++ ++my $git_work_tree; ++if ($^O =~ 'msys' || $^O =~ 'cygwin') { ++ $git_work_tree = Win32::GetCwd(); ++ $git_work_tree =~ tr/\\/\//; ++} else { ++ require Cwd; ++ $git_work_tree = Cwd::cwd(); ++} ++ ++my $retry = 1; ++ ++launch_watchman(); ++ ++sub launch_watchman { ++ ++ my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty') ++ or die "open2() failed: $!\n" . ++ "Falling back to scanning...\n"; ++ ++ # In the query expression below we're asking for names of files that ++ # changed since $time but were not transient (ie created after ++ # $time but no longer exist). ++ # ++ # To accomplish this, we're using the "since" generator to use the ++ # recency index to select candidate nodes and "fields" to limit the ++ # output to file names only. 
Then we're using the "expression" term to ++ # further constrain the results. ++ # ++ # The category of transient files that we want to ignore will have a ++ # creation clock (cclock) newer than $time_t value and will also not ++ # currently exist. ++ ++ my $query = <<" END"; ++ ["query", "$git_work_tree", { ++ "since": $time, ++ "fields": ["name"], ++ "expression": ["not", ["allof", ["since", $time, "cclock"], ["not", "exists"]]] ++ }] ++ END ++ ++ print CHLD_IN $query; ++ close CHLD_IN; ++ my $response = do {local $/; }; ++ ++ die "Watchman: command returned no output.\n" . ++ "Falling back to scanning...\n" if $response eq ""; ++ die "Watchman: command returned invalid output: $response\n" . ++ "Falling back to scanning...\n" unless $response =~ /^\{/; ++ ++ my $json_pkg; ++ eval { ++ require JSON::XS; ++ $json_pkg = "JSON::XS"; ++ 1; ++ } or do { ++ require JSON::PP; ++ $json_pkg = "JSON::PP"; ++ }; ++ ++ my $o = $json_pkg->new->utf8->decode($response); ++ ++ if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) { ++ print STDERR "Adding '$git_work_tree' to watchman's watch list.\n"; ++ $retry--; ++ qx/watchman watch "$git_work_tree"/; ++ die "Failed to make watchman watch '$git_work_tree'.\n" . ++ "Falling back to scanning...\n" if $? != 0; ++ ++ # Watchman will always return all files on the first query so ++ # return the fast "everything is dirty" flag to git and do the ++ # Watchman query just to get it over with now so we won't pay ++ # the cost in git to look up each individual file. ++ print "/\0"; ++ eval { launch_watchman() }; ++ exit 0; ++ } ++ ++ die "Watchman: $o->{error}.\n" . 
++ "Falling back to scanning...\n" if $o->{error}; ++ ++ binmode STDOUT, ":utf8"; ++ local $, = "\0"; ++ print @{$o->{files}}; ++} +diff -uNr openjdk/.git/hooks/post-update.sample afu8u/.git/hooks/post-update.sample +--- openjdk/.git/hooks/post-update.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/post-update.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,8 @@ ++#!/bin/sh ++# ++# An example hook script to prepare a packed repository for use over ++# dumb transports. ++# ++# To enable this hook, rename this file to "post-update". ++ ++exec git update-server-info +diff -uNr openjdk/.git/hooks/pre-applypatch.sample afu8u/.git/hooks/pre-applypatch.sample +--- openjdk/.git/hooks/pre-applypatch.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/pre-applypatch.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,14 @@ ++#!/bin/sh ++# ++# An example hook script to verify what is about to be committed ++# by applypatch from an e-mail message. ++# ++# The hook should exit with non-zero status after issuing an ++# appropriate message if it wants to stop the commit. ++# ++# To enable this hook, rename this file to "pre-applypatch". ++ ++. git-sh-setup ++precommit="$(git rev-parse --git-path hooks/pre-commit)" ++test -x "$precommit" && exec "$precommit" ${1+"$@"} ++: +diff -uNr openjdk/.git/hooks/pre-commit.sample afu8u/.git/hooks/pre-commit.sample +--- openjdk/.git/hooks/pre-commit.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/pre-commit.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,49 @@ ++#!/bin/sh ++# ++# An example hook script to verify what is about to be committed. ++# Called by "git commit" with no arguments. The hook should ++# exit with non-zero status after issuing an appropriate message if ++# it wants to stop the commit. ++# ++# To enable this hook, rename this file to "pre-commit". 
++ ++if git rev-parse --verify HEAD >/dev/null 2>&1 ++then ++ against=HEAD ++else ++ # Initial commit: diff against an empty tree object ++ against=4b825dc642cb6eb9a060e54bf8d69288fbee4904 ++fi ++ ++# If you want to allow non-ASCII filenames set this variable to true. ++allownonascii=$(git config --bool hooks.allownonascii) ++ ++# Redirect output to stderr. ++exec 1>&2 ++ ++# Cross platform projects tend to avoid non-ASCII filenames; prevent ++# them from being added to the repository. We exploit the fact that the ++# printable range starts at the space character and ends with tilde. ++if [ "$allownonascii" != "true" ] && ++ # Note that the use of brackets around a tr range is ok here, (it's ++ # even required, for portability to Solaris 10's /usr/bin/tr), since ++ # the square bracket bytes happen to fall in the designated range. ++ test $(git diff --cached --name-only --diff-filter=A -z $against | ++ LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 ++then ++ cat <<\EOF ++Error: Attempt to add a non-ASCII file name. ++ ++This can cause problems if you want to work with people on other platforms. ++ ++To be portable it is advisable to rename the file. ++ ++If you know what you are doing you can disable this check using: ++ ++ git config hooks.allownonascii true ++EOF ++ exit 1 ++fi ++ ++# If there are whitespace errors, print the offending file names and fail. ++exec git diff-index --check --cached $against -- +diff -uNr openjdk/.git/hooks/prepare-commit-msg.sample afu8u/.git/hooks/prepare-commit-msg.sample +--- openjdk/.git/hooks/prepare-commit-msg.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/prepare-commit-msg.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,42 @@ ++#!/bin/sh ++# ++# An example hook script to prepare the commit log message. ++# Called by "git commit" with the name of the file that has the ++# commit message, followed by the description of the commit ++# message's source. The hook's purpose is to edit the commit ++# message file. 
If the hook fails with a non-zero status, ++# the commit is aborted. ++# ++# To enable this hook, rename this file to "prepare-commit-msg". ++ ++# This hook includes three examples. The first one removes the ++# "# Please enter the commit message..." help message. ++# ++# The second includes the output of "git diff --name-status -r" ++# into the message, just before the "git status" output. It is ++# commented because it doesn't cope with --amend or with squashed ++# commits. ++# ++# The third example adds a Signed-off-by line to the message, that can ++# still be edited. This is rarely a good idea. ++ ++COMMIT_MSG_FILE=$1 ++COMMIT_SOURCE=$2 ++SHA1=$3 ++ ++/usr/bin/perl -i.bak -ne 'print unless(m/^. Please enter the commit message/..m/^#$/)' "$COMMIT_MSG_FILE" ++ ++# case "$COMMIT_SOURCE,$SHA1" in ++# ,|template,) ++# /usr/bin/perl -i.bak -pe ' ++# print "\n" . `git diff --cached --name-status -r` ++# if /^#/ && $first++ == 0' "$COMMIT_MSG_FILE" ;; ++# *) ;; ++# esac ++ ++# SOB=$(git var GIT_COMMITTER_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') ++# git interpret-trailers --in-place --trailer "$SOB" "$COMMIT_MSG_FILE" ++# if test -z "$COMMIT_SOURCE" ++# then ++# /usr/bin/perl -i.bak -pe 'print "\n" if !$first_line++' "$COMMIT_MSG_FILE" ++# fi +diff -uNr openjdk/.git/hooks/pre-push.sample afu8u/.git/hooks/pre-push.sample +--- openjdk/.git/hooks/pre-push.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/pre-push.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,53 @@ ++#!/bin/sh ++ ++# An example hook script to verify what is about to be pushed. Called by "git ++# push" after it has checked the remote status, but before anything has been ++# pushed. If this script exits with a non-zero status nothing will be pushed. 
++# ++# This hook is called with the following parameters: ++# ++# $1 -- Name of the remote to which the push is being done ++# $2 -- URL to which the push is being done ++# ++# If pushing without using a named remote those arguments will be equal. ++# ++# Information about the commits which are being pushed is supplied as lines to ++# the standard input in the form: ++# ++# ++# ++# This sample shows how to prevent push of commits where the log message starts ++# with "WIP" (work in progress). ++ ++remote="$1" ++url="$2" ++ ++z40=0000000000000000000000000000000000000000 ++ ++while read local_ref local_sha remote_ref remote_sha ++do ++ if [ "$local_sha" = $z40 ] ++ then ++ # Handle delete ++ : ++ else ++ if [ "$remote_sha" = $z40 ] ++ then ++ # New branch, examine all commits ++ range="$local_sha" ++ else ++ # Update to existing branch, examine new commits ++ range="$remote_sha..$local_sha" ++ fi ++ ++ # Check for WIP commit ++ commit=`git rev-list -n 1 --grep '^WIP' "$range"` ++ if [ -n "$commit" ] ++ then ++ echo >&2 "Found WIP commit in $local_ref, not pushing" ++ exit 1 ++ fi ++ fi ++done ++ ++exit 0 +diff -uNr openjdk/.git/hooks/pre-rebase.sample afu8u/.git/hooks/pre-rebase.sample +--- openjdk/.git/hooks/pre-rebase.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/pre-rebase.sample 2025-05-06 10:53:27.939633093 +0800 +@@ -0,0 +1,169 @@ ++#!/bin/sh ++# ++# Copyright (c) 2006, 2008 Junio C Hamano ++# ++# The "pre-rebase" hook is run just before "git rebase" starts doing ++# its job, and can prevent the command from running by exiting with ++# non-zero status. ++# ++# The hook is called with the following parameters: ++# ++# $1 -- the upstream the series was forked from. ++# $2 -- the branch being rebased (or empty when rebasing the current branch). ++# ++# This sample shows how to prevent topic branches that are already ++# merged to 'next' branch from getting rebased, because allowing it ++# would result in rebasing already published history. 
++ ++publish=next ++basebranch="$1" ++if test "$#" = 2 ++then ++ topic="refs/heads/$2" ++else ++ topic=`git symbolic-ref HEAD` || ++ exit 0 ;# we do not interrupt rebasing detached HEAD ++fi ++ ++case "$topic" in ++refs/heads/??/*) ++ ;; ++*) ++ exit 0 ;# we do not interrupt others. ++ ;; ++esac ++ ++# Now we are dealing with a topic branch being rebased ++# on top of master. Is it OK to rebase it? ++ ++# Does the topic really exist? ++git show-ref -q "$topic" || { ++ echo >&2 "No such branch $topic" ++ exit 1 ++} ++ ++# Is topic fully merged to master? ++not_in_master=`git rev-list --pretty=oneline ^master "$topic"` ++if test -z "$not_in_master" ++then ++ echo >&2 "$topic is fully merged to master; better remove it." ++ exit 1 ;# we could allow it, but there is no point. ++fi ++ ++# Is topic ever merged to next? If so you should not be rebasing it. ++only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` ++only_next_2=`git rev-list ^master ${publish} | sort` ++if test "$only_next_1" = "$only_next_2" ++then ++ not_in_topic=`git rev-list "^$topic" master` ++ if test -z "$not_in_topic" ++ then ++ echo >&2 "$topic is already up to date with master" ++ exit 1 ;# we could allow it, but there is no point. ++ else ++ exit 0 ++ fi ++else ++ not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` ++ /usr/bin/perl -e ' ++ my $topic = $ARGV[0]; ++ my $msg = "* $topic has commits already merged to public branch:\n"; ++ my (%not_in_next) = map { ++ /^([0-9a-f]+) /; ++ ($1 => 1); ++ } split(/\n/, $ARGV[1]); ++ for my $elem (map { ++ /^([0-9a-f]+) (.*)$/; ++ [$1 => $2]; ++ } split(/\n/, $ARGV[2])) { ++ if (!exists $not_in_next{$elem->[0]}) { ++ if ($msg) { ++ print STDERR $msg; ++ undef $msg; ++ } ++ print STDERR " $elem->[1]\n"; ++ } ++ } ++ ' "$topic" "$not_in_next" "$not_in_master" ++ exit 1 ++fi ++ ++<<\DOC_END ++ ++This sample hook safeguards topic branches that have been ++published from being rewound. 
++ ++The workflow assumed here is: ++ ++ * Once a topic branch forks from "master", "master" is never ++ merged into it again (either directly or indirectly). ++ ++ * Once a topic branch is fully cooked and merged into "master", ++ it is deleted. If you need to build on top of it to correct ++ earlier mistakes, a new topic branch is created by forking at ++ the tip of the "master". This is not strictly necessary, but ++ it makes it easier to keep your history simple. ++ ++ * Whenever you need to test or publish your changes to topic ++ branches, merge them into "next" branch. ++ ++The script, being an example, hardcodes the publish branch name ++to be "next", but it is trivial to make it configurable via ++$GIT_DIR/config mechanism. ++ ++With this workflow, you would want to know: ++ ++(1) ... if a topic branch has ever been merged to "next". Young ++ topic branches can have stupid mistakes you would rather ++ clean up before publishing, and things that have not been ++ merged into other branches can be easily rebased without ++ affecting other people. But once it is published, you would ++ not want to rewind it. ++ ++(2) ... if a topic branch has been fully merged to "master". ++ Then you can delete it. More importantly, you should not ++ build on top of it -- other people may already want to ++ change things related to the topic as patches against your ++ "master", so if you need further changes, it is better to ++ fork the topic (perhaps with the same name) afresh from the ++ tip of "master". ++ ++Let's look at this example: ++ ++ o---o---o---o---o---o---o---o---o---o "next" ++ / / / / ++ / a---a---b A / / ++ / / / / ++ / / c---c---c---c B / ++ / / / \ / ++ / / / b---b C \ / ++ / / / / \ / ++ ---o---o---o---o---o---o---o---o---o---o---o "master" ++ ++ ++A, B and C are topic branches. ++ ++ * A has one fix since it was merged up to "next". ++ ++ * B has finished. It has been fully merged up to "master" and "next", ++ and is ready to be deleted. 
++ ++ * C has not merged to "next" at all. ++ ++We would want to allow C to be rebased, refuse A, and encourage ++B to be deleted. ++ ++To compute (1): ++ ++ git rev-list ^master ^topic next ++ git rev-list ^master next ++ ++ if these match, topic has not merged in next at all. ++ ++To compute (2): ++ ++ git rev-list master..topic ++ ++ if this is empty, it is fully merged to "master". ++ ++DOC_END +diff -uNr openjdk/.git/hooks/pre-receive.sample afu8u/.git/hooks/pre-receive.sample +--- openjdk/.git/hooks/pre-receive.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/pre-receive.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,24 @@ ++#!/bin/sh ++# ++# An example hook script to make use of push options. ++# The example simply echoes all push options that start with 'echoback=' ++# and rejects all pushes when the "reject" push option is used. ++# ++# To enable this hook, rename this file to "pre-receive". ++ ++if test -n "$GIT_PUSH_OPTION_COUNT" ++then ++ i=0 ++ while test "$i" -lt "$GIT_PUSH_OPTION_COUNT" ++ do ++ eval "value=\$GIT_PUSH_OPTION_$i" ++ case "$value" in ++ echoback=*) ++ echo "echo from the pre-receive-hook: ${value#*=}" >&2 ++ ;; ++ reject) ++ exit 1 ++ esac ++ i=$((i + 1)) ++ done ++fi +diff -uNr openjdk/.git/hooks/update.sample afu8u/.git/hooks/update.sample +--- openjdk/.git/hooks/update.sample 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/hooks/update.sample 2025-05-06 10:53:27.943633093 +0800 +@@ -0,0 +1,128 @@ ++#!/bin/sh ++# ++# An example hook script to block unannotated tags from entering. ++# Called by "git receive-pack" with arguments: refname sha1-old sha1-new ++# ++# To enable this hook, rename this file to "update". ++# ++# Config ++# ------ ++# hooks.allowunannotated ++# This boolean sets whether unannotated tags will be allowed into the ++# repository. By default they won't be. ++# hooks.allowdeletetag ++# This boolean sets whether deleting tags will be allowed in the ++# repository. By default they won't be. 
++# hooks.allowmodifytag ++# This boolean sets whether a tag may be modified after creation. By default ++# it won't be. ++# hooks.allowdeletebranch ++# This boolean sets whether deleting branches will be allowed in the ++# repository. By default they won't be. ++# hooks.denycreatebranch ++# This boolean sets whether remotely creating branches will be denied ++# in the repository. By default this is allowed. ++# ++ ++# --- Command line ++refname="$1" ++oldrev="$2" ++newrev="$3" ++ ++# --- Safety check ++if [ -z "$GIT_DIR" ]; then ++ echo "Don't run this script from the command line." >&2 ++ echo " (if you want, you could supply GIT_DIR then run" >&2 ++ echo " $0 )" >&2 ++ exit 1 ++fi ++ ++if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then ++ echo "usage: $0 " >&2 ++ exit 1 ++fi ++ ++# --- Config ++allowunannotated=$(git config --bool hooks.allowunannotated) ++allowdeletebranch=$(git config --bool hooks.allowdeletebranch) ++denycreatebranch=$(git config --bool hooks.denycreatebranch) ++allowdeletetag=$(git config --bool hooks.allowdeletetag) ++allowmodifytag=$(git config --bool hooks.allowmodifytag) ++ ++# check for no description ++projectdesc=$(sed -e '1q' "$GIT_DIR/description") ++case "$projectdesc" in ++"Unnamed repository"* | "") ++ echo "*** Project description file hasn't been set" >&2 ++ exit 1 ++ ;; ++esac ++ ++# --- Check types ++# if $newrev is 0000...0000, it's a commit to delete a ref. ++zero="0000000000000000000000000000000000000000" ++if [ "$newrev" = "$zero" ]; then ++ newrev_type=delete ++else ++ newrev_type=$(git cat-file -t $newrev) ++fi ++ ++case "$refname","$newrev_type" in ++ refs/tags/*,commit) ++ # un-annotated tag ++ short_refname=${refname##refs/tags/} ++ if [ "$allowunannotated" != "true" ]; then ++ echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2 ++ echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." 
>&2 ++ exit 1 ++ fi ++ ;; ++ refs/tags/*,delete) ++ # delete tag ++ if [ "$allowdeletetag" != "true" ]; then ++ echo "*** Deleting a tag is not allowed in this repository" >&2 ++ exit 1 ++ fi ++ ;; ++ refs/tags/*,tag) ++ # annotated tag ++ if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1 ++ then ++ echo "*** Tag '$refname' already exists." >&2 ++ echo "*** Modifying a tag is not allowed in this repository." >&2 ++ exit 1 ++ fi ++ ;; ++ refs/heads/*,commit) ++ # branch ++ if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then ++ echo "*** Creating a branch is not allowed in this repository" >&2 ++ exit 1 ++ fi ++ ;; ++ refs/heads/*,delete) ++ # delete branch ++ if [ "$allowdeletebranch" != "true" ]; then ++ echo "*** Deleting a branch is not allowed in this repository" >&2 ++ exit 1 ++ fi ++ ;; ++ refs/remotes/*,commit) ++ # tracking branch ++ ;; ++ refs/remotes/*,delete) ++ # delete tracking branch ++ if [ "$allowdeletebranch" != "true" ]; then ++ echo "*** Deleting a tracking branch is not allowed in this repository" >&2 ++ exit 1 ++ fi ++ ;; ++ *) ++ # Anything else (is there anything else?) ++ echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 ++ exit 1 ++ ;; ++esac ++ ++# --- Finished ++exit 0 +二进制文件 openjdk/.git/index 和 afu8u/.git/index 不同 +diff -uNr openjdk/.git/info/exclude afu8u/.git/info/exclude +--- openjdk/.git/info/exclude 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/info/exclude 2025-05-06 10:53:27.947633093 +0800 +@@ -0,0 +1,6 @@ ++# git ls-files --others --exclude-from=.git/info/exclude ++# Lines that start with '#' are comments. 
++# For a project mostly in C, the following would be a good set of ++# exclude patterns (uncomment them if you want to use them): ++# *.[oa] ++# *~ +diff -uNr openjdk/.git/logs/HEAD afu8u/.git/logs/HEAD +--- openjdk/.git/logs/HEAD 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/logs/HEAD 2025-05-06 11:13:08.575672966 +0800 +@@ -0,0 +1,2 @@ ++0000000000000000000000000000000000000000 68b43348991289f1160bd0d668f6d8297837cfe6 zh 1746500024 +0800 clone: from git@172.16.130.122:openjdk/afu8u.git ++68b43348991289f1160bd0d668f6d8297837cfe6 f949cc56deda7bffc1fd0635baf5cfbcf976bb2d zh 1746501188 +0800 checkout: moving from sw-master to tag-swjdk8u372-ga +diff -uNr openjdk/.git/logs/refs/heads/sw-master afu8u/.git/logs/refs/heads/sw-master +--- openjdk/.git/logs/refs/heads/sw-master 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/logs/refs/heads/sw-master 2025-05-06 10:53:44.631633657 +0800 +@@ -0,0 +1 @@ ++0000000000000000000000000000000000000000 68b43348991289f1160bd0d668f6d8297837cfe6 zh 1746500024 +0800 clone: from git@172.16.130.122:openjdk/afu8u.git +diff -uNr openjdk/.git/logs/refs/heads/tag-swjdk8u372-ga afu8u/.git/logs/refs/heads/tag-swjdk8u372-ga +--- openjdk/.git/logs/refs/heads/tag-swjdk8u372-ga 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/logs/refs/heads/tag-swjdk8u372-ga 2025-05-06 11:13:08.575672966 +0800 +@@ -0,0 +1 @@ ++0000000000000000000000000000000000000000 f949cc56deda7bffc1fd0635baf5cfbcf976bb2d zh 1746501188 +0800 branch: Created from swjdk8u372-ga +diff -uNr openjdk/.git/logs/refs/remotes/origin/HEAD afu8u/.git/logs/refs/remotes/origin/HEAD +--- openjdk/.git/logs/refs/remotes/origin/HEAD 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/logs/refs/remotes/origin/HEAD 2025-05-06 10:53:44.631633657 +0800 +@@ -0,0 +1 @@ ++0000000000000000000000000000000000000000 68b43348991289f1160bd0d668f6d8297837cfe6 zh 1746500024 +0800 clone: from git@172.16.130.122:openjdk/afu8u.git +二进制文件 
openjdk/.git/objects/pack/pack-70109d422bae03c52db125397a40b9c66c43ff6a.idx 和 afu8u/.git/objects/pack/pack-70109d422bae03c52db125397a40b9c66c43ff6a.idx 不同 +二进制文件 openjdk/.git/objects/pack/pack-70109d422bae03c52db125397a40b9c66c43ff6a.pack 和 afu8u/.git/objects/pack/pack-70109d422bae03c52db125397a40b9c66c43ff6a.pack 不同 +diff -uNr openjdk/.git/packed-refs afu8u/.git/packed-refs +--- openjdk/.git/packed-refs 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/packed-refs 2025-05-06 10:53:44.631633657 +0800 +@@ -0,0 +1,2299 @@ ++# pack-refs with: peeled fully-peeled sorted ++f24b49d4e4d8e90e87ded4abb848a06b3f4ec165 refs/remotes/origin/8A ++e56d9716fd37ccfdd140cdfa68bf86d7f807fbaa refs/remotes/origin/8A-dev ++ba1a94777619e51c96249d7a7dea7e5b44b9c4a9 refs/remotes/origin/aarch312-memb ++4c5094095fade410e23e6a071c93c7e4fdc1dade refs/remotes/origin/jdk432 ++129290d2ffe3f77ad574bfad8b1ab44f5f3b8fbe refs/remotes/origin/jdk442 ++d9b5db392ea3160dcb440c9da6119fcde91805c2 refs/remotes/origin/lspopt ++ed3d731386bf4f6819d5e5f69376e520df3a6262 refs/remotes/origin/master ++6841dc4452c95a08a0e290c9d19fc6b1fa50cce0 refs/remotes/origin/sha256ForZH ++ee91587a4ec0582a71f3458db5b2b30ed808e0bc refs/remotes/origin/sw-212 ++2ec47d8f4edb65ad37668f33b248d57c7f36a010 refs/remotes/origin/sw-51-test ++68b43348991289f1160bd0d668f6d8297837cfe6 refs/remotes/origin/sw-develop ++68b43348991289f1160bd0d668f6d8297837cfe6 refs/remotes/origin/sw-master ++1a29bcab707f7cec0f8b08c128493984eeae93e7 refs/remotes/origin/sw-master-aes ++2a0ca179f076ba8b0d2191ad5feb6badc6ccab19 refs/remotes/origin/sw-test ++e31e221aea1459eab4eb1e83326c6b72ae72b24c refs/remotes/origin/sw-upgrade ++1687bc161c931079da0c5dbfcb958eb976ea0325 refs/remotes/origin/sw312 ++ec3705babb81764d83e29c73ad4a3dafc91c36ee refs/remotes/origin/sw312-ForTest ++405f29d1a9e1df01864c7dbca801d2a7d26875ab refs/remotes/origin/sw312-aarch64-membar ++40848a00c1ed7ebe22438ab16bae20b6c21c30d5 refs/remotes/origin/sw312-update 
++29992aa5d0e969e2e7b96329d4e5570b3ae1130d refs/remotes/origin/swjdk8u312-sw1.3.1+membAsMips ++fb3687449a7a6dc394aa7e3977b70368d597cfa5 refs/remotes/origin/swopt ++2c2449d8b4eea5df19bdc17a2425ffb9ecdfee52 refs/remotes/origin/update ++fc4aedfe08d0d68ec509fa666b92c4a612a057b7 refs/remotes/origin/vector_dev ++b900079aec8947b7918262e9a9aab8594234afa0 refs/remotes/origin/zlopt ++efd610bdd1bfefb5fec3179e39f7b7abec934892 refs/tags/jdk7-b100 ++^adc046b4724af88528a82e304178dffc18959a98 ++41bb883dedc82d6e1382f6a00b7d6db89841e121 refs/tags/jdk7-b101 ++^38378b7ee032d70653b3f0d4205f2305cf4f1170 ++5ef2431d009c48e564c82758de5875e2bb6b2f76 refs/tags/jdk7-b102 ++^7c87c80e687af8a1569a5e6b615cc21afd12e84b ++8f36543686f0d4b3ce977c557e773dd1146391ff refs/tags/jdk7-b103 ++^0861a6d60d82bb436b0f392773d9d325ebe5e77d ++0a4f8c145553187bf1e6061e9ba1e031b9afab17 refs/tags/jdk7-b104 ++^df74e9546287abcac9c1d3995f67c9135be7cb7d ++f2f8b37292929dd4e7ad3f47ba76070f37de67ac refs/tags/jdk7-b105 ++^4862cbc68f15c0a0ab83f3871612aba1b0dcd46b ++8b5873409cdab86ccd1298050ea1aeec446ae8e9 refs/tags/jdk7-b106 ++^b2679e3dc4e7b0167394090b886f2df6d561a6f0 ++669abe597cc4103acddb4eddfa2bc992d9342977 refs/tags/jdk7-b107 ++^2d4322b265b2e3ddccba3d482fcc46b775343518 ++412778b6d52bce6eba3af580b676d69c23b54252 refs/tags/jdk7-b108 ++^ecec377dfb8f513c70d9f9678ea61e4c380c4543 ++ad86a3c1408ddeb26d91408b20cce2babb8b5259 refs/tags/jdk7-b109 ++^a4aa79ba87a370d6f435c643e5f58ef3c9dcc6a6 ++d845d52ae802349532334f5581ee1b5ea46017be refs/tags/jdk7-b110 ++^135c61b9feb66f754e0eb88270fcc045354db16c ++8f37fc875ee7ca2fe32cb50c6e27ae02b319ab32 refs/tags/jdk7-b111 ++^0bd0f10d2c5965369490624627f16a39d35c8734 ++2341024fd83e798d7dd5d597a41e0d65d7c12e97 refs/tags/jdk7-b112 ++^d6ba5ab1f738989994af4b8f3326d989cdd23c05 ++c3a189a0122a37130ca52a2fb9ce29b73841cc36 refs/tags/jdk7-b113 ++^fc0cd82e4607a70972ca6c0b217fe0089315e45e ++2467ae04daa34a624b247f4f47532956cba1d545 refs/tags/jdk7-b114 ++^df7769a808159a5ed011e7507d24d660ff21013a 
++66e3caaf9dcb14cf80fc8fdaa0909a2884671539 refs/tags/jdk7-b115 ++^a487e981afa2f83953c84df21a32f68182320c7d ++463eb7cc13b3e154cdd0f869184567f8586db213 refs/tags/jdk7-b116 ++^af4b8efc5b0d192358839bf16365bdbb4ad649f6 ++b151ed6fc037873e33f30cf216674bb0467d29a5 refs/tags/jdk7-b117 ++^44a4e2bc87de4279b33c8101fe58ec60745dc95a ++d76e2efa929bd6c198b0ac2d9cb7ecd2ff4bb9b4 refs/tags/jdk7-b118 ++^8b3d68fe65240da6f4f47125b31246ca946afb6a ++807f8da2bb7dc7f610decf0d160da841ee7b2668 refs/tags/jdk7-b119 ++^e589dbf67e3481b7491baf9325c44a5c8b23901e ++56bdcc494ce423f9e2a706e66a797ec6a7f52a2e refs/tags/jdk7-b120 ++^4ae2fc83cfb5acd53b6bb6d4faa6012a9fe1cfb9 ++22158d77ae4f035958d2c8eac4a13bbaeffc6d24 refs/tags/jdk7-b121 ++^925752098ba63b25c1cb635f50a53684d4094f8b ++f4249f693e6dd25ab445032f8c2e830faecb9561 refs/tags/jdk7-b122 ++^85aab321556e557d298055496d9ed2794f6b6587 ++fd0ed100af641d7eb833aa81205675ad491ff03d refs/tags/jdk7-b123 ++^2632bbc0ac2919d3bff1d5a7455a0af46cf96b26 ++194fd2a8f8b325ef0f49309c707e7786b6775f25 refs/tags/jdk7-b124 ++^d25946a6a9bdd2c36f04677a890d9f3fff9f45c7 ++ace234ed336a79357b7f0616fce264c2a63e2563 refs/tags/jdk7-b125 ++^d4e654d0da7f20b1069abce84c0ffd0c6bcd0449 ++ba82c6acdb0e6d32bd7d53cdcae1246fe4b94302 refs/tags/jdk7-b126 ++^de85b246eb2b6442b6f2997cf82bbc4e3aa4c99d ++bdeb6df49744a1ca55f211fa68a1a60236987cb3 refs/tags/jdk7-b127 ++^3558ae2d03f831ec68ce25f7595d01d1b65086b0 ++956da41d8cffea06ad856dbb730a4704ba518890 refs/tags/jdk7-b128 ++^27982685d0a6cd7546334fe25e84f11013668522 ++51e631d0992715fa9a463e0ef3c9d943bdb534b2 refs/tags/jdk7-b129 ++^05cdc0aa7715bb9292f99860fbd28a0caecf09b7 ++3c9373d3df3a34bf1e692ef305986a8c5443aeff refs/tags/jdk7-b130 ++^fcde9ec75df70be2c42c523c6986ea3540e46df2 ++330b6ca2f9ee9e908318314fbb9c09c3f171ee6f refs/tags/jdk7-b131 ++^cfa6e0878b1aff48b521ee21d2598abe7f4964c2 ++5f19ac0afbb570584ad828d3d2fad5f61c714ab5 refs/tags/jdk7-b132 ++^8170cbb75c9936bcb2f58666a8b87cbd96856d5c ++74bed3ef0e1ed35b99aca9479cd2696479d1f740 refs/tags/jdk7-b133 
++^c6d7830dfc6dfb1b1ce76d8be24244d4b4e88fbe ++a34eebc5c315bc373494a76dc3019f7070fffbd2 refs/tags/jdk7-b134 ++^31a15b61ab6bb6352520042697058b42acfca1f2 ++64f78cd046a40e9a4af7d3af7a8d107246e0ae6b refs/tags/jdk7-b135 ++^f80ed5fdede64782ba572489c3b50357f4989b44 ++4ff9fd0e2348037bb2c8fbbd8fb2b7b1fd286ce5 refs/tags/jdk7-b136 ++^d538bce17282b7e8eed585bbf462bf80a7aacb9f ++70fac480bfa14df49075d241abb3216da117a7ce refs/tags/jdk7-b137 ++^730c7dd8eda0816deb84846952106b95d0440fde ++23001cded84e092e5f6aa2e76a3ba211f54fd901 refs/tags/jdk7-b138 ++^9369498f1a357cc355619062fa6541a1ced74d15 ++c6de0d41508a0b14dc0d7c99f8046399b754b6bd refs/tags/jdk7-b139 ++^cc3355f5379ffe61c12188737441c89d6a2c3d73 ++794bdee7df4c8dff9b0468c10293517ed759341b refs/tags/jdk7-b140 ++^269b07f5859a2a03c11e5e077957433a858e921c ++1e5e8975f01b04390fd8c6f64140bc20946bc019 refs/tags/jdk7-b141 ++^7ff3916e57ab852014885c3e7bdb95e5c1ed8c1e ++47826d25365174ddc966a6dcef521c58e4fdad74 refs/tags/jdk7-b142 ++^1424f2cc9e495594ee6eeb9223be9a356a24ef90 ++c89735d5cd4388bd4d410d5f571890d4eb71dad5 refs/tags/jdk7-b143 ++^7f5de78e6f0f23313686423dd18bc2c6038e1923 ++82ba8a6e01a628ed8711a22d732a821ac1740f8f refs/tags/jdk7-b144 ++^58fb76bbf033a7a396a6449abd1a98848aff1685 ++db5a9dcc9f9f29ecdf3e0df6736fbc9115cc0a2c refs/tags/jdk7-b145 ++^d62d57c36fff42f1ebec6f2705e667bb91ab74b5 ++951c3e1a01aaf26856a20b0c381b356ca8937455 refs/tags/jdk7-b146 ++^8380be1403799bef6d28efea01581cd6893ce6f3 ++4c18e8f3e93e35d77f2f51b1ca1708bac8bcb828 refs/tags/jdk7-b147 ++^7b2e99178f7cf41ecd86b2ccfba38ea653e815e7 ++9b175de44b8d94cc4eb1ffdd9141ebcfbef8837b refs/tags/jdk7-b24 ++^77c86a964655e3586a404d3e0803bd8cd6dbbc01 ++7bab50939039be5f645e155b9ac4d056abbf8532 refs/tags/jdk7-b25 ++^493ac9ee8f9c4fb66b687bd740b8e1fb5ee8b86a ++97c1c021cced669d8c19c33ebbe9b6c579a2e9e1 refs/tags/jdk7-b26 ++^f872a3a32f8e4cbd258e6dc5bacf72a3baa542d5 ++8ded10494122afda3c84738ed427779a66c1e0a2 refs/tags/jdk7-b27 ++^1ab5adaa6ce53bc22f2c8c9e0b115366e4b23f1f 
++cd1004e47abcf6b6a09bdf5ca715e0285662f341 refs/tags/jdk7-b28 ++^3021d76b4529165ede29d8fb4454db28767a593d ++9e5d9d2db5d07a1481ecf61eeca943bfc972fc7d refs/tags/jdk7-b29 ++^d7c5094a893900321393d3205809672a61b2dda3 ++ac8631eac98888f58c5489564da769bff3928334 refs/tags/jdk7-b30 ++^744ae99f10082b2bd922e5b995b8787dcd6dfea5 ++c6ce4bf77c9ceeb0dbd0ec0ec9212cd4e5235040 refs/tags/jdk7-b31 ++^575988272e099ed835b41aa4d7f02795dcc55a4c ++884fabec050fa169349b7c7cb4aceec581dff9d3 refs/tags/jdk7-b32 ++^f42262c0392d1737c15a02b7b7a252faf4352907 ++5a415851a75c7608cbb4c58ec8b9474e91defbd8 refs/tags/jdk7-b33 ++^406f3420f4f4ced03917c21c2fe03a80cc71a87e ++d4f5d536ca4d68d53dc47a20067189c61f637954 refs/tags/jdk7-b34 ++^25183c156af4f2a98e847cab1b030a300803de7c ++96faf9559d724880c98940af7e8a671216d46df4 refs/tags/jdk7-b35 ++^2032a83d3f372d0b26dcd9023c1800e612211406 ++b87273f30446d181566f1896e58ea66b94080c9a refs/tags/jdk7-b36 ++^52c8ea5a97497bebed82d2cdfd70b86e459f4926 ++a489551ddee047297daa092d9ebdeb5802c15a39 refs/tags/jdk7-b37 ++^5585d3e79e65031a1ceacff0d228d56c0ecfc779 ++e46cffbcf32e8b09b461cfec2d02ede0487c483a refs/tags/jdk7-b38 ++^e40b297a75e99ad65b348e22a1331dc026f509b0 ++d3a70254ca03328b8e7c1ce7c2b81ec5407f576f refs/tags/jdk7-b39 ++^cfb63b2f47b4fc9e1e477e1affad98496dc9adb2 ++c0d75397102ace426666ac42f37072cb24e1e132 refs/tags/jdk7-b40 ++^6c04fd835e86522d2136ee663e19cfd5f71b9aa8 ++7bf378585a58588838ba6d143843a68907fd0fe3 refs/tags/jdk7-b41 ++^594c9ed01f4a3d911d73b65c3fbb3829435db6cb ++d4fea9f8a8fa7196c9d29857104baea4b0c4a602 refs/tags/jdk7-b42 ++^790cf17df4eb23f483585a2cbc025156c1966bb0 ++4af814f9a46508986dc1aa9cf99cb9d0432597fa refs/tags/jdk7-b43 ++^943b5566ce27943869f8bb7357dd82c532a25583 ++1a54172bae772c6f7e9c00abd84e1d1ff184baa4 refs/tags/jdk7-b44 ++^60a3f5bc64fd5855617bcc0231ff24f2d43c4bb8 ++422769e492d28335d11cebd18d72402382482cfc refs/tags/jdk7-b45 ++^0620440d1f92448c18cedb790913579beaff19d8 ++54e5a28cf11deb4786bdfe0a7a9dacc4511121e6 refs/tags/jdk7-b46 
++^e49da806ac4cbdd256339eb344cef5ce5c502a04 ++111b956456ab1346da9fd82271349be42669467c refs/tags/jdk7-b47 ++^b647e47be80c73593a37139f7e5a1640363ef05f ++8fd5e55c7b5997b52046b66bb29c92e19a4903af refs/tags/jdk7-b48 ++^c44a783b409e34c74c90a7a11776f3c56ed32b8e ++995ff7cb59be7c177d566362f0b3ca6ca7453b7b refs/tags/jdk7-b49 ++^b80a4e3e5b01e6c5c712e62b1e443ee7d3eccd19 ++c6399cce72f276780e2116f5980744531b0a6a50 refs/tags/jdk7-b50 ++^ff625db728de649bf94f806748818d9fbfabc8ab ++d9cd99caab6d675d5ee1a6e1b0f998d31764b9cf refs/tags/jdk7-b51 ++^38930ae468910fb1d763f18c1d9473beffa7f3cb ++fca93cf46e5749ff54f77dba67585ff3d8765a6a refs/tags/jdk7-b52 ++^8844da387d371a94ad40975d72113168f6d5c993 ++0af385d2f302647bb3888f42c23958fd6ad25b77 refs/tags/jdk7-b53 ++^9d14bdc3f86c288f3356e6baedd1871502521a7a ++b03b0c6a2154a4ca8ce0c99e1f7f51e07366d02c refs/tags/jdk7-b54 ++^6e1d4b9a47d7a164e183703af6dc196269487729 ++6e1a0005ed2844ecd5b2a4e24358540bfb520049 refs/tags/jdk7-b55 ++^423befc9a0123b2cf13041f8acbb2a0000ddccdc ++85137260ae007edd1ea14b9cdc6af1b893f17d7f refs/tags/jdk7-b56 ++^21694f8b3fb7bc5210e5c372df30967428468c8a ++f82f3a513bd16122db802d09dfa127dad430abe5 refs/tags/jdk7-b57 ++^30795df1f9490b597e19e6a98b078c93730d2bf9 ++53bee31d02d9aa3f381bf2d5d51dabe20d9eb1f5 refs/tags/jdk7-b58 ++^0cbcd44d1d22441acd1dfdb481a217bfa4636d85 ++31168f1b8b98396ec00b9220d5303811fce645eb refs/tags/jdk7-b59 ++^6d918271d525e0fcf10d2f582e15c2ac4c13998b ++41d9f358754b36548e6b2e5048fb50ae5b2b5d86 refs/tags/jdk7-b60 ++^c1ff9b2e3b5e31e5874ca7d7a38ea4fbd9bbdd5d ++c8035cbd3cb10f6f0b7f04de09fd3cd191d9681d refs/tags/jdk7-b61 ++^54ab66f63e1c374780ad1606d5700f762a92cb80 ++cd586208c77b4e22d85c49125568dbd515414c61 refs/tags/jdk7-b62 ++^30558fec61d54ef2e40fdebae348a70c9cdd6763 ++c4bcfc43f9c5338aaefe824de56947b792071ebc refs/tags/jdk7-b63 ++^2a60847f5b7f5988e84612366c9117ce2f55de53 ++f9aece23ac937f282c1ef1c2ee553b790b76887d refs/tags/jdk7-b64 ++^26adaaea4ed60e82eff9739594268850f910318f ++a1a63928026db0f57df81382085cc618ccacf6d7 
refs/tags/jdk7-b65 ++^88b37fb2a8aecf2b0afa75904d212da11919aa98 ++e08b7e4ba15a36b63f501640efb004fe5837f98f refs/tags/jdk7-b66 ++^432d4059851a73a7609aff119cd0802446d44fab ++5a1940077cae9dba048813d509d2ef89bcfb96b1 refs/tags/jdk7-b67 ++^b4b746fb274cb65f8cbf4a884bb20749a8f35d2b ++4f03e7978ed7bb68f9aca77e293f795090c478c9 refs/tags/jdk7-b68 ++^9a9ea731542833248a434d7735e1fe2b635a5144 ++dc51d5c5c12324e7f56149d729aa82b38c88196f refs/tags/jdk7-b69 ++^2ec9bad2a7582607a6b0f60d0797cbbdda72f2e1 ++2a38ccab4919e2353ed6386fd854c25f7d6b303b refs/tags/jdk7-b70 ++^a23f487d2dd0dab4a5bdae5fc8c09dbf645fe533 ++a1b22ccf58e6d74ce8589164af78788ccab378e4 refs/tags/jdk7-b71 ++^2e480a6f2bcd1402cd640be33452c0256d11ac60 ++0a75ac9db88ec1dd9433d696cd605adb405c745c refs/tags/jdk7-b72 ++^e21ea57f72b20424ac38930bcf56513cd42642e5 ++0ace9968df934dbacabba0ee45847ffa3fced39f refs/tags/jdk7-b73 ++^ac3412974fda876f16226a8e1fb6bd6dc22404df ++60145fbfff6148414c835ef9ce262cf0f85d3d98 refs/tags/jdk7-b74 ++^4d263d042f254af2855109e3ba45bdaf3c81baa7 ++cae9ea4202fe0a0b84c3ba3a4286d3785c3f6ca9 refs/tags/jdk7-b75 ++^53c56beacc49396093bfb8638eef74c173f05ac4 ++375f127b0c34d42bef10a0ac44bf0b25516aed92 refs/tags/jdk7-b76 ++^d49120d46668eaaab9df465a9ee17b696cb4e37f ++06075f46db0f07ec9839e75d1c4f5f27a6084520 refs/tags/jdk7-b77 ++^b25112408f3377fe593bcab4d8c25450d999c69c ++499c94d48e4d3cf923aff10dd5dcf541637905d3 refs/tags/jdk7-b78 ++^9cb97e1c64e1cb3b9c9714459d5a8aade9394145 ++7a59943ff0dd152ee979e2af6589568390e35b57 refs/tags/jdk7-b79 ++^2ff37874ef49029c8151533ffc9d71a04386ce14 ++c3121b2c14d248ecd6e581af178a3da05e66c543 refs/tags/jdk7-b80 ++^13ad3ed8b99f8c872eebb5d0bb647d5bb44ea0b5 ++d806fa09e03b29786c1b1776d71e727855a6b7a8 refs/tags/jdk7-b81 ++^0b2e54aa23985dac467d654b8f076cbd0a530ee2 ++3594cf726c8b3f8f2a304467ce7c785cd7045156 refs/tags/jdk7-b82 ++^6a6913f7549510394a18591eb5482fdbfbe627d7 ++82b342952619d2787843e519097281c271b6acfc refs/tags/jdk7-b83 ++^b0a0e766a0137ae33b343dd0dd9b701f6c2e5a98 
++07e0be28ec548faac20b1ba2064da5a58fad7730 refs/tags/jdk7-b84 ++^b7ee18f90316ae3f74c6cef3762e4d74f9762151 ++d4f8aa6d92665bdab465017d25a750b4668c0875 refs/tags/jdk7-b85 ++^1408dbfdbe550dcdea4cb9a849fab6cb371ded18 ++e48bb556d7c2ac32d23e916f46b54e60bca477b9 refs/tags/jdk7-b86 ++^d9942bb3c67113bf9479c6d9174f6e381d558c43 ++0f9ae8dd28767d43f858cf18698787a21932e452 refs/tags/jdk7-b87 ++^bbbf57c1966fc8dea6ab5d35693a9fd88cf6ea45 ++efd4f93dafebaa32b09e7a27f8eb60d55d99ce33 refs/tags/jdk7-b88 ++^3eab8982825fdeaea8eff39242fda4175306474e ++f5a00618023720347d6e8bc2b3c675ae277a108f refs/tags/jdk7-b89 ++^b14ad6680729b40b2e7a19e7803b0f97eceed558 ++8d26dd5a9cd3c91d295c339ca3cc3d1cd43fb877 refs/tags/jdk7-b90 ++^a3a51c78856c360e5efc3aea1c0eda070b4638c5 ++01f5980edd1f78ca8a60e08a280f2fbf80b983da refs/tags/jdk7-b91 ++^7588f37667fd2a8c00d9a1a6e1afc605b75e0dc2 ++d69e7f1549739773cf5213374d4f419a9254ec48 refs/tags/jdk7-b92 ++^9329c4986eae210a66c03400eee8e5b9c5e0fef9 ++1b925484dccfa1a952a3c09ffcda2f1303f259c2 refs/tags/jdk7-b93 ++^f16e36c40b2d8f85b06c848ec2037fed0b0a8521 ++9aaa0184a32764175891f192e3d4c75b5a518484 refs/tags/jdk7-b94 ++^017d851c20f8f838376e5f035fa745f9ea114996 ++dedb4072824b7b60881f57f6de53a867d4741d55 refs/tags/jdk7-b95 ++^d08e6435e4026542e4d4bffdf12748e83ef0f9fb ++9039a8bc5cab82c1f9bec78d51acbb72949c5d3d refs/tags/jdk7-b96 ++^7ac3ff14ceb10e65ffc86b2f6d8e1b96c3434eaf ++9763c02d42fff23734cf599bca96dcc0e5f07366 refs/tags/jdk7-b97 ++^e514f9072fc6ea3d2ad4e9a81251bf1fc1ed1eb6 ++5b35f0daf2f9e94249779aead0ebad3465469eec refs/tags/jdk7-b98 ++^ecd9f7bd776e8d29c8425f922653f1d2db4f91c2 ++d0b45eea6f3e6f4c351fcc5a0bfd1763d9823b3a refs/tags/jdk7-b99 ++^ba79b2bcce1ceefdeb44bb7075bc5d2647a56834 ++33852ed5c01906475eba4839854a5b5a39f65309 refs/tags/jdk8-b01 ++^d2ac7a107e4af38df029fc19e37df25a4831514e ++a02db6e6acd9a8c2b2aad3d857de939352cf830f refs/tags/jdk8-b02 ++^425272b22550faa5443b408fe7f8b79b7e896dd0 ++fc7477630f94a58df74e1e0ea1a7edc137ef49cc refs/tags/jdk8-b03 
++^ce11b33ae7c46f9ff2c5a09ae21a1db818cd1c49 ++25af5e82f9f7f61ff66237834b9a21ff5cd60fa0 refs/tags/jdk8-b04 ++^f36d48190e9cbc338248369a4f5ae635f1b60611 ++4279c95658b930659279147f9569d4e252e92d65 refs/tags/jdk8-b05 ++^4208296685976a0c2ea36b40658dce8e455763e4 ++1fa8ae54f16314fc34855baa73d6de9da41f3ccf refs/tags/jdk8-b06 ++^356b331207d9a58788f4647cf351a9be437a793a ++51c703c885c8bae4ef59745cc1c6db9297e78ab5 refs/tags/jdk8-b07 ++^bb930ae01ef2a0e28e652f19f172894ba6201a65 ++32dcc60a0764dba8fca0b647b2ff3d50d4a41d10 refs/tags/jdk8-b08 ++^48ad07fd2cacdfcde606b33a369b1bf8df592088 ++51b33e55af6050a1cfb6297c0a53aaae4dc7ae9d refs/tags/jdk8-b09 ++^1cf5ba0b12bb8d4d128e1cc0ec40732f0cdcf746 ++fda73bc333c7f08de6312f89c620ec232d049131 refs/tags/jdk8-b10 ++^e482d16c432711d4f165cd46b6db93bf05234512 ++fdc37d42f8f00f8358e33fb39993fa807704ebef refs/tags/jdk8-b100 ++^c77a22380df2af9ce846a13cd5544687b363ae49 ++f0431b532429d56d2a39d0c6972be84b899217ce refs/tags/jdk8-b101 ++^6412341d454eee8a151cf89b51cabfb7b3d87140 ++e2861622725b5276b40d5891c9833cfb0d2a4fc3 refs/tags/jdk8-b102 ++^e9a2e84e45e1120aa306a01dfb087200f6a7f903 ++623d92f31d30968626802ffb3bbceec6b9644dd8 refs/tags/jdk8-b103 ++^41525a23dc036496ea16ec52346acaf473fa8601 ++91494f4f152359a82a335ba48266fe24b88b7c7b refs/tags/jdk8-b104 ++^1962df5a7e3659c50a9b6cdbcdf3ed08b0034851 ++f58827194988e5ccddda280ae0950390f6ca4b55 refs/tags/jdk8-b105 ++^a21b1cb0780505f68e489fad8ad20f5cccb956e7 ++beb501b49ae29f61be0072818710a9d3725c8c20 refs/tags/jdk8-b106 ++^6f3035d9eb1f70384b341a02c97814f6d36b83b8 ++68e6ddc62e0a13797ddea78883e4df970fc77bc9 refs/tags/jdk8-b107 ++^98bbb2a9fc6fbe34e96891e7f9a8663ebfdebc23 ++450932aa6d03a6ab488aee81961aa5e185138b57 refs/tags/jdk8-b108 ++^955960c3dd1aed6cfd071799cc0c472233611d27 ++0a018293a6c9e21745bc444af4b72e84eb2c8c65 refs/tags/jdk8-b109 ++^674c05c3546814df711d0f9384c63d56458aa9ba ++561c543a67d2d1b0fc4a9026d02f9893e8cba9e8 refs/tags/jdk8-b11 ++^7c18f827d7f096cbeb96b086a8516754f0c70221 
++9d2009d2bb5f2dc1ba023209c610a6ef8f47a0e2 refs/tags/jdk8-b110 ++^029001938827cb860dfed40390539cfdcdc39067 ++f5a5cba1f0b90138b295e07e2c11e6a74a4d8031 refs/tags/jdk8-b111 ++^27cab0e0c87f124277c7afeb5dd6a8750443804e ++f98a74d021bd2bb406a457672057d944666e9c4d refs/tags/jdk8-b112 ++^d38a1f186d640dede9fccb727ec98db3a413f9d8 ++b3141c3a22e3b4dc867e5c0f1ed8897e944ac745 refs/tags/jdk8-b113 ++^53fb16b381c50bf51d11b47f8781ec2fe3f8690f ++827dc623d0f8c7daff9b19914fd68e6e579f82f4 refs/tags/jdk8-b114 ++^31a141290be88273d6cd40d076a72fce1dbaed4c ++caddbf8b5820bd6d3d9362136b534a66d0d62570 refs/tags/jdk8-b115 ++^805a34ccbe791ac02ffed1594b519f4f23c61129 ++c0a37161b8fe781ef25ee871e3e4e940f4aec3b6 refs/tags/jdk8-b116 ++^29edd23be2fd0898193254347c750b38a5debd4b ++8dda2f153e7355c2600b4ca90b841616d7ca6b24 refs/tags/jdk8-b117 ++^6c6d4cb7859deb89811c88ab16c56e0360f4ce83 ++e0e24f9d0cf5ae728b3852caa88ca58655ff29a3 refs/tags/jdk8-b118 ++^63c7ac362b1857cc0a2ce6cc60fec2fe1025de28 ++b0b1706af992194fb69c0b8f83f8d3e6efd69eda refs/tags/jdk8-b119 ++^9d65d79487672ea753fe278ec93520de4201f4f2 ++9c4e14e02ee2fcacfce94a282cc2bef7286f372b refs/tags/jdk8-b12 ++^55168184d686e3a43111f91a7ff10af998ac3eb2 ++be9ff95d58f9bb8620a11a813e51fc88a4d54664 refs/tags/jdk8-b120 ++^9a9add8825a040565051a09010b29b099c2e7d49 ++7ce93445814d839807ce63959d5da871cb018259 refs/tags/jdk8-b121 ++^772d2a8fc0f3a6ae5dfcb1cbffc40a7999349bc6 ++293628545daba965e643eed91dbbd3be01b1eae6 refs/tags/jdk8-b122 ++^4280dc988c91e14aca56b3d29636f50445f0c495 ++897660c10fdc849475a8cb5a90988930a0d7b94f refs/tags/jdk8-b123 ++^1d90ae6406fc8cee60aa9b4307b8cd673fd6e3e4 ++7972b7d898546a6bcea01642e11496d5a14814ab refs/tags/jdk8-b124 ++^bbe842780e4d6d1eeeb4b6ae65c7eb9ec8a182ca ++82649eb2ccd6aa750832f0cf72500f4176d16534 refs/tags/jdk8-b125 ++^e8361253c0c8ffa7d3f2b6545410de1e6cc884b9 ++ba926f3ba82e8822d102d2ebbf5dfdd0d9c4d77e refs/tags/jdk8-b126 ++^c01c6e01e2b30e38fc66c3747797f88d46479928 ++ff1f9cf416e0f6451fe67db4ab7d45a007d84157 refs/tags/jdk8-b127 
++^a8113cf800111b77af628c425ab1b846872f55ee ++4c05fb78dbdf4e742efe866243055a47c40f89aa refs/tags/jdk8-b128 ++^999d9327e23680fed1f5cd91b7918e4470b9d3cc ++79da09374ee9641f6274bdf8f620ac220bd1a1dc refs/tags/jdk8-b129 ++^81f2ab3a7c8783cc51d751c8f0a1ed38ac0e37fc ++2fb5e812337daeb99b03666ee758cd0e696290d9 refs/tags/jdk8-b13 ++^bf689f8a346b36cebf85c420281481f759d5891f ++db9dc7015e1264ce08baea5ccd3c9100d7b9516c refs/tags/jdk8-b130 ++^eaf4bf1aa69f749b8f5f7ae90d522294e77fe05c ++4ea320fc990d2676e512f6d9bb2cdd19636da913 refs/tags/jdk8-b131 ++^9205c421f33fd1938450d211369d3e2b7aec573d ++be4fbf99fdf1c0f7dd9f31ff1119c5dc6751de38 refs/tags/jdk8-b132 ++^e3d58354fd5930a8330620701212a8b0c0ffdb60 ++b4b6499c23ffc5dd76b9ae5938626e81e6be9891 refs/tags/jdk8-b14 ++^561f5f44de853e0feb6597014d60ae3331c93a0f ++8900fe0fd894cb14af02de4cc63d8b791c45ede5 refs/tags/jdk8-b15 ++^469f89911f4fd593ef2d2cfa6175e79673ed2ce2 ++c1a0394aa6a7c1224e9b210f4396d0d6b66799a8 refs/tags/jdk8-b16 ++^b81c3f5efa016772ca4e8ea3b31a1856ca4ce83e ++058ee1c7d8965630161fa4b5b2e777f2cbed0968 refs/tags/jdk8-b17 ++^0f3b43e31b3021710ccc96b3e43d7e179bc591b9 ++e1de342f56e3c45b731f2b7a93113ac1330e73e7 refs/tags/jdk8-b18 ++^c6e510d7a09129e1b8e9a3854d1342927f170a28 ++b5b97b08fd8f84b9012c1417179f8fc86772fba9 refs/tags/jdk8-b19 ++^7afb6b5a4a2028edab7bb502a85ab2d3da2cde2d ++1590b85bb87b8ea2594a6055f0530376bd44cde3 refs/tags/jdk8-b20 ++^0d3829a2c5a70961ffc539865adc1442c1a30bb1 ++228e5e5fd11137188e3cf286934caf9d4236270f refs/tags/jdk8-b21 ++^4c1a2f6b6e07693159b56025c5113f913fa5f04f ++e54520becb3a80498ca8e08c5458bb22b0b04d60 refs/tags/jdk8-b22 ++^a077a5878cd54de5080853ed8de0c1341771e69a ++0cc6a9dda6fe05647947debc5821f67816c66bac refs/tags/jdk8-b23 ++^f48f666a63864031c1e7ca3c7a294e76e3ead0c7 ++83ca3edc0bd1bb85cefb3f4d54c1fff4e5928703 refs/tags/jdk8-b24 ++^6e913f4649da7ea174b9e049c0678148b3d51762 ++c9a34851b006da5e806b9e5fab993e566558e2ea refs/tags/jdk8-b25 ++^69efabad3d8a2ff47a62a4626c574a56edec1cfd ++a3db0e1600aba0268c2816380cd89c6ba43797c0 
refs/tags/jdk8-b26 ++^6f539c5cf97c6b209a03518d3be87afc06530e22 ++c0d7f825f41e1ac2f31cdd1941d58b7e4c3e5a26 refs/tags/jdk8-b27 ++^2b7ba88d551d4f22b3f78345f222a667ac69a965 ++62b5d4df52d6bd43cac7ff1c54ab322980e5d9d3 refs/tags/jdk8-b28 ++^abf322bf84d267aa51a34841f56dd15804fbe2f7 ++817f23c52cf86510b41f5d212398365ffcf1edb0 refs/tags/jdk8-b29 ++^651770b5df8b16d14338b3b20a24ece2c5b839e3 ++3eeedc2818cfcfaab60a500737502935f997c501 refs/tags/jdk8-b30 ++^61bc4ca404d1bdc5c1e4536af73b720ac67a0e04 ++c3bf3142941b5fdb259e140d7becd8ca5915e4fa refs/tags/jdk8-b31 ++^ed6697aa20e3f9c17a496a544b10bfe3543de38f ++f6fdb895dee84338c9de2e8a9850b516611365dc refs/tags/jdk8-b32 ++^94ed22086fcd5f24630747a9c997e835bf47be64 ++0a5ecb4af210a6821e0b40c8b54895dee3957c70 refs/tags/jdk8-b33 ++^a1f493478abd41c2f2cd01148ed377ec47563c26 ++d262ee9294a3eb09db871f9630e0ef1e405b826a refs/tags/jdk8-b34 ++^d2f0792c3907f9d383af488cedc1d16c95c7c999 ++399f3e012999b2bda0a5ddf98440f3455670f886 refs/tags/jdk8-b35 ++^2e7611228e247899fd50576e311cb116ef6ea2bc ++e0c6075c80c84de7cdb64e11622ee84c15588318 refs/tags/jdk8-b36 ++^4840199698c84655569cee604d72a90c6ed1e0d0 ++076f99900dc9251b72cd599c7bc9abcf12ea7649 refs/tags/jdk8-b37 ++^e3f1a15d934fe315f99c53c267095a63216e1e5f ++13a921a59bba0dd7821f2e4c11573fc4cac96395 refs/tags/jdk8-b38 ++^3de2a40e44be109eb9d222d229273e632f065738 ++2c970e04d9cdb83774a3fc7c147b19709f237f8f refs/tags/jdk8-b39 ++^2de299b0862437c3b773446ed09efbf592421477 ++0da3d5c13df505ba47c661cebaa0728c8ce84cf7 refs/tags/jdk8-b40 ++^880e09412543af479bc335faeda6196489a2a045 ++703f7c42a2fecbeaef200315ed1c0f9425e0c8d3 refs/tags/jdk8-b41 ++^ffd7528dd6cb33c64bdc27d1aa4860962789ed1c ++5b30a7757eea0587041b3ca24f351455516c32eb refs/tags/jdk8-b42 ++^6687eed67752584ebeccd7afd7e00fa854031814 ++5cd8ae46b042552ae8b3f73771e92c20c011373c refs/tags/jdk8-b43 ++^c0ed0fe43338374f9962bff468d66c2cad4b70f6 ++77b1dd9260e9ef426b87f5d511e8a62fcff3637e refs/tags/jdk8-b44 ++^2e1841432e581cd40fc24683fedc630f040f81e3 
++2c8668b2097f10ba8c88187b2462f82a2eb330a0 refs/tags/jdk8-b45 ++^7517b9d19367e1f057e5450d7871135b5f878d02 ++f52e9db9cf9586d4a13e5b48f4e6f6a077f73870 refs/tags/jdk8-b46 ++^16a39a42d1342834b4683023c4ac8a7872d13bc0 ++cb201a6450a1dbbd62d3c2222a4750b556d39323 refs/tags/jdk8-b47 ++^b56b984193acf014484e8491631bfb48e1f4884d ++a42e922abfb42cd7a94212381eda86bd797b8105 refs/tags/jdk8-b48 ++^5d039154dec1bf7eea9fee4673edd508b8409668 ++b0e8e30b0add732280a547be984a3cf9a12df68f refs/tags/jdk8-b49 ++^5d98b14bef406ee5e857a29d6d4c549f1772b408 ++f390330de75f3edeb05add4333cf0ded4d0b1ca9 refs/tags/jdk8-b50 ++^6384444219962e0cfab62d215d08d4f4254d57e9 ++c7b70a66bce35e1bccaafa389c804b505a9e4cc1 refs/tags/jdk8-b51 ++^bd04d75035a888d5034c5f7e2e0508d1d28d14af ++c280fe5b6705e79c24f0dd0e93e690b7e5691796 refs/tags/jdk8-b52 ++^9e31be3e8dd02b143fa9997732e714b0886ebee0 ++fd59f3ea44435c85a06fb0845565c76824265a89 refs/tags/jdk8-b53 ++^b07a721f8e22f724831edf215ddaf8607103d904 ++c4c77c0beb27be157948a266de74f111277dacd9 refs/tags/jdk8-b54 ++^264a0511f0c4793a027ba8874d2c74c032e5b702 ++7de35fcb4e06005859683edbd4b04c9ebc637bb2 refs/tags/jdk8-b55 ++^b41431ec801f0b1defca2461b6d25cc6459c0c2d ++8886e35f0cdb0ecb76c23c7aa66e7f95ae865214 refs/tags/jdk8-b56 ++^6c1ac6f6bb7d4533d592da576275ac91e36f7c4f ++55f0abde6d1bd79364fc2130b2505856dade5132 refs/tags/jdk8-b57 ++^901ebe2983837dbce0ffec88d7e7285ba1967022 ++fa9c6eaeb4b60c0f2604d4e09763e9776b20b0e8 refs/tags/jdk8-b58 ++^5238f9a66e5ba8771c2fe22a79cceadcca9d9220 ++bd56a1486356d36b5f221cac91a11d46b4e3c8b2 refs/tags/jdk8-b59 ++^4e637a8f8088bd3987ae03113111064fdff905b9 ++3fd7db6d40dd794b2012b2a78bb83fdada8abed6 refs/tags/jdk8-b60 ++^34063e3656db6d0cadb9168f37024e6e66fc2372 ++43de065837a30359690f262e6a9c6386a3110a16 refs/tags/jdk8-b61 ++^1c78bebc35307677de0a379ccecf99725e7246d7 ++f0bcc45527caff96c77894f3e5ee69469819f00a refs/tags/jdk8-b62 ++^650aac0d5e472ff99afc1cccf900bdee2002286b ++1a94953a17e39b6f80370aa293116e326a18a477 refs/tags/jdk8-b63 
++^59b1cb08b10a159bb08f1103c8457954a8779564 ++f40577fdfae1064bc26309c56eb00e6d00a049c0 refs/tags/jdk8-b64 ++^86c5d4aaebd148650028ba0f10e37b3825d3f983 ++7b2e4a34c59908521085550fab7341b4df551e4f refs/tags/jdk8-b65 ++^e7d87b234c444e39369e8575284f785c56113324 ++ab2191e3ccb18cef302fb7c125488067dd9c11af refs/tags/jdk8-b66 ++^81ecd2932e0caee8ed01955fccc9e958c6a5cda3 ++a7ebc22bb6d0ef7ccb66077e784bd8c779271bfc refs/tags/jdk8-b67 ++^924beaaf2cb40541f53cd8e6949eb763be65d4f0 ++fb39e375097898fd71b5cc9cef873955b0c71338 refs/tags/jdk8-b68 ++^faaa074af726290f9da3d7f66dea5a1d73f83415 ++320e1e9a1c9db090e759888fc520c3865d94d517 refs/tags/jdk8-b69 ++^902594c3e652b268f088dd846466a9b3af57d8ef ++01fdada0af68f6b7a0839840f1db3b40bfbd3b40 refs/tags/jdk8-b70 ++^78bc845d664c705679de49c5f26ba6f129d116f3 ++25da6dfa22f1aa25a85e78926487cdd8ef760412 refs/tags/jdk8-b71 ++^e7187d14db3748428c4a312203549f7ee31d4471 ++d34718c67e882dd5b5048cc0f7c8ecbdf7e8852d refs/tags/jdk8-b72 ++^8261ee6da3c5843806c20808cc4206c73bb0efac ++81b4e5400b4b6ed8a755a2993dec6cfb7487c5a0 refs/tags/jdk8-b73 ++^1c11f83e9262d3bf07b9d095a7b1d3659f1f2a9e ++9df2fa988fbd3cd25a0bc3a016b93738f7cf501e refs/tags/jdk8-b74 ++^75c48b0d1b36d9361a412ee2db2f51b7d9b6ef1c ++9c8df92261d3a50ec11a7b743891f3adf08c7639 refs/tags/jdk8-b75 ++^5f0d1aff638a1ee9df3ae019b7896eba488fcfa3 ++f8fe95d85e22efd8d55945acc30b4ef5a7fba9d7 refs/tags/jdk8-b76 ++^9a87fa5db98892de60f932a9a380e185a28f2232 ++3fb074be9d7474410efd47b744fe51f04a2ed4f8 refs/tags/jdk8-b77 ++^7ba83041b1d65545833655293d0976dfd1ffdea8 ++3ead3a74348f1687e7e903ea4346d2f820d144fa refs/tags/jdk8-b78 ++^93a052bab37ae1946b8c2893ae0d48af756df25c ++76c445339712be147e15e117b3a75d03d827c7f5 refs/tags/jdk8-b79 ++^950ddf6143fe7f2cfaa1a9d5cf96ad631231cc8a ++e47c4ad33dc38b3829baea1114ed42a3afa6c44a refs/tags/jdk8-b80 ++^ef906c89cecf724206e54125e5b619a78a3f13de ++932d2c02021d5850582651e8d1bcda95c13cc0c7 refs/tags/jdk8-b81 ++^ac2b8241a3494e37f1624601c5a4eb3239137786 ++f1c64c13d3eef0308782af9591bfdf344ff40587 
refs/tags/jdk8-b82 ++^61cac54a92db2ee17ca29e3c1b43da8e5887f415 ++a622c1980a9d917793f357b77ed4b27d75d04a9a refs/tags/jdk8-b83 ++^7c84115ffb3930d3f095225d9b32075f2246177e ++a532e44f19896cd738d76f436b88817e73279a7c refs/tags/jdk8-b84 ++^9e90d15aa7babe03b4128168d2688e93c6137e0b ++fe8a12dc5fb276ea1cda6d31c03dfe52c58ee3ac refs/tags/jdk8-b85 ++^7f1db5eedb16c4db61e0596cfeb7979b4269fa62 ++fee6f44b8b104c5af10eb150967d15b42d87f073 refs/tags/jdk8-b86 ++^686ff53731493b1d66ea63f9326624a82a567aaa ++b0cebaa4bf1dc1e2165c91878e3d7caa1578e4eb refs/tags/jdk8-b87 ++^662cdf07462ccc6330bdb3d8f56694eafc49d71f ++7550f3f8ac4966ce76d90dcb57e03fc77b070c50 refs/tags/jdk8-b88 ++^39b2afb3bb63eea44e55b61e5c77477d72089e1a ++8633970b1c505c37540b2821b01ce1f3d982f352 refs/tags/jdk8-b89 ++^2fa38a69d03d34f5df18704275ae419dfbd0d85d ++4dcb0cc7ae2c2bc8f6db1366d8e1877ad7fb8cc2 refs/tags/jdk8-b90 ++^aaf112686c283a7e6b6b1efa7d05617d07e550d6 ++f17647afbae44da697d846b6719dffc5da22a533 refs/tags/jdk8-b91 ++^82d185e64838992b019c90133d508d479d5ced0a ++1b3d50b7f19d59ab6e3e552cf00c30854227dbea refs/tags/jdk8-b92 ++^22e500e3a917594cd93baaf8b5c7d29360d250d1 ++da575ccd3f5e3f063a18bd89c7b9e49c9fb1ad47 refs/tags/jdk8-b93 ++^064e98a35c8967ae7c8284128bd565369af012ff ++5263ce65860555d8eed8a08096bc147170b32903 refs/tags/jdk8-b94 ++^3e0356fb8df690fc18a1cbf51b39b2675e39e430 ++b94b179fc604891a76d1befbb7f9933b6e21fad1 refs/tags/jdk8-b95 ++^7e5fa723b7f5ec98b0d3ff50c98e138c7f145cde ++20a81b3697e720b32c88839e4ca96e40d69d5814 refs/tags/jdk8-b96 ++^d9e5bebaa657e3e796913283ec7818f85925bd91 ++477e6634a0107fcd02afc47e096476326d6b0a8f refs/tags/jdk8-b97 ++^618a81477260932007d23768f37e7dea2c32dcac ++d405ff56451054b5b0ba3f780f9ffef100aa5fe3 refs/tags/jdk8-b98 ++^5b4e46bf5f6091d3001cb2f537b99dda36bffcc8 ++f2da184e305469a090e8e5cfa50018128558036a refs/tags/jdk8-b99 ++^b7f9acdbd397234ef219df5f6292f62a1cadfab8 ++695dc0ff2b308ff6288b85b7370533e9c0c657ce refs/tags/jdk8u101-b00 ++^d22246b6632bdb12fd24717c1d944ea71b774b84 
++cbe6e4acf99c8361f31c518c1c1c753375313942 refs/tags/jdk8u101-b01 ++^7d72cefc04d0ee3eb90244bf6aceeeb57ee0bfb7 ++9182dcd0767964c2a9cfbff7b6bc8e96b8c0e9d0 refs/tags/jdk8u101-b02 ++^f7b0b061d17d818978354d94ce06977bc0490c18 ++0f44e59d36cdcae201e9a340c839028e6b17c473 refs/tags/jdk8u101-b03 ++^27cd2fead403c2cc753f01742f9d083facd27381 ++5f40144ea0f621bfa164bab1f6c3bde8c2d7def7 refs/tags/jdk8u101-b04 ++^067b1ec2d04bd55cdb75f8c7e5c29eebb695cb2d ++cbfde7603cac21146ee88b20743dab18d2e7e27f refs/tags/jdk8u101-b05 ++^168a599ab66b509a6c79c9a318f40e6ebc5d5728 ++c833748acf09dd2cd360eb71149d3aa6d992952c refs/tags/jdk8u101-b06 ++^7f23a42ea2ee8a7c8f72d9baeee4a462f99931de ++5ac2548c9188c19fa61f0939401a3a07ba1fd982 refs/tags/jdk8u101-b07 ++^c273f57adce15bb3d009dc6247e32131ec646c2e ++c856b9ae67b3a15daade4a9c03a75f8e23c974cf refs/tags/jdk8u101-b08 ++^9d42b949a1906cb215d0f1eecf8bcd0d801e3e29 ++28ff9ac828fe365bde77d424a04ecf3d1f370a18 refs/tags/jdk8u101-b09 ++^223314763b58c90fc2e9d02fc004a2ed5f4f5633 ++85536bf19dd58b8f8079100d4d3fa5837d4d3718 refs/tags/jdk8u101-b10 ++^62637d14d738e5982812782c876ec0ee287e17bd ++3b9bcb981d1422811825973197140b1fbac6386c refs/tags/jdk8u101-b11 ++^8344b3db098574ad4a0d5f3c5a435d08e6e47962 ++bf8584e8ebe492c577bac3481d1f575b6813c254 refs/tags/jdk8u101-b12 ++^6011e4b4382115d35fe72967de2b80b89fd9ec93 ++75363e51364d2d9febf30d41ce881680256425fd refs/tags/jdk8u101-b13 ++^393b4a88071d047588fb16592cb9aebb9c002b8a ++f913f6d3d0c35c798864f748f77cfddae333ef0b refs/tags/jdk8u102-b00 ++^d6c3ec2f04975dc4e76a73bf63218b266663651b ++19992405febbbf9eb5e02b72e564a33a2ce5563f refs/tags/jdk8u102-b01 ++^5e0137907d037397a05088b76cb612a65091661e ++409efeca6a8da0596945628c9d3fe49c96e71795 refs/tags/jdk8u102-b02 ++^35fea1e39316b4a403f6f6307339d275e9811d26 ++bb40f454e4da8850224c6143f59fb2559fd9501d refs/tags/jdk8u102-b03 ++^939c8081e37a1443294fef65c7207e77782ae2f6 ++80bc6dd758804c05645a9a9f00950c732749895e refs/tags/jdk8u102-b04 ++^bec5a14a22c2450d4a8010e7898746831f441f6c 
++3012e44ff78a6fbd8d50898b2f2df46790e16f24 refs/tags/jdk8u102-b05 ++^c10b4755ab2a43e5f396a298d94aa0b31818207a ++ca0c2d2aee82c3012084136d0ef8c8a105b56d5d refs/tags/jdk8u102-b06 ++^89fd7d4a246311c0354407931e8be4768046a902 ++feef8fcc3d735d4f634b31d7e0307a85cf46efe3 refs/tags/jdk8u102-b07 ++^6234794d2d859de9705089cecabd9e7fd3b04109 ++aeaa5aee12e662b0591cb0c118fad08708610802 refs/tags/jdk8u102-b08 ++^abe394fdbfc2d760a8248e663af7175301f03bac ++c90b3d7df7d54fc1f5f602f8ee8bd1bfbedee780 refs/tags/jdk8u102-b09 ++^340c17b8079ee2d5128dddddb4c7415f85c4567e ++3e633c08b36ed4d5c8a64f1ae5b74092c4bb377d refs/tags/jdk8u102-b10 ++^921af7e11d1b77cda05c474c15667bd22c066e95 ++38c7a12cf7bb8bfcf6cfaec854ce51cc0a8ad6f2 refs/tags/jdk8u102-b11 ++^8054799ce13abb1e3d9243db02831df12d2ebd4d ++1d436f22a9311e0650e91a187cfdce55c9c8b5bd refs/tags/jdk8u102-b12 ++^28baf85bf512edd9ea3d93d4ec1a008460cdbc02 ++2ad7bcdff6bfef355ccca15fe1b4a9f277129b09 refs/tags/jdk8u102-b13 ++^daeec43c179bcb20d00cecc46a799424b95aded2 ++1d38088b738129a31ed17ad96266d98b5e8428db refs/tags/jdk8u102-b14 ++^78878e0966a9b69fba21d9b5b59c82c5e58e0aed ++740c04aab745e22597500d604dfddd4cb7885a2c refs/tags/jdk8u102-b31 ++^26025441ae8d756b7355132877784be86ec4bd8b ++2899b3a3f8762ea617faf5ed7886c641013e4d00 refs/tags/jdk8u102-b32 ++^9288bde52e0804602ca07eb7860108737af2efeb ++2f4d06084103d651500554a69ba374af75824dca refs/tags/jdk8u102-b33 ++^33e1a962028c80b62687fa3265310ef1b82d32e6 ++a4f1eb28acb1cbb5b05c9cef3ae97329228ae02d refs/tags/jdk8u102-b34 ++^a6bdb6f47f5dbc2f090735d0e9886eb536653752 ++886d25f0ca3b9f0bcffca8d005404e881bb3a5c9 refs/tags/jdk8u102-b35 ++^32b7277488e956a7bea85a8ffcb465430f47feb9 ++29da41b2a425146046ef899d6ae2b65577cabd8a refs/tags/jdk8u11-b00 ++^ba3b75cddee06b1a26e1d1c14b2e7394be515f59 ++b32b53214fa4c8448dfc489aba09e3d15c5d4488 refs/tags/jdk8u11-b01 ++^949db6d1a562557fc48867fb5e7702c34fbba2a5 ++c51287073d08401953a945a7c745db79322f469a refs/tags/jdk8u11-b02 ++^2167455f55ff27c3073db3ad1978e7b224c69797 
++5c4c214fa535e3eea54edce75ff334b3bd9495ea refs/tags/jdk8u11-b03 ++^65c11eb64d5f5732579a67e02aa35cc7c016c54c ++2f0548073814a3957c70a1e8e41997a2a5274e59 refs/tags/jdk8u11-b04 ++^da13a20bab095ef36fcaaef1d3a475248f57160d ++53de0120bd00ba94a94fd86b2a8e9cfaf3c31ad2 refs/tags/jdk8u11-b05 ++^eb7fdb6ba386c0e244f47dc577bcb025ba5aaedb ++8389c7791e5c8648a2e5414863f9669b2c833aad refs/tags/jdk8u11-b06 ++^6a2c9bff49f66a5671de668ea15de9b7b598300d ++b54fcc412ffdf82ea842e5cd9d43d580d81fa11f refs/tags/jdk8u11-b07 ++^34be762d3ad3ec75c339694c09e132770a6e1820 ++78782ac4d21b6663cb4dc1cbf60a25422539b76c refs/tags/jdk8u11-b08 ++^ab9655f9f06d86a6e5ce4dc6f5069608925d5a6f ++7fdeb13571efab8379a4a9c125ecf24ec2998d51 refs/tags/jdk8u11-b09 ++^c167346c7b6ace86eaeedb8938d8d3188db677dd ++67d1c101ee23ee2ea2754462bcf203591b2bc8b8 refs/tags/jdk8u11-b10 ++^f2da004e31efea8e96ef71078b7c7342aae69b94 ++b02d88c506efc3d0a3666ff5559f65030e9cfd76 refs/tags/jdk8u11-b11 ++^fbeac013f38cec8ff1d2e1799041f845c1cc0ba5 ++59e5f5a9f668fb649ab31b872ca726e3d1ead772 refs/tags/jdk8u11-b12 ++^5c59047961170c5f67a8f49ac0a303454625b00d ++d4214362d0736b08934e5c96e8ddd42682861a84 refs/tags/jdk8u11-b31 ++^87c225c43705c0dfc4b9b9d2453b0a223445a4ba ++7eab1337053a10e0d7528c485e62217d09bc2857 refs/tags/jdk8u111-b00 ++^92edd8f888121b62742ab1ae80a971b9907b4030 ++bccdc59258d326451951189d4d57eacbf5c9ebfb refs/tags/jdk8u111-b01 ++^77206a5194ae7ee3c3cd198cc3e712a3d2fb2510 ++27a41abf46f7681330e2ca48ca310accf10bbb18 refs/tags/jdk8u111-b02 ++^8e327ae177292c037ad2eb8744f4df2b46c75fa9 ++fd84a4d425a22887b95826e0f0c477773006152a refs/tags/jdk8u111-b03 ++^736cf16ebb7694f40b47e41135dbf21c7b9b41c8 ++8f311310e732103ab6013b440617e72532b7f274 refs/tags/jdk8u111-b04 ++^b9986526026c04d3b62df79d93e4e09f130351a7 ++e435f2af7eacc248f06d9bcf00062ae35842893b refs/tags/jdk8u111-b05 ++^a4d9c6ebb2abdecb1720d3c6585a2a69a99fedf9 ++bb58b9ff8549efa2c641c90e30af536943218f96 refs/tags/jdk8u111-b06 ++^f3984b6dcc6f6d4b8caf8b9ffcb204aed05ad871 
++73030db2c365709c63a8c454974f493ee4f02531 refs/tags/jdk8u111-b07 ++^b710e885c535b47df19eb2261927988e743ebcec ++e3e6c70d8a2c46acda8f2ff401cbbf86ce6e2219 refs/tags/jdk8u111-b08 ++^03e640c99cf1dbaae7e5bcf0839b343f2d883911 ++25bab47d859a300d1bbc67048a26bc41daafa776 refs/tags/jdk8u111-b09 ++^c1a4890d3f549a8bd40b82a5be730b0bccccd7a7 ++34d49491509129be360c80b4e57e5a9013389fa0 refs/tags/jdk8u111-b10 ++^6fa5bf45cdb2392e42089934803ee5f9363fc10e ++006a3165df933f51af3605860d9f685a24c0f517 refs/tags/jdk8u111-b11 ++^430fb39e89dac1db33170c059c4831d69400ae1e ++5dd204b0723fa20ef92c0f07067ae8fc184eb45a refs/tags/jdk8u111-b12 ++^45caac2b45ceb4728769a29c4c5c6b6645cd23b7 ++818ac2ae16ab25107eb8656179827fd2d2746d26 refs/tags/jdk8u111-b13 ++^967018262ebb38f3c530be291932887374cf0e5c ++344d73bd8ee6b55b66f56d6f115184226c30e552 refs/tags/jdk8u111-b14 ++^29a6553955c8c07ba7a9e67569d8f45ae0d2f79c ++946f34b846dcc425f65ea96ee20daf39609b81f2 refs/tags/jdk8u112-b00 ++^e874f4aec29b5199dabfef0091f9390d11418eb6 ++0a1cf189b7e6a8c6fa8ad7ff738fcde727156972 refs/tags/jdk8u112-b01 ++^3bf7662cbb3867c0efeba4bb26298060286a31c5 ++523d97e1bbd1ed8afd316d3393a78383c46737a1 refs/tags/jdk8u112-b02 ++^85451c0d25341c84f215e4806465eae56e52d3d2 ++fac8c9348720fdd6b45414397ec73a5cb0856c84 refs/tags/jdk8u112-b03 ++^e6b91a093e8a2ba28a8d9c67de5a165973adfe21 ++60caa3356d729ad0bf0977fd9384d4c5702a346c refs/tags/jdk8u112-b04 ++^b705e46c4f5a45a6d5f1a6dcb56d80efa827441e ++866fbfb2874f067f18a6afabbe0ee293a77eda9c refs/tags/jdk8u112-b06 ++^68ad3532e8651c20a1ef6fa60426755902aae0b5 ++b20dcc3304138c369b6d8de1eb0187bb866672f0 refs/tags/jdk8u112-b07 ++^84748240afd0bebf07baafba315a90d54950b52b ++7be70d7b94a8849c50dd1eca78080c0afd0eec53 refs/tags/jdk8u112-b08 ++^d7e7066e950fdebc3514e24eee5b85a98c045f37 ++d636102f3b8a2a2951b70885380601e779b82c89 refs/tags/jdk8u112-b09 ++^af836e1bbc8d25a3c8c5e428136b1d719d000868 ++479ccc92ecccbaaac836bd5a8bf2e4f136cec605 refs/tags/jdk8u112-b10 ++^d0ec0e02502a55975bf197f25f79e49830d8d494 
++37425b66ad1bcdaafa3285c2b69cdeb76a014bda refs/tags/jdk8u112-b11 ++^a3a01654de706c21d0cf42b7b76aef398e14f654 ++859a5ec47e8adc264ed2e928aa55630e734078fb refs/tags/jdk8u112-b12 ++^cac9b943fdffa789f5bda018972524ae36164ab5 ++dcb32e30bf83e30df1fdf7b079766484970abb57 refs/tags/jdk8u112-b13 ++^5eb8d1eadd6e1db9084362ad00ad4006e19ad111 ++b84c673d3b546f935a146c12f56c1986845b98e0 refs/tags/jdk8u112-b14 ++^39c1b9708faff2f781b21fd9b8d88c3d00c86f00 ++c2ce7192491507e1eb6a3a904bfa4ba5a872694a refs/tags/jdk8u112-b15 ++^18c6d2b34d71d717306a3d1ee4e1c3aad95dec4b ++7c70a4aa1769e39ad247ee6e5c7def292d3ceb8e refs/tags/jdk8u112-b16 ++^64ad20c00f850c3ed4c796c72ade483460a7897f ++4a77722236b100b7ceb35fd2c0f2a3a9b07b80a1 refs/tags/jdk8u112-b31 ++^0da62ccf932077eb0a52a9075bb41e30557dd242 ++7f48fffb7bc1714af4f05ddcded89efe1ae3104b refs/tags/jdk8u112-b32 ++^ab6ec73a30b6509f21965e81113a490c231ba05e ++1ff9da766f4fa12a2724bedf5c68a310297fea8b refs/tags/jdk8u112-b33 ++^20e72d16f569e823a9ecdd9951a742b4397ca978 ++e939b6626bd11576b9b65b8810b4a9d9c413756d refs/tags/jdk8u121-b00 ++^0296bac80c340e5e999c2d818658b3ac12a940c8 ++fea79824dbc1d4369b5778d902709d37aca1df74 refs/tags/jdk8u121-b01 ++^6f297bf07938af4eb3eece5705f6df1222e657fd ++00eac9c980936659b1f9eddf571c60cf76d5753b refs/tags/jdk8u121-b02 ++^77fb09f59b806932dc0e3c1216f48ba1437b455c ++a9545f4530b3b6dc8ec30613bca588402ecbaf3d refs/tags/jdk8u121-b03 ++^be7e3b5869e9766aed879eaa3ad5b5202f2fb768 ++d9a20825eb5bac14d7486131692f8375dacf7737 refs/tags/jdk8u121-b04 ++^dd79e7d731caafe5c651e6fa2540c87d7766639e ++4969813a863eb63d668cc1b6aaed46c29b0ae340 refs/tags/jdk8u121-b05 ++^dcb8f9a3802f433fc6233c67167508267ed16b1e ++0ced021b665f1e6d3580eda08b036737f0fa9a4f refs/tags/jdk8u121-b06 ++^f86ebd156f7f13dedb565c0ea7b3e3f72ce08438 ++c32f7f3a340f089146ae170b6b174d3cae0f75fd refs/tags/jdk8u121-b07 ++^1f5904adfcdcfb7ce61782c1fd3fbe6888c1bb4d ++d4bdd3da791929e3036ce628b1e7ce227550d7aa refs/tags/jdk8u121-b08 ++^f2c6a01bad1a212b7c52aa5e921c0a7efc8d30a8 
++f645c344583fca5b311ea04babcc105d424411e7 refs/tags/jdk8u121-b09 ++^1b3d4f50138cf0b2a1fae3067b1ff47b55f445dc ++318e54ff3978408f48e656d31688dacebcb0d80d refs/tags/jdk8u121-b10 ++^de06f75ef7e385e2accaf49e96b882b408af18a6 ++25d17c1f39fadd7d2eec06ecf07b48df7a3cba63 refs/tags/jdk8u121-b11 ++^1ad5151cbae12a0c29de5ce05d7a3a0330ef569b ++9694477448799f3c81f5daadf8869abe14a8c224 refs/tags/jdk8u121-b12 ++^e1447e248d0fbf0f632873b4c8b52c6cc88d66b5 ++d949051a59d8bbb024da6d1f0b521af955660201 refs/tags/jdk8u121-b13 ++^62876a00449a2a517a14ffa47a1416099feb4b21 ++f90f018ccef8e17410a8dc821f49b6e9723a9461 refs/tags/jdk8u121-b31 ++^596949611b307e3b4b06199ecbaa89cf15721757 ++cd8ab9a2d9ecd407259591a69835501681eba281 refs/tags/jdk8u121-b32 ++^e812d8054f76ed596feea0150c1dc12752e4a14d ++90e225ef646df818a3cf35c91697fc36f3c7f938 refs/tags/jdk8u121-b33 ++^b6fb6282d2a10c4aa5fef13c72ce07e9250b3b34 ++aa533e739a27ede3aecd613c5bd7e22463b0371f refs/tags/jdk8u121-b34 ++^e823052c8b863922f24f7e7b86e4f292d9bd18c6 ++54e703f44d1d62f458a6bebd2b568b22aab629c2 refs/tags/jdk8u121-b35 ++^f3924ff384da5e87b032c5f62ab4533b3f54d8a2 ++3e35a5ad129b714b050862ea48352ce2bbfd17b9 refs/tags/jdk8u121-b36 ++^c4c7f24538a93a3d1f9f119a4a1b2bc953a7af5f ++e2e29e11d8800c80d53b5ca94477dcea9a7c2e5f refs/tags/jdk8u122-b00 ++^cdf340e6c6683e545aed60c23aa03b8c81778ea6 ++3bf77f6cce400371400d9fec4cfcf4e6511acc13 refs/tags/jdk8u122-b01 ++^78c13bbdbb931deef693c3bb3e7e6af26d8cb5e5 ++9d9dba3ac2e61cd8723d7cf56f95110c65eb6bb7 refs/tags/jdk8u122-b02 ++^da02b1fd57c8694228cd94c9517ece6978865aad ++eb61f6f61aa515f184428e76940856d756e278c0 refs/tags/jdk8u122-b03 ++^8c7c75dc9bd58c71789baafc1b9db9d293988ed2 ++1878a7032ea2105eb64337ddc4800284dec87248 refs/tags/jdk8u122-b04 ++^e1f5965d0636d345364463c84edc3bcf15a7ecb0 ++2accfb242ce046ef63b7be0ff9b878c40e074e21 refs/tags/jdk8u131-b00 ++^705f51465bf3fb693e82a820733304c656334903 ++f2d97a713f8d56ff850c0f1431f458d6897b23c8 refs/tags/jdk8u131-b01 ++^bef448a637459f77622a4c746e3f8ae1d0249633 
++a0fe95fae600b23f5e520b8f7d370dc872e0be24 refs/tags/jdk8u131-b02 ++^392f165be5c4110fb0aee28e92527e1d5053cb3a ++50328fe6a8a55194dff073571bd31d6dd442eb66 refs/tags/jdk8u131-b03 ++^f806d455e9e5c2cdf919fac90f7a0fd04802e8da ++38ab8aca5350d302af6bccbccfdb6ce5f851c59a refs/tags/jdk8u131-b04 ++^2108d98216f341f98f25b12d71d2250c98f4dc8b ++12838b59bf4b357a8cd72fecfb0c32aa2dbf5c3f refs/tags/jdk8u131-b05 ++^fc39c1e4526772e8cf74bd337568cfa317435747 ++c8f3b3fabd7aaefd4bc7eab927ca8df2acf26a05 refs/tags/jdk8u131-b06 ++^1c93c353bd76548b1f574d849eccdec320bd39ab ++949a1694bb67ca2b339bf67c324a87d3e6bf0170 refs/tags/jdk8u131-b07 ++^318231971bbcc45ee6993b28c7ac63c83c5a6c8f ++28d69ca7ac182c330c59527b74518690d00af27e refs/tags/jdk8u131-b08 ++^8da5fb6d5f170a39d06255818fa17bfdfa2ee9f6 ++a5d6c2cf503ed97f5888457d5e943954e9665cbd refs/tags/jdk8u131-b09 ++^8d3037870e42a4c3cfb33275b2db3f135cec1494 ++7dcd61a45d38e4d0f0299028626e8d4d10a42aec refs/tags/jdk8u131-b10 ++^08b381a1dc0ed0d4d7108051f534f52ede1b0f86 ++6da3d52cc83db1ffaebcda3bccf82fcd13959987 refs/tags/jdk8u131-b11 ++^40e602ecfd7b426394c91121f3dbc4599185d9fa ++6ac79e8b822c741e03d5608850799eee6a86561a refs/tags/jdk8u131-b31 ++^76c1d7aca6b268dfd89e190f9e3ec58131cfaa7d ++10c10cb986ff36755603ad40699d61af4a29420f refs/tags/jdk8u131-b32 ++^302b80f83a56a4a7ae6ad57cc6889e68dd516e1c ++b25dc7b515200068233b7abc4236d092c2a063c5 refs/tags/jdk8u131-b33 ++^25547671ab1bad4da291d2c4bc4ef0153d5d82ba ++c7c92432fdf3f7e34e38016e3e95616821124227 refs/tags/jdk8u131-b34 ++^720f6133e936449180090cacffc470a435e3fd20 ++6d861884ceffdb684c54826edbe04aaab4a10eea refs/tags/jdk8u132-b00 ++^c2ca7e43af0d8c4f244bb2ca742d248161cfdc57 ++4f77ba4473042d1a7147d8561e820b2743913ef5 refs/tags/jdk8u141-b00 ++^05c186accf28d05aee575a8e40c7468235b28b2b ++1c80304e0c101a53fdf1fe4441c87ef338daa65a refs/tags/jdk8u141-b01 ++^76f342248187b4782d9a7bca4fcfb039a862bdad ++1c2276adb242c780ddfc69e77c7fde973508ebcb refs/tags/jdk8u141-b02 ++^cb3c3db4513de8279068d695b19ecc176ecacee9 
++bbbab4825be0d2101973b05093c80905d2a9c875 refs/tags/jdk8u141-b03 ++^a8aab0fe4165756425eb2901ded0940458f94d96 ++4c022cc4978bf51a31294923db61b40976bcb6f7 refs/tags/jdk8u141-b04 ++^91d7a53ee01f2829b8d5f52166477b781af8245b ++3d7d3bf777663a9618061d55d835c27bf9706ef1 refs/tags/jdk8u141-b05 ++^13ecd768d3d1fdab812a82c284af6355e423ed68 ++5375a0e98884d27958391e85f0ce0d578256a0c2 refs/tags/jdk8u141-b06 ++^e5476e3352402e38e43ee9f86f74239ba671ad14 ++309e7e8577196cd3a6496ca581da720fb9c09640 refs/tags/jdk8u141-b07 ++^8c00ccc84337e750cee256b1426edec2587b3951 ++8b7ed2935abd813af3450aac25db3df6caddecb7 refs/tags/jdk8u141-b08 ++^b2756ef9b5741839d16f86c7abfad16f0c727a26 ++64f3039e4e449a45faf8c28b7223089401bc8d02 refs/tags/jdk8u141-b09 ++^d10f50ac39084f4515731847aece58a2d4938428 ++88ae50322185bce62342413a0969b64abe78bae5 refs/tags/jdk8u141-b10 ++^59a5c8bfdec295f404f1edf84e84011a25d32c8a ++abc8fc59f72b2365e595484a5391592466c57e1b refs/tags/jdk8u141-b11 ++^dd31328f1a94777d7e6dc2cc0072c8be382865b0 ++9302c65d3107c4b009a8dd4aec88153091b7ec4e refs/tags/jdk8u141-b12 ++^9c1916b44c470bef3b5a89c5b5b2236bd61a9ad9 ++a1f05532f01db5c7998f84495f4ed019b32ef999 refs/tags/jdk8u141-b13 ++^d063e76a6d285fd5e57de374250cb1873b5f6685 ++ae56667598c348cc355ce6dcd6784e27dc8226ee refs/tags/jdk8u141-b14 ++^d4deae97ae66538d354608f0e8b0ab9b047e37bf ++bedbf36cceb644d17a93d9f48339910038aa6499 refs/tags/jdk8u141-b15 ++^36adff18f34b45ebf90103a0b3d055543a9b8c7d ++e7a9d44ce8a6308e9249418f06492decb13a1f3b refs/tags/jdk8u141-b31 ++^e1ea97e364dd499c7341926984151b441ad9a7fd ++99c807bbcc39fbae44286b5b44e7248d605f41de refs/tags/jdk8u141-b32 ++^c9a3a32991646384b6704479880e4a9769291dda ++4b96a1009812321a28813972ecccfce8fa75243a refs/tags/jdk8u144-b00 ++^f52525d07dc0af259f116fea9f4eeba4801b6f09 ++616b60f9ba2985194f237ae37cc369e1ab282778 refs/tags/jdk8u144-b01 ++^2166087b7fbe6a943b4b2ddd2c7aa743111a2fc7 ++06456993a351649be3ec9e63f297ea2be79b2de6 refs/tags/jdk8u144-b31 ++^6c797110ab14678d9a6ccf43e99bfc85d3b75abe 
++9428aaa23823a1c2eb09eb54f7fc1baf840611ba refs/tags/jdk8u144-b32 ++^af11da19ab44b5481aeb1d63d88407421e49675a ++08bf31017c74790bf3600693f223a1aaa6b5dfbf refs/tags/jdk8u144-b33 ++^375c40f937c2654a15508c020b480a8ed8d855a4 ++646d960c395e8c84e228522c96e4a61ef4345966 refs/tags/jdk8u144-b34 ++^a3f2db7c45b651d5669700e9a66ffe96092d7621 ++98a491a3e8f4b41fb074f3ec1014b48dd75eadb5 refs/tags/jdk8u151-b00 ++^1aa219221ba7e689fca0de1048755e1dd7805f6c ++3036f80d70abaa6518517984855798ec3820b6dd refs/tags/jdk8u151-b01 ++^903636027ebcc955a8f9380a145fb3864785afcf ++68b95f1fb57bac7bf54c5b2a2ffd6617de44d143 refs/tags/jdk8u151-b02 ++^f2e515b73383be551527d1d0275dbb75437d6878 ++1933c9ca0001fdce8a9a8ee1854bd22fa83d8bed refs/tags/jdk8u151-b03 ++^eb4e7deff8d14fdd609d3ba24900026d6c0b0af8 ++9c4873383ff8fea493cf5e8365175c8e55af9746 refs/tags/jdk8u151-b04 ++^1cce7e204d43c72d2a5e59f1eb8ddb15788b2d02 ++b61f7711b043a7e4d70b58ca850d729d990d1c38 refs/tags/jdk8u151-b05 ++^fd7090f64ab267bd7aab5170fe6cae2a42b210a9 ++b1bcaa7a528cc22414c889c51c1009124be80127 refs/tags/jdk8u151-b06 ++^1fccce05da5e8a6f760a246db96adc64e037604f ++d9458f0afdf75f872928d2a17f61d2097ded8ceb refs/tags/jdk8u151-b07 ++^ee0d3556eb9425cdaa229b4a24b23785e545b886 ++a5103aa7cb0434343d5353d519feb9f05dbdbef4 refs/tags/jdk8u151-b08 ++^fa66df2827967b22b9a7c1211e032b1f5b9b664a ++1432f2f2ac7d088f67e2a601870800a8f14e09be refs/tags/jdk8u151-b09 ++^969abdbb3a9f615954d8ce7b59d3c9ec948bbe16 ++e38a31e77474274c6e79e4e484b4efda8cc4d092 refs/tags/jdk8u151-b10 ++^0c87f28d7a805b4c3997caae6c3cd300a1bd6f68 ++a4ea15ce111b4df22e1d1ecb9e5e1f2ceefb8e2a refs/tags/jdk8u151-b11 ++^f0452e0ada41590fdb960a3e815b3b4a2bfa95f0 ++0305a9d9fba709caeda0132adb6183df8cfd24e5 refs/tags/jdk8u151-b12 ++^fba5fa0807b9a29509f279f8921e5ad2d6a2f06d ++b23fb9f3e516c87853e46f3fbeaaa468d275edb6 refs/tags/jdk8u152-b00 ++^036cc3de7f0b52afb7d798872165e58fccde2c11 ++cda826dd3d2a70427fb660f2d301fc202ee0a644 refs/tags/jdk8u152-b01 ++^4c53c54d69dd18e185e6d7dd4d2a422967239fc4 
++09c0fd041bf5ee66067be340e29dcbe4e14b308a refs/tags/jdk8u152-b02 ++^24915473c7402bbc2b056ad038d5d592a54f6a50 ++3e1918b38addb2f3095f90c7b7e5d33ada1e4bb9 refs/tags/jdk8u152-b03 ++^cd76538e101797c97904761f9e38f9e99c9baa0f ++179d441aaeb756d598cb38bb11ccba1daefca511 refs/tags/jdk8u152-b04 ++^abc26a79da19c4073bbed3f2e4bebef1e4906fb3 ++899fe872ff35af9aa5b7f19a581dcaf19561f76e refs/tags/jdk8u152-b05 ++^901b3593337aee5057ec0367a6bf2dcfae7100ce ++f597df5014b0568889af36fbc76c87f7d51aecba refs/tags/jdk8u152-b06 ++^72e046173721b240498a9250734a9fdd1c1f6e3f ++c04350880e4d8d8291b11a5f34005837250334cf refs/tags/jdk8u152-b07 ++^c7eac680a11718001d83c72a3b251dcd02485034 ++a2c11330105b64276b568e9f3971b07688ade042 refs/tags/jdk8u152-b08 ++^cebec4e2e0efce2b877a5ee890f09e7bf2b7050b ++bee3ce5121c885f166bdfbb5f6eeb46ba966e87a refs/tags/jdk8u152-b09 ++^6ea14f1ff0a6ca717e703a478cba86aba04300ab ++642be0fddfcd55313018c5b0de0256d04230a636 refs/tags/jdk8u152-b10 ++^36c04574ae63a44a11a3259259735755799583e2 ++6fb3c2b2787070e2710856fe593630987dd35a82 refs/tags/jdk8u152-b11 ++^8f288b7155656c3d766b3f6b7bd9a76cf14f6ca4 ++2e8e07621e2835e346c6ac9bafdd46fe3da8cb1c refs/tags/jdk8u152-b12 ++^e2a53bb0523d9736444290b1bbe4d3ed1eb71391 ++ea1330a609020bba102f0876462ee41487291148 refs/tags/jdk8u152-b13 ++^196bfd64cae25421c58ffa0bd4436823a954cae5 ++df99cc83738db6b6f68f78070257a48431a6eeeb refs/tags/jdk8u152-b14 ++^1f539ff365fce1e54ebe7ace406e14f8ae379529 ++a5ed17cddf1d7b32d4652a696139e07fb433909f refs/tags/jdk8u152-b15 ++^c05c21965a2792df7a5b831b2fe291c8086ee69f ++2653b6fa878c46b95045e055a7fd8c807c453ff4 refs/tags/jdk8u152-b16 ++^aa45b4ed91493ace84130b0f6c5ba9c7982c842b ++6bad6d6059f3ecc275f991769c45585b04dd54ef refs/tags/jdk8u152-b31 ++^e83890236c3d06f351a8dae1791a334ff82cb926 ++52ac8dabd03fcec1d8eb8ec0b6ba4561b06fcd7b refs/tags/jdk8u152-b32 ++^34500add5cf606aff3bd4fa40fbc3b5af1288a8a ++47c354529b2b58b69c06140c69c5c0ad166ac3da refs/tags/jdk8u152-b33 ++^0a24855d83416a3122fe76db7aedf3d9e2bb3775 
++d6bee3530da9b8dd1bbcf8cf3bf04bdc96dfc36c refs/tags/jdk8u152-b34 ++^15eb905b44fba7c3920cb795fc17048863b9a436 ++55ad9215a4958d990249d299064a7be379f0694d refs/tags/jdk8u152-b35 ++^3ea4623d46b9dfae33464568b137ba77a8aac74a ++e22e3e977c0c7ea6b887b5c55704588cd253e087 refs/tags/jdk8u161-b00 ++^04a37bb06de9b8f66c9c5d19d439aba3365d3fd5 ++05cf629c32e68ea0d5355e8f383acc01d3dd414b refs/tags/jdk8u161-b01 ++^e7c1e33d6544cbc5c1b77ce32449bd6ce7689e6e ++8d34dcb8ef7cc6fc1612b3277a5decc3e5c90b87 refs/tags/jdk8u161-b02 ++^b6804a5556d8e01851eca90c11bfedbdd28bf7ce ++7a8e58f5e421b3bcc28fcccd31bfc3461292324e refs/tags/jdk8u161-b03 ++^915734322fbeb290c4ae1facf3c2121541b7d58a ++ae01fa9bb76b822634705c4f5c308f097314cefd refs/tags/jdk8u161-b04 ++^5e8a07a61f33193c3320d11c6fbdba96d8f9f732 ++9784b1a68448d60a92f209199563aa607eb6423b refs/tags/jdk8u161-b05 ++^1ac0867d4170020462f2f4bbdf2dc295f08197bc ++479c73d1924d520fae83c68ce85fb08ba2e179ef refs/tags/jdk8u161-b06 ++^9d3510304885d60a48402ec8d5427a1c05076359 ++d3cd1408021603b59c0d0986b4a4d212de3caf2c refs/tags/jdk8u161-b07 ++^98861ee8dedfdf014f11c33e33cdecde6b722e5a ++968f26d9b6384d23e1f719a8da06ed5c74ceabeb refs/tags/jdk8u161-b08 ++^06b11ffe03b8743032e78e1bcec24328b2604851 ++1ec3b4724883514920ad5775fc4490d3549c04d5 refs/tags/jdk8u161-b09 ++^0d71d4b899514101b1f37baa838a38fb2cf0a03a ++fdb0c1764a70ae5059a1afc8072db46f278b0c60 refs/tags/jdk8u161-b10 ++^24db2949705042796d29a0d6430c74f5fc821d02 ++d385a671aaec32045724db7ec9b29b5cc8791c6a refs/tags/jdk8u161-b11 ++^a4838613db08f3f125b70e678ed15b900becd31f ++dce29233d5aaa5703cd6281cda6954cd57f3262b refs/tags/jdk8u161-b12 ++^e0199490b29ba799488da0d951449d06c9d423e9 ++479cf159145a867b25cbd4d8e30cf15ec3cafc84 refs/tags/jdk8u162-b00 ++^070c17a6b47623bffb83eeff4d3cba64c2f57958 ++52b0dffc0dd97957a81bcaf27edcb4ff1c389625 refs/tags/jdk8u162-b01 ++^89ab905281be2d8faf539995da97e207b3636e24 ++f917866ec993369c4c2cd71487643ecfcb2e7f26 refs/tags/jdk8u162-b02 ++^099b8491e58a8a6826dd3ad20f7725b8f5e3a8a4 
++5fa965590382e4630f9a3447cdb930e14e1de9d4 refs/tags/jdk8u162-b03 ++^0d4a9cf4e90ea7845b861a55a48e842466ba2fc4 ++52be60c0f49e006432a6c5f4e5061f16022a1cd9 refs/tags/jdk8u162-b04 ++^0950bc616fc320dac9575e0a7d709808c6e408c0 ++68a4a2c0c2089d71e478f736af1880fdcfa559e8 refs/tags/jdk8u162-b05 ++^96e6a11e1eef7c4c64aa660ed7d9d419a0d32752 ++053135278f44d60b9f7f729eeb8fbfb972661b58 refs/tags/jdk8u162-b06 ++^99758d8194ce3be3cf4b4bca93ca158085e3f85e ++7ceb2e9ef177f5e4ebbb7d4760da056ca2282f28 refs/tags/jdk8u162-b07 ++^d1763228ff99d18bc61e2c24b9cdfa2ef6e625df ++880a4d1b641843be0b8571e6da68713e705cf48e refs/tags/jdk8u162-b08 ++^3527015622d3c6de97d471add5fc069173a78873 ++c9592f734dcc3640326e0e568760476feec1236d refs/tags/jdk8u162-b09 ++^00f7828455c962b652db074d6d904b7f69fbec8c ++8c360a3629f69dab09dd6e09b45627d25d3a825c refs/tags/jdk8u162-b10 ++^f7746a1d4b31beb2ec8e83bbc99f76d40311dd1b ++72f6c7c4f061fdc7443924aef7a26ab449b27367 refs/tags/jdk8u162-b11 ++^5ef0e6e02d6131c3aa822e6755475e709cb23ede ++8b6c540ba669750627a41823b0f2181f27feece7 refs/tags/jdk8u162-b12 ++^132eda0775a48ec8bd53248a9da48981667d5f85 ++b95df8c1014d2516d105729782254285c6c6b9b5 refs/tags/jdk8u162-b31 ++^890b510f53515fe7672068e7610ed3fd2a3cdb37 ++fde230a2898d005bf22f65bbd792b19e2e84dab8 refs/tags/jdk8u162-b32 ++^18af39ced80b8e892c4ab6e84254536841feb0ae ++b8843f3069917fc3df4469846d1a41f533a5d127 refs/tags/jdk8u162-b33 ++^bf795c380313d96a2603a9aefff5c2fea99f894b ++e7d3a63581a48e481be5c72b4f68bf4f2df83d9a refs/tags/jdk8u162-b34 ++^f098f59cfbdb9128e9a76bceeda677715ad7e8a5 ++83f43e2a6eb7ce3af22fbfbf9c3a1b69794b1141 refs/tags/jdk8u162-b35 ++^5a64472d21fc9f193b22566bf078371bd526aef2 ++51a9c7eb794f43f8e7e6f2ee3973280f541ce113 refs/tags/jdk8u162-b36 ++^f7a2f4ec6ca3502b927b22b7d4041bd5d88e27e4 ++5f0130b32f6c5f9fea77160b061e88d54845d937 refs/tags/jdk8u162-b37 ++^57e64a6dbf35934f4953a587cc9e34700f743959 ++08afdf3072af46959cec48846fa09b2ba6183490 refs/tags/jdk8u162-b38 ++^0ea4bdc6d7deac69cbe49497c6d4609589d79bcc 
++382e5942224bd68476d68bffcfab47b05290b465 refs/tags/jdk8u171-b00 ++^b1a6bf55f414222888ed449196266f7c4ff1cb38 ++5cdbe88ce0663b0242ca010b5977761f5a66444d refs/tags/jdk8u171-b01 ++^fbcd76707617411ef5d9cf1ae3a25acd5f625826 ++4790f9e1154b18aab91d87eaf21b14fd08198855 refs/tags/jdk8u171-b02 ++^2ae9f5430f831fed3c2bbb684e08ff991c650ded ++ea34200d5043a4e62654e024dccc89a540cd0b13 refs/tags/jdk8u171-b03 ++^628c0e9b27fd295c60a8023cf02378311e60cd18 ++b0cd11bc32400ae156bb4ca7c3a8df6a030a3f29 refs/tags/jdk8u171-b04 ++^58add0d6f87a7326e9c5e22a6f1f069ac26e3de5 ++9da8d61ba4db490080fe89d9fb2c8d4501d3c9d5 refs/tags/jdk8u171-b05 ++^3c89125afe5aabec4bb153149668ed86a4d4f2a9 ++19fa7a2f9152d6d4fce122b93c899a40110bbee3 refs/tags/jdk8u171-b06 ++^694d2d1a20243a6fdf8e8d5ad41709a3c337a147 ++f65152c6c94cf3e5d4807f264f614322fccb99df refs/tags/jdk8u171-b07 ++^25501e385379c4aac6f533606aaf91842ec424b8 ++96c20510e30d2624595ad8f3f338f952240ecf98 refs/tags/jdk8u171-b08 ++^eb4c1ed6a6d051a2c831432463ab8dc8ecbd79d5 ++4c9c2fbeb9845e49aa435497b8cedcbf223bff82 refs/tags/jdk8u171-b09 ++^25af2804d63f6d39045d9adf76a6c5f62a3eab9a ++ec5e6e6a70ac1a2f7031ec510f80175f3cb25904 refs/tags/jdk8u171-b10 ++^8e4fb5c6262d645a12e9094c3fe6dfde3d28c84d ++5d0da87d8e8c7c8c74a0203ca93bf5630ada617b refs/tags/jdk8u171-b11 ++^47a2847a1c3d9b50f3f12798d52c95a9b2be7418 ++38c133465881c83297eac2df6ea385d6856e15f3 refs/tags/jdk8u172-b00 ++^a5d45acd3a1a653c9bd2f25719e91443a7913864 ++38c7a642e7ba4662fa5d1c4c2be5e37619d96126 refs/tags/jdk8u172-b01 ++^2834a42b26b1a5a8d988deec50af5d8daa71cfc0 ++98858ee0e65b21a14a4d38c85f0cffe2ade14058 refs/tags/jdk8u172-b02 ++^e294e2603738784709d322d02b9561c0673dc8a8 ++e57dd96432db6e2e9e1ee342c55cbe65687078b3 refs/tags/jdk8u172-b03 ++^d65d19df69310fd3f54b54c7f60f84f6e85409b4 ++9ca2de33e7b91f7ec88d70d30965a49d0b7cc652 refs/tags/jdk8u172-b04 ++^19c9c96af22bbb4f8173a4ad236dffea50c1ec67 ++fc96dc330a28d6031f049a96f0b043a54bb50db3 refs/tags/jdk8u172-b05 ++^7ae0b71039bd88f02a567c50ad9aefe0150b8771 
++03214887bca95c9f3a16eab3d644e5c6f0c5646f refs/tags/jdk8u172-b06 ++^2575d93c959eb296be0ae7ce0eec515164008d83 ++d3b168af166725d2b0f5f7a081a84e4c7e08e066 refs/tags/jdk8u172-b07 ++^a452ac2942103896f0ec866bb5c49b58edefbe59 ++7902460f618fa0843502315306bcd5e54ccb54ef refs/tags/jdk8u172-b08 ++^15c875fb2b5cc1aeba60f2e77ac992610da94cf4 ++056b92b608bf73c9b60f24fa2f89205f2af3078d refs/tags/jdk8u172-b09 ++^1b5f1cb1f18a48579d26b3e16e8eee3b27484f0f ++86edaac24cb9477a2ce874614852006351fe5fc0 refs/tags/jdk8u172-b10 ++^4ab010e569b3829364ee640693df44ee120a397b ++ef5cb020411b9a3b7295984b50dbc0c4fd75e94c refs/tags/jdk8u172-b11 ++^9d9b94bf530e934500d4124c9647f78629d6f8f0 ++317c241bc8259d66aa8b634ec8bb7e72100aa28e refs/tags/jdk8u172-b31 ++^0b466a9d17d39e05d4730108464803a8f0007a5a ++82ced576c3845c678694e98d1d06998f88f10535 refs/tags/jdk8u172-b32 ++^05226c697c7d1041ff55f5d9099ad725338e9ab2 ++9d7b696f6762d27488513cb39e14a88cad457c72 refs/tags/jdk8u172-b33 ++^d29e872c06bbd8ddd88606c331612441cc11122d ++92967535e8988822169a4b53f6f885601108a190 refs/tags/jdk8u172-b34 ++^4f987e5e8c76c3043aa59bf7b52d7337d1f52e96 ++ebea0c9e544db8487d166f855ebf7547b4adc592 refs/tags/jdk8u172-b35 ++^141203f69df9639660d002f9d9ac7963dd5b9cf6 ++e6e0d721bfce45694785fe9942a6a8f90e138de4 refs/tags/jdk8u172-b36 ++^d635b54b895ab58afdfcef217e81fe5c67412430 ++ddecf97c92a268577f5d80bd36f4c5e3ca1ce2de refs/tags/jdk8u172-b37 ++^59169eecb73a2d96c28cd4e9718669c9c87d1ffc ++4d791e92be60e3895f20cf2716aa94e8614bbea1 refs/tags/jdk8u181-b00 ++^22988b6b39fcceff68072919a726f3c2ec1a60f3 ++5dfc624618ee0ad45044c327df9e6624e74a1e67 refs/tags/jdk8u181-b01 ++^23cc2068dc40f0fc5ffca8646d85f341b9bb7cf4 ++aa5fd8b07a8bcf67e699e996270b1ff72c72e2e1 refs/tags/jdk8u181-b02 ++^d67c8038c33300f6ccb4cd67c27aceceac7bbf4b ++264aba445a6347c165d3c54cc1397a818ad139bd refs/tags/jdk8u181-b03 ++^2f0cc2ce7bc4df87d4f2622b86e23c8956d74d92 ++fb9fa12293577f73039293e4478525ee0b786766 refs/tags/jdk8u181-b04 ++^0aacfb2d4b4a2a852cde365a8ef093c34bccd885 
++8bba1b4aa305b07e90afbd5263f2e23a0972dd44 refs/tags/jdk8u181-b05 ++^8eb5b641a134247713a3f5767cb5d2383baa1c8f ++defb691285173c3fc31a54661674f84cf3b2d5f8 refs/tags/jdk8u181-b06 ++^5429ac340ce5dd45c6ff7031bb946a3b816118eb ++f9aacebad1e10aa34c5c650e832e73174ce331ad refs/tags/jdk8u181-b07 ++^290b8fa7925697e77daf19a31f8da58eae128d06 ++993638555dbd527df4ed70651022572d47e581a0 refs/tags/jdk8u181-b08 ++^0c718a744ab46cc90f301ebe3225b94ea8c48f63 ++8291875b4da06e9e7104355b86f04c2038766a6b refs/tags/jdk8u181-b09 ++^59c988fd84bc6d6c8a132b4178b69032f35e135f ++b720fe644c34f9c65609db36795a014445072e59 refs/tags/jdk8u181-b10 ++^017134c702bf2a31bafc43fb87b52a84bbab4487 ++4acdbdee57da5431070b919ab43097ddc3d78135 refs/tags/jdk8u181-b11 ++^51de72e1ec6a49de4788cfe1ac36573e2e9e4cab ++155c49ca2420c9af8088102e5768629e29ae0087 refs/tags/jdk8u181-b12 ++^834a56f2aff39af9ca76cd907529ffdf95c5b309 ++661168ac5cc366bd6265899bd2fc1aedc920e3f6 refs/tags/jdk8u181-b13 ++^d1ee886bc3c6dcc4b01319b966fd5410a3c932af ++87ae46c9cc941f5905762125ea02d76ee178066f refs/tags/jdk8u181-b31 ++^b15565833cb79d5f2c699138bffc5be7e397c295 ++1e53202b2d623188ac26bf453b38551ae6f8c4c3 refs/tags/jdk8u181-b32 ++^1dcbadf6dd1c3713320d71de27732747b8a586d5 ++f8b6a2a81a3237cdceb89fb9db91c95983add791 refs/tags/jdk8u181-b33 ++^e882c88cad6a8ae3de85eecedf8f28c8ed208d2c ++6b05d9a3a536c79e93dd1d707d736968731d7af0 refs/tags/jdk8u181-b34 ++^0e4b7c04d2b138166c1f79941087563f205d36e1 ++bd481bcb182f18e21e1f40811db2e3011dd41749 refs/tags/jdk8u181-b35 ++^621d89b889ec6c94619fdc52374e92292b4b8ec9 ++7180e9ee49eecdda627bc6b4563d1edb8bf11c29 refs/tags/jdk8u181-b36 ++^45288fada693a4c71908b8ecf87fa7ba757bb4b8 ++475eedeef447f1d061ab7d4780a384c651fda224 refs/tags/jdk8u181-b37 ++^ed75ae048494790c94b18adf0499c1b286c01303 ++18e7d5a4d98723a0f6d26e223e429c29a567b20e refs/tags/jdk8u182-b00 ++^674b4ac614ac7fd75cbb209c0c42ca99f60a8566 ++9b73d50f149d2a751d0247d960ed5606b7607d94 refs/tags/jdk8u191-b00 ++^e35247beabc4bba45f32bc97f27011cb200e5857 
++1a6959c4c970d37de5c14a5ede13ef718187ee65 refs/tags/jdk8u191-b01 ++^184893422f607d1d9e2d461e8433dbe11cf76576 ++0604b070f74080a718735891e00c74151746aceb refs/tags/jdk8u191-b02 ++^b0eb79c291de8eb90bdf42cc2dd90425f85956f0 ++7779a318ffdd3baa12f505bbb81c315ba7431120 refs/tags/jdk8u191-b03 ++^fe8ff4c7a1581958bfd955231022de21701ec04f ++2daa982ba49792be5f82fc56aa0f5e7f38016289 refs/tags/jdk8u191-b04 ++^4726bfcadba7efea1bd661af4e3853c3213118a1 ++c5bc18b26506c46160095a57bb28cca57c30a772 refs/tags/jdk8u191-b05 ++^147fb7aed930ed08890d81c9ebf2a6bd378177c9 ++d7d8afe0396d146734e0f2c59a925947f5e04646 refs/tags/jdk8u191-b06 ++^a152fd0749c2deed58ccd3d1328d7b389b89c74e ++30705b8c5c84c854f1a7b3d7330ff57d85d5d1ce refs/tags/jdk8u191-b07 ++^6e7bb0d302e508d50efddcbbeb96f3d6b7a2d154 ++d92122d93837b54146852b2a0a4fb79df921d906 refs/tags/jdk8u191-b08 ++^fa7b042cb3c8d55a2594e38c4e33bbf371fc2f89 ++0faffbabaa556409c39a0df1bc3bf412fe9be8c4 refs/tags/jdk8u191-b09 ++^93651cf623b68d57006dfbcceae9dca6225c055e ++cf11312798dea8000d1ce115179d1faea10fce01 refs/tags/jdk8u191-b10 ++^9ed6702b379d55b277d93578c8f65697f3c71d5b ++8cf985601d19bc27a9e73480b52b529fca6a6d28 refs/tags/jdk8u191-b11 ++^53fa97d3bd311f16ce3d919e4de87605fbeecfeb ++6c5e4f63234381c1b9520e440e89b08889b80ac5 refs/tags/jdk8u191-b12 ++^f6d2b2144c05e28b4aec475d6d7f3d1db93e6e39 ++94cae1a7d00f9a837b1b1dde8efc1bdd684a26fb refs/tags/jdk8u191-b25 ++^0da511ba32d977c4f1e6e07a2e56c890b2077a7f ++6d51d1590326a52f1d58a28299b99570979c2e20 refs/tags/jdk8u191-b26 ++^91b605d3178efd7d9003250454e48ddb4e5933ff ++b63c9f0013997ce0ccff8bb4a5e014860509002e refs/tags/jdk8u192-b00 ++^bd4f796a75bce8dd7aae22e8442308094c4645cc ++17c27272c90788a45c1be225517834c7faa2cfd0 refs/tags/jdk8u192-b01 ++^eaa9ce02022345286d6b84b74c2373699fa43eb2 ++9ba878e4c726e1013d0423537e948647f1cca763 refs/tags/jdk8u192-b02 ++^c04f1c8d6ce36ffe2b0e618c631dc90b231fc8df ++44f8070a9e0cd45e8ab9376f8e463e75b2275fb7 refs/tags/jdk8u192-b03 ++^4dc064f4245f4a83b3914a53f7f67f096965d5eb 
++5897fea661cadb8ab97de230895ab932994d4322 refs/tags/jdk8u192-b04 ++^502d8f011c98ff7cdf20d0d4aea480f942879896 ++513636eb24e50c6fc7202cb77ba4737a3a1f0cbb refs/tags/jdk8u192-b05 ++^1e66e20b4f4f994b0eacb9e2a88390aab38ef776 ++6bfcac19280eff29f5aed8da772425e49b17152a refs/tags/jdk8u192-b06 ++^3eac57b7970375c50ce3f9abaf2714acce0e429a ++138cd7bae256b380ede8636554300ebf850e82dc refs/tags/jdk8u192-b07 ++^0c1c3a8038cc3a56f292606ed93874f27c33e862 ++22fa3c78770ab2caf34f86f5ae5fe621f808bc02 refs/tags/jdk8u192-b08 ++^3d54a03f8d6f6902fae1c6a8289ce35665985299 ++f8235a50108284d9a5d78c78dcae71bf36fcd5a6 refs/tags/jdk8u192-b09 ++^7b01cf7353ad801e55d771014b813c1e7a8ad190 ++6e350e4466f90bde0335d854a28dad2e04020fad refs/tags/jdk8u192-b10 ++^c76ea546148774221fc3da6f9a0a193ddc672777 ++0b003394a0c5abebe36fc9c8195b8ec4933aa8f2 refs/tags/jdk8u192-b11 ++^cfc5df6a73be08b3dbd560dc88978dac7bfaf46e ++e742309e4e54377c4eb2116b9f5521f2be8c74d1 refs/tags/jdk8u192-b12 ++^e83f736ee4591763e91764d688c5b3c7170c9b03 ++1fbb8e056cccf926eb54b4dbf6984ae9000c03dc refs/tags/jdk8u192-b25 ++^18cc04983fa4025a8eef244ec2e721cc43faea41 ++adcd9e8ea52229816b0a7d73fb2b502fce5cf12a refs/tags/jdk8u192-b26 ++^1d025f917bee62d70fb7e97c7385f059a39b3100 ++1a3a098ac9a4e1e0041a3ee5310aaf7eb40621ec refs/tags/jdk8u20-b00 ++^55e355355082933f3de6ad5b7a9259188cde9d08 ++26fa1199f3380658b9a967fe458aae79ddb799ff refs/tags/jdk8u20-b01 ++^2fa793d120023289dd8f69ae515516229ffcef41 ++b5769fe8f8e1874fe27aac95e1410cb7335d46fe refs/tags/jdk8u20-b02 ++^0bdf012f4a0b0c9a58fb4900a4fdd713d29d8ab8 ++cb5c7212a51c93607938a700e94b6fe04256b1f2 refs/tags/jdk8u20-b03 ++^a2cebe497463c82aa966e23b3283a43beb7d4d04 ++1aff1d02de5c35822c625c38103692caa6adab1e refs/tags/jdk8u20-b04 ++^4e63c52abe9aecd28033ea5b51b47f834729620f ++29b7ff37b89ae055cdb256139e9a3e8756e3fd15 refs/tags/jdk8u20-b05 ++^b4b16dff4398aee7bc5c115b3cfb052c8e640d6e ++17409bbd7eb7c4cbe372a0ee370b57a5a44ea1fd refs/tags/jdk8u20-b06 ++^e000cfcf42599af7a3f96f0d373783e14f47f6a3 
++babc7d098d5a0c12bb7548a1d7735d6987516bb4 refs/tags/jdk8u20-b07 ++^51c3e1704bd100feeceeee8846726100b28f3f4b ++e20a6ce905369a68099e7247042fbc4907609cca refs/tags/jdk8u20-b08 ++^1e07a18456034a71ea31b11177523b8087531b8e ++050ca39505a16a605ccfa31dbc735aeb1eb448f0 refs/tags/jdk8u20-b09 ++^b513b549d13ce06aa1b22faae79b00eac05c23b3 ++82fccc34217f2e584baae7bcca5bb83445de954d refs/tags/jdk8u20-b10 ++^59d79af530dc5d00693c71dbee201c058e334c90 ++907f5a08b592a5c759b3c66c214e3cc8df7d7271 refs/tags/jdk8u20-b11 ++^96d339073fc6cc06f68ed019b41311f832b921d6 ++da4273b20c972c584b32b79e38b90f35315e12aa refs/tags/jdk8u20-b12 ++^76701a83e47238e65dba3724fe5d842e7f79fa38 ++507313fbf508e62c5b113ddf4d43d46d98409462 refs/tags/jdk8u20-b13 ++^d909be7207fd56d84a301767dc5c8ff1a41fba8d ++d5e98ff6b90ee84676a9d217728c5f55ae8c4279 refs/tags/jdk8u20-b14 ++^3846a0c62aba11b8936abf7660a4ad1e36932491 ++0a44130fa2883e19cde50d9476093540d63af130 refs/tags/jdk8u20-b15 ++^8f7a710e7d9da116830fdc262e0f5a75641c5a8e ++d77aacb383fd03c9fad0be19180637c694c1485c refs/tags/jdk8u20-b16 ++^76335302591d4e82a72d0ae0b0bb440c73e3347a ++b535de6a6b0996a496e4c51f8d7a260d41588d58 refs/tags/jdk8u20-b17 ++^5d82f3a6e97b8d4ca2408a7e6e732186412e18c7 ++6571b972d37ce9321e3bd718bfa03a2e9bac70fc refs/tags/jdk8u20-b18 ++^02d2e5e4e4cf57ea910807b8c19f08c9728ee48e ++a7f5b305fb161f36e801b2a812acd80c7050e29f refs/tags/jdk8u20-b19 ++^a497ad2d4a5d10cc10d9558ab596cbaa0396ccfd ++37b7f63edf010974d8961924b6ce8ec4c1644b18 refs/tags/jdk8u20-b20 ++^ae4d9dd845f08ffb8840f29f18f704ba603aa188 ++ac2c1e1e157eab21c44d798a9c9886de4860c047 refs/tags/jdk8u20-b21 ++^823b0e2ddf72d0f54a2e01de6645512bdd269a95 ++b45b4d5547515db579a976a763e029dd8d22039c refs/tags/jdk8u20-b22 ++^62b08be118ad61e762d4f1c3171f1f7f4284145c ++c9fbe8ab78f9a9cfe650f08db902886da7b0df68 refs/tags/jdk8u20-b23 ++^8f7efc7c5e67dc27563e9cabbe3e03695a5346d3 ++c9e18a07aec8b1a33fb5ff8607c072a8e4cd0754 refs/tags/jdk8u20-b24 ++^90ab63369d945838b2c2166170bc586a1c1b89dc 
++0348d016c99d7d6f411df1389b8e17840c845cdb refs/tags/jdk8u20-b25 ++^e4cf981da78b36f55e13dd25b63b62cdd724fef7 ++e6557ae6539bd16a6d43227195b621a20415b620 refs/tags/jdk8u20-b26 ++^5d75e9528e93d80caf3adfaa6eba784244cdc005 ++213a7ee06968e1c61b91f1c0e6dd96afd6996bb9 refs/tags/jdk8u20-b31 ++^bafcfe7df1be5c097ff6872b5541751d40bd7cf1 ++e22f3b00d7a791bed05a25f24f127c42f83dd2fe refs/tags/jdk8u20-b32 ++^5f497f3531a403742334496ed1e7b459b20de4f7 ++cf29ad0acee572cfd55d2651fdd163cedd18c44f refs/tags/jdk8u201-b00 ++^fa5d72a69aa833657f350f155129f5e33336a78e ++0c72c0015dc88d2df6942a889c39262edd4abc4a refs/tags/jdk8u201-b01 ++^0333e6e1fab14ac7c211fb4a91f44c41148948c4 ++041440c69bd7f7b4b7b23dabfeb24818d8877f5c refs/tags/jdk8u201-b02 ++^f54baf775e4218d2b762f77c5b8a575ab51d82df ++88e85c2604133eab9e9521782e10c45d70f6b624 refs/tags/jdk8u201-b03 ++^6b4f594d1e2e40a25ad0ac52b124a61905b9f84a ++afedbe1e599297e2419b2fab717c1e6af9fb4ee1 refs/tags/jdk8u201-b04 ++^1e00c0b67673489f38d6d278c552226fa39adba6 ++834d1b9ad87dcc04f4cf7a0109a8026c0c31abca refs/tags/jdk8u201-b05 ++^bec5da01f589e41043429ba1c2e7de545f312552 ++8199b0d16b43fc6c8f5b58661b2e4c6d23a8ed02 refs/tags/jdk8u201-b06 ++^97c230155d9c9f9b6eeda10690dc5c2d82b518d8 ++fcfbefaca5d7368305669d4dcd2a34f06c7018ec refs/tags/jdk8u201-b07 ++^43857838bca45cb3454289e432d1f8728ce10bf5 ++b43fab104a64022439292238c2a1c8d404cc0734 refs/tags/jdk8u201-b08 ++^c85898f2247878844923cd99d4b6916ca2b4f05a ++104ec7203e180b2cd5291bdd71bce09a56f17de5 refs/tags/jdk8u201-b09 ++^8b71744d41e43c8422b45d26e09f0e62528b465f ++b24de94d4c2e2538941d7cc7c5b564f39b9efde4 refs/tags/jdk8u201-b25 ++^1781ceed5b683c5625c3a472234778e21e3ffa9d ++f5b99bb6b5d1be687551e57fd8d08b8a0073b182 refs/tags/jdk8u201-b26 ++^4afc65ac8e1cba87246b30682c687bdeb1e22feb ++def91bde65ab81a3525eef95d8aee3d58dfc4848 refs/tags/jdk8u201-b74 ++^4ca1bf09a530ad04d659fb0821fe17a52375dcdc ++ba6e6a036f14eacd8760badde4aa708a323cb9e6 refs/tags/jdk8u201-b75 ++^ea2f6b6bbd141bc407c4431fcf6296149724408c 
++d530f91694529bbabc377f1adb96deb3380fae97 refs/tags/jdk8u201-b76 ++^a93847862ae021249049348fd20a96996297c8f5 ++90c24af711926927fa2eb81ab07e5935105ebfd6 refs/tags/jdk8u201-b77 ++^d4f4ee5c84c231fb9e60fdb624601caf009b1919 ++c812871c8d1b5c772382f4c0164b911ac1e4cec3 refs/tags/jdk8u201-b79 ++^c4e01c71c5f7dcc66fdc351ce8a7d9c9140881bc ++8ca0fc77d4a54c16761ba4a6435767f0af4f35ee refs/tags/jdk8u201-ga ++^bfbc83cae1aac67d595e2073c1f80d2c39e04f3c ++b40bf3d08c77f440bcd9d7973d625d5cdb10396b refs/tags/jdk8u202-b00 ++^1dd984abdcd242bc6b66353939b5604cf333001f ++0957548aeb98e92ee9bf03f12f733196ad63d7a5 refs/tags/jdk8u202-b01 ++^24ddfa3148a705e0b14523930e8aa1e4e815a662 ++cc560c2057d860803f85b22568d4f0697ed62194 refs/tags/jdk8u202-b02 ++^28dba94a98c6aefe8053c524ccfa13809c35a5d8 ++c21ead8ccc3ea3fcea35dbbe2ac780abef6d8c23 refs/tags/jdk8u202-b03 ++^50eaa3aa232371895cad8abe984c256a33b8eee3 ++b00e25b38735bb3782ae09ceb6d496e3d27fb6db refs/tags/jdk8u202-b04 ++^127bcb6677d8d9009df708cfbc142f4181032aea ++f804510776aa0e167dc1e568937ab70652b18c0e refs/tags/jdk8u202-b05 ++^50a97b7c6123453a99f9ac7b0ca30783436d94d8 ++b026b69e6a966009e67e8ce712052d84d902adf1 refs/tags/jdk8u202-b06 ++^dcf2f7f5d6c185353c8d7fb5cc097667c343403e ++6ed67ba6c8be6f6ffaf51580b36a0b0227da8cb0 refs/tags/jdk8u202-b07 ++^fab2c93b2024204eb1e7325c398ad2d8e8fe1bc5 ++c035118455653e80f94dbcdfb251d0dc4589b829 refs/tags/jdk8u202-b08 ++^04a31b454cd853fb88aafffd411dd113e3f4045f ++e33caa66ebd6f800849f1fd6dbb61a1a04ea27cd refs/tags/jdk8u202-b25 ++^2558d06aa776abfa9fa3ae9099a0fa5a591b9556 ++3aed0a11e3ad3aea13fae47bf8fb9f7f46328cc4 refs/tags/jdk8u202-b26 ++^9cbc2c5952c2540ebef99d738cf560fbac208232 ++7061a66d970c8ee4d0d8aa4a9c6eaa7225b179b8 refs/tags/jdk8u202-ga ++^a5ebb6a5848d65dc4c443565b4c5d623ce40f730 ++868ce53007a6e2916dcf0c0445009a59de2605ca refs/tags/jdk8u212-b01 ++^1cd1c00d72f8b5f01598b4ffff6a5b0ddf185d57 ++bc866856b0f36755253e0519425e91c0232d8883 refs/tags/jdk8u212-b02 ++^549be5185d54115d43af4832159934ee37bcc5ba 
++83a0b4b548f5fc7c82426065eab904ba34d5927a refs/tags/jdk8u212-b03 ++^5d55d1b87c698635b1a3cf9415b374333911c029 ++67bc3230487c166b561aadb06e3ab80213e3ca23 refs/tags/jdk8u212-b04 ++^b1c904d626f1b45486f83e04d86291c5df247644 ++d8ebec7924d8e492725035040bd5336f1d7b4d83 refs/tags/jdk8u212-ga ++^651893ff6ccec485e7b0d80033373bf5fecf6ea9 ++f2db5f9842dbeaef23a9ca3c2ba24e60f52a7a1d refs/tags/jdk8u222-b00 ++^3381f5fda41f375b6b16b069415376c7ebd953ac ++fc56cfee4fe29e58d9dc7136444612ff6237e395 refs/tags/jdk8u222-b01 ++^441a843e8be6a3962aa53633977b95311f4c0a48 ++7c5100c1f5beae2046510e6a2b98a7cf9f485195 refs/tags/jdk8u222-b02 ++^d4c7fbcec5cefb82cecbbe7f856921b4e4c234cd ++5eae49def24ee76fed34dd44d5fb361914f228f2 refs/tags/jdk8u222-b03 ++^6b8a02fda37896512d7161967b0a0067188cf120 ++713f3598509b2084e45ac19fdd95be9f5ec4bf40 refs/tags/jdk8u222-b04 ++^f376064db86a633c51c36985df6f424475da5810 ++8e73dfeff451c483c6cb550fb48a135a90f1fbff refs/tags/jdk8u222-b05 ++^20c94c315a5011e3dc27948d935cbbed8d3e358e ++ae7d656c6b53bb6f8b2a552c93debe393cdadbd2 refs/tags/jdk8u222-b06 ++^2d0a2afc463a4e54a12858af9ec62843359c9e74 ++19b461ca0f76ff0833f283e5922a53434b84fa8c refs/tags/jdk8u222-b07 ++^0cb7a7df2f7a10ad89d1b067332b03f24199e872 ++4e4ef7c1097c546198e332a8858de0d846eba458 refs/tags/jdk8u222-b08 ++^e54700e4e9ecc54ee0368da6ef3874482ece617d ++a83e5785dfff8c064f963fcebc48bb0fe0ab55e9 refs/tags/jdk8u222-b09 ++^9c780da9166b1984300b93f81f3a1fc850f8d4d8 ++3bebdb5a5729f7227cfa5128ca1515b8db3cf2fd refs/tags/jdk8u222-b10 ++^6c24eac1ee54c131661e4ba6439f89e30012a8f1 ++b9c5739563c463bff3a8dbfb34e92cd4b6f17180 refs/tags/jdk8u222-ga ++^a04276220b60de7f8f0c0cec95cb11c691eba982 ++09a525f17abd591de07b94d947f18cafc5574b33 refs/tags/jdk8u232-b00 ++^0e4649f4df5e0d0a6754f95743eb3cf0dd9fd260 ++77322405b0099e4229ac99422baf97f1560204d1 refs/tags/jdk8u232-b01 ++^bfc69585d43183d715ab1c208bc1e588ae8a882f ++4590c3e5820bd2d40873003ccb01093bd06dc210 refs/tags/jdk8u232-b02 ++^b09aa35c478be0736b3f28c2e39a9b389cef476c 
++22f73c233b6a5f31a409250f3c48aba69af77400 refs/tags/jdk8u232-b03 ++^5328cf6804fce2c506ce561bd7d4d4d41dd9cf66 ++9ba4b13483e67219ebfefe828078c38044b50587 refs/tags/jdk8u232-b04 ++^d72e2e1f3a1cd55ab5cf8144ada220010dc891b5 ++b0e040df11ea4332563f8686a9663502de7bcdda refs/tags/jdk8u232-b05 ++^231077ea2ac7d50fdc889ff3b5b7444d94e7f70d ++68eeb9cad3b80cecf478a5efdc5dc743f95275ee refs/tags/jdk8u232-b06 ++^0e663f06708ff2b1db4ab9c977c3f5a59156c855 ++d75b7732edb102e1cbe5bd9bfbffca367c8ed823 refs/tags/jdk8u232-b07 ++^0dc2455f4b4474b6b967c46a39a51beb90307abc ++b06381c12d83325366b3cf43fe58c1951b459e8d refs/tags/jdk8u232-b08 ++^438bdb6977d5d934c3a72259c6e6b11dd45f47d5 ++520c5aaec74651ae4e1b2ca3c60c4ad28567cfa2 refs/tags/jdk8u232-b09 ++^b1f618b04b63fd53b42f5256892a8ec10ca79460 ++0cf57a4e252cf63cb5d4ba323badcc514acde918 refs/tags/jdk8u232-ga ++^a5d468050b32cb05f11f61b35e63ea723fe9885e ++cdedaa49027e43c0f18cf1b88c7e4363ed94dec8 refs/tags/jdk8u242-b00 ++^f498de247ba00407154b27b0cd913d7a4b803642 ++0ab1b8b910da0d03d46e1d8a2991b0940b14a458 refs/tags/jdk8u242-b01 ++^f667afce152040aefe2b1445d17ea9969f981d94 ++646a4081ab525ea17f403ae14e763ccc17e0aac7 refs/tags/jdk8u242-b02 ++^61278c5aa35d1beae12b8663e0cdf7268090ec4d ++f7e7737838fb11d04b463ca5d1092db37f191694 refs/tags/jdk8u242-b03 ++^ccd4d69dd13cec48c01c0c6a3956bf9efb00a721 ++3e746fb14fb2c86a4af0bde384f342a648e100d1 refs/tags/jdk8u242-b04 ++^38a7697a67dd042ba2ec597e4157842b1519b75a ++dcbe0a9a5d778d497a3820d631475cb8e61abe84 refs/tags/jdk8u242-b05 ++^2e743c7a84115bf8a29e1549ed061f6af5685479 ++aed0d4f53b73a88719c4fba7519654617d86967a refs/tags/jdk8u242-b06 ++^bbfefb5e9124c9b8bfcddffb089e7686a7c14fd7 ++85a8e9e75c99b689dd7cdb04cb6bf06c0c0a3578 refs/tags/jdk8u242-b07 ++^5b8ea04cca63e6431db91a3a2f13b9ccc3be9091 ++a79cbfe9ac9f10afd097e8da03e5b1220a4f3333 refs/tags/jdk8u242-b08 ++^1e81fc98c7475f2e222ca0ac4ed602169fade032 ++08a536b2adbcc2a861976b1c0893633cba539b56 refs/tags/jdk8u242-ga ++^b37330abd36e1051d506808803c0017cde8d0530 
++70a73ed8883a1d6034859ff35e47fe5a77d3b88e refs/tags/jdk8u25-b00 ++^36a0e177e925136888114565666911b979c32dd3 ++8a64e5bdea2e03ede933403d5a76d7b66ab2b7cc refs/tags/jdk8u25-b01 ++^665293555da1c61795cba45496a89132d3dc47ed ++79555c4c71bc7665cba4dc1cc9f30cbb65ed9240 refs/tags/jdk8u25-b02 ++^c88b250d9b1121d59eb000474a6c5f043aafa01e ++15f4d9bc2a52e8382be93d9282139b649bda2204 refs/tags/jdk8u25-b03 ++^3db0c390e30cbfd1d13c66d3da4ed1ec5b77b771 ++05cddc58ebfca1bf8b95bb5c52d38caebe9ca957 refs/tags/jdk8u25-b04 ++^8367337f8db58d3227dddc7f4d52457d9bd4e2cb ++9580ea37ccd235709ef667338058ba5b9ce8cc66 refs/tags/jdk8u25-b05 ++^33e95d3fecb61ec20b868f301e1945cc1782b08d ++57d3ca7e09fa94350d429794e0df209e26b75a78 refs/tags/jdk8u25-b06 ++^b5d24fbb7898c7593abcf4b9b0fd975203d3f225 ++91bd046ec3e7b8dbde427892185f61e7a128951e refs/tags/jdk8u25-b07 ++^b660b7276faa68b547fe06124b25124255e26fad ++16436da48d2c8364c1559b8459a6092fcc220f6a refs/tags/jdk8u25-b08 ++^29188aca04d4c7007a82a3e2528976f20f725d9c ++060561e82b261fbdb0a9001265ea90af286034e4 refs/tags/jdk8u25-b09 ++^cddec7ed8809ff8f95e3a74b5cc46abea159ea60 ++65e576e2cee69ae3daed25145d6e3534382b2974 refs/tags/jdk8u25-b10 ++^2973c1432aa64faae3b2a741652774a5c12bd5b1 ++d94c7842af4cb95773055e6341c0950aa5a13269 refs/tags/jdk8u25-b11 ++^e9b3bef3ea3f24a10d4f1cedeb0eecbfd3b495fd ++664208a1748d05a45a6294d12be32defdc960ca8 refs/tags/jdk8u25-b12 ++^390b30dda523aedd8b1392ee7bba81e68e2314d5 ++ba53d1439d1cd47d17c92119caf3341649eef83c refs/tags/jdk8u25-b13 ++^d6e7cb7c46663c64683c70fbb0f75f293b7c068e ++9c137fb9b55c7d25fbf60df0287b75790c85c2bb refs/tags/jdk8u25-b14 ++^e64955da0be46cbb6e4f58785f132da25f061710 ++fa5465b2f2624c8b821e2f3a2ff9abe60273a6d3 refs/tags/jdk8u25-b15 ++^cee3a852b576eb83ce8bceefe2c0f9ae279d5d33 ++50dee0eb8c612f9d9bee228be0dbc6cc88733c61 refs/tags/jdk8u25-b16 ++^fd855f980a236b500b52c96690c21e5407e40e7c ++eaae14c28a04e3909a1e1b387382a1aefed442b0 refs/tags/jdk8u25-b17 ++^cedd4b293836c421dd2d38ee52997338f765e71f 
++2e8f65a15927c39571bb17ade62dc7555c948034 refs/tags/jdk8u25-b18 ++^ad06b5758247e99338e6def82afa8fb10cd1c32a ++a833c07b66e9b2dbaee96b038ac9169db6dfa2ed refs/tags/jdk8u25-b31 ++^e77f6407a5ecd6685393d4ccbcf4f4ac825e21f0 ++7af9269b0b7b84f8fdbbac5dfbbc48f4344f59ad refs/tags/jdk8u25-b32 ++^29b84d563a2b8f8270946421e6aebd53ebf74cbf ++8095795e34b0e8e45aa0248415c36776f4ec01f3 refs/tags/jdk8u25-b33 ++^aaab3e9ba599e27d8b2c724e7a594a85fc928e69 ++e6b6a7983fc743d53dfb2cd14fd853e203dd43bb refs/tags/jdk8u252-b00 ++^29fd2bd06790d66c612dc66ef9a0ca13e4098b17 ++f2b27a2fc71171b160503baf89312831e24806d5 refs/tags/jdk8u252-b01 ++^16866c747ebb6144a4426ada0a82e5c67d7922f2 ++1b6c18da2d169e2ab27ada9dcb3297b3dbf3b2cf refs/tags/jdk8u252-b02 ++^b76bed319aab1c6b26b93a3293e2716811322251 ++0d4b281e513c9302a9c936b13eaa96068a6bbd28 refs/tags/jdk8u252-b03 ++^6b411468e674a6b4b589933cbeb2f19b604e39e6 ++73b00acbd55ca47f49296b7d414d7220c46f00f2 refs/tags/jdk8u252-b04 ++^9d779667eb432f6f5a3de15bc5e0c7e8efd803df ++99fec99ed4e5331bf455ef7a391bdbbbfaa2578b refs/tags/jdk8u252-b05 ++^327d289ad50629cbf43b339d1e5a4181838c8159 ++47f04462a48f6b9c88503fb9d5c0d2b05752a1e9 refs/tags/jdk8u252-b06 ++^bbf30d32175eae20e6494eebe497b112a7939645 ++234e736bbc1d85955c92aa239dd98036af5300f9 refs/tags/jdk8u252-b07 ++^6188693e91c56c63048f88913e5e067eb92a32d0 ++8b8d9393e0bc127529b6a40ee30ad1a54edbb751 refs/tags/jdk8u252-b08 ++^5e388296c595705d9207950da959740d2d89da44 ++2afb6e7425618cd3d24f43ae3d8a212adfa244c1 refs/tags/jdk8u252-b09 ++^32cdf5e5e6207dfa88d012862b9f0e25b0ea5c0a ++d424a2e9fef87600c7572369ace7f02d5142f13b refs/tags/jdk8u252-ga ++^687d5b4b36053d2dd95c55ca0c148abf8745e90a ++9340e66aa7be49057a185463e8f63f69e6c027eb refs/tags/jdk8u262-b00 ++^024a5099ef9108da63fa95c755b126b8f72a12bf ++d99dd4e01d31b770f62197c3ef2f6af5ae8deeb2 refs/tags/jdk8u262-b01 ++^209d06cbc07484072751e4fd305c4606a312730c ++ce71627b4016771782bcf41162a854903ae87097 refs/tags/jdk8u262-b02 ++^10f3793d99f20ebf0a8e3388a58ad512461c46e7 
++ab783092d6978ef670b12604870b0ab2324d7ea9 refs/tags/jdk8u262-b03 ++^9a434845cbb7cdee1f9eb928d10913dfef9c901b ++99063c950a6867b1c8937662ef92399b35cde316 refs/tags/jdk8u262-b04 ++^487c05ea3be6e9f5085202384afcb6d504f829b1 ++61876d765630afdc8c5a60ca5348bf5ab3659a5d refs/tags/jdk8u262-b05 ++^776a242b8a996cf0ab1d2bc0f219b96c85a9abb4 ++e85728b52fa0dd965cb60bd389c3c3d77cd72b46 refs/tags/jdk8u262-b06 ++^bc0ad356cd220626fb1b2577eac5cde540d38349 ++63d961a3aa1ecb24bc86dab84811fc8104a47a92 refs/tags/jdk8u262-b07 ++^6227d59b472652fe837421a83fc72ac4dbe2ffa0 ++263907789dff3a9f0ec7744850e72e2db68e1274 refs/tags/jdk8u262-b08 ++^d6ac2c9110e51de7964550831a600fa7695f49a4 ++8e1ba846750a892ca01df458263bb312b358bbdf refs/tags/jdk8u262-b09 ++^56f859be6cb1e6c13964c06cabc87bb78d0b0fb8 ++ac0978203d200fe717283ebf067998b53a595598 refs/tags/jdk8u262-b10 ++^5f3ab2194e0b3a9687e2682bd35f850a23334b3c ++e4a9c732361e7c55c1391ca10cc828e0137d0e5e refs/tags/jdk8u262-ga ++^3b4919e4ea2b637dd9cdb3db9023dccc9fa5af7a ++d9aaf960ed95a08650b1c91e7dee550d79b0922f refs/tags/jdk8u265-b00 ++^eda0815e4b9a469ba3e0c583d8f7e686cb1a5ce2 ++ec65525c21b06e5c3c6ed23fad2ab456c040dd3d refs/tags/jdk8u265-b01 ++^09d1d10caf6b561d14fdb1e19168d4979177f333 ++1311085730bff0cd742f344288715274f7234b03 refs/tags/jdk8u265-ga ++^9df235ea12957326cc668033ff5ee03706ebecf5 ++fd469726fc6efba1ce484d92fd7fd2568a0e3e79 refs/tags/jdk8u272-b00 ++^019d90e615d9717af8aa237ee88b09c3b5f0ee34 ++24133b68c61784c7e18ccf44f4bc99b8dc659fd4 refs/tags/jdk8u272-b01 ++^945ac6150487ea0dc64438fd9714d3b278cf3f67 ++8a8101e7b3f7ed5ba0e65d02f391f1f87aa1900c refs/tags/jdk8u272-b02 ++^39a061e5a891b2f96e9b5a971558d337f482eda4 ++5dddb78f05ff13f8dafa7b740bc54d69b899e4ea refs/tags/jdk8u272-b03 ++^05d0affe3220fb5532f8e6104197de22fcee245c ++5f5a659e0c8a9aa76acb91a6d6e62194ebb3394d refs/tags/jdk8u272-b04 ++^e3353a812c008c9de195081f9ce5123c8f77b826 ++866844b74131f7561d3e5918e8b08c303ae20cc3 refs/tags/jdk8u272-b05 ++^3ce119215393879fc1521a04ff1fee9f29b8a45d 
++1c82c540e96cf1a258c6e162c4680ec827876bfb refs/tags/jdk8u272-b06 ++^dc608173d609a88bb6c3e3b847ea5e4ba4361e1f ++1cea2f7ffc2536cee8e9253822b21819e9f1125a refs/tags/jdk8u272-b07 ++^41c8d8ce644cb65c1eb1f4d22508fe34faba371d ++da8b7357b2eca89e49573288dc57e651a9bd1192 refs/tags/jdk8u272-b08 ++^b3171c0084eb5b95a9770db0069c96c036204bef ++29e7942af24cda7df29ad69a60339e932c27d165 refs/tags/jdk8u272-b09 ++^516c97b48d31c07f96cf573fe70b00e9abe6c48c ++ae79f3fcd290727b036e244b04258ff22d8207f7 refs/tags/jdk8u272-b10 ++^c3b5603e949d6272d777ef57952833672a97b4e3 ++bced520b333e78c7d2093a57523c69e42dfaaf4b refs/tags/jdk8u272-ga ++^a858b0625c1cf24b9ebd252def41b9ed05c21bf4 ++ef0d5df15540da7733e6aaefe365acc66d1f61bc refs/tags/jdk8u275-b00 ++^ea7517c7c2ef8930dadab2430be7c1b6a141fb34 ++2074aed98ffe5b73efb5f0404c1bc386bb5c2a29 refs/tags/jdk8u275-b01 ++^79def1cc31c68622349fbe2564c4c43838cd3a4b ++b8c22ae3ece7919c02d2c470a4b1940cc166af3f refs/tags/jdk8u275-ga ++^6f9564ff411ce27c4eeb356cf946b7e78c85d79e ++0f01544a648d6c780b791e84c0fb8736e1beed7a refs/tags/jdk8u282-b00 ++^bbe02db67f5b0386ad13c43daf29d2e1b25c001f ++22d27751cb6039d7884f51832cb4b29a2cb1d9ba refs/tags/jdk8u282-b01 ++^e5c2f20803bde0651dbb31fb986652f7a4b824ff ++78f84039d8081e9aa79747bade4f3d2e335057c9 refs/tags/jdk8u282-b02 ++^ebde40673572659a108e303137bd0481dfb16e56 ++73123c4f1bb2abfb2fbf0453b2c95d5022f7f014 refs/tags/jdk8u282-b03 ++^19a2b06d53976eaab432ed065edf74ab633f44c0 ++ec8ee0c2eb972487e21fcac7cdfe77375ffafb84 refs/tags/jdk8u282-b04 ++^75f53d1658fc1d97c55ce580db297858c7acae18 ++c47d09e8e25a7a567462de797e3ed6e9611eba05 refs/tags/jdk8u282-b05 ++^389b3600d421636e02795b2ee872cf007f8e14fe ++4a46570e852a7d231e5e2753a284e2d48e9f3988 refs/tags/jdk8u282-b06 ++^ddde0e6c76814abd2128c1fc3d81a1313a487961 ++fe554d2a3e0ae7ee4cd47e99a5fe4f00dbaaf7d2 refs/tags/jdk8u282-b07 ++^9a528e5f97783ef965194cd8741a6cef1d9d93f2 ++05bdf73e6b9d7e674bf297ddee478619eef83309 refs/tags/jdk8u282-b08 ++^92095097830a1d09f375e97a6ebc958accaf8642 
++e530d0bc27d13da545e650c47a499497a37c14e6 refs/tags/jdk8u282-ga ++^8695f2b8bf9b99aad161b481b6745da2c38da10f ++cb5457c13421e300d2d2f25047feaef1692d09f9 refs/tags/jdk8u292-b00 ++^1f72175e7af48b9b5ff2ceedd7f5afc788dab73d ++83ebcc4e5f0ff2255e47492f568bb1da37a4fae5 refs/tags/jdk8u292-b01 ++^e450aaca024746ea99259ea4cd8c8f21f23e64d9 ++f231535a2936a728e3beb064cddb41b82f3edc8b refs/tags/jdk8u292-b02 ++^7b9fbd8d9f63622192b6ad05fa33fa4232ee7c5c ++f2ff5cd2889bf656a9c80192fa7f8a2145e39171 refs/tags/jdk8u292-b03 ++^356a60032656581a78ac0a871bc5a7da40c98844 ++ab9e46b6115db97f4a4cba2619548f35f6fc2a63 refs/tags/jdk8u292-b04 ++^60d6360627506f6436db9fe208917cbd25746a7f ++b577c200bccf535790389c0777a2a4795d0f1943 refs/tags/jdk8u292-b05 ++^ce694ce66cabb27e503a768c95efaab4066faeab ++598bc78fd2d5713189b6555c472737e2134ef61c refs/tags/jdk8u292-b06 ++^7765162af463e36700be151b0e43543e22a43cbc ++6d3076173e612f466afa65aacc7661ff81a5ec71 refs/tags/jdk8u292-b07 ++^ad3bc5e9e45aaeed9ab10a7149170909d5d1edde ++b46cb606e1985b484371aee4f70c9a7adc6a421a refs/tags/jdk8u292-b08 ++^673fe79797ab1a6edbed439b49588b9b10635074 ++78aa17c62f3c9cc5ceae07c8ae0885fd4ef39c1f refs/tags/jdk8u292-b09 ++^fe82b748e5cd153ff5a4749bf448192af79eef8b ++92eba4021e5408434021b91a0ad440cf08f7e9cd refs/tags/jdk8u292-b10 ++^313f52bfc54cc300b6320effbf62b5bd7f892b8e ++187057655d872588542dd8b662f5359c601410c3 refs/tags/jdk8u292-ga ++^1bef15567a895b6d33b64c11cfa5c2507b9de6b5 ++f08ff10be74e7feb62b4963c1d8e9900d6a45737 refs/tags/jdk8u302-b00 ++^608907d6435d68bb94ff40bd3e2bed76d0800328 ++355f22b7534fbbe99f0cc3eca939e726f55b38d3 refs/tags/jdk8u302-b01 ++^4a03c02299ef287a4d1c412e20603b6d11a9e7f7 ++8e588dfa28ad2880decc6728e550d122e7ab4ed9 refs/tags/jdk8u302-b02 ++^d32e0f28b662528ac40bda3728890c53f0053508 ++1dc3b980640e067ade4e87cb12dbbd781066d401 refs/tags/jdk8u302-b03 ++^f1da8c81c5c447360a31e0227a7f9736222eccc3 ++8864658db282dd8cd4229db8d9cb46e0f0c51c3d refs/tags/jdk8u302-b04 ++^f7bf745b1fc67bacd6d0684007ba44e577e58a63 
++4511f0a73d697f77ae824c3fad87fa7683c478c2 refs/tags/jdk8u302-b05 ++^9cd6828113316c7021d1cbb40581ff44962d455d ++c31683a4c7994e197a695145c8e63512edf0f647 refs/tags/jdk8u302-b06 ++^774a5904ea1b4bd9b2d5441594cfe418f968de68 ++c96b75660be9eb38ac47f8ef8f586507a879338d refs/tags/jdk8u302-b07 ++^3694559a821a0fb0a459ff54181066e9b6610c49 ++adc82d978a025b3ceabd4f30504ea45eb0cfc206 refs/tags/jdk8u302-b08 ++^f93a0c31ef87d95b528b9fb646db8af791d8f80a ++241cee9e4eac8eb91e09cf39594b116fc3982bf4 refs/tags/jdk8u302-ga ++^19be6113dd0f658f950583b751284961d8ce0458 ++d32834ffb34bdb7453bbe3b97e0d4ec7e4b62240 refs/tags/jdk8u31-b00 ++^f1b39a166e862bf380d4af0980ac57d4d4a8ada4 ++f23a243ce70e21e5be5309deb3bc744275f4b68a refs/tags/jdk8u31-b01 ++^991e02ce3f4dd57956f22a377c074804ca9de47e ++74bad2ae60f199a9dcface2f1ca5c19708dacf08 refs/tags/jdk8u31-b02 ++^3f5892ebc939d54d01747a7a7038fdc8af2ea957 ++0065c0c75565c1b9b00a9fb06780fd74a82c7d96 refs/tags/jdk8u31-b03 ++^eedf9c61b58843c65d3ffb9fce6c3caa51dda2ae ++4cbfd8c596587cba2fa8584eb81fbaf60a601c6c refs/tags/jdk8u31-b04 ++^b5cf7f73abd42f4081674e5bee7c7ed5951ab6c8 ++52e21d5b8ff3f15465187503714eef8edc25f5f0 refs/tags/jdk8u31-b05 ++^221ab3b31b27f6fc830c6d8297cbf3f9eec634ad ++79c38852864034a2f21319ae6dda564e2b1464a9 refs/tags/jdk8u31-b06 ++^7c410231d8562266f13ff2ef24a3142b400550a0 ++fbd6e7b5805d91d094e966460d9ebeb3aa27ead5 refs/tags/jdk8u31-b07 ++^2c7b0d9cf408cd9242e7237f900a0c299dc34057 ++645e57fd83d43de7f072cbb001abf4481a0c93f2 refs/tags/jdk8u31-b08 ++^497440842403c05b9162d84f689ce684ada818b6 ++8bf66aaab3b298ce8f91b6ebe89b6c8f3e719c66 refs/tags/jdk8u31-b09 ++^06a7f2fca7401fd1899361d0ab6ada18a0caebde ++155330ff6a05fecdb8b47f35853307eb6aa517c7 refs/tags/jdk8u31-b10 ++^0e1117bcb60214ec7401155ba6555b28c1589175 ++da6d8e7d3dfa26b30e59684a9e429ba2ae19fdf6 refs/tags/jdk8u31-b11 ++^490491b560df04a52a33b1ed62c100285172fda4 ++8b5e7ff8571f9c5235f8e8f2d759567132f7ee4e refs/tags/jdk8u31-b12 ++^bb3639bae89ef22510eb9844b1d0ab72b4dfc775 
++d6cd226d226c1facb4285204f5af01976df346e6 refs/tags/jdk8u31-b13 ++^32470f18cb748d8505f5020e9028f9337fd600d1 ++991239e00165c695e04f565452cf942cd3325a4f refs/tags/jdk8u31-b14 ++^50527983aa4b99a96127919c6970974b7a8590a5 ++e0b29f9c768f2644d1e4f0333b5c72322092cfde refs/tags/jdk8u31-b31 ++^bc4e961357d26dd9d72c832c9a2bc46d1cc046ce ++452da28b28c79620ad03f660baac64f1b2b67054 refs/tags/jdk8u31-b32 ++^41ff38368dab724114c4b9d5dfe4bc94ecd3a554 ++54a66a97c4dd16c45759556f8286824864e3aeb9 refs/tags/jdk8u31-b33 ++^35597793b2285e4b5ab7b99a464698bb747d2ff8 ++d8dce98cde52c422e35103c058c20afc5fdb7deb refs/tags/jdk8u31-b34 ++^129242ad061aa948970f88aed6a7d5917da5ca85 ++4f11aaaea119758bcafda190bcaf70ecd79d9af3 refs/tags/jdk8u312-b00 ++^7694663d58434196368193c66225af10e60e1329 ++96aa55d9862e7a83cc8063cefd5a0c9937c5e6b4 refs/tags/jdk8u312-b01 ++^2196b16e108278f0c922a6d87dcbe7c3ac7b29d0 ++d344ff2a9b659d6181014dce4c3139fb374c48d6 refs/tags/jdk8u312-b02 ++^362a63624697840193e66d23ba445b6b9847c3c9 ++67ce1491009b81ad111e8c5458d19c7ddbf00020 refs/tags/jdk8u312-b03 ++^297ddee3d744723ab0bc887e40db1bb399caa2b2 ++c5274652af0e14c5ec67040f0337862058b6894d refs/tags/jdk8u312-b04 ++^54e8cca4486df0b6f7cb3c905b335fa1cf36f800 ++f162d9be7332580fd9a312b358854eafadad6230 refs/tags/jdk8u312-b05 ++^edfd908c6b05f144fda734795b631de324bdc4db ++54b507680aa4d5c054345a205629fc2e9d671aae refs/tags/jdk8u312-b06 ++^34fc1cdb42ddee96fcbd5541b6116d71131254e1 ++7087d23b198e8fbc29cf44f6620350bbbbe68098 refs/tags/jdk8u312-b07 ++^7e615507aa5da147a4092ca3a79c6bb26ff8cd79 ++22e48da85be73961dcaa16609cf9049c3086d519 refs/tags/jdk8u312-ga ++^c45a404d62e4f541a3b3ea54c53c2a2d42ba05aa ++203b14dbd4b9a21defd7ac176af9f2f19c5cdb5b refs/tags/jdk8u322-b00 ++^41667c086f755ebbb5dde6ae2609e6d550b5d71f ++a5d57b1db7db38cef071224312234b38aef77ff0 refs/tags/jdk8u322-b01 ++^403c3f10d694a1cac04b74f8c4e759d69de41c78 ++eaa2094b39e601aab56c740038e984f957f712ad refs/tags/jdk8u322-b02 ++^6c763326714c9055239123daa344ce63b9e4696d 
++64fc9b7e23a231b4eda3cceb16129d16722162bd refs/tags/jdk8u322-b03 ++^f0c44d2376074dbd7a46e18458fd80e7a8564e3b ++7a7a663cef628fcea505a94b756cdde0ad10b9c5 refs/tags/jdk8u322-b04 ++^e1f6c13a808e38a5e2a90724b055b6a8799d921b ++ad6d54c32725e5acaa9722bdb2ddc8d9353f0e79 refs/tags/jdk8u322-b05 ++^455c28338a39277d3d20663db8530de98fa5e74e ++9f4d5c6d85852352aedb2ea7f3c34ddea3234da4 refs/tags/jdk8u322-b06 ++^236b800cf26b8e138c925334371681e6b0a099ab ++5af3ee01de380375cd9d80c32375b066ebca31bb refs/tags/jdk8u322-ga ++^236b800cf26b8e138c925334371681e6b0a099ab ++9ee8dad7e525b7f7ae615f552c64225476c7973a refs/tags/jdk8u332-b00 ++^7d3c0bede34930cadd76644e58bf56f2a83c3d01 ++c0ffc626eb102a09cdf1646c287e3f2366f7c946 refs/tags/jdk8u332-b01 ++^ab90ea6dfeacebe04a5e94177c7e70da8f0f2fc1 ++8afe753368caf4931ebc3f05efde3c7cca40a83b refs/tags/jdk8u332-b02 ++^c84adc4e7624f263cd06e2df19286bbc4ed82d41 ++3db287b6c6c829a8f0b136c249d7b953809d512a refs/tags/jdk8u332-b03 ++^12528bb4d331ed2ec9630db0ee3f2bfeea44b632 ++1a038e3052aa1f8c88929370f06bfc4bb9d272aa refs/tags/jdk8u332-b04 ++^7f3e86c82c6c25cb2926f178029481d1ec62f0c4 ++f4777e85ba866b7206a9e4bc1076d791ca3960ff refs/tags/jdk8u332-b05 ++^3a7c38225b61ae3c21fde674921d32cdf6e19b92 ++db705c9c74d0fdccfd32fe6c4b1e8366d2c0b58d refs/tags/jdk8u332-b06 ++^9a303aef21f8db21cf6acc9dc91b6ca33819eb01 ++121e65f52ef9fe08d48fcb58248619c4654afff6 refs/tags/jdk8u332-b07 ++^6a6f2c2fe2ee99d1ec7918fe5fad2f5c0d61d599 ++e09949f4fa54a748db67f797df64715f2b3f0a6c refs/tags/jdk8u332-b08 ++^d0b8929739120d9f8850a1dffbb5d891acdcd70e ++edf3e8a495f205c55e5d67086815b751ba64a5b1 refs/tags/jdk8u332-b09 ++^3d2fe9bbb4c5f704d08982a3b1c4b424a9dd1d37 ++723994bc9fb7670307a9c77a771e0cde12ea2050 refs/tags/jdk8u332-ga ++^3d2fe9bbb4c5f704d08982a3b1c4b424a9dd1d37 ++b48371296ecfa01968a7925dd72a47b4ec58bc8c refs/tags/jdk8u342-b00 ++^3a7c38225b61ae3c21fde674921d32cdf6e19b92 ++bb27a83c8960676069b75181a6f6c7e1908df482 refs/tags/jdk8u342-b01 ++^1bc3be259a1367d0b671ee0e8a85e314d7d05637 
++834b1798d07733c2a7f444e0d3746857a612793b refs/tags/jdk8u342-b02 ++^51f69d9125e72adaf05fbd04a5ac17a9d2f6c6a0 ++2be48aa4a087f7a99fbef587a2ed7606ad0a3947 refs/tags/jdk8u342-b03 ++^3e0eb096153e9b7f4c2ed367c9282d09307bbd6b ++f066e6d1df15ff1146fcb371b91bfb856f015ea9 refs/tags/jdk8u342-b04 ++^e3b9a06ab885289e943167935912b42ab53244e3 ++a407c6f1903a75c44cc871d7de3f6fb427152e5a refs/tags/jdk8u342-b05 ++^95962f141c996834e9f12cd8780a2f6e0c56d782 ++f477ba29202fc11d5ec9a86e77e4e451878148fe refs/tags/jdk8u342-b06 ++^a18e9043fa2a0a14098e1ec25d32577aaac6c023 ++74bd617685b8f49748fdd5d78e895ca6fd8807ce refs/tags/jdk8u342-b07 ++^3dca446d440e55cbb7dc3555392f4520ec9ff3bc ++e3e968c08a3752ee24f2f98a1abb0a95a081a8ed refs/tags/jdk8u342-ga ++^3dca446d440e55cbb7dc3555392f4520ec9ff3bc ++3d5311aba8f986d5222ac7cd9f2075fb13265565 refs/tags/jdk8u345-b00 ++^3dca446d440e55cbb7dc3555392f4520ec9ff3bc ++57ef655e97d39b222cd61cdf97414f20b1b74f5a refs/tags/jdk8u345-b01 ++^2dadc2bf312d5f947e0735d5ec13c285824db31d ++a3c010842eb804f644a69f6e58e300509307c698 refs/tags/jdk8u345-ga ++^2dadc2bf312d5f947e0735d5ec13c285824db31d ++529ec051c564f80022ba3fa5af089aff2c3d02ff refs/tags/jdk8u352-b00 ++^e3b9a06ab885289e943167935912b42ab53244e3 ++bc8790ca1b3e7200de2375b4c9de0cb3db6e31c9 refs/tags/jdk8u352-b01 ++^1460bfaf8a3be7c4772bfbb75b8a3a31560a2c2c ++d051aefcbb92565891bcac659b70659bc7666bb1 refs/tags/jdk8u352-b02 ++^b52eb70faf9f4e2646400c9565af67e6916c5ac9 ++eb0837d9eec56f3f7fb943fc758e08aad71a5581 refs/tags/jdk8u352-b03 ++^0869fc0153bde09c41ad37c3c46f715e23b41966 ++1e29fa093fde702198049a4b62f57690b8f24690 refs/tags/jdk8u352-b04 ++^4cc462abdd39f24e7f09b13f64af74bc2f1318d2 ++76ce52a4e747721f5a0eef783e53e9a9215a62ba refs/tags/jdk8u352-b05 ++^0d5ea9d29e97f1e4adcc1e1d36bc109fc5cee506 ++5e31f6b32705bc5a38a65a171a5f2f3328857c19 refs/tags/jdk8u352-b06 ++^e2cbfd5acaae9a98921dc29363bce3a037cacc43 ++39942862614584c6faad0670f2e97a824b806d76 refs/tags/jdk8u352-b07 ++^46da2c31efef73b7c2914c8d252554556145b00b 
++48ffb522411ba7549b61dab70e43708bfaad2451 refs/tags/jdk8u352-b08 ++^dfa5f14848df6b4b41f839004d6baf37ca2f4f8d ++1c47ea85da32108cf66393d4c0d928889bbf0d70 refs/tags/jdk8u352-ga ++^dfa5f14848df6b4b41f839004d6baf37ca2f4f8d ++ff4fb8dd9fc3b19903f1c6b52cf761ee861eae43 refs/tags/jdk8u362-b00 ++^0d5ea9d29e97f1e4adcc1e1d36bc109fc5cee506 ++abbf0fc8ebd2f23a8bf00b5221b43dcb7bd6df46 refs/tags/jdk8u362-b01 ++^f04ad96cf53385c9f8aa071a4167ad7790cb8466 ++6f8c916bff7c9dd31671014bb103735369ebc3bc refs/tags/jdk8u362-b02 ++^ae6d30dba462ee53eb89999b7b24a72693737c03 ++6f7e49c1beea734b02c662f457b74794e1edccfd refs/tags/jdk8u362-b03 ++^2f509c7070b96e2de02b27d5f752e775980cb75c ++503f8d9004d9fa212a0e91ad52154623d0022e6d refs/tags/jdk8u362-b04 ++^91d8b89ab976a8c25484da23ded6cdbb86f7b1e5 ++e44119c10002a39b353663680764642802210d0c refs/tags/jdk8u362-b05 ++^41159a51b418405e634aa08deed07d47611af37f ++57be4bf8fa2f056c3bd94171fab5b432fbd487e8 refs/tags/jdk8u362-b06 ++^53620b38d3d46344b5d5e9deb017a234828ead28 ++d254e8ed45bcccaaf25aa9bb4779c742c9784b37 refs/tags/jdk8u362-b07 ++^ebc5e190ac7a3b0c143451105d9f9b7d9e780a4b ++54b841df894f4a5c03ca808b00767f29f2263ed4 refs/tags/jdk8u362-b08 ++^056d5a79994cbaa67f901f3d89f5c140d4aa6f11 ++1bfccc323a10fce6d96055ac81d1b1760b5abdb2 refs/tags/jdk8u362-b09 ++^36321be7840c8e044340b8c162a75dce3ae7698f ++46c7794c1b850134fdb31bc8502bcca79f80a036 refs/tags/jdk8u362-ga ++^36321be7840c8e044340b8c162a75dce3ae7698f ++f83ca770935da944cf6a5bd83b2191267e77c190 refs/tags/jdk8u372-b00 ++^41159a51b418405e634aa08deed07d47611af37f ++fd9cad5173e153dfed5f058d14c11831e5884fe8 refs/tags/jdk8u372-b01 ++^f14261896825ca5aa14099576a10183f4476508d ++05e63a8a978a8bab2fc4aa47151936d9bfcbd2de refs/tags/jdk8u372-b02 ++^784f71f218bdb0e885a03fae35105d2fc96d56ec ++0938cfd525d26e0e873d88966287b1abc46a4917 refs/tags/jdk8u372-b03 ++^11a96a7907a0e6692cbf056be6e7ad4dd83e3460 ++ada9c63e8a6fe34b0a367687e42ddb69e85285cd refs/tags/jdk8u372-b04 ++^b51619d24e0643aa0afdba87ac20b371dbb594e8 
++e8a99b48612b0fb76f707c37021fc79ca8aa8b0a refs/tags/jdk8u372-b05 ++^5806429f7ac582c400896a7ac61e147e93ddeac8 ++2ffb98b64302b7e3d92e3b4451ebdbbb5edec10f refs/tags/jdk8u372-b06 ++^89aeae16e85ddfbd581cb86d0b0480b1e2d50e99 ++80970992f23e9d13dbdae144a39282f216d235cc refs/tags/jdk8u372-b07 ++^338acde33061b8af9d30aa510d86ec8156009021 ++97f50fa376bda709459fed2c5863981860b1e775 refs/tags/jdk8u372-ga ++^338acde33061b8af9d30aa510d86ec8156009021 ++a3f18d300cb7467febd0d2b5d7cb18da486a3150 refs/tags/jdk8u382-b00 ++^b51619d24e0643aa0afdba87ac20b371dbb594e8 ++f385b11ea58e386733ae37f2318577e28ba45550 refs/tags/jdk8u382-b01 ++^3147b1bafe12326a97269655de46f066931f3ee4 ++3b73b29726f1ff952d7b45ac93c4a575314b96ff refs/tags/jdk8u382-b02 ++^3e69b49f71e1c00a4761a9e63440cb7af1216389 ++44b0cf654a6a2f75d8d7778180fe0c510bdc1007 refs/tags/jdk8u382-b03 ++^8efa32e54e23a0fb99cd7df697bec7e6c6baf8bf ++405cc7252f04885f9de83b04e8e825daa8a5e57b refs/tags/jdk8u382-b04 ++^0bec498fc945a49f1134e02eb477e0af15db3b5b ++6f36d1cbc563beb9e11013bfe913e07342a6ae99 refs/tags/jdk8u382-b05 ++^50e903b61fadc20fc297aff2cc2295c35be0edde ++9b7a962cdb8fbc4c14b05495a18ae6d59620a287 refs/tags/jdk8u382-ga ++^50e903b61fadc20fc297aff2cc2295c35be0edde ++b4f80e25c7bd0b3cbd5b6f485f3d223a9e93a34d refs/tags/jdk8u392-b00 ++^8efa32e54e23a0fb99cd7df697bec7e6c6baf8bf ++335b31ee65e157ced2c307df236e0013bf239270 refs/tags/jdk8u392-b01 ++^587090ddc17c073d56f4d3f52b61f6477d6322b0 ++1a96717ce8998d3c91aa158a5697ad4150349bb8 refs/tags/jdk8u392-b02 ++^5f62e559fb5040c5d0021de4c3ee49e6fca6b087 ++7b5bca1950d0d1e33e4c2e6d8fc9510523fe36f0 refs/tags/jdk8u392-b03 ++^3232400385ca3797247bfba20a54616cf893c50f ++840b21a8e484a012f8345560afd62c1cb98068ea refs/tags/jdk8u392-b04 ++^f92926955791c810469e412927f58f2ca634fe4b ++c375aa4e60c7cee31362dca70234a066fc2c6513 refs/tags/jdk8u392-b05 ++^0ee1400d7be107fec86a40900f21be0b6edfcdfc ++6229a74cb5f2017a07d058202a501fdb083357eb refs/tags/jdk8u392-b06 ++^05254efcaeac3b6149b9b11c46c861da8bfe2249 
++7046b3b8397f10c98025089030f59ec053210b43 refs/tags/jdk8u392-b07 ++^b24b03f9dbeb2c1e8b5cb0279659eee489bd9df4 ++35917c56c01b40982227ff1a6c7c7079783b77ec refs/tags/jdk8u392-b08 ++^9499e54ebbab17b0f5e48be27c0c7f90806a3c40 ++141fec38c60a11c8149da7722f2c48f7a88e246c refs/tags/jdk8u392-ga ++^9499e54ebbab17b0f5e48be27c0c7f90806a3c40 ++fcffc4ab73c1c111a3f24cadab3dfb6d5179233c refs/tags/jdk8u40-b00 ++^1a776ada7aea2e964055921db35f79895b04cbfc ++cd9fad32f01e920efd1e4b858efabe884179eda2 refs/tags/jdk8u40-b01 ++^d8543a1302cc4a9f5b009114267f4a707e88a05d ++b45df53c472f1fee6ff33b418b285bc4fd699f45 refs/tags/jdk8u40-b02 ++^337706a4ef2e46d54270c9dc138ed12e2632547a ++53c992301f48a6f385716c4e6b5e920e1fa41b0d refs/tags/jdk8u40-b03 ++^c7cdfec9c0bbb17c45f8047bdea3c984aae3eae9 ++184802fdf9c5858d2e26067caaefebca0c0d2254 refs/tags/jdk8u40-b04 ++^626b1a1e052faf216acb7e9b9c18115e1f726132 ++5e174daf3e4a70869db1b5459963e6223c0b3068 refs/tags/jdk8u40-b05 ++^ea65353c9ec40a030f930fac951323b614a03cd5 ++92128a53128c06c71581b688f8993cafeafcf143 refs/tags/jdk8u40-b06 ++^45bf7cc049ca081ffafe69c4ef74388d694ce7dc ++4ca333a686649b2150633f8cfc4c3ca88ce0184a refs/tags/jdk8u40-b07 ++^e6fa8e7bfddf9c4f6422035fc1450d6ee1130e67 ++f3a20a4707fbb76de74ac62492c3bb0dcd758508 refs/tags/jdk8u40-b08 ++^6394d991b4bf81990167886cb61d920caf794b23 ++7796083af8faccdd2f58d1b51c90ac2c9d5e1766 refs/tags/jdk8u40-b09 ++^6934aaa8f6d5f1e1a07d5ca08bd0674b50dc1a0e ++9f87962af78b07846b5954171042d7d69e243d19 refs/tags/jdk8u40-b10 ++^45dcb904c0159a415420481a84bde3660c51138e ++831e1773013ded65156225e73388c6c4b9923136 refs/tags/jdk8u40-b11 ++^95efa115b68e0a10210f0b30c9662a7cbd9d103c ++cec7e6a537a44c88859057d44636fb8c64e03c41 refs/tags/jdk8u40-b12 ++^cd1bb3ff21c4bdffe80ed5f6147da75564b01a39 ++669e1aed827101c9f3e34e2f35efbec35e0277f6 refs/tags/jdk8u40-b13 ++^a5748f0cb3033167eb0409e39f17c361ae0d7458 ++bd492d3030d4e254478b97b24935241b7238c77b refs/tags/jdk8u40-b14 ++^8afbdea459c5028a3ce6dcca8d7c41a76aa5c833 
++1d7d9a2a2b5cd05994359f3a6e758fd3bf7eff7a refs/tags/jdk8u40-b15 ++^3961520a2d34fe6b39ae9ded4945fc97060ac4ca ++13c3d3a3d403398b817577e6eea0042a6d63e81d refs/tags/jdk8u40-b16 ++^edea682f850f9f05e1cba029b5b5078bda32bbd8 ++02e5836a92d20fcc150b54e725e83828a1eb729e refs/tags/jdk8u40-b17 ++^05a45cca40e451cc88c93f1ad70dc27359ef877f ++b0bd043b7e980fbd6fccb22c1ecfa534daa54d54 refs/tags/jdk8u40-b18 ++^111ad6cdfa0b887e5de656f67aaacb1fc2d55427 ++9830d50c296436109a1ebaadc501e4edb79c308d refs/tags/jdk8u40-b19 ++^8f9088dac3ab96da87209012bf331b8117897293 ++f6cc146687618781f7e6a2f0759ce5b3c913e44c refs/tags/jdk8u40-b20 ++^f80fd77fdbeda15ff24e0d58deebe1e1b9844063 ++3ba12246ba0e8e250116d01701028a9298347bde refs/tags/jdk8u40-b21 ++^de62b551ddd6400b9dbd5e59b1f60f030b2857e9 ++34b451c3bb3ca4059616a425b744cd3231f27d40 refs/tags/jdk8u40-b22 ++^0d25ddb6cdb41934fc62b3edb97cba97fc9f15c0 ++2741684b850837d1a01aa53a040e913a48d2620f refs/tags/jdk8u40-b23 ++^836d3494aa93e95585da470e67e13b1301ea73b3 ++9905d2820716612f0001897149b6b0821983df1e refs/tags/jdk8u40-b24 ++^1de8c944d8983f7eb70a51f53c33cc0ec91afb53 ++1f6c120a54aa6b6273cbf3900881335e1a7896f5 refs/tags/jdk8u40-b25 ++^aff06ae7e34ab241752ee5af7776b8ef04e468e7 ++9649e26f9b14144e8f1a8da85cefc899eaf50d01 refs/tags/jdk8u40-b26 ++^e8433fb7b206b408e08f82832085b94de1457e57 ++2933fe98d238dd31b8f996031ba0bc81967c4765 refs/tags/jdk8u40-b27 ++^8f74cf49b6238ea639b773116805f3221f8fb423 ++fdf8a5a5df36998756550ed07f2453cccb3c4b90 refs/tags/jdk8u40-b31 ++^a5f761ff94fe4597a536b12694f890b4af2d0204 ++018424995399b3f1c5210917b17167cd60dbac10 refs/tags/jdk8u40-b32 ++^96935e77e3fe2cbdfcaf229fd725d30b3cc09228 ++f73e494f7c4f508481e0d7c95317422539207bb3 refs/tags/jdk8u40-b33 ++^800adbb5785ad5d4e9e51a5d7e325bfa994e0bfc ++04c5675f80cb9ea60e5fb01c88ad71ad0db7ed82 refs/tags/jdk8u402-b00 ++^f92926955791c810469e412927f58f2ca634fe4b ++2530bdf68df044f6c2b1c415007bdd4c2d5540b7 refs/tags/jdk8u402-b01 ++^8de481944094886b89b9bf1d83f725f9b680a3e1 
++9ab73462a2f3b9fbb1487bf08de1595b47ea399d refs/tags/jdk8u402-b02 ++^eace2d732133accd3be9e95a9e75aee0fc1938f8 ++5864702a5f99a3788872da983246b7b6d2acf4c7 refs/tags/jdk8u402-b03 ++^10a653e5c3c07a1c823b12d295c86dc91201661c ++0868253812ad74997c0bef0257114aab0d68f2e1 refs/tags/jdk8u402-b04 ++^b372b4b502cb07eb3477c8ba1fbc8393b1bd56ff ++9fa89364f344faec1a9cb85bca27471eb7045252 refs/tags/jdk8u402-b05 ++^9c9d6b267c41e4c713cacc41befb66007cdb2601 ++25f2e4df00fb790624d51f5b4ce460309bcf6b26 refs/tags/jdk8u402-b06 ++^d4b472ff937883b62f26a39c9ddf72f07f9dfff8 ++dfdcd16e779580ff66edccafd82deff2940dca8e refs/tags/jdk8u402-ga ++^d4b472ff937883b62f26a39c9ddf72f07f9dfff8 ++24010928f171fc985cc9b7179ed4f1cec2b5d5da refs/tags/jdk8u412-b00 ++^b372b4b502cb07eb3477c8ba1fbc8393b1bd56ff ++80faaad8f1bb20fbc2f9a5cd35182ddd6c14787a refs/tags/jdk8u412-b01 ++^552c6866d8ad71ef247b5303f78fa7a65506aea9 ++92b43ce8a81f569f5b642d9d7580bceaf2d1844c refs/tags/jdk8u412-b02 ++^4a69ad286bd13b2d0723df56d08c808cd968151c ++769ebaf5359f827fb900640f8e250ff38c4a8956 refs/tags/jdk8u412-b03 ++^cfed45d15a36cd92aec873f38c46cd085ee0b9b1 ++5027a3f9929e0e24fbc6f156b63ebda3892450dd refs/tags/jdk8u412-b04 ++^dce530003ca50a94e2305dfb10653304863f799a ++55c44fffbfc0b1b0e01fa282bcf46e15cc91e090 refs/tags/jdk8u412-b05 ++^824dbfbc98c12811dc30a7391ae51c0f71158d3e ++164b76cdcc0a9cc0ab693efa81d4e465698e490e refs/tags/jdk8u412-b06 ++^78c0afa3281b59d2f9cb8675a66b839bd4e7747b ++ba7832464ead091419f74337bc87579af47772d8 refs/tags/jdk8u412-b07 ++^04ccdbf84df493283a2755c996b3381500a79aa8 ++8e422b783acd7050166f7050679db3f7779e92ed refs/tags/jdk8u412-b08 ++^43cb87550865a93c559c9e8eaa59fcb071301bd3 ++7a6f5a89de453a00586098d9d720bb8b5f48b973 refs/tags/jdk8u412-ga ++^43cb87550865a93c559c9e8eaa59fcb071301bd3 ++568a417882d85894e97ded8a9410a451b6adc0a2 refs/tags/jdk8u422-b00 ++^824dbfbc98c12811dc30a7391ae51c0f71158d3e ++cbb5b2f46300506dd16b51ca80558322ee80b1de refs/tags/jdk8u422-b01 ++^6b53212ef78ad50f9eede829c5ff87cadcdb434b 
++7d3cbaec6ca389326ebc31abe47f9f3e9c886118 refs/tags/jdk8u422-b02 ++^216b8cbf51f8f5f020a57dabb763c27b36899c12 ++3d5d3442361ca09394f6830572f8f1e3dfa16e27 refs/tags/jdk8u422-b03 ++^012f59c7f63fc44c1949d905203ea3f449087cd4 ++618917eb093243de2c5d7e83d4688bfe9ad04985 refs/tags/jdk8u432-b06 ++ca99d29166bf8e835aa597dd8884df57c2d43ed9 refs/tags/jdk8u45-b00 ++^78770b4e9602f9418b920af97d57b35f72e3eba9 ++106ec30824ae7ec9722900467b22917bc4cabf37 refs/tags/jdk8u45-b01 ++^4c9e289c1b32a34a00e883197e71cf772bb044a2 ++caef2eb7394148afd1cf35f9ccec16a50a44e4d2 refs/tags/jdk8u45-b02 ++^8ee219d3c214a77f7832139a5603d69aa7e5d05d ++5747120d826ab042c853747269afb0858f5ca5ca refs/tags/jdk8u45-b03 ++^71984b3c6ec31ebb639ba56129137ce71b5ff31d ++c5d41d2bd2b553abac2277aa3d2f81e37fffbf7a refs/tags/jdk8u45-b04 ++^8b3e1618a6b4335be86dc13f1dfca2f424875274 ++e03bbfa4f2393e2ed9d75333e731b14bd7167178 refs/tags/jdk8u45-b05 ++^289d7e5f71ad104f57caed47fe377fd44c1b02ab ++14a3a6904dfa4a8469ade2e9464a8c96dd2be453 refs/tags/jdk8u45-b06 ++^c3475a329e802cf65717d6c4f3af8ccd03e0575d ++66c7d34909a40ade6eba17def9c9c1976f4de4ef refs/tags/jdk8u45-b07 ++^215cacd5fa44cf5d8547340d1e0912b7e3c041ac ++50cc7ae41b6c52d9a7b19492a629b81f77043147 refs/tags/jdk8u45-b08 ++^c95a1518a5099254203d25c0e99abdaa041fed35 ++5cd65127f3b5901bdd1e148c6f9aea118edb75bd refs/tags/jdk8u45-b09 ++^0b3cb5ba1ebe545bf7f3d5450d436f685e9b12f5 ++e98204dc223911329b84d2bd695c2901bab61308 refs/tags/jdk8u45-b10 ++^81cd9e3c80c780fa20adb340a65f1aa946cb7d7c ++73fbf906b5354a7b2ecfda7f015f2124fcffbf2c refs/tags/jdk8u45-b11 ++^37133bd62868740fe759af6124d9c72205c78e6a ++59a820fd2c4ea61a66bc30c419f559af6db205cd refs/tags/jdk8u45-b12 ++^6790efa76bc1b93181258742ef37446106cbd8be ++0b8584b5eb46f7ae5f26668bd6ecd4ae6d18c70d refs/tags/jdk8u45-b13 ++^254026172f545d2af59af0951ccaff2286327a3d ++17b1d1ad021dd36e44e4ab3580797749ac75bba4 refs/tags/jdk8u45-b14 ++^3e364977add5b5029c3475200f8d53d4fc85082b ++c0ba6ebaa6fde338a9b8b7e88576b71f1e6cb5a8 refs/tags/jdk8u45-b15 
++^f125935dfc9b9571be370fdc60c2316fe243c9ea ++bff19dad014c6c75bbb5b9ceae8309d458f33f4b refs/tags/jdk8u45-b32 ++^9f0330e8b8b7ca74464cf5cfdd9734bd6c8b8c59 ++68cd3267cf19ec6887a96ac0b0ae9811961a507f refs/tags/jdk8u45-b33 ++^bbb5b79b1d876cf68380a1426227eeb0edd172dd ++c13712ff3275ae558ec6087e0a1102dda3fa1752 refs/tags/jdk8u45-b34 ++^788198547b2e3575dda301ee51f6ccfe094eaf21 ++271088c8100a9050c60fa72784ead71d9acdd28b refs/tags/jdk8u45-b35 ++^9dd32f1ca331f31cd021aebdfea72183beddcd05 ++2366ee1ff457ab0ac9efdcd68be9f81f7663fdf6 refs/tags/jdk8u45-b36 ++^445541a3d3f52fb67430cb5c9c6a74a776444964 ++258407fb1afbe9334dc1e408a26c2479ababf7b1 refs/tags/jdk8u45-b37 ++^3bfe4cb2a8b7e98c063a0ccf0d698300d4e5084e ++4f85c6b6e0e0fb5a47e73b3ada1f56a70bd7ec67 refs/tags/jdk8u5-b01 ++^9fb34e4c9bf7dbf85f9610d598c7fa9d65346c44 ++5b95783f4cff2f60fb2e1e0d9424d3827f1f9a74 refs/tags/jdk8u5-b02 ++^357d43b2d310c7232bb7b067eac7493ab7b74c72 ++0b8a31cf913f6d27df006776b0fd3aa9cb82736c refs/tags/jdk8u5-b03 ++^b56e9480ad6c42f0e3888206d41061fcd5c4669a ++51f3c2f91dd1f1f235314dbfb46b83a374bf27f9 refs/tags/jdk8u5-b04 ++^25a6e0ac9559586c67da03ab8c4d9444e928114a ++aae9bb07a72fd5d4f80b279b0deb9bab67ab9250 refs/tags/jdk8u5-b05 ++^3854df53cbc0d66853bb1aaa77fc98b2c0de3a26 ++7c59d39973c0cc109d5ae95e2d66675a98de038e refs/tags/jdk8u5-b06 ++^3bd3359f03ab50095349236be3131f6ad3ed5e1a ++f41711b6e3d095a659a43f9916c14ea5c5c379c1 refs/tags/jdk8u5-b07 ++^4747b10669697ca74464e71216c355bc6cdc8327 ++bfb36162d4c1e069f3ed86567810ba746bbb1685 refs/tags/jdk8u5-b08 ++^145e43a8a161175b89e29b569a9a239046f8772c ++b62ac0649ccfa13568ec5cff7937ecf0d258b8ae refs/tags/jdk8u5-b09 ++^827ba0e58255afbb9670274d7c705974f0faff5e ++b20fba51ffa3cab12d988f40a3fab9ec1452bbae refs/tags/jdk8u5-b10 ++^ea1d2ce47953a3a1d49f692c1b6f48af42605c3b ++e6add67fc31363f509d5b02e0ab8ea7b776eba5d refs/tags/jdk8u5-b11 ++^73d952d6db8f27235ed4106766d7a75a9c33dcb8 ++c569c8ef905ab4614894a4b2e004a5da8d65fc1c refs/tags/jdk8u5-b12 ++^163009c72b87cb788810358992adea8fe4270816 
++d9b3265b06866eea41e6d7c5e9ef4961678f0e36 refs/tags/jdk8u5-b13 ++^03dd55032d4b32f72dbe4b4d795971dd2a4528c5 ++581429d69ac9bed4188725102ad434e37e9534a9 refs/tags/jdk8u5-b31 ++^cf5e385e256a7667cda16f01fd6fbec79b7618e8 ++2cf2614e3b3a748a1fc481ceb0dec16165088089 refs/tags/jdk8u51-b00 ++^f06fa6a98398803390a80fe5753cac23f54403bb ++40516a8225743e4cba79373b8bd9ddfbd43f18b1 refs/tags/jdk8u51-b01 ++^100a3701a1e2f58fe964d5fca4544de4191c51e1 ++6947aa1d61a07890935d6e98a926444a26820163 refs/tags/jdk8u51-b02 ++^99c521cc3c7ee775dd025e07d0637eb3ba8663d1 ++67ae26ba5b0a86affc8190a9888cb7e2fd6db9af refs/tags/jdk8u51-b03 ++^8867a2fe421fdbaff02c8e086ebda5f6d9a1af04 ++36e4c16be92e9fe4f8d39f287c7eb244bd92e0e8 refs/tags/jdk8u51-b04 ++^ac7399527fb078ca77ca856d926a3dc207249d42 ++420cc4b6281fe0a2d28c8f08928a45844a360eb1 refs/tags/jdk8u51-b05 ++^0369a1bc6821e67767d424dbd6a7abc8cd2a29cc ++1c536ddafd2385b988e735d81fe3e6347338c5cc refs/tags/jdk8u51-b06 ++^c6454ca85f51cb6192d8d4e7dd56234acc7a3a1e ++350d8736eb60e0441103bf8a1c0b0238ab1e248d refs/tags/jdk8u51-b07 ++^e08739df230c8963dc77157e34ed6cbaa5932ce8 ++e74b866e15772a88147d160a48d226e523845363 refs/tags/jdk8u51-b08 ++^c579bfdb0bea0b66ada274b382ff386cba87b08a ++52e08aad57aaac1e9aefca218c7c5b630b686b31 refs/tags/jdk8u51-b09 ++^d98a452570b03c4f5facc274f87a05b840d150f4 ++6fadce492786a504ff0ae57db9906f678a8a28bf refs/tags/jdk8u51-b10 ++^5bb14dc3e541c4f9033f924d761d02d059ecc0c0 ++ba082fe752d256576739e4da9a4c4dceb5162c8f refs/tags/jdk8u51-b11 ++^06ce731f5c3986072cf2564eb4b610bb976ed957 ++67baaad05227731ed352969d59e49bc9fbcf8700 refs/tags/jdk8u51-b12 ++^5fd86563a2a4c295e2afa694a60ca9f51c1d901c ++3ef52d7904d8a305fe41145ae1d2e1edf84a348f refs/tags/jdk8u51-b13 ++^60595bdc5036bc6061ea00eecbfc4bb74485647c ++5924f8ce912b20f94fd0f38b8b0baf9f12c57bcc refs/tags/jdk8u51-b14 ++^333790c4e04c4d89ef8c0cd695d2060278c88c5e ++80d5b88a14252e9087879a4c287ce9650f6a8033 refs/tags/jdk8u51-b15 ++^2d90263f253a0a142a4c0cf01fc4d8f35f08ee6c 
++e053a39cbfcb0094d6e5713aab4c30844bcbef7e refs/tags/jdk8u51-b16 ++^f81984aa9fbf955dc493ca502493030714e1f072 ++073755d599d1b62d92cde4d2d6c92601804fd5ed refs/tags/jdk8u51-b31 ++^6bab900af0e09f793a06aa16fa9339ec7b6bf411 ++c14ac595dad3540bd67fef0c57d656b72cfd1f21 refs/tags/jdk8u51-b32 ++^d3adb4efc9dfc261189e725ff1985e6fa8b2f86b ++2f2964906f1064bd65825b807e1abfa7b82acf49 refs/tags/jdk8u51-b33 ++^9547e823981999b5d9353f5fe157749e416e119a ++01a43949a861d5c47a687e0bac528e883897aa5d refs/tags/jdk8u51-b34 ++^b721ea83d781558bcff238ee625a9e7762844ac6 ++c3af440ccf265b84e2d4240ac5a3fcb43889698d refs/tags/jdk8u60-b00 ++^403e278cfc1ebd1751639c9b0e2393eed1b3ad0a ++9e66997a6138a97acc55e172f4d799088bcd3967 refs/tags/jdk8u60-b01 ++^989a047c86c613e844941d1b7c53db8b2a8bf875 ++a51c6204abe826f095aca2769077ed4e7a36d3e8 refs/tags/jdk8u60-b02 ++^43e2674e43872e5e6724d3a1ead5e5d23ff27c92 ++db6f508296d597327ac2c3f9c0dbe25043654e55 refs/tags/jdk8u60-b03 ++^9b7b44ebe4c6571ba40076270037aa93587ec8e6 ++dec4d9ee33c134e7dfc66c64dd14111419a08117 refs/tags/jdk8u60-b04 ++^86579e3e1844737ac6c1c7cf5e06a097bcd74c34 ++08c1c20a4f0793433c426bd3650445237f5799c8 refs/tags/jdk8u60-b05 ++^3b66c2bdc0520011e2b6608b12d9b3278ad44978 ++9483d20e129f69490f2de8bf4a4b5473eb3768d2 refs/tags/jdk8u60-b06 ++^59a06d84de8354a4702028c0b3ed7b202d7613b7 ++449d47497b0964a3927ad6fce3e78a3205bdb08d refs/tags/jdk8u60-b07 ++^c121ac3a7bd46e830146eb745d6f11974ece5b5a ++94f336122a37b87b3ede43243c15285f9e99cda7 refs/tags/jdk8u60-b08 ++^06a2be70125c143d4ce6f0feca4a61c03ee68f52 ++3c4e16ad0593f651585795821a8be2ebcd5ce8a8 refs/tags/jdk8u60-b09 ++^5aa9b8227df3ce5190b8159f127f48e14691b974 ++b84cdc2f202bc15bc1202b870f4401b0812d83e8 refs/tags/jdk8u60-b10 ++^874241a3906f78de0e18a1dabdb7a9da25b56a1b ++cbcbab6f38774e434402000327416ba09c72d7a0 refs/tags/jdk8u60-b11 ++^2b398678a8d78ec84907b6645799d04d250201fc ++65146e0414876c1c699130c938a15c064e82624e refs/tags/jdk8u60-b12 ++^35b9ec9d2f9634cd24eeb091110febc80d6474d0 
++48c617b8404571a8ecbf513d6672ea670da9501c refs/tags/jdk8u60-b13 ++^75ce99ced3a7d9b8f26037d9867d0778486b4d88 ++e3b54690f87be3dd257155fa97af3f85ff420aff refs/tags/jdk8u60-b14 ++^1fb89f363e04d38bc41776bdcc277f3ddbc9c965 ++c8e14fd97746bebff970df8fd21a7f5de8b940ab refs/tags/jdk8u60-b15 ++^c48a449183b936ff577e9eead2e2da0fb93b18ad ++cf2d9aeccadc9d76213a31688aa5cd8ee4cc0a4d refs/tags/jdk8u60-b16 ++^f170598cfa969bca2da2fd39ec3e9cadf510ffa8 ++c6a480a91bc9d60d8a1c93b65ed2623be90caaf0 refs/tags/jdk8u60-b17 ++^c0bdb306787cb34112d6f0c1a78b8ddefcf27133 ++e24926a8ea7a77266bfdcb147186b62ec91e8b63 refs/tags/jdk8u60-b18 ++^4cb18c7c8c857bddbab1968e64a5d7612d0835e7 ++ceef061b0108fdcb2462e8f5589158c83e0df8a5 refs/tags/jdk8u60-b19 ++^79d16607b2803d981ab0df34b9902da4483d12fb ++ac02e553e961d29861eff50ac68d755e9b183176 refs/tags/jdk8u60-b20 ++^95ce8402e8f014c355c96c5035701fa68b25f14d ++507b3e893143c2fe657c3cf60d57022b3f53cccf refs/tags/jdk8u60-b21 ++^3c71bc0d217be60588cdcf189e31b02ee4a67fc9 ++c0bdb955bb1b0a68c94a2be085f4248903c7bf2a refs/tags/jdk8u60-b22 ++^1094ce087961d3c07bf3f21f03c0f803a4f9be5e ++68a15201d7e43a0d7c7ad444e30539fc7cfbe8ef refs/tags/jdk8u60-b23 ++^41b6ff63881b2d84748e81ad50fb1bb86f076737 ++55fa48addf885cf2f95c0b13ea0ae24b3b3bf40e refs/tags/jdk8u60-b24 ++^07ff93b627cdcf9e722d9b76f66d4c912f3500e1 ++da0a45b8ef2061153fe4461be5c9335cd46bb350 refs/tags/jdk8u60-b25 ++^49c0ce835f19cbbdaff3d661cd60d6573e8c8735 ++d4f8f67acf53a30b9deed3c2660efbeb8661e4ae refs/tags/jdk8u60-b26 ++^083bc5eee6e33cf04aeba64729bbc21b1e480a95 ++03a05c9f63f772e1538aedb606d3c88ac41fbe49 refs/tags/jdk8u60-b27 ++^9cfd6ab7f6eba5dde80a7276fcda9ce8b2953f41 ++57ad880dcd98691ea330d97fbe884ececf32b76e refs/tags/jdk8u60-b31 ++^466771e5acdd8cf365ed8f9f5a185634bf1817e9 ++b3aa3f9e15b4db0ad7c3412a2a5ffa4ab27e9fb2 refs/tags/jdk8u60-b32 ++^10dcd9fd4b14296bce9c442d948eeb5a2dcdc327 ++7c4df20599f47988431c1a33e5d7088ce95d2067 refs/tags/jdk8u65-b00 ++^9f114266d046c7d32fd52dfe454cb62eeff6c661 
++6b52161ea09c981b5b37531956c88290a4ed6b31 refs/tags/jdk8u65-b01 ++^f07ae05b5a15c701663f5382d90333c650a8f128 ++8d21c32f4b53cda5d75a5ac81963535dbca16def refs/tags/jdk8u65-b02 ++^e3fd91dbc6fba1b35b6cb99129ffb3f574c4820d ++baf733172adc13f626b198b0f24ce62a6eca651b refs/tags/jdk8u65-b03 ++^d3af1daf0adfd7c396f9db081cb6b706947d040d ++09feb2750a0b1c70a6ec3d7f27b3befbf30133cc refs/tags/jdk8u65-b04 ++^27943f94d9aad0ea62d8c0293bcecc8ac8d687d4 ++6a85bda01ebac6fe03dcb7d7d12b1890874398c9 refs/tags/jdk8u65-b05 ++^822d6bb9f6ba70c2b9fc8778cee9629ac8373fe2 ++ec4e87b1221fd0d7e82407b911ff411b7a778897 refs/tags/jdk8u65-b06 ++^7904e52ef76f34bbbcfeb96d9d59d2da65ce26e7 ++104f553f80664ee28c2120eec1304aea2334a16f refs/tags/jdk8u65-b07 ++^19a39e4dc922f46035f418590cd1a2669a1ccbdc ++688cc9575c3cf5350edfdc1b3e2f72fc1b18fe50 refs/tags/jdk8u65-b08 ++^60d30363f4e2785c85db66fad76d784fe04654dd ++2af091c893033af62a87a576b47c416d1f1a093d refs/tags/jdk8u65-b09 ++^4f2527d2a9f7c8071f88af839c38de966b286e12 ++65b8d34cdd9b9d0a975f873e9711f4a9ae6a5812 refs/tags/jdk8u65-b10 ++^00c2b595f674656e80b8cf6208db4e880b02f436 ++122bab5675a1655aff70468197a2406b0b4e939b refs/tags/jdk8u65-b11 ++^9670be874b0c77957429d4a87cd8ea02485d3806 ++be0ede9a1d4589bb03b723c06cf598b2d318b78a refs/tags/jdk8u65-b12 ++^7c032603d8631a949b7fc8dd9f55d1ac77bf6a5b ++d9f44f05c0ba1fc4d5ddae81fd1381b5d1d74c1a refs/tags/jdk8u65-b13 ++^1c343d275f3b51d7e6e1d1fbee573a11b4618ba1 ++173b559807899f4f98e5c3dd7ffff0644b270def refs/tags/jdk8u65-b14 ++^42a3826a45dfacafab9b592a475583c8766d69a8 ++993f3194cb17b93bea8b2c79a2f80fe94382f5e3 refs/tags/jdk8u65-b15 ++^8e66f123f5f9eed2def3c65e315f17f952eebda0 ++e4ce4759b39946129e17be12793bda61cbee5f02 refs/tags/jdk8u65-b16 ++^a8955e6993efe3a2aa8f2c7dd9ded05ff151f3c0 ++5e92a119d7158f59124436e37fb7638c5b89f779 refs/tags/jdk8u65-b17 ++^1679350eb4f6cc9ecdd96819c7c79732e1594d69 ++4c77449473e97b7a85ddb73a274efefc015e76a8 refs/tags/jdk8u66-b00 ++^7110af3451951abd35a40238608c4e060a1e49f8 
++521eb760d91c987e8e48b3d7f577bd33cfad3670 refs/tags/jdk8u66-b01 ++^e7b9d871dc4f15601610b8bfe398fdaa4069f3e9 ++e840e29ad69effe00bceebef498426910e77f59a refs/tags/jdk8u66-b02 ++^b7410104429e5c09ba9f5a24db83f98c2521c2f7 ++cb1e8d8d06f6e21e2a2d6c18c7e52c8e7a0d59c6 refs/tags/jdk8u66-b07 ++^de790879b27fcff8c74313164712ba0f253e8397 ++a155a5a084132149e38f7028cc1dc4f0cb938153 refs/tags/jdk8u66-b08 ++^ddc49f24c9dc7cbe1da02238f0d6fcb247345fc1 ++4d4e1ac26526de341789f4661f9772092ffc0ef3 refs/tags/jdk8u66-b09 ++^3c1985a164ea8128213a524dd0e72d4dd03f96e4 ++bf8c94f237c7459fd7e098a695b4be6d365e10b4 refs/tags/jdk8u66-b10 ++^af2c4cf93bf2e0f50739e6eef26828d5e48ed9a9 ++e2d6e536a896e910abd907c23331de6fdea5b6f6 refs/tags/jdk8u66-b11 ++^135dc68eda4fa79ff264a5eb20e26c184239cc0e ++ccfc9b6e2fc59d58f23bb6f85e89e7cf9d6169dc refs/tags/jdk8u66-b12 ++^2d697fc7b0b1373ded6ef6b30f19a7649c104604 ++bca5fc341e2905b0361a7d65d79ce05292548643 refs/tags/jdk8u66-b13 ++^13349c12fd4d13e7dbece59cd2ea3a91410115fb ++a812b03bbe251c0c360e041a9a2c174e1d484491 refs/tags/jdk8u66-b14 ++^a16b0b86dd96df9c977f339a2b1168784ecd3716 ++f8a5812f72d95a7bdd0b6f7993b5091bd4d84753 refs/tags/jdk8u66-b15 ++^25cc1e18fcbe070f63958a494dd52d2cce7ab952 ++fdf2af2fcf1e0c2137f51e81a547981a7de4d00c refs/tags/jdk8u66-b16 ++^281d2ba98e1a5e382bbf8d7ac92b00505fb0d6e7 ++49f1ab5a18d172ea564b2abb4302884536db8613 refs/tags/jdk8u66-b17 ++^a093cc64651349883009b595bb96aa38e06088dc ++9a892b3a26875c6041d683aab75de0d55a793900 refs/tags/jdk8u66-b18 ++^14db0adebbb4dc367228e0a5d48fd64ebc68dbe8 ++8f692b399e2f1403348397fb13aa29b94b02b798 refs/tags/jdk8u66-b31 ++^e42b6903f681f6a4f8c8c607df29da73d1821cb9 ++c556cb187dc5a0c70a0768d66ec231f46b6c3d07 refs/tags/jdk8u66-b32 ++^15cba1f4f83a69c6c98c8f75a1ef99afc76b9682 ++d832c36a22442a432230f2b20e1562269c41dbc1 refs/tags/jdk8u66-b33 ++^e5094090f79dbc98f3dd8589acb899623ab60967 ++1b1c44d6032798934a2e7487db0d6451a72c2799 refs/tags/jdk8u66-b34 ++^57f652a2050015bec271ec22fae9af361a16e5f2 
++8393915ddce05e8e78038dc47246be0bc68bcf9d refs/tags/jdk8u66-b35 ++^eb9c1eef60d9af857169fca6ed63472f1b6d9eae ++914e11f15d18b772fc8e1e1e7a1d46887bee8240 refs/tags/jdk8u66-b36 ++^5b72ae0630abac21600e7be7a4dc9aa4cbfa563b ++89675a8586b5cc92fd49ac782d5d66478ff70f34 refs/tags/jdk8u71-b00 ++^3f754a70ad31c73b59232e6777b73ff6225bb40b ++1936a36fcf76776cc5421ad5733a7f32a30c90e1 refs/tags/jdk8u71-b01 ++^cf56ca3599d077f30516c4100cfd0e74697ba910 ++2e9124d7cb3a113b3a970434e49f3330fcc14e36 refs/tags/jdk8u71-b02 ++^755ab60f5ae558b59a3fa83d6b2e1a3e2ab6b80f ++304afebda7cbf20e940bdbf5ee3fb2a7c9f424c3 refs/tags/jdk8u71-b03 ++^b2a04fc995f3faed254518214ce2f5a9d15f7f31 ++370c1cce35a3dabe78a54fb15466ce2d1b9fb6ba refs/tags/jdk8u71-b04 ++^a000a031bcd9b4b196e55ca991cf486781e435c9 ++dc276f0a8ff17cf9e4c1437ba039e2f3986a2078 refs/tags/jdk8u71-b05 ++^c10240b5358188a947ad0f86568420fbce3d5330 ++4cd8d42ba2cde19eba96094e1bb6614d6bfab184 refs/tags/jdk8u71-b06 ++^74f9b4fb39192640f35d43eb5122fcf551ad2afa ++07b20ccec7aec1d559be9f290df114bcf6786b2b refs/tags/jdk8u71-b07 ++^1030bfe3c8daae215ba394f4039977e1928fb679 ++f9ef9f7dbd763dcfca847096faeac7ad0467c279 refs/tags/jdk8u71-b08 ++^45cdde58aeafb2c4ad960faefd6d735d8cf23aa1 ++69b859ebd31db5e409f9f1c2a6c6cf182cac7295 refs/tags/jdk8u71-b09 ++^ce756607695b5a4438539fca489c43054ba416c0 ++54edbca0222be51e351917c1fb3a26c0e48eba5b refs/tags/jdk8u71-b10 ++^4448f7f5e5488ee79c9624818208e5f31c16d7bf ++1964fd5f47c7ce1dc7556b6d1ca101aa5c96dc06 refs/tags/jdk8u71-b11 ++^4b832926542c0dbcd1a72686dbfe9d02a2522e0d ++c9ae7f602300b100b7e67cab1a583b52d43a2afc refs/tags/jdk8u71-b12 ++^d438343cdeeb58366ec95d9c6081cae4f83d25c2 ++54f0b896bfd893fe45184ee79b2896e6de4e4c48 refs/tags/jdk8u71-b13 ++^e9604a98aa1ba8521d994e8df566cb604c135309 ++a33dc6601c1df70ecf99770247d5b92ae21edd76 refs/tags/jdk8u71-b14 ++^63ef6f5afc18de268c780580eddbf1c7a5f5d03a ++1c11d43a5922738b4527d6fafd55e55fa3be341b refs/tags/jdk8u71-b15 ++^b96fbb248e957bd3c221c2812d7f6aec78dde059 
++c05debccdae49895c3c752ef7cc5171e99142b73 refs/tags/jdk8u72-b00 ++^c5354e36ce5f4ceca96d95da9ba5dc3ccc1416a7 ++220f0c6557ca4f64b9b5b5e88e8235f9db2b5699 refs/tags/jdk8u72-b01 ++^d09d4c158cb8b64d49dfe2076210cb61a6e8c5e3 ++768bee3e6758508c9e85ca87dc5d9fa537ed262c refs/tags/jdk8u72-b02 ++^2cd477d2c6986f72f2bda14910538b94bcc95155 ++00bc440e39be22ab959361ad92be4a4de41b7197 refs/tags/jdk8u72-b03 ++^52c7cfbc479bccf8a1b52f27162f53f66a2c21d3 ++36d73d3df545363cb5f87703ca61aa45e2b9ef1b refs/tags/jdk8u72-b04 ++^00012750f40ce83748c968989fcd8caaa59ff618 ++da791371af3c96c286e1a88a1c65652382425468 refs/tags/jdk8u72-b05 ++^7659faf576173d30ca6073f073f195ad0056779a ++ac790f038dcabc37c00bb55bfd8fe1c5b557ca49 refs/tags/jdk8u72-b06 ++^545ed377377e696f55fb9a25384fe99474f5a674 ++7301f19dc2bd4cbb16ad6818f36a3948d3281a02 refs/tags/jdk8u72-b07 ++^206c1ffd39cfed9bde25054e8095a84ac5f0e31d ++4e753a92d547229985790eef9e8d17a305ca3522 refs/tags/jdk8u72-b08 ++^68a78c821bca836eeec719ccf59ba7a421e4be0b ++d5daa4548c24b09db7d0efe6ab71e1863571686e refs/tags/jdk8u72-b09 ++^e9a82d19b0ee7ffeeb5926d2d818aeb60abff70d ++0e5af6b3d0e1f22439a87fe46d8d2a5916fa2985 refs/tags/jdk8u72-b10 ++^60250e13fa221e59cf86244dbbd7a42e1f1fe1fa ++41dc2eeaf336c6525b440c132069ef7cff4a710c refs/tags/jdk8u72-b11 ++^8ae183e321dcebe300c9798497eaea600c650a54 ++c52af21eae5459934679a28402ae01e46ab8b5a4 refs/tags/jdk8u72-b12 ++^74c28b2d165d1d097569398ceea3662c7b0c5ed4 ++252fefb5f8122aec28b8570062080e1d8a888abc refs/tags/jdk8u72-b13 ++^41caa4366d0324e5f69197c27aef201b8517b96b ++628f7a76a0481919cb6ff3316bd836a71fd9b030 refs/tags/jdk8u72-b14 ++^ca8aaa9b78cf8ade5399dc6210327175fe881c24 ++a1b1569be193f45ee1c9d30f696071861be86e66 refs/tags/jdk8u72-b15 ++^60ee0e8ebeb6cb3698e7f6686137c96706e8e5ef ++406ce6d093107264677d824100f8c68b51d6ab57 refs/tags/jdk8u72-b31 ++^9d3e155950fa47e0a23cd778c7384d7a16baa661 ++6f6f7515e8fe2bc48ddbc4fd3608789d55ee4603 refs/tags/jdk8u73-b00 ++^47b8ad7672d6373b631b563a5b9752ab82278f62 
++9b2fd232d097d0d0fbfc7cd55378670789e1d448 refs/tags/jdk8u73-b01 ++^0727d0a8359adef89e48b54c7286376215bc3d8c ++e510c2e79ae3a46f0a36660ac3bd92bed3abace3 refs/tags/jdk8u73-b02 ++^6a933adf823e7b5795f0fb53f09f7ddd4d4f2b24 ++6cf05b90fc4ce9b893d462d0da7c51162eb6eadc refs/tags/jdk8u74-b00 ++^e25af50c694a1493993b154c7d04d0691cf924fe ++67402abeb462fda550b1cd9963bfc548cb90ac79 refs/tags/jdk8u74-b01 ++^a12605bb3e7c3de58e6b84e48c92ca4adee82cc2 ++52a5a146fb042b96b8e34ed2cc4689c65eeee44a refs/tags/jdk8u74-b02 ++^a22126e16694d27e4376d92181525f6c52314a17 ++0abe8fdab15db10d18f9e06f4c92fa3468c0d8a6 refs/tags/jdk8u74-b31 ++^af6414d7c87ba57e0248bba7c7a60cc21c0232b6 ++39d5438077b925e3c7d04b147249117fdb8054e9 refs/tags/jdk8u74-b32 ++^b26e2cd93e3125817b96d6045ef50c1dda8f8c0e ++0804fedb36abee911493247899f428a447807379 refs/tags/jdk8u75-b00 ++^bcbe084ef4ef6d4416909242d9bb9fee02628097 ++cff18c41e9d914e89e967bc2ec2e4c8b18c0d5a7 refs/tags/jdk8u75-b01 ++^0a5681187c04a575127994e90244158db7a18961 ++28e79b194060ad86645c5c233b4950d7ab7d29d5 refs/tags/jdk8u75-b02 ++^e4793a68fbd1077883e0a4b98f285d24e642d939 ++74fd44b78af601a52b2c8391a055a84afb9f73ab refs/tags/jdk8u75-b03 ++^42c7a64f8944ab3145ff872eba2a85d9c9254feb ++a0eb7c5f0004396a9aecd4d623a91b497ab1f889 refs/tags/jdk8u75-b04 ++^58b8ae8dc157db6a41611f078a7edabf87f73aa1 ++2153f65be40aad5d74d073c1356bca9996a42c9b refs/tags/jdk8u75-b05 ++^51691e7d585edf20b4fcabf334bce509860ea05a ++d397819ef79ab9a11feb9712baa2cdd122cfbe33 refs/tags/jdk8u75-b06 ++^5258cd99690e33e1233a79e104190b88be6ffbb3 ++d1cf03a4bdcfaa898224faa1362305f79121a6d2 refs/tags/jdk8u75-b07 ++^7494a3d0d382ad673969b709f00a3c3e70b8e6f2 ++d45fb16c7a59e8038020c6550adfb779b791b4bd refs/tags/jdk8u75-b08 ++^c546eaa0f4b6cfeb3f15c254fc53b46d3547e83d ++fe6fe0501c81939154010583f4d6aaba350cd1f8 refs/tags/jdk8u75-b09 ++^f4fd6b99ae8ec9bd0db9c46966ff79bf7397c6e9 ++64a1c9ceac0fb6f0d51cfff6115fcdf952a00a67 refs/tags/jdk8u75-b10 ++^84b4374f82234d2e9c030e594687ee501efc30bf 
++76511a19081e21eb18779d65c7202cfdd0ebab39 refs/tags/jdk8u75-b12 ++^5c7b344e4fe09fad656bd979bb3b573236a88346 ++984b7364d955f148f356ac259f2f8c39a54dceaf refs/tags/jdk8u76-b00 ++^6e52b9f5b6bf92386d6a441a6c6c7dc3f2c288c3 ++2e5af43eca016ce136cdfa51e7e2d4a6bb5747d9 refs/tags/jdk8u76-b01 ++^a6c9ab478a0e2e3fcbf8ec32daf63330031236b7 ++58b738fafd9fd059af4e1e8d560b2e2be278ec28 refs/tags/jdk8u76-b02 ++^b2f3f1cef3850831ef50eccddc4f92b423f6d723 ++210b8b4f0f98228284c6d2b9fabe44909b530acb refs/tags/jdk8u76-b03 ++^648285772d4738b27f2c789ba8700bc7776147b1 ++ed3208022363d64e20e0dfd32802666fb269796d refs/tags/jdk8u76-b04 ++^9fce821a7ca848715500056dfabda045ee653426 ++f5876784f018b18a9afb0453568c9e9334c8b991 refs/tags/jdk8u76-b05 ++^d28313b6e500f67b6b2c356198b3180f0f8a9bd9 ++264ffa71090f98c03ec572225d09763945d6070e refs/tags/jdk8u76-b06 ++^57ed0f22ae219f4af1e62a7b4a5f978d0776358a ++580c545344638c4822353ca99818dd36fc2f186c refs/tags/jdk8u76-b07 ++^9964d8082bb2999fced46d89bd57a43ccff4c995 ++29cd1dd0b0bfb647f4cb6ba7cbaa7b082ecc3963 refs/tags/jdk8u76-b08 ++^8d424a8aea4f5988e04724ff87a6bf170c791d08 ++30017ca46316dfb7ea8e9f88a5d36634778cfbb0 refs/tags/jdk8u76-b09 ++^69fe5447eff77dbe545428903195c4d94c77dda3 ++da33e8d5c2c01ad56f1e94417253dbd505c8ef4e refs/tags/jdk8u76-b10 ++^75a4dc8f11c5470fe7d2c3017454d33ac4f893c2 ++8197ee2a6c675a21a4ae73222ec015804eb0922f refs/tags/jdk8u76-b11 ++^57654d1cfcf2c111b3fa19b9ca54d9c9ab90658b ++49745d2aae21ab5510dbb80a0f87589562512112 refs/tags/jdk8u76-b12 ++^ce3705ed2bc244f9c6ee4c96294603ceeed6682e ++3dd3b1de4b49ad90b15019e77cd52c5c0c8e576d refs/tags/jdk8u77-b00 ++^d6d6bd78472f84b8eb2674890e2ee749a6281219 ++00e42b862471fba57d2d68bdb224bd8ce8497b44 refs/tags/jdk8u77-b01 ++^394a666ec7015b911222e9233daf40c43250d193 ++2fcdbef436ac34f45fdb71c0db8e2a9b077b5cbb refs/tags/jdk8u77-b02 ++^f61034484ae053647441a4006457a9125a567498 ++66050a0c9d76317a79ccee903d6e0472bf1d3cf7 refs/tags/jdk8u77-b03 ++^dfc1522644d6885023e4bf1d468704bb93417185 
++c6157a46e97b3472f4558ce391993f85066eebe0 refs/tags/jdk8u77-b31 ++^7c30c09b403533db6346bef3d9bf027a77ae4299 ++c3e067614914d40350fdce0d39909258a70b3911 refs/tags/jdk8u81-b00 ++^ee10e4d87f2e3980075e1239c578de1c080ee410 ++fce8dd0563104f2d0343e5bf791b13a6bbc1f8ce refs/tags/jdk8u82-b00 ++^99944566298cfbf7337c8a0d77ff3b5dd7badefd ++3debd4c96c2caab1dc926b6be66286f711986c9b refs/tags/jdk8u91-b00 ++^60786e4496e58eb6111a4ed03c32276e8fcb9898 ++3f3b1cd571861ad110fa5a5faf64fecd79b86562 refs/tags/jdk8u91-b13 ++^482e7510b1742c6d36ae654476547fb7c0251930 ++6fc2922d80eb4be880317a26ac4a588999f55839 refs/tags/jdk8u91-b14 ++^662410eebc2c976881c16ca328c6b8e1b67dc699 ++a7ec4af7dbbcb214cf5ed0a261c5864f89ae4195 refs/tags/jdk8u91-b15 ++^e23930755cd3b9f59daa0d69528aa48403ca2c8e ++76ad50fa94a391c9bf66e6d0d08a2f01df5656df refs/tags/jdk8u92-b00 ++^1c5646a8aa0f2dd7e685b14e623bc6ebaa37fd6c ++8540eb43071227ef9ce383194a6acef38c67e6e6 refs/tags/jdk8u92-b13 ++^9d1e72997879eb4c9e8a3dd92e622a1179054a5c ++11f8266e01de7178afe8948fee82a236f36b8cc6 refs/tags/jdk8u92-b14 ++^816e5ee44037dd5d0b5230febd4e0ae8682f3965 ++02ade1217e2ee2365404f7bf3efaac57913f3ad5 refs/tags/jdk8u92-b31 ++^e143fc14939e87e4fa0117b128c24a40ede42b93 ++48e5c1721b9830dddfae8273b151998bca772512 refs/tags/jdk8u92-b32 ++^a39712b1a96d52f48cdfbb7821e641f726b8e42c ++0fe7a2e2001ba12d30f0d197985d37d7d35894cb refs/tags/jdk8u92-b33 ++^c1245feabe5850d486f79752aff5af165d692b8d ++045961db9d4fb670c6b9e631e9bedec8668c8b79 refs/tags/jdk8u92-b34 ++^e7f89363a70beb53a7ccf20fc4dcdfd9a50d0471 ++1a84458c3f150e4930a760ea937e78bc98ef5a57 refs/tags/sw1.0.0 ++65acc3f52b8c0314f67ee9e9a79a41d9abd5b1d9 refs/tags/swjdk8u212-ga ++f3134fc702df66f4727f7ac663a442f5b52895f4 refs/tags/swjdk8u212-ga.2209.SP3 ++9989de841143f41e08ac439c1e3f0cac98825e3a refs/tags/swjdk8u212-sw1.0.4-SP.4 ++b43f96245fe8c7a57691013d4ff86ad0ca888b31 refs/tags/swjdk8u212-sw1.0.4-beta.2 ++cc8df02f462f79feccb964651fb6ca6da424b281 refs/tags/swjdk8u212-sw1.0.4-beta.3 
++fec972001cb53b1ac1aa6877d53457dfbcfae629 refs/tags/swjdk8u212-sw1.0.4-beta.4 ++7c4bff518758158ffa7e0e88a5d05d20ea139b05 refs/tags/swjdk8u212-sw1.1.0 ++986f1d1cb5679412bf2260671208f7ceb433865a refs/tags/swjdk8u212-sw1.1.0-beta.1 ++6a086e46d33563fdeee7a5e9e32388dd6b03f98d refs/tags/swjdk8u242-ga ++1f5db83815a6fa41eb4b1794cce04e440df2c1fc refs/tags/swjdk8u312-ga ++68b43348991289f1160bd0d668f6d8297837cfe6 refs/tags/swjdk8u312-ga-latest ++0770cba61d3e49a4003f07e571c48b488c34b091 refs/tags/swjdk8u312-sw1.0.0 ++ec3bbaa56aecab03485831b2dcae8748d1f63113 refs/tags/swjdk8u312-sw1.0.0-beta.1 ++af9a07248e473320ed3b4f48ca06f7ddd595822c refs/tags/swjdk8u312-sw1.0.0-beta.2 ++a2fd33b48361b9662f0b224fe872a863a008d3a6 refs/tags/swjdk8u312-sw1.1.0 ++00b3e89b4000936038dee1293c069f0755522d33 refs/tags/swjdk8u312-sw1.1.0-beta.1 ++02413e72b30f763a09a8b2fa378f0c59fcb56922 refs/tags/swjdk8u312-sw1.2.0 ++0467b27347cfb2de898960884bfff8ef90e652d8 refs/tags/swjdk8u312-sw1.2.0-beta.0 ++555b53cff0575eb4c50945291cf91b1046ed234b refs/tags/swjdk8u312-sw1.3.0 ++8110cd603838338a069cd66af7e060236d7943af refs/tags/swjdk8u312-sw1.3.0-beta0 ++31a30e7615ff9f467117808212407e978c81b608 refs/tags/swjdk8u312-sw1.3.1 ++b8e6bcc5938d141e6d51ab1993f3818e9af7d05f refs/tags/swjdk8u312-sw1.3.1-beta.0 ++7abfd71f4527a314765d91c88665f3a723fdbba1 refs/tags/swjdk8u322-ga ++6f93b177998739240c71c2528da6088b57415439 refs/tags/swjdk8u332-ga ++e31e221aea1459eab4eb1e83326c6b72ae72b24c refs/tags/swjdk8u342-ga ++2e6bf0be080a5635d1b679db753dedf9c55dd0e3 refs/tags/swjdk8u345-ga ++760078c1da6b92d05a7604a1bfc30f5ad234234a refs/tags/swjdk8u352-ga ++b0fd6ecd5acc1e1082886e2281d7f2a781758979 refs/tags/swjdk8u362-ga ++f949cc56deda7bffc1fd0635baf5cfbcf976bb2d refs/tags/swjdk8u372-ga ++f131d1d45e749fa72b84e1485c10924339008b76 refs/tags/swjdk8u382-ga ++66805169452af7bdbb4d2c25df1ef5b04b5442a4 refs/tags/swjdk8u392-ga ++a8a40b54cba3247d25067dc40f6757091a99c148 refs/tags/swjdk8u402-ga ++4894336dcdeef9204a103a3aee73d6546a64c945 
refs/tags/swjdk8u412-ga ++7885087b505fd541ffc17457bc372b6006a9e34d refs/tags/swjdk8u422-b05 ++1f2f704db42e1752dd7ba8f39da7addfc6290e61 refs/tags/swjdk8u432-b06 ++1f2f704db42e1752dd7ba8f39da7addfc6290e61 refs/tags/swjdk8u432-ga ++f78ad529328e12867550b8ec7e535a181dbb4c28 refs/tags/swjdk8u442-b06 ++f78ad529328e12867550b8ec7e535a181dbb4c28 refs/tags/swjdk8u442-ga +diff -uNr openjdk/.git/refs/heads/sw-master afu8u/.git/refs/heads/sw-master +--- openjdk/.git/refs/heads/sw-master 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/refs/heads/sw-master 2025-05-06 10:53:44.631633657 +0800 +@@ -0,0 +1 @@ ++68b43348991289f1160bd0d668f6d8297837cfe6 +diff -uNr openjdk/.git/refs/heads/tag-swjdk8u372-ga afu8u/.git/refs/heads/tag-swjdk8u372-ga +--- openjdk/.git/refs/heads/tag-swjdk8u372-ga 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/refs/heads/tag-swjdk8u372-ga 2025-05-06 11:13:08.575672966 +0800 +@@ -0,0 +1 @@ ++f949cc56deda7bffc1fd0635baf5cfbcf976bb2d +diff -uNr openjdk/.git/refs/remotes/origin/HEAD afu8u/.git/refs/remotes/origin/HEAD +--- openjdk/.git/refs/remotes/origin/HEAD 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.git/refs/remotes/origin/HEAD 2025-05-06 10:53:44.631633657 +0800 +@@ -0,0 +1 @@ ++ref: refs/remotes/origin/sw-master +diff -uNr openjdk/.gitattributes afu8u/.gitattributes +--- openjdk/.gitattributes 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.gitattributes 2025-05-06 11:13:08.067672948 +0800 +@@ -0,0 +1 @@ ++* -text +diff -uNr openjdk/.github/workflows/freetype.vcxproj afu8u/.github/workflows/freetype.vcxproj +--- openjdk/.github/workflows/freetype.vcxproj 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.github/workflows/freetype.vcxproj 2025-05-06 11:13:08.067672948 +0800 +@@ -0,0 +1,1733 @@ ++ ++ ++ ++ ++ Debug ++ Win32 ++ ++ ++ Debug ++ x64 ++ ++ ++ Debug Multithreaded ++ Win32 ++ ++ ++ Debug Multithreaded ++ x64 ++ ++ ++ Debug Singlethreaded ++ Win32 ++ ++ ++ Debug Singlethreaded ++ x64 ++ ++ ++ Release ++ Win32 ++ ++ ++ Release ++ x64 ++ ++ 
++ Release Multithreaded ++ Win32 ++ ++ ++ Release Multithreaded ++ x64 ++ ++ ++ Release Singlethreaded ++ Win32 ++ ++ ++ Release Singlethreaded ++ x64 ++ ++ ++ ++ ++ ++ 4.0 ++ ++ ++ ++ v100 ++ ++ ++ ++ v120 ++ ++ ++ ++ v140 ++ ++ ++ ++ v141 ++ ++ ++ ++ {78B079BD-9FC7-4B9E-B4A6-96DA0F00248B} ++ 10.0.17763.0 ++ ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ StaticLibrary ++ false ++ MultiByte ++ ++ ++ ++ ++ ++ <_ProjectFileVersion>10.0.30319.1 ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ .\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ .\..\..\..\objs\vc2010\$(Platform)\ ++ 
.\..\..\..\objs\vc2010\$(Platform)\$(Configuration)\ ++ AllRules.ruleset ++ AllRules.ruleset ++ ++ ++ ++ ++ AllRules.ruleset ++ AllRules.ruleset ++ ++ ++ ++ ++ AllRules.ruleset ++ AllRules.ruleset ++ ++ ++ ++ ++ AllRules.ruleset ++ AllRules.ruleset ++ ++ ++ ++ ++ AllRules.ruleset ++ AllRules.ruleset ++ ++ ++ ++ ++ AllRules.ruleset ++ AllRules.ruleset ++ ++ ++ ++ ++ freetype281d ++ freetype281d ++ freetype281MTd ++ freetype281MTd ++ freetype281STd ++ freetype281STd ++ freetype281 ++ freetype281 ++ freetype281MT ++ freetype281MT ++ freetype281ST ++ freetype281ST ++ ++ ++ ++ ++ Disabled ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ _DEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT_DEBUG_LEVEL_ERROR;FT_DEBUG_LEVEL_TRACE;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ EnableFastChecks ++ MultiThreadedDebugDLL ++ true ++ Level4 ++ ProgramDatabase ++ Default ++ 4001 ++ true ++ false ++ $(OutDir)$(TargetName).pdb ++ Disabled ++ ++ ++ _DEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ MachineX86 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Disabled ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ _DEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT_DEBUG_LEVEL_ERROR;FT_DEBUG_LEVEL_TRACE;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ EnableFastChecks ++ MultiThreadedDebugDLL ++ true ++ Level4 ++ ProgramDatabase ++ Default ++ 4001 ++ true ++ false ++ $(OutDir)$(TargetName).pdb ++ Disabled ++ ++ ++ _DEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ MachineX64 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Disabled ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ 
_DEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT_DEBUG_LEVEL_ERROR;FT_DEBUG_LEVEL_TRACE;FT2_BUILD_LIBRARY;_CRT_SECURE_NO_DEPRECATE;$(UserDefines);%(PreprocessorDefinitions) ++ false ++ false ++ EnableFastChecks ++ MultiThreadedDebug ++ true ++ Level4 ++ ProgramDatabase ++ Default ++ 4001 ++ true ++ false ++ $(OutDir)$(TargetName).pdb ++ Disabled ++ ++ ++ _DEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ MachineX86 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Disabled ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ _DEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT_DEBUG_LEVEL_ERROR;FT_DEBUG_LEVEL_TRACE;FT2_BUILD_LIBRARY;_CRT_SECURE_NO_DEPRECATE;$(UserDefines);%(PreprocessorDefinitions) ++ false ++ false ++ EnableFastChecks ++ MultiThreadedDebug ++ true ++ Level4 ++ ProgramDatabase ++ Default ++ 4001 ++ true ++ false ++ $(OutDir)$(TargetName).pdb ++ Disabled ++ ++ ++ _DEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ MachineX64 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Disabled ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ _DEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT_DEBUG_LEVEL_ERROR;FT_DEBUG_LEVEL_TRACE;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ EnableFastChecks ++ MultiThreadedDebug ++ true ++ Level4 ++ ProgramDatabase ++ Default ++ 4001 ++ true ++ false ++ $(OutDir)$(TargetName).pdb ++ Disabled ++ ++ ++ _DEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ MachineX86 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Disabled ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ 
_DEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT_DEBUG_LEVEL_ERROR;FT_DEBUG_LEVEL_TRACE;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ EnableFastChecks ++ MultiThreadedDebug ++ true ++ Level4 ++ ProgramDatabase ++ Default ++ 4001 ++ true ++ false ++ $(OutDir)$(TargetName).pdb ++ Disabled ++ ++ ++ _DEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ MachineX64 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Full ++ AnySuitable ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ NDEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ true ++ MultiThreadedDLL ++ true ++ true ++ Level4 ++ Default ++ 4001 ++ true ++ false ++ StreamingSIMDExtensions2 ++ false ++ false ++ false ++ ++ ++ true ++ ++ ++ true ++ Neither ++ true ++ ++ ++ NDEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ true ++ MachineX86 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Full ++ AnySuitable ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ NDEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ true ++ MultiThreadedDLL ++ true ++ true ++ Level4 ++ Default ++ 4001 ++ true ++ false ++ StreamingSIMDExtensions2 ++ false ++ false ++ false ++ ++ ++ true ++ ++ ++ true ++ Neither ++ true ++ ++ ++ NDEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ true ++ MachineX64 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Full ++ AnySuitable ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ 
NDEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ true ++ MultiThreaded ++ true ++ true ++ Level4 ++ Default ++ 4001 ++ true ++ false ++ StreamingSIMDExtensions2 ++ false ++ false ++ false ++ false ++ ++ ++ true ++ ++ ++ true ++ Neither ++ true ++ ++ ++ NDEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ true ++ MachineX86 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Full ++ AnySuitable ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ NDEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ true ++ MultiThreaded ++ true ++ true ++ Level4 ++ Default ++ 4001 ++ true ++ false ++ StreamingSIMDExtensions2 ++ false ++ false ++ false ++ false ++ ++ ++ true ++ ++ ++ true ++ Neither ++ true ++ ++ ++ NDEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ true ++ true ++ MachineX64 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Full ++ AnySuitable ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ NDEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ true ++ MultiThreaded ++ true ++ true ++ Level4 ++ Default ++ 4001 ++ true ++ false ++ StreamingSIMDExtensions2 ++ false ++ false ++ false ++ ++ ++ true ++ ++ ++ true ++ Neither ++ true ++ ++ ++ NDEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ ++ true ++ MachineX86 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ Full ++ AnySuitable ++ $(UserOptionDirectory);..\..\..\include;$(UserIncludeDirectories);%(AdditionalIncludeDirectories) ++ 
NDEBUG;WIN32;_LIB;_CRT_SECURE_NO_WARNINGS;FT2_BUILD_LIBRARY;$(UserDefines);%(PreprocessorDefinitions) ++ true ++ MultiThreaded ++ true ++ true ++ Level4 ++ Default ++ 4001 ++ true ++ false ++ StreamingSIMDExtensions2 ++ false ++ false ++ false ++ ++ ++ true ++ ++ ++ true ++ Neither ++ true ++ ++ ++ NDEBUG;$(UserDefines);%(PreprocessorDefinitions) ++ 0x0409 ++ ++ ++ ++ true ++ MachineX64 ++ $(UserLibraryDirectories);%(AdditionalLibraryDirectories) ++ $(UserDependencies);%(AdditionalDependencies) ++ ++ ++ ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ 
%(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ 
%(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ false ++ false ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ false ++ false ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ false ++ false ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ false ++ false ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ false ++ false ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ false ++ false ++ ++ ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ 
%(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ 
%(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ 
%(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed 
++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ 
%(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ 
%(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ 
EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ 
%(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) 
++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ 
%(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ Disabled ++ Disabled ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ EnableFastChecks ++ EnableFastChecks ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ %(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ MaxSpeed ++ MaxSpeed ++ %(AdditionalIncludeDirectories) ++ 
%(AdditionalIncludeDirectories) ++ %(PreprocessorDefinitions) ++ %(PreprocessorDefinitions) ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +\ 文件尾没有换行符 +diff -uNr openjdk/.github/workflows/submit.yml afu8u/.github/workflows/submit.yml +--- openjdk/.github/workflows/submit.yml 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.github/workflows/submit.yml 2025-05-06 11:13:08.067672948 +0800 +@@ -0,0 +1,1617 @@ ++name: Pre-submit tests ++ ++on: ++ push: ++ branches-ignore: ++ - master ++ - pr/* ++ workflow_dispatch: ++ inputs: ++ platforms: ++ description: "Platform(s) to execute on" ++ required: true ++ default: "Linux additional (hotspot only), Linux x64, Linux x86, Windows x64, Windows x86, macOS x64" ++ ++concurrency: ++ group: ${{ github.workflow }}-${{ github.ref }} ++ cancel-in-progress: true ++ ++jobs: ++ prerequisites: ++ name: Prerequisites ++ runs-on: "ubuntu-20.04" ++ outputs: ++ should_run: ${{ steps.check_submit.outputs.should_run }} ++ bundle_id: ${{ steps.check_bundle_id.outputs.bundle_id }} ++ platform_linux_additional: ${{ steps.check_platforms.outputs.platform_linux_additional }} ++ platform_linux_x64: ${{ steps.check_platforms.outputs.platform_linux_x64 }} ++ platform_linux_x86: ${{ steps.check_platforms.outputs.platform_linux_x86 }} ++ platform_windows_x64: ${{ steps.check_platforms.outputs.platform_windows_x64 }} ++ platform_windows_x86: ${{ steps.check_platforms.outputs.platform_windows_x86 }} ++ platform_macos_x64: ${{ steps.check_platforms.outputs.platform_macos_x64 }} ++ dependencies: ${{ steps.check_deps.outputs.dependencies }} ++ ++ steps: ++ - name: Check if submit tests should actually run depending on secrets and manual triggering ++ id: check_submit ++ run: echo "should_run=${{ github.event.inputs.platforms != '' || (!secrets.JDK_SUBMIT_FILTER || startsWith(github.ref, 'refs/heads/submit/')) }}" >> $GITHUB_OUTPUT ++ ++ - name: Check which platforms should be included ++ id: check_platforms ++ run: | ++ echo "platform_linux_additional=${{ 
contains(github.event.inputs.platforms, 'linux additional (hotspot only)') || (github.event.inputs.platforms == '' && (secrets.JDK_SUBMIT_PLATFORMS == '' || contains(secrets.JDK_SUBMIT_PLATFORMS, 'linux additional (hotspot only)'))) }}" >> $GITHUB_OUTPUT ++ echo "platform_linux_x64=${{ contains(github.event.inputs.platforms, 'linux x64') || (github.event.inputs.platforms == '' && (secrets.JDK_SUBMIT_PLATFORMS == '' || contains(secrets.JDK_SUBMIT_PLATFORMS, 'linux x64'))) }}" >> $GITHUB_OUTPUT ++ echo "platform_linux_x86=${{ contains(github.event.inputs.platforms, 'linux x86') || (github.event.inputs.platforms == '' && (secrets.JDK_SUBMIT_PLATFORMS == '' || contains(secrets.JDK_SUBMIT_PLATFORMS, 'linux x86'))) }}" >> $GITHUB_OUTPUT ++ echo "platform_windows_x64=${{ contains(github.event.inputs.platforms, 'windows x64') || (github.event.inputs.platforms == '' && (secrets.JDK_SUBMIT_PLATFORMS == '' || contains(secrets.JDK_SUBMIT_PLATFORMS, 'windows x64'))) }}" >> $GITHUB_OUTPUT ++ echo "platform_windows_x86=${{ contains(github.event.inputs.platforms, 'windows x86') || (github.event.inputs.platforms == '' && (secrets.JDK_SUBMIT_PLATFORMS == '' || contains(secrets.JDK_SUBMIT_PLATFORMS, 'windows x86'))) }}" >> $GITHUB_OUTPUT ++ echo "platform_macos_x64=${{ contains(github.event.inputs.platforms, 'macos x64') || (github.event.inputs.platforms == '' && (secrets.JDK_SUBMIT_PLATFORMS == '' || contains(secrets.JDK_SUBMIT_PLATFORMS, 'macos x64'))) }}" >> $GITHUB_OUTPUT ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Determine unique bundle identifier ++ id: check_bundle_id ++ run: echo "bundle_id=${GITHUB_ACTOR}_${GITHUB_SHA:0:8}" >> $GITHUB_OUTPUT ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Determine versions and locations to be used for dependencies ++ id: check_deps ++ run: "echo 
dependencies=`cat common/autoconf/version-numbers make/conf/test-dependencies | sed -e '1i {' -e 's/#.*//g' -e 's/\"//g' -e 's/\\(.*\\)=\\(.*\\)/\"\\1\": \"\\2\",/g' -e '$s/,\\s\\{0,\\}$/\\}/'` >> $GITHUB_OUTPUT" ++ working-directory: jdk ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Print extracted dependencies to the log ++ run: "echo '${{ steps.check_deps.outputs.dependencies }}'" ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Determine the jtreg ref to checkout ++ run: "echo JTREG_REF=jtreg${{ fromJson(steps.check_deps.outputs.dependencies).JTREG_VERSION }}-${{ fromJson(steps.check_deps.outputs.dependencies).JTREG_BUILD }} >> $GITHUB_ENV" ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Determine the jtreg version to build ++ run: echo "BUILD_VERSION=${{ fromJson(steps.check_deps.outputs.dependencies).JTREG_VERSION }}" >> $GITHUB_ENV ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Determine the jtreg build number to build ++ run: echo "BUILD_NUMBER=${{ fromJson(steps.check_deps.outputs.dependencies).JTREG_BUILD }}" >> $GITHUB_ENV ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Check if a jtreg image is present in the cache ++ id: jtreg ++ uses: actions/cache@v3 ++ with: ++ path: ~/jtreg/ ++ key: jtreg-${{ env.JTREG_REF }}-v1 ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ - name: Checkout the jtreg source ++ uses: actions/checkout@v3 ++ with: ++ repository: "openjdk/jtreg" ++ ref: ${{ env.JTREG_REF }} ++ path: jtreg ++ if: steps.check_submit.outputs.should_run != 'false' && steps.jtreg.outputs.cache-hit != 'true' ++ ++ - name: Build jtreg ++ run: bash make/build-all.sh ${JAVA_HOME_8_X64} ++ working-directory: jtreg ++ if: steps.check_submit.outputs.should_run != 'false' && steps.jtreg.outputs.cache-hit != 'true' ++ ++ - name: Move jtreg image to destination folder ++ run: mv build/images/jtreg ~/ ++ working-directory: jtreg ++ if: 
steps.check_submit.outputs.should_run != 'false' && steps.jtreg.outputs.cache-hit != 'true' ++ ++ - name: Store jtreg for use by later steps ++ uses: actions/upload-artifact@v3 ++ with: ++ name: transient_jtreg_${{ steps.check_bundle_id.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.check_submit.outputs.should_run != 'false' ++ ++ linux_x64_build: ++ name: Linux x64 ++ runs-on: "ubuntu-20.04" ++ needs: prerequisites ++ if: needs.prerequisites.outputs.should_run != 'false' && needs.prerequisites.outputs.platform_linux_x64 != 'false' ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ flavor: ++ - build release ++ - build debug ++ include: ++ - flavor: build debug ++ flags: --enable-debug ++ artifact: -debug ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Fix jtreg permissions ++ run: chmod -R a+rx ${HOME}/jtreg/ ++ ++ - name: Install dependencies ++ run: | ++ sudo apt-get update ++ sudo apt-get install openjdk-8-jdk gcc-9 g++-9 libxrandr-dev libxtst-dev libcups2-dev libasound2-dev ++ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 100 --slave /usr/bin/g++ g++ /usr/bin/g++-9 ++ ++ - name: Configure ++ run: > ++ bash configure ++ --with-conf-name=linux-x64 ++ ${{ matrix.flags }} ++ 
--with-user-release-suffix=${GITHUB_ACTOR}-${GITHUB_SHA} ++ --with-build-number=b00 ++ --with-jtreg=${HOME}/jtreg ++ --with-zlib=bundled ++ working-directory: jdk ++ ++ - name: Build ++ run: make CONF_NAME=linux-x64 LOG_LEVEL=debug images ++ working-directory: jdk ++ ++ - name: Pack artifacts ++ run: | ++ tar -cf jdk-${{ env.JDK_VERSION }}-internal+0_linux-x64_bin${{ matrix.artifact }}.tar.gz -C jdk/build/linux-x64/images j2sdk-image ++ ++ - name: Persist test bundles ++ uses: actions/upload-artifact@v3 ++ with: ++ name: transient_jdk-linux-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: | ++ jdk-${{ env.JDK_VERSION }}-internal+0_linux-x64_bin${{ matrix.artifact }}.tar.gz ++ ++ linux_x64_test: ++ name: Linux x64 ++ runs-on: "ubuntu-20.04" ++ needs: ++ - prerequisites ++ - linux_x64_build ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ test: ++ - jdk/tier1 ++ - langtools/tier1 ++ - hotspot/tier1 ++ include: ++ - test: jdk/tier1 ++ suites: jdk_tier1 ++ - test: langtools/tier1 ++ suites: langtools_tier1 ++ - test: hotspot/tier1 ++ suites: hotspot_tier1 ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK: "/usr/lib/jvm/java-8-openjdk-amd64" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Restore build artifacts ++ id: build_restore ++ uses: 
actions/download-artifact@v3 ++ with: ++ name: transient_jdk-linux-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-linux-x64${{ matrix.artifact }} ++ continue-on-error: true ++ ++ - name: Restore build artifacts (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-linux-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-linux-x64${{ matrix.artifact }} ++ if: steps.build_restore.outcome == 'failure' ++ ++ - name: Install dependencies ++ run: | ++ sudo apt-get update ++ sudo apt-get install openjdk-8-jdk ++ ++ - name: Unpack jdk ++ run: | ++ mkdir -p "${HOME}/jdk-linux-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x64_bin${{ matrix.artifact }}" ++ tar -xf "${HOME}/jdk-linux-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x64_bin${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-linux-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x64_bin${{ matrix.artifact }}" ++ ++ - name: Run tests ++ run: > ++ chmod +x "${HOME}/jtreg/bin/jtreg" && ++ mkdir test-results && ++ cd test && ++ PRODUCT_HOME="${HOME}/jdk-linux-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x64_bin${{ matrix.artifact }}/j2sdk-image" ++ JT_HOME="${HOME}/jtreg" ++ ALT_OUTPUTDIR="${GITHUB_WORKSPACE}/test-results" ++ JAVA_ARGS="-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest" ++ JTREG_TIMEOUT_FACTOR="4" ++ make ++ "${{ matrix.suites }}" ++ ++ - name: Check that all tests executed successfully ++ if: always() ++ run: > ++ if cat test-results/testoutput/*/exitcode.txt | grep -q -v '^0$' ++ || ! cat test-results/testoutput/*/Stats.txt | grep -q 'fail=0' ; then ++ cat test-results/testoutput/*/JTreport/text/newfailures.txt ; ++ exit 1 ; ++ fi ++ ++ - name: Create suitable test log artifact name ++ if: always() ++ run: echo "logsuffix=`echo ${{ matrix.test }} | sed -e 's!/!_!'g -e 's! 
!_!'g`" >> $GITHUB_ENV ++ ++ - name: Package test results ++ if: always() ++ working-directory: test-results ++ run: > ++ zip -r9 ++ "$HOME/linux-x64${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip" ++ . ++ -x "*ARCHIVE_BUNDLE.zip" ++ continue-on-error: true ++ ++ - name: Persist test results ++ if: always() ++ uses: actions/upload-artifact@v3 ++ with: ++ path: ~/linux-x64${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip ++ continue-on-error: true ++ ++ linux_additional_build: ++ name: Linux additional ++ runs-on: "ubuntu-20.04" ++ needs: ++ - prerequisites ++ if: needs.prerequisites.outputs.should_run != 'false' && needs.prerequisites.outputs.platform_linux_additional != 'false' ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ flavor: ++ - hs x64 build only ++ - hs x64 zero build only ++ - hs x86 minimal build only ++ - hs x86 client build only ++ - hs aarch64 build only ++ - hs arm build only ++ - hs s390x build only ++ - hs ppc64le build only ++ include: ++ - flavor: hs x64 build only ++ flags: --enable-debug --disable-precompiled-headers ++ - flavor: hs x64 zero build only ++ flags: --enable-debug --disable-precompiled-headers --with-jvm-variants=zero ++ - flavor: hs x86 minimal build only ++ flags: --enable-debug --disable-precompiled-headers --with-jvm-variants=minimal1 --with-target-bits=32 ++ multilib: true ++ - flavor: hs x86 client build only ++ flags: --enable-debug --disable-precompiled-headers --with-jvm-variants=client --with-target-bits=32 ++ multilib: true ++ - flavor: hs aarch64 build only ++ flags: --enable-debug --disable-precompiled-headers ++ debian-arch: arm64 ++ gnu-arch: aarch64 ++ - flavor: hs arm build only ++ flags: --enable-debug --disable-precompiled-headers --with-jvm-variants=zero ++ debian-arch: armhf ++ gnu-arch: arm ++ gnu-flavor: eabihf ++ - flavor: hs s390x build only ++ flags: --enable-debug --disable-precompiled-headers --with-jvm-variants=zero ++ debian-arch: s390x ++ gnu-arch: s390x ++ - flavor: hs 
ppc64le build only ++ flags: --enable-debug --disable-precompiled-headers ++ debian-arch: ppc64el ++ gnu-arch: powerpc64le ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK: "/usr/lib/jvm/java-8-openjdk-amd64" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ ++ - name: Update apt ++ run: sudo apt-get update ++ ++ - name: Install native host dependencies ++ run: | ++ sudo apt-get install openjdk-8-jdk gcc-9 g++-9 libxrandr-dev libxtst-dev libcups2-dev libasound2-dev ++ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 100 --slave /usr/bin/g++ g++ /usr/bin/g++-9 ++ if: matrix.debian-arch == '' ++ ++ - name: Install multilib dependencies ++ run: | ++ sudo dpkg --add-architecture i386 ++ sudo apt-get update ++ sudo apt-get install --only-upgrade apt ++ sudo apt-get install openjdk-8-jdk gcc-9-multilib g++-9-multilib libfreetype6-dev:i386 libxrandr-dev:i386 libxtst-dev:i386 libtiff-dev:i386 libcupsimage2-dev:i386 libcups2-dev:i386 libasound2-dev:i386 ++ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 100 --slave /usr/bin/g++ g++ /usr/bin/g++-9 ++ if: matrix.multilib != '' ++ ++ - name: Install cross-compilation host dependencies ++ run: sudo apt-get install openjdk-8-jdk gcc-9-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}} g++-9-${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}} ++ if: matrix.debian-arch != '' ++ ++ - name: Cache sysroot ++ id: cache-sysroot ++ uses: actions/cache@v3 ++ with: ++ path: ~/sysroot-${{ matrix.debian-arch }}/ ++ key: sysroot-${{ matrix.debian-arch }}-${{ hashFiles('jdk/.github/workflows/submit.yml') }} ++ if: matrix.debian-arch != '' ++ ++ - name: Install sysroot host dependencies ++ run: sudo apt-get install debootstrap 
qemu-user-static ++ if: matrix.debian-arch != '' && steps.cache-sysroot.outputs.cache-hit != 'true' ++ ++ - name: Create sysroot ++ run: > ++ sudo qemu-debootstrap ++ --arch=${{ matrix.debian-arch }} ++ --verbose ++ --include=fakeroot,symlinks,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxrandr-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng-dev,libffi-dev ++ --resolve-deps ++ buster ++ ~/sysroot-${{ matrix.debian-arch }} ++ http://httpredir.debian.org/debian/ ++ if: matrix.debian-arch != '' && steps.cache-sysroot.outputs.cache-hit != 'true' ++ ++ - name: Prepare sysroot for caching ++ run: | ++ sudo chroot ~/sysroot-${{ matrix.debian-arch }} symlinks -cr . ++ sudo chown ${USER} -R ~/sysroot-${{ matrix.debian-arch }} ++ rm -rf ~/sysroot-${{ matrix.debian-arch }}/{dev,proc,run,sys} ++ if: matrix.debian-arch != '' && steps.cache-sysroot.outputs.cache-hit != 'true' ++ ++ - name: Configure cross compiler ++ run: | ++ echo "CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}-gcc-9" >> $GITHUB_ENV ++ echo "CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}}-g++-9" >> $GITHUB_ENV ++ if: matrix.debian-arch != '' ++ ++ - name: Configure cross specific flags ++ run: > ++ echo "cross_flags= ++ --openjdk-target=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}} ++ --with-sysroot=${HOME}/sysroot-${{ matrix.debian-arch }}/ ++ --with-cups=${HOME}/sysroot-${{ matrix.debian-arch }}/usr ++ --with-freetype-lib=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/lib/${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-flavor}} ++ --with-freetype-include=${HOME}/sysroot-${{ matrix.debian-arch }}/usr/include/freetype2 ++ --with-alsa=${HOME}/sysroot-${{ matrix.debian-arch }}/usr ++ --with-fontconfig=${HOME}/sysroot-${{ matrix.debian-arch }}/usr ++ " >> $GITHUB_ENV ++ && echo "cross_conf_env= ++ CFLAGS=--sysroot=${HOME}/sysroot-${{ matrix.debian-arch }} ++ CXXFLAGS=--sysroot=${HOME}/sysroot-${{ matrix.debian-arch }} ++ 
LDFLAGS=--sysroot=${HOME}/sysroot-${{ matrix.debian-arch }} ++ " >> $GITHUB_ENV ++ if: matrix.debian-arch != '' ++ ++ - name: Configure ++ run: > ++ ${{ env.cross_conf_env }} ++ bash configure ++ --with-conf-name=linux-${{ matrix.gnu-arch }}-hotspot ++ ${{ matrix.flags }} ++ ${{ env.cross_flags }} ++ --with-user-release-suffix=${GITHUB_ACTOR}-${GITHUB_SHA} ++ --with-build-number=b00 ++ --with-boot-jdk=${BOOT_JDK} ++ --disable-headful ++ --with-zlib=bundled ++ working-directory: jdk ++ ++ - name: Build ++ run: make CONF_NAME=linux-${{ matrix.gnu-arch }}-hotspot hotspot ++ working-directory: jdk ++ ++ linux_x86_build: ++ name: Linux x86 ++ runs-on: "ubuntu-20.04" ++ needs: prerequisites ++ if: needs.prerequisites.outputs.should_run != 'false' && needs.prerequisites.outputs.platform_linux_x86 != 'false' ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ flavor: ++ - build release ++ - build debug ++ include: ++ - flavor: build debug ++ flags: --enable-debug ++ artifact: -debug ++ ++ # Reduced 32-bit build uses the same boot JDK as 64-bit build ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK: "/usr/lib/jvm/java-8-openjdk-amd64" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Fix jtreg permissions ++ run: chmod -R a+rx ${HOME}/jtreg/ ++ ++ # Roll in the 
multilib environment and its dependencies. ++ # Some multilib libraries do not have proper inter-dependencies, so we have to ++ # install their dependencies manually. Additionally, upgrading apt solves ++ # the libc6 installation bugs until base image catches up, see JDK-8260460. ++ - name: Install dependencies ++ run: | ++ sudo dpkg --add-architecture i386 ++ sudo apt-get update ++ sudo apt-get install --only-upgrade apt ++ sudo apt-get install openjdk-8-jdk gcc-9-multilib g++-9-multilib libfreetype6-dev:i386 libxrandr-dev:i386 libxtst-dev:i386 libtiff-dev:i386 libcupsimage2-dev:i386 libcups2-dev:i386 libasound2-dev:i386 ++ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 100 --slave /usr/bin/g++ g++ /usr/bin/g++-9 ++ ++ - name: Configure ++ run: > ++ bash configure ++ --with-conf-name=linux-x86 ++ --with-target-bits=32 ++ ${{ matrix.flags }} ++ --with-user-release-suffix=${GITHUB_ACTOR}-${GITHUB_SHA} ++ --with-build-number=b00 ++ --with-boot-jdk=${BOOT_JDK} ++ --with-jtreg=${HOME}/jtreg ++ --with-zlib=bundled ++ working-directory: jdk ++ ++ - name: Build ++ run: make CONF_NAME=linux-x86 images ++ working-directory: jdk ++ ++ - name: Pack artifacts ++ run: | ++ tar -cf jdk-${{ env.JDK_VERSION }}-internal+0_linux-x86_bin${{ matrix.artifact }}.tar.gz -C jdk/build/linux-x86/images j2sdk-image ++ ++ - name: Persist test bundles ++ uses: actions/upload-artifact@v3 ++ with: ++ name: transient_jdk-linux-x86${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: | ++ jdk-${{ env.JDK_VERSION }}-internal+0_linux-x86_bin${{ matrix.artifact }}.tar.gz ++ ++ linux_x86_test: ++ name: Linux x86 ++ runs-on: "ubuntu-20.04" ++ needs: ++ - prerequisites ++ - linux_x86_build ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ test: ++ - jdk/tier1 ++ - langtools/tier1 ++ - hotspot/tier1 ++ include: ++ - test: jdk/tier1 ++ suites: jdk_tier1 ++ - test: langtools/tier1 ++ suites: langtools_tier1 ++ - test: hotspot/tier1 ++ suites: hotspot_tier1 ++ ++ # 
Reduced 32-bit build uses the same boot JDK as 64-bit build ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK: "/usr/lib/jvm/java-8-openjdk-amd64" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Restore build artifacts ++ id: build_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-linux-x86${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-linux-x86${{ matrix.artifact }} ++ continue-on-error: true ++ ++ - name: Restore build artifacts (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-linux-x86${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-linux-x86${{ matrix.artifact }} ++ if: steps.build_restore.outcome == 'failure' ++ ++ - name: Install dependencies ++ run: | ++ sudo apt-get update ++ sudo apt-get install openjdk-8-jdk gcc-9-multilib g++-9-multilib ++ ++ - name: Unpack jdk ++ run: | ++ mkdir -p "${HOME}/jdk-linux-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x86_bin${{ matrix.artifact }}" ++ tar -xf "${HOME}/jdk-linux-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x86_bin${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-linux-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x86_bin${{ 
matrix.artifact }}" ++ ++ - name: Build multilib docker image ++ if: matrix.test == 'hotspot/tier1' ++ run: > ++ printf '%s\n%s\n' ++ 'FROM ubuntu:latest' ++ 'RUN dpkg --add-architecture i386 && apt-get update && apt-get -y install libc6:i386' ++ | docker build -t 'ubuntu-multilib:latest' - ++ ++ - name: Run tests ++ run: > ++ chmod +x "${HOME}/jtreg/bin/jtreg" && ++ mkdir test-results && ++ cd test && ++ PRODUCT_HOME="${HOME}/jdk-linux-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_linux-x86_bin${{ matrix.artifact }}/j2sdk-image" ++ JT_HOME="${HOME}/jtreg" ++ ALT_OUTPUTDIR="${GITHUB_WORKSPACE}/test-results" ++ JAVA_ARGS="-Djdk.test.docker.image.name=ubuntu-multilib -Djdk.test.docker.image.version=latest" ++ JTREG_TIMEOUT_FACTOR="4" ++ make ++ "${{ matrix.suites }}" ++ ++ - name: Check that all tests executed successfully ++ if: always() ++ run: > ++ if cat test-results/testoutput/*/exitcode.txt | grep -q -v '^0$' ++ || ! cat test-results/testoutput/*/Stats.txt | grep -q 'fail=0' ; then ++ cat test-results/testoutput/*/JTreport/text/newfailures.txt ; ++ exit 1 ; ++ fi ++ ++ - name: Create suitable test log artifact name ++ if: always() ++ run: echo "logsuffix=`echo ${{ matrix.test }} | sed -e 's!/!_!'g -e 's! !_!'g`" >> $GITHUB_ENV ++ ++ - name: Package test results ++ if: always() ++ working-directory: test-results ++ run: > ++ zip -r9 ++ "$HOME/linux-x86${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip" ++ . 
++ -x "*ARCHIVE_BUNDLE.zip" ++ continue-on-error: true ++ ++ - name: Persist test results ++ if: always() ++ uses: actions/upload-artifact@v3 ++ with: ++ path: ~/linux-x86${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip ++ continue-on-error: true ++ ++ windows_x64_build: ++ name: Windows x64 ++ runs-on: "windows-2019" ++ needs: prerequisites ++ if: needs.prerequisites.outputs.should_run != 'false' && needs.prerequisites.outputs.platform_windows_x64 != 'false' ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ flavor: ++ - build release ++ - build debug ++ include: ++ - flavor: build debug ++ flags: --enable-debug ++ artifact: -debug ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).BOOT_JDK_VERSION }}" ++ BOOT_JDK_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X64_BOOT_JDK_FILENAME }}" ++ BOOT_JDK_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X64_BOOT_JDK_URL }}" ++ BOOT_JDK_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X64_BOOT_JDK_SHA256 }}" ++ VS2017_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2017_FILENAME }}" ++ VS2017_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2017_URL }}" ++ VS2017_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2017_SHA256 }}" ++ ++ steps: ++ - name: Restore cygwin installer from cache ++ id: cygwin-installer ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/setup-x86_64.exe ++ key: cygwin-installer ++ ++ - name: Download cygwin installer ++ run: | ++ New-Item -Force -ItemType directory -Path "$HOME\cygwin" ++ & curl -L "https://www.cygwin.com/setup-x86_64.exe" -o "$HOME/cygwin/setup-x86_64.exe" ++ if: 
steps.cygwin-installer.outputs.cache-hit != 'true' ++ ++ - name: Restore cygwin packages from cache ++ id: cygwin ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/packages ++ key: cygwin-packages-${{ runner.os }}-v1 ++ ++ - name: Install cygwin ++ run: | ++ Start-Process -FilePath "$HOME\cygwin\setup-x86_64.exe" -ArgumentList "--quiet-mode --packages autoconf,make,zip,unzip --root $HOME\cygwin\cygwin64 --local-package-dir $HOME\cygwin\packages --site http://mirrors.kernel.org/sourceware/cygwin --no-desktop --no-shortcuts --no-startmenu --no-admin" -Wait -NoNewWindow ++ ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ ++ - name: Checkout the FreeType source ++ uses: actions/checkout@v3 ++ with: ++ repository: "freetype/freetype" ++ ref: VER-2-8-1 ++ path: freetype ++ ++ - name: Restore boot JDK from cache ++ id: bootjdk ++ uses: actions/cache@v3 ++ with: ++ path: ~/bootjdk/${{ env.BOOT_JDK_VERSION }} ++ key: bootjdk-${{ runner.os }}-${{ env.BOOT_JDK_VERSION }}-${{ env.BOOT_JDK_SHA256 }}-v1 ++ ++ - name: Download boot JDK ++ run: | ++ mkdir -p "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ & curl -L "$env:BOOT_JDK_URL" -o "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash = Get-FileHash -Algorithm SHA256 "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash.Hash -eq $env:BOOT_JDK_SHA256 ++ & tar -xf "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" -C "$HOME/bootjdk/$env:BOOT_JDK_VERSION" ++ Get-ChildItem "$HOME\bootjdk\$env:BOOT_JDK_VERSION\*\*" | Move-Item -Destination "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ if: steps.bootjdk.outputs.cache-hit != 'true' ++ ++ - name: Restore Visual Studio 2017 from cache ++ id: vs2017 ++ uses: actions/cache@v3 ++ with: ++ path: ~/${{ env.VS2017_FILENAME }} ++ key: vs2017 ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore 
jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Uninstall WinSDKs ++ run: > ++ Start-Process -FilePath 'C:\Program Files (x86)\Microsoft Visual Studio\Installer\vs_installer.exe' -Wait -NoNewWindow -ArgumentList ++ 'modify --installPath "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise" ++ --remove Microsoft.VisualStudio.Component.Windows10SDK.18362 ++ --remove Microsoft.VisualStudio.Component.Windows10SDK.19041 ++ --remove Microsoft.VisualStudio.Component.Windows10SDK.20348 ++ --remove Microsoft.VisualStudio.Component.Windows10SDK.22000 ++ --quiet' ++ ++ - name: Download Visual Studio 2017 ++ run: | ++ curl -L "$env:VS2017_URL" -o "$HOME/$env:VS2017_FILENAME" ++ if: steps.vs2017.outputs.cache-hit != 'true' ++ ++ - name: Install Visual Studio 2017 ++ run: > ++ Start-Process -FilePath "$HOME\$env:VS2017_FILENAME" -Wait -NoNewWindow -ArgumentList ++ 'install --productId Microsoft.VisualStudio.Product.Community --channelId VisualStudio.15.Release ++ --add Microsoft.VisualStudio.Workload.NativeDesktop ++ --add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 ++ --add Microsoft.VisualStudio.Component.Windows10SDK.17763 ++ --quiet --wait' ++ ++ - name: Fix OpenJDK permissions ++ run: bash -c "chmod -R 0777 jdk" ++ ++ - name: Copy FreeType project file ++ run: > ++ Remove-Item "$env:GITHUB_WORKSPACE\freetype\builds\windows\vc2010\freetype.vcxproj" ; ++ Copy-Item -Path "$env:GITHUB_WORKSPACE\jdk\.github\workflows\freetype.vcxproj" ++ -Destination "$env:GITHUB_WORKSPACE\freetype\builds\windows\vc2010\freetype.vcxproj" ; ++ ++ - name: Configure ++ run: > ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ $env:Path = $env:Path -split ";" -match "C:\\Windows|PowerShell|cygwin" -join ";" ; ++ $env:BOOT_JDK = cygpath "$HOME/bootjdk/$env:BOOT_JDK_VERSION" ; ++ 
$env:FREETYPE_HOME = "$env:GITHUB_WORKSPACE\freetype" ; ++ $env:JT_HOME = cygpath "$HOME/jtreg" ; ++ & bash configure ++ --with-toolchain-version=2017 ++ --with-conf-name=windows-x64 ++ ${{ matrix.flags }} ++ --with-user-release-suffix="$env:GITHUB_ACTOR-$env:GITHUB_SHA" ++ --with-build-number=b00 ++ --with-boot-jdk="$env:BOOT_JDK" ++ --with-freetype-src="$env:FREETYPE_HOME" ++ --with-jtreg="$env:JT_HOME" ; ++ bash -c "cat $(find -name config.log)" ++ working-directory: jdk ++ ++ - name: Build ++ run: | ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ $env:Path = $env:Path -split ";" -match "C:\\Windows|PowerShell|cygwin" -join ";" ; ++ & make CONF_NAME=windows-x64 FORCE_MSC_VER=1912 FORCE_LD_VER=1412 images ++ working-directory: jdk ++ ++ - name: Pack artifacts ++ run: > ++ dir ; ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ zip -r9 ++ "${{ github.workspace }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x64_bin${{ matrix.artifact }}.zip" ++ j2sdk-image ++ working-directory: jdk/build/windows-x64/images ++ ++ - name: Persist test bundles ++ uses: actions/upload-artifact@v3 ++ with: ++ name: transient_jdk-windows-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: | ++ jdk-${{ env.JDK_VERSION }}-internal+0_windows-x64_bin${{ matrix.artifact }}.zip ++ ++ windows_x86_build: ++ name: Windows x86 ++ runs-on: "windows-2019" ++ needs: prerequisites ++ if: needs.prerequisites.outputs.should_run != 'false' && needs.prerequisites.outputs.platform_windows_x86 != 'false' ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ flavor: ++ - build release ++ - build debug ++ include: ++ - flavor: build debug ++ flags: --enable-debug ++ artifact: -debug ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ 
BOOT_JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).BOOT_JDK_VERSION }}" ++ BOOT_JDK_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X86_BOOT_JDK_FILENAME }}" ++ BOOT_JDK_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X86_BOOT_JDK_URL }}" ++ BOOT_JDK_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X86_BOOT_JDK_SHA256 }}" ++ VS2010_DIR: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2010_DIR }}" ++ VS2010_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2010_FILENAME }}" ++ VS2010_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2010_URL }}" ++ VS2010_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).VS2010_SHA256 }}" ++ ++ steps: ++ - name: Restore cygwin installer from cache ++ id: cygwin-installer ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/setup-x86_64.exe ++ key: cygwin-installer ++ ++ - name: Download cygwin installer ++ run: | ++ New-Item -Force -ItemType directory -Path "$HOME\cygwin" ++ & curl -L "https://www.cygwin.com/setup-x86_64.exe" -o "$HOME/cygwin/setup-x86_64.exe" ++ if: steps.cygwin-installer.outputs.cache-hit != 'true' ++ ++ - name: Restore cygwin packages from cache ++ id: cygwin ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/packages ++ key: cygwin-packages-${{ runner.os }}-v1 ++ ++ - name: Install cygwin ++ run: | ++ Start-Process -FilePath "$HOME\cygwin\setup-x86_64.exe" -ArgumentList "--quiet-mode --packages autoconf,make,zip,unzip --root $HOME\cygwin\cygwin64 --local-package-dir $HOME\cygwin\packages --site http://mirrors.kernel.org/sourceware/cygwin --no-desktop --no-shortcuts --no-startmenu --no-admin" -Wait -NoNewWindow ++ ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ ++ - name: Checkout the FreeType source ++ uses: actions/checkout@v3 ++ with: ++ repository: "freetype/freetype" ++ ref: VER-2-8-1 ++ path: freetype ++ ++ - name: Restore 
boot JDK from cache ++ id: bootjdk ++ uses: actions/cache@v3 ++ with: ++ path: ~/bootjdk/${{ env.BOOT_JDK_VERSION }} ++ key: bootjdk-${{ runner.os }}-${{ env.BOOT_JDK_VERSION }}-${{ env.BOOT_JDK_SHA256 }}-v1 ++ ++ - name: Download boot JDK ++ run: | ++ mkdir -p "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ & curl -L "$env:BOOT_JDK_URL" -o "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash = Get-FileHash -Algorithm SHA256 "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash.Hash -eq $env:BOOT_JDK_SHA256 ++ & tar -xf "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" -C "$HOME/bootjdk/$env:BOOT_JDK_VERSION" ++ Get-ChildItem "$HOME\bootjdk\$env:BOOT_JDK_VERSION\*\*" | Move-Item -Destination "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ if: steps.bootjdk.outputs.cache-hit != 'true' ++ ++ - name: Restore Visual Studio 2010 from cache ++ id: vs2010 ++ uses: actions/cache@v3 ++ with: ++ path: ~/${{ env.VS2010_DIR }} ++ key: vs2010 ++ ++ - name: Download and unpack Visual Studio 2010 ++ run: | ++ mkdir "$HOME\$env:VS2010_DIR" ++ & curl -L "$env:VS2010_URL" -o "$HOME/$env:VS2010_FILENAME" ++ $FileHash = Get-FileHash -Algorithm SHA256 "$HOME/$env:VS2010_FILENAME" ++ $FileHash.Hash -eq $env:VS2010_SHA256 ++ & 7z x -o"$HOME/$env:VS2010_DIR" "$HOME/$env:VS2010_FILENAME" ++ & dir "$HOME/$env:VS2010_DIR" ++ if: steps.vs2010.outputs.cache-hit != 'true' ++ ++ - name: Install VS2010Express ++ run: | ++ Start-Process -FilePath "$HOME\$env:VS2010_DIR\VCExpress\setup.exe" -ArgumentList "/q /norestart" -Wait -NoNewWindow ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Fix OpenJDK permissions ++ run: 
bash -c "chmod -R 0777 jdk" ++ ++ - name: Copy FreeType project file ++ run: > ++ Remove-Item "$env:GITHUB_WORKSPACE\freetype\builds\windows\vc2010\freetype.vcxproj" ; ++ Copy-Item -Path "$env:GITHUB_WORKSPACE\jdk\.github\workflows\freetype.vcxproj" ++ -Destination "$env:GITHUB_WORKSPACE\freetype\builds\windows\vc2010\freetype.vcxproj" ; ++ ++ - name: Configure ++ run: > ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ $env:Path = $env:Path -split ";" -match "C:\\Windows|PowerShell|cygwin" -join ";" ; ++ $env:BOOT_JDK = cygpath "$HOME/bootjdk/$env:BOOT_JDK_VERSION" ; ++ $env:FREETYPE_HOME = "$env:GITHUB_WORKSPACE\freetype" ; ++ $env:JT_HOME = cygpath "$HOME/jtreg" ; ++ & bash configure ++ --with-conf-name=windows-x86 ++ --with-toolchain-version=2010 ++ --with-msvcr-dll='C:\Windows\SysWOW64\msvcr100.dll' ++ --with-target-bits=32 ++ ${{ matrix.flags }} ++ --with-user-release-suffix="$env:GITHUB_ACTOR-$env:GITHUB_SHA" ++ --with-build-number=b00 ++ --with-boot-jdk="$env:BOOT_JDK" ++ --with-freetype-src="$env:FREETYPE_HOME" ++ --with-jtreg="$env:JT_HOME" ; ++ bash -c "cat $(find -name config.log)" ++ working-directory: jdk ++ ++ - name: Build ++ run: | ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ $env:Path = $env:Path -split ";" -match "C:\\Windows|PowerShell|cygwin" -join ";" ; ++ & make CONF_NAME=windows-x86 images ++ working-directory: jdk ++ ++ - name: Pack artifacts ++ run: > ++ dir ; ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ zip -r9 ++ "${{ github.workspace }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x86_bin${{ matrix.artifact }}.zip" ++ j2sdk-image ++ working-directory: jdk/build/windows-x86/images ++ ++ - name: Persist test bundles ++ uses: actions/upload-artifact@v3 ++ with: ++ name: transient_jdk-windows-x86${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: | ++ jdk-${{ env.JDK_VERSION }}-internal+0_windows-x86_bin${{ matrix.artifact }}.zip 
++ ++ windows_x64_test: ++ name: Windows x64 ++ runs-on: "windows-2019" ++ needs: ++ - prerequisites ++ - windows_x64_build ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ test: ++ - jdk/tier1 ++ - langtools/tier1 ++ - hotspot/tier1 ++ include: ++ - test: jdk/tier1 ++ suites: jdk_tier1 ++ - test: langtools/tier1 ++ suites: langtools_tier1 ++ - test: hotspot/tier1 ++ suites: hotspot_tier1 ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).BOOT_JDK_VERSION }}" ++ BOOT_JDK_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X64_BOOT_JDK_FILENAME }}" ++ BOOT_JDK_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X64_BOOT_JDK_URL }}" ++ BOOT_JDK_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X64_BOOT_JDK_SHA256 }}" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ ++ - name: Restore boot JDK from cache ++ id: bootjdk ++ uses: actions/cache@v3 ++ with: ++ path: ~/bootjdk/${{ env.BOOT_JDK_VERSION }} ++ key: bootjdk-${{ runner.os }}-${{ env.BOOT_JDK_VERSION }}-${{ env.BOOT_JDK_SHA256 }}-v1 ++ ++ - name: Download boot JDK ++ run: | ++ mkdir -p "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ & curl -L "$env:BOOT_JDK_URL" -o "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash = Get-FileHash -Algorithm SHA256 "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash.Hash -eq $env:BOOT_JDK_SHA256 ++ & tar -xf "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" -C "$HOME/bootjdk/$env:BOOT_JDK_VERSION" ++ Get-ChildItem "$HOME\bootjdk\$env:BOOT_JDK_VERSION\*\*" | Move-Item -Destination "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ if: steps.bootjdk.outputs.cache-hit != 'true' ++ ++ - name: Restore cygwin installer from cache ++ id: 
cygwin-installer ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/setup-x86_64.exe ++ key: cygwin-installer ++ ++ - name: Download cygwin installer ++ run: | ++ New-Item -Force -ItemType directory -Path "$HOME\cygwin" ++ & curl -L "https://www.cygwin.com/setup-x86_64.exe" -o "$HOME/cygwin/setup-x86_64.exe" ++ if: steps.cygwin-installer.outputs.cache-hit != 'true' ++ ++ - name: Restore cygwin packages from cache ++ id: cygwin ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/packages ++ key: cygwin-packages-${{ runner.os }}-v1 ++ ++ - name: Install cygwin ++ run: | ++ Start-Process -FilePath "$HOME\cygwin\setup-x86_64.exe" -ArgumentList "--quiet-mode --packages autoconf,make,zip,unzip --root $HOME\cygwin\cygwin64 --local-package-dir $HOME\cygwin\packages --site http://mirrors.kernel.org/sourceware/cygwin --no-desktop --no-shortcuts --no-startmenu --no-admin" -Wait -NoNewWindow ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Restore build artifacts ++ id: build_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-windows-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-windows-x64${{ matrix.artifact }} ++ continue-on-error: true ++ ++ - name: Restore build artifacts (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-windows-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-windows-x64${{ matrix.artifact }} ++ if: steps.build_restore.outcome == 'failure' ++ ++ - name: Unpack jdk ++ run: | ++ mkdir -p "${HOME}/jdk-windows-x64${{ 
matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x64_bin${{ matrix.artifact }}" ++ tar -xf "${HOME}/jdk-windows-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x64_bin${{ matrix.artifact }}.zip" -C "${HOME}/jdk-windows-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x64_bin${{ matrix.artifact }}" ++ ++ - name: Create results dir ++ run: | ++ mkdir test-results ++ ++ - name: Run tests ++ working-directory: test ++ run: > ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ $env:Path = $env:Path -split ";" -match "C:\\Windows|PowerShell|cygwin" -join ";" ; ++ $env:PRODUCT_HOME = cygpath "$HOME/jdk-windows-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x64_bin${{ matrix.artifact }}/j2sdk-image" ; ++ $env:JT_HOME = cygpath "$HOME/jtreg" ; ++ $env:ALT_OUTPUTDIR = cygpath "$env:GITHUB_WORKSPACE/test-results" ; ++ $env:JAVA_ARGS = "-XX:-CreateMinidumpOnCrash -Djdk.test.container.command=skipcontianer" ; ++ $env:JTREG_TIMEOUT_FACTOR = "4" ; ++ & make ++ "${{ matrix.suites }}" ++ ++ - name: Check that all tests executed successfully ++ if: always() ++ run: > ++ if ((Get-ChildItem -Path test-results\testoutput\*\exitcode.txt -Recurse | Select-String -Pattern '^0$' -NotMatch ).Count -gt 0) { ++ Get-Content -Path test-results\testoutput\*\JTreport\text\newfailures.txt ; ++ exit 1 ++ } ++ ++ - name: Create suitable test log artifact name ++ if: always() ++ run: echo ("logsuffix=" + ("${{ matrix.test }}" -replace "/", "_" -replace " ", "_")) | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 ++ ++ - name: Package test results ++ if: always() ++ working-directory: test-results ++ run: > ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ zip -r9 ++ "$HOME/windows-x64${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip" ++ . 
++ -x "*ARCHIVE_BUNDLE.zip" ++ continue-on-error: true ++ ++ - name: Persist test results ++ if: always() ++ uses: actions/upload-artifact@v3 ++ with: ++ path: ~/windows-x64${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip ++ continue-on-error: true ++ ++ windows_x86_test: ++ name: Windows x86 ++ runs-on: "windows-2019" ++ needs: ++ - prerequisites ++ - windows_x86_build ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ test: ++ - jdk/tier1 ++ - langtools/tier1 ++ - hotspot/tier1 ++ include: ++ - test: jdk/tier1 ++ suites: jdk_tier1 ++ - test: langtools/tier1 ++ suites: langtools_tier1 ++ - test: hotspot/tier1 ++ suites: hotspot_tier1 ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).BOOT_JDK_VERSION }}" ++ BOOT_JDK_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X86_BOOT_JDK_FILENAME }}" ++ BOOT_JDK_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X86_BOOT_JDK_URL }}" ++ BOOT_JDK_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).WINDOWS_X86_BOOT_JDK_SHA256 }}" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ ++ - name: Restore boot JDK from cache ++ id: bootjdk ++ uses: actions/cache@v3 ++ with: ++ path: ~/bootjdk/${{ env.BOOT_JDK_VERSION }} ++ key: bootjdk-${{ runner.os }}-${{ env.BOOT_JDK_VERSION }}-${{ env.BOOT_JDK_SHA256 }}-v1 ++ ++ - name: Download boot JDK ++ run: | ++ mkdir -p "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ & curl -L "$env:BOOT_JDK_URL" -o "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash = Get-FileHash -Algorithm SHA256 "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" ++ $FileHash.Hash -eq $env:BOOT_JDK_SHA256 ++ & tar -xf "$HOME/bootjdk/$env:BOOT_JDK_FILENAME" -C 
"$HOME/bootjdk/$env:BOOT_JDK_VERSION" ++ Get-ChildItem "$HOME\bootjdk\$env:BOOT_JDK_VERSION\*\*" | Move-Item -Destination "$HOME\bootjdk\$env:BOOT_JDK_VERSION" ++ if: steps.bootjdk.outputs.cache-hit != 'true' ++ ++ - name: Restore cygwin installer from cache ++ id: cygwin-installer ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/setup-x86_64.exe ++ key: cygwin-installer ++ ++ - name: Download cygwin installer ++ run: | ++ New-Item -Force -ItemType directory -Path "$HOME\cygwin" ++ & curl -L "https://www.cygwin.com/setup-x86_64.exe" -o "$HOME/cygwin/setup-x86_64.exe" ++ if: steps.cygwin-installer.outputs.cache-hit != 'true' ++ ++ - name: Restore cygwin packages from cache ++ id: cygwin ++ uses: actions/cache@v3 ++ with: ++ path: ~/cygwin/packages ++ key: cygwin-packages-${{ runner.os }}-v1 ++ ++ - name: Install cygwin ++ run: | ++ Start-Process -FilePath "$HOME\cygwin\setup-x86_64.exe" -ArgumentList "--quiet-mode --packages autoconf,make,zip,unzip --root $HOME\cygwin\cygwin64 --local-package-dir $HOME\cygwin\packages --site http://mirrors.kernel.org/sourceware/cygwin --no-desktop --no-shortcuts --no-startmenu --no-admin" -Wait -NoNewWindow ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Restore build artifacts ++ id: build_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-windows-x86${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-windows-x86${{ matrix.artifact }} ++ continue-on-error: true ++ ++ - name: Restore build artifacts (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: 
transient_jdk-windows-x86${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-windows-x86${{ matrix.artifact }} ++ if: steps.build_restore.outcome == 'failure' ++ ++ - name: Unpack jdk ++ run: | ++ mkdir -p "${HOME}/jdk-windows-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x86_bin${{ matrix.artifact }}" ++ tar -xf "${HOME}/jdk-windows-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x86_bin${{ matrix.artifact }}.zip" -C "${HOME}/jdk-windows-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x86_bin${{ matrix.artifact }}" ++ ++ - name: Create results dir ++ run: | ++ mkdir test-results ++ ++ - name: Run tests ++ working-directory: test ++ run: > ++ $env:Path = "$HOME\cygwin\cygwin64\bin;$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ $env:Path = $env:Path -split ";" -match "C:\\Windows|PowerShell|cygwin" -join ";" ; ++ $env:PRODUCT_HOME = cygpath "$HOME/jdk-windows-x86${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_windows-x86_bin${{ matrix.artifact }}/j2sdk-image" ; ++ $env:JT_HOME = cygpath "$HOME/jtreg" ; ++ $env:ALT_OUTPUTDIR = cygpath "$env:GITHUB_WORKSPACE/test-results" ; ++ $env:JAVA_ARGS = "-XX:-CreateMinidumpOnCrash -Djdk.test.container.command=skipcontianer" ; ++ $env:JTREG_TIMEOUT_FACTOR = "4" ; ++ & make ++ "${{ matrix.suites }}" ++ ++ - name: Check that all tests executed successfully ++ if: always() ++ run: > ++ if ((Get-ChildItem -Path test-results\testoutput\*\exitcode.txt -Recurse | Select-String -Pattern '^0$' -NotMatch ).Count -gt 0) { ++ Get-Content -Path test-results\testoutput\*\JTreport\text\newfailures.txt ; ++ exit 1 ++ } ++ ++ - name: Create suitable test log artifact name ++ if: always() ++ run: echo ("logsuffix=" + ("${{ matrix.test }}" -replace "/", "_" -replace " ", "_")) | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 ++ ++ - name: Package test results ++ if: always() ++ working-directory: test-results ++ run: > ++ $env:Path = 
"$HOME\cygwin\cygwin64\bin;$env:Path" ; ++ zip -r9 ++ "$HOME/windows-x86${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip" ++ . ++ -x "*ARCHIVE_BUNDLE.zip" ++ continue-on-error: true ++ ++ - name: Persist test results ++ if: always() ++ uses: actions/upload-artifact@v3 ++ with: ++ path: ~/windows-x86${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip ++ continue-on-error: true ++ ++ macos_x64_build: ++ name: macOS x64 ++ runs-on: "macos-11" ++ needs: prerequisites ++ if: needs.prerequisites.outputs.should_run != 'false' && needs.prerequisites.outputs.platform_macos_x64 != 'false' ++ ++ strategy: ++ fail-fast: false ++ matrix: ++ flavor: ++ - build release ++ - build debug ++ include: ++ - flavor: build release ++ - flavor: build debug ++ flags: --enable-debug ++ artifact: -debug ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).BOOT_JDK_VERSION }}" ++ BOOT_JDK_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).MACOS_X64_BOOT_JDK_FILENAME }}" ++ BOOT_JDK_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).MACOS_X64_BOOT_JDK_URL }}" ++ BOOT_JDK_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).MACOS_X64_BOOT_JDK_SHA256 }}" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ with: ++ path: jdk ++ ++ - name: Restore boot JDK from cache ++ id: bootjdk ++ uses: actions/cache@v3 ++ with: ++ path: ~/bootjdk/${{ env.BOOT_JDK_VERSION }} ++ key: bootjdk-${{ runner.os }}-${{ env.BOOT_JDK_VERSION }}-${{ env.BOOT_JDK_SHA256 }}-v1 ++ ++ - name: Download boot JDK ++ run: | ++ mkdir -p ${HOME}/bootjdk/${BOOT_JDK_VERSION} || true ++ wget -O "${HOME}/bootjdk/${BOOT_JDK_FILENAME}" "${BOOT_JDK_URL}" ++ echo "${BOOT_JDK_SHA256} 
${HOME}/bootjdk/${BOOT_JDK_FILENAME}" | shasum -a 256 -c >/dev/null - ++ tar -xf "${HOME}/bootjdk/${BOOT_JDK_FILENAME}" -C "${HOME}/bootjdk/${BOOT_JDK_VERSION}" ++ mv "${HOME}/bootjdk/${BOOT_JDK_VERSION}/"*/* "${HOME}/bootjdk/${BOOT_JDK_VERSION}/" ++ if: steps.bootjdk.outputs.cache-hit != 'true' ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Fix jtreg permissions ++ run: chmod -R a+rx ${HOME}/jtreg/ ++ ++ - name: Install dependencies ++ run: brew install make ++ ++ - name: Select Xcode version ++ run: sudo xcode-select --switch /Applications/Xcode_11.7.app/Contents/Developer ++ ++ - name: Configure ++ run: > ++ bash configure ++ --with-conf-name=macos-x64 ++ ${{ matrix.flags }} ++ --with-user-release-suffix=${GITHUB_ACTOR}-${GITHUB_SHA} ++ --with-build-number=b00 ++ --with-boot-jdk=${HOME}/bootjdk/${BOOT_JDK_VERSION}/Contents/Home ++ --with-jtreg=${HOME}/jtreg ++ --with-zlib=system ++ working-directory: jdk ++ ++ - name: Build ++ run: make CONF_NAME=macos-x64 images ++ working-directory: jdk ++ ++ - name: Pack artifacts ++ run: | ++ tar -cf jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}.tar.gz -C jdk/build/macos-x64/images j2sdk-image ++ ++ - name: Persist test bundles ++ uses: actions/upload-artifact@v3 ++ with: ++ name: transient_jdk-macos-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: | ++ jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}.tar.gz ++ ++ macos_x64_test: ++ name: macOS x64 ++ runs-on: "macos-11" ++ needs: ++ - prerequisites ++ - macos_x64_build ++ ++ strategy: ++ 
fail-fast: false ++ matrix: ++ test: ++ - jdk/tier1 ++ - langtools/tier1 ++ - hotspot/tier1 ++ include: ++ - test: jdk/tier1 ++ suites: jdk_tier1 ++ - test: langtools/tier1 ++ suites: langtools_tier1 ++ - test: hotspot/tier1 ++ suites: hotspot_tier1 ++ ++ env: ++ JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MAJOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MINOR_VERSION }}.${{ fromJson(needs.prerequisites.outputs.dependencies).JDK_MICRO_VERSION }}" ++ BOOT_JDK_VERSION: "${{ fromJson(needs.prerequisites.outputs.dependencies).BOOT_JDK_VERSION }}" ++ BOOT_JDK_FILENAME: "${{ fromJson(needs.prerequisites.outputs.dependencies).MACOS_X64_BOOT_JDK_FILENAME }}" ++ BOOT_JDK_URL: "${{ fromJson(needs.prerequisites.outputs.dependencies).MACOS_X64_BOOT_JDK_URL }}" ++ BOOT_JDK_SHA256: "${{ fromJson(needs.prerequisites.outputs.dependencies).MACOS_X64_BOOT_JDK_SHA256 }}" ++ ++ steps: ++ - name: Checkout the source ++ uses: actions/checkout@v3 ++ ++ - name: Restore boot JDK from cache ++ id: bootjdk ++ uses: actions/cache@v3 ++ with: ++ path: ~/bootjdk/${{ env.BOOT_JDK_VERSION }} ++ key: bootjdk-${{ runner.os }}-${{ env.BOOT_JDK_VERSION }}-${{ env.BOOT_JDK_SHA256 }}-v1 ++ ++ - name: Download boot JDK ++ run: | ++ mkdir -p ${HOME}/bootjdk/${BOOT_JDK_VERSION} || true ++ wget -O "${HOME}/bootjdk/${BOOT_JDK_FILENAME}" "${BOOT_JDK_URL}" ++ echo "${BOOT_JDK_SHA256} ${HOME}/bootjdk/${BOOT_JDK_FILENAME}" | shasum -a 256 -c >/dev/null - ++ tar -xf "${HOME}/bootjdk/${BOOT_JDK_FILENAME}" -C "${HOME}/bootjdk/${BOOT_JDK_VERSION}" ++ mv "${HOME}/bootjdk/${BOOT_JDK_VERSION}/"*/* "${HOME}/bootjdk/${BOOT_JDK_VERSION}/" ++ if: steps.bootjdk.outputs.cache-hit != 'true' ++ ++ - name: Restore jtreg artifact ++ id: jtreg_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ continue-on-error: true ++ ++ - name: Restore jtreg artifact (retry) ++ uses: 
actions/download-artifact@v3 ++ with: ++ name: transient_jtreg_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jtreg/ ++ if: steps.jtreg_restore.outcome == 'failure' ++ ++ - name: Restore build artifacts ++ id: build_restore ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-macos-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-macos-x64${{ matrix.artifact }} ++ continue-on-error: true ++ ++ - name: Restore build artifacts (retry) ++ uses: actions/download-artifact@v3 ++ with: ++ name: transient_jdk-macos-x64${{ matrix.artifact }}_${{ needs.prerequisites.outputs.bundle_id }} ++ path: ~/jdk-macos-x64${{ matrix.artifact }} ++ if: steps.build_restore.outcome == 'failure' ++ ++ - name: Unpack jdk ++ run: | ++ mkdir -p "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}" ++ tar -xf "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}.tar.gz" -C "${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}" ++ ++ - name: Install dependencies ++ run: brew install make ++ ++ - name: Select Xcode version ++ run: sudo xcode-select --switch /Applications/Xcode_11.7.app/Contents/Developer ++ ++ - name: Run tests ++ run: > ++ chmod +x "${HOME}/jtreg/bin/jtreg" && ++ mkdir test-results && ++ cd test && ++ PRODUCT_HOME="${HOME}/jdk-macos-x64${{ matrix.artifact }}/jdk-${{ env.JDK_VERSION }}-internal+0_osx-x64_bin${{ matrix.artifact }}/j2sdk-image" ++ JT_HOME="${HOME}/jtreg" ++ ALT_OUTPUTDIR="${GITHUB_WORKSPACE}/test-results" ++ JTREG_TIMEOUT_FACTOR="4" ++ gmake ++ "${{ matrix.suites }}" ++ ++ - name: Check that all tests executed successfully ++ if: always() ++ run: > ++ if cat test-results/testoutput/*/exitcode.txt | grep -q -v '^0$' ++ || ! 
cat test-results/testoutput/*/Stats.txt | grep -q 'fail=0' ; then ++ cat test-results/testoutput/*/JTreport/text/newfailures.txt ; ++ exit 1 ; ++ fi ++ ++ - name: Create suitable test log artifact name ++ if: always() ++ run: echo "logsuffix=`echo ${{ matrix.test }} | sed -e 's!/!_!'g -e 's! !_!'g`" >> $GITHUB_ENV ++ ++ - name: Package test results ++ if: always() ++ working-directory: test-results ++ run: > ++ zip -r9 ++ "$HOME/macos-x64${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip" ++ . ++ -x "*ARCHIVE_BUNDLE.zip" ++ continue-on-error: true ++ ++ - name: Persist test results ++ if: always() ++ uses: actions/upload-artifact@v3 ++ with: ++ path: ~/macos-x64${{ matrix.artifact }}_testresults_${{ env.logsuffix }}.zip ++ continue-on-error: true ++ ++ artifacts: ++ name: Post-process artifacts ++ runs-on: "ubuntu-20.04" ++ if: always() ++ continue-on-error: true ++ needs: ++ - prerequisites ++ - linux_additional_build ++ - linux_x64_test ++ - linux_x86_test ++ - windows_x64_test ++ - windows_x86_test ++ - macos_x64_test ++ ++ steps: ++ - name: Determine current artifacts endpoint ++ id: actions_runtime ++ uses: actions/github-script@v6 ++ with: ++ script: "return { url: process.env['ACTIONS_RUNTIME_URL'], token: process.env['ACTIONS_RUNTIME_TOKEN'] }" ++ ++ - name: Display current artifacts ++ run: > ++ curl -s -H 'Accept: application/json;api-version=6.0-preview' ++ -H 'Authorization: Bearer ${{ fromJson(steps.actions_runtime.outputs.result).token }}' ++ '${{ fromJson(steps.actions_runtime.outputs.result).url }}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview' ++ ++ - name: Delete transient artifacts ++ run: > ++ for url in ` ++ curl -s -H 'Accept: application/json;api-version=6.0-preview' ++ -H 'Authorization: Bearer ${{ fromJson(steps.actions_runtime.outputs.result).token }}' ++ '${{ fromJson(steps.actions_runtime.outputs.result).url }}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview' | ++ 
jq -r -c '.value | map(select(.name|startswith("transient_"))) | .[].url'`; do ++ curl -s -H 'Accept: application/json;api-version=6.0-preview' ++ -H 'Authorization: Bearer ${{ fromJson(steps.actions_runtime.outputs.result).token }}' ++ -X DELETE "${url}"; ++ done ++ ++ - name: Fetch remaining artifacts (test results) ++ uses: actions/download-artifact@v3 ++ with: ++ path: test-results ++ ++ - name: Delete remaining artifacts ++ run: > ++ for url in ` ++ curl -s -H 'Accept: application/json;api-version=6.0-preview' ++ -H 'Authorization: Bearer ${{ fromJson(steps.actions_runtime.outputs.result).token }}' ++ '${{ fromJson(steps.actions_runtime.outputs.result).url }}_apis/pipelines/workflows/${{ github.run_id }}/artifacts?api-version=6.0-preview' | ++ jq -r -c '.value | .[].url'`; do ++ curl -s -H 'Accept: application/json;api-version=6.0-preview' ++ -H 'Authorization: Bearer ${{ fromJson(steps.actions_runtime.outputs.result).token }}' ++ -X DELETE "${url}"; ++ done ++ ++ - name: Upload a combined test results artifact ++ uses: actions/upload-artifact@v3 ++ with: ++ name: test-results_${{ needs.prerequisites.outputs.bundle_id }} ++ path: test-results +diff -uNr openjdk/.gitignore afu8u/.gitignore +--- openjdk/.gitignore 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.gitignore 2025-05-06 11:13:08.067672948 +0800 +@@ -0,0 +1,15 @@ ++/build/ ++/dist/ ++/.idea/ ++nbproject/private/ ++/webrev ++/.src-rev ++/.jib/ ++.DS_Store ++.metadata/ ++.recommenders/ ++test/nashorn/script/external ++test/nashorn/lib ++NashornProfile.txt ++**/JTreport/** ++**/JTwork/** +diff -uNr openjdk/hotspot/agent/make/Makefile afu8u/hotspot/agent/make/Makefile +--- openjdk/hotspot/agent/make/Makefile 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/make/Makefile 2025-05-06 10:53:44.795633662 +0800 +@@ -60,6 +60,7 @@ + sun.jvm.hotspot.debugger.linux.amd64 \ + sun.jvm.hotspot.debugger.linux.aarch64 \ + sun.jvm.hotspot.debugger.linux.x86 \ ++sun.jvm.hotspot.debugger.linux.sw64 \ + 
sun.jvm.hotspot.debugger.posix \ + sun.jvm.hotspot.debugger.posix.elf \ + sun.jvm.hotspot.debugger.proc \ +@@ -67,11 +68,14 @@ + sun.jvm.hotspot.debugger.proc.aarch64 \ + sun.jvm.hotspot.debugger.proc.sparc \ + sun.jvm.hotspot.debugger.proc.x86 \ ++sun.jvm.hotspot.debugger.proc.sw64 \ + sun.jvm.hotspot.debugger.remote \ + sun.jvm.hotspot.debugger.remote.amd64 \ + sun.jvm.hotspot.debugger.remote.sparc \ ++sun.jvm.hotspot.debugger.remote.sw64 \ + sun.jvm.hotspot.debugger.remote.x86 \ + sun.jvm.hotspot.debugger.sparc \ ++sun.jvm.hotspot.debugger.sw64 \ + sun.jvm.hotspot.debugger.win32.coff \ + sun.jvm.hotspot.debugger.windbg \ + sun.jvm.hotspot.debugger.windbg.amd64 \ +@@ -98,12 +102,14 @@ + sun.jvm.hotspot.runtime.linux_amd64 \ + sun.jvm.hotspot.runtime.linux_aarch64 \ + sun.jvm.hotspot.runtime.linux_sparc \ ++sun.jvm.hotspot.runtime.linux_sw64 \ + sun.jvm.hotspot.runtime.linux_x86 \ + sun.jvm.hotspot.runtime.posix \ + sun.jvm.hotspot.runtime.solaris_amd64 \ + sun.jvm.hotspot.runtime.solaris_sparc \ + sun.jvm.hotspot.runtime.solaris_x86 \ + sun.jvm.hotspot.runtime.sparc \ ++sun.jvm.hotspot.runtime.sw64 \ + sun.jvm.hotspot.runtime.win32_amd64 \ + sun.jvm.hotspot.runtime.win32_x86 \ + sun.jvm.hotspot.runtime.x86 \ +@@ -146,6 +152,7 @@ + sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \ + sun/jvm/hotspot/debugger/dummy/*.java \ + sun/jvm/hotspot/debugger/linux/*.java \ ++sun/jvm/hotspot/debugger/linux/sw64/*.java \ + sun/jvm/hotspot/debugger/linux/x86/*.java \ + sun/jvm/hotspot/debugger/linux/aarch64/*.java \ + sun/jvm/hotspot/debugger/posix/*.java \ +@@ -153,11 +160,13 @@ + sun/jvm/hotspot/debugger/proc/*.java \ + sun/jvm/hotspot/debugger/proc/amd64/*.java \ + sun/jvm/hotspot/debugger/proc/sparc/*.java \ ++sun/jvm/hotspot/debugger/proc/sw64/*.java \ + sun/jvm/hotspot/debugger/proc/x86/*.java \ + sun/jvm/hotspot/debugger/proc/aarch64/*.java \ + sun/jvm/hotspot/debugger/remote/*.java \ + sun/jvm/hotspot/debugger/remote/amd64/*.java \ + 
sun/jvm/hotspot/debugger/remote/sparc/*.java \ ++sun/jvm/hotspot/debugger/remote/sw64/*.java \ + sun/jvm/hotspot/debugger/remote/x86/*.java \ + sun/jvm/hotspot/debugger/sparc/*.java \ + sun/jvm/hotspot/debugger/win32/coff/*.java \ +@@ -183,12 +192,14 @@ + sun/jvm/hotspot/runtime/linux_amd64/*.java \ + sun/jvm/hotspot/runtime/linux_aarch64/*.java \ + sun/jvm/hotspot/runtime/linux_sparc/*.java \ ++sun/jvm/hotspot/runtime/linux_sw64/*.java \ + sun/jvm/hotspot/runtime/linux_x86/*.java \ + sun/jvm/hotspot/runtime/posix/*.java \ + sun/jvm/hotspot/runtime/solaris_amd64/*.java \ + sun/jvm/hotspot/runtime/solaris_sparc/*.java \ + sun/jvm/hotspot/runtime/solaris_x86/*.java \ + sun/jvm/hotspot/runtime/sparc/*.java \ ++sun/jvm/hotspot/runtime/sw64/*.java \ + sun/jvm/hotspot/runtime/win32_amd64/*.java \ + sun/jvm/hotspot/runtime/win32_x86/*.java \ + sun/jvm/hotspot/runtime/x86/*.java \ +diff -uNr openjdk/hotspot/agent/make/saenv.sh afu8u/hotspot/agent/make/saenv.sh +--- openjdk/hotspot/agent/make/saenv.sh 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/make/saenv.sh 2025-05-06 10:53:44.795633662 +0800 +@@ -42,6 +42,10 @@ + SA_LIBPATH=$STARTDIR/../src/os/linux/amd64:$STARTDIR/linux/amd64 + OPTIONS="-Dsa.library.path=$SA_LIBPATH" + CPU=amd64 ++ elif [ "$ARCH" = "sw_64" ] ; then ++ SA_LIBPATH=$STARTDIR/../src/os/linux/sw64:$STARTDIR/linux/sw64 ++ OPTIONS="-Dsa.library.path=$SA_LIBPATH" ++ CPU=sw64 + else + SA_LIBPATH=$STARTDIR/../src/os/linux/i386:$STARTDIR/linux/i386 + OPTIONS="-Dsa.library.path=$SA_LIBPATH" +diff -uNr openjdk/hotspot/agent/src/os/linux/libproc.h afu8u/hotspot/agent/src/os/linux/libproc.h +--- openjdk/hotspot/agent/src/os/linux/libproc.h 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/os/linux/libproc.h 2025-05-06 10:53:44.799633662 +0800 +@@ -80,6 +80,21 @@ + #define user_regs_struct user_pt_regs + #endif + ++#if defined(sw64) ++struct j_user_regs_struct { ++ // V0, T0-T7, S0-S5, FP, A3-A5, ++ // T8-T11, RA, T12, AT ++ unsigned long 
regs[26]; ++ unsigned long sp; ++ unsigned long ps; ++ unsigned long pc; ++ unsigned long gp; ++ // remainder regs (A0-A2) ++ unsigned long r_regs[3]; ++}; ++#define user_regs_struct j_user_regs_struct ++#endif ++ + // This C bool type must be int for compatibility with Linux calls and + // it would be a mistake to equivalence it to C++ bool on many platforms + +diff -uNr openjdk/hotspot/agent/src/os/linux/libproc_impl.h afu8u/hotspot/agent/src/os/linux/libproc_impl.h +--- openjdk/hotspot/agent/src/os/linux/libproc_impl.h 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/os/linux/libproc_impl.h 2025-05-06 10:53:44.799633662 +0800 +@@ -32,6 +32,13 @@ + + // data structures in this file mimic those of Solaris 8.0 - libproc's Pcontrol.h + ++#ifndef NAME_MAX ++#define NAME_MAX 255 /* # chars in a file name */ ++#endif ++#ifndef PATH_MAX ++#define PATH_MAX 4096 /* # chars in a path name including nul */ ++#endif ++ + #define BUF_SIZE (PATH_MAX + NAME_MAX + 1) + + // list of shared objects +diff -uNr openjdk/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c afu8u/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c +--- openjdk/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c 2025-05-06 10:53:44.799633662 +0800 +@@ -49,6 +49,10 @@ + #include "sun_jvm_hotspot_debugger_sparc_SPARCThreadContext.h" + #endif + ++#ifdef sw64 ++#include "sun_jvm_hotspot_debugger_sw64_SW64ThreadContext.h" ++#endif ++ + #ifdef aarch64 + #include "sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext.h" + #endif +@@ -337,7 +341,7 @@ + return (err == PS_OK)? 
array : 0; + } + +-#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) || defined(aarch64) ++#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) || defined(aarch64) || defined(sw64) + JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0 + (JNIEnv *env, jobject this_obj, jint lwp_id) { + +@@ -365,6 +369,9 @@ + #if defined(sparc) || defined(sparcv9) + #define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG + #endif ++#ifdef sw64 ++#define NPRGREG sun_jvm_hotspot_debugger_sw64_SW64ThreadContext_NPRGREG ++#endif + + array = (*env)->NewLongArray(env, NPRGREG); + CHECK_EXCEPTION_(0); +@@ -457,6 +464,44 @@ + regs[REG_INDEX(R_O7)] = gregs.u_regs[14]; + #endif /* sparc */ + ++#ifdef sw64 ++ ++#define REG_INDEX(reg) sun_jvm_hotspot_debugger_sw64_SW64ThreadContext_##reg ++ ++ regs[REG_INDEX(V0)] = gregs.regs[0]; ++ regs[REG_INDEX(T0)] = gregs.regs[1]; ++ regs[REG_INDEX(T1)] = gregs.regs[2]; ++ regs[REG_INDEX(T2)] = gregs.regs[3]; ++ regs[REG_INDEX(T3)] = gregs.regs[4]; ++ regs[REG_INDEX(T4)] = gregs.regs[5]; ++ regs[REG_INDEX(T5)] = gregs.regs[6]; ++ regs[REG_INDEX(T6)] = gregs.regs[7]; ++ regs[REG_INDEX(T7)] = gregs.regs[8]; ++ regs[REG_INDEX(S0)] = gregs.regs[9]; ++ regs[REG_INDEX(S1)] = gregs.regs[10]; ++ regs[REG_INDEX(S2)] = gregs.regs[11]; ++ regs[REG_INDEX(S3)] = gregs.regs[12]; ++ regs[REG_INDEX(S4)] = gregs.regs[13]; ++ regs[REG_INDEX(S5)] = gregs.regs[14]; ++ regs[REG_INDEX(FP)] = gregs.regs[15]; ++ regs[REG_INDEX(A3)] = gregs.regs[16]; ++ regs[REG_INDEX(A4)] = gregs.regs[17]; ++ regs[REG_INDEX(A5)] = gregs.regs[18]; ++ regs[REG_INDEX(T8)] = gregs.regs[19]; ++ regs[REG_INDEX(T9)] = gregs.regs[20]; ++ regs[REG_INDEX(T10)] = gregs.regs[21]; ++ regs[REG_INDEX(T11)] = gregs.regs[22]; ++ regs[REG_INDEX(RA)] = gregs.regs[23]; ++ regs[REG_INDEX(T12)] = gregs.regs[24]; ++ regs[REG_INDEX(AT)] = gregs.regs[25]; ++ regs[REG_INDEX(SP)] = gregs.sp; ++ 
regs[REG_INDEX(PS)] = gregs.ps; ++ regs[REG_INDEX(PC)] = gregs.pc; ++ regs[REG_INDEX(GP)] = gregs.gp; ++ regs[REG_INDEX(A0)] = gregs.r_regs[0]; ++ regs[REG_INDEX(A1)] = gregs.r_regs[1]; ++ regs[REG_INDEX(A2)] = gregs.r_regs[2]; ++#endif /* sw64 */ + #if defined(aarch64) + + #define REG_INDEX(reg) sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_##reg +diff -uNr openjdk/hotspot/agent/src/os/linux/Makefile afu8u/hotspot/agent/src/os/linux/Makefile +--- openjdk/hotspot/agent/src/os/linux/Makefile 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/os/linux/Makefile 2025-05-06 10:53:44.799633662 +0800 +@@ -53,8 +53,9 @@ + $(JAVAH) -jni -classpath ../../../build/classes -d $(ARCH) \ + sun.jvm.hotspot.debugger.x86.X86ThreadContext \ + sun.jvm.hotspot.debugger.sparc.SPARCThreadContext \ ++ sun.jvm.hotspot.debugger.sw64.SW64ThreadContext \ + sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext \ +- sun.jvm.hotspot.debugger.aarch64.AARCH64ThreadContext ++ sun.jvm.hotspot.debugger.aarch64.AARCH64ThreadContext + $(GCC) $(CFLAGS) $< -o $@ + + $(ARCH)/sadis.o: ../../share/native/sadis.c +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java 2025-05-06 11:13:08.087672949 +0800 +@@ -31,9 +31,11 @@ + import sun.jvm.hotspot.debugger.cdbg.*; + import sun.jvm.hotspot.debugger.x86.*; + import sun.jvm.hotspot.debugger.amd64.*; ++import sun.jvm.hotspot.debugger.sw64.*; + import sun.jvm.hotspot.debugger.sparc.*; + import sun.jvm.hotspot.debugger.linux.x86.*; + import sun.jvm.hotspot.debugger.linux.amd64.*; ++import sun.jvm.hotspot.debugger.linux.sw64.*; + import sun.jvm.hotspot.debugger.aarch64.*; + import 
sun.jvm.hotspot.debugger.linux.aarch64.*; + import sun.jvm.hotspot.debugger.linux.sparc.*; +@@ -106,6 +108,13 @@ + Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC); + if (pc == null) return null; + return new LinuxAARCH64CFrame(dbg, fp, pc); ++ } else if (cpu.equals("sw64")) { ++ SW64ThreadContext context = (SW64ThreadContext) thread.getContext(); ++ Address sp = context.getRegisterAsAddress(SW64ThreadContext.SP); ++ if (sp == null) return null; ++ Address pc = context.getRegisterAsAddress(SW64ThreadContext.PC); ++ if (pc == null) return null; ++ return new LinuxSW64CFrame(dbg, sp, pc); + } else { + // Runtime exception thrown by LinuxThreadContextFactory if unknown cpu + ThreadContext context = (ThreadContext) thread.getContext(); +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java 2025-05-06 10:53:44.815633663 +0800 +@@ -212,6 +212,8 @@ + // (FIXME: should pick this up from the debugger.) 
+ if (getCPU().equals("ia64")) { + initCache(16384, parseCacheNumPagesProperty(1024)); ++ } else if (getCPU().equals("sw64") || getCPU().equals("sw_64")) { ++ initCache(8192, parseCacheNumPagesProperty(2048)); // = 16*1024*1024/8192 + } else { + initCache(4096, parseCacheNumPagesProperty(4096)); + } +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64CFrame.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64CFrame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64CFrame.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64CFrame.java 2025-05-06 10:53:44.815633663 +0800 +@@ -0,0 +1,80 @@ ++/* ++ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.linux.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.linux.*; ++import sun.jvm.hotspot.debugger.cdbg.*; ++import sun.jvm.hotspot.debugger.cdbg.basic.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++ ++final public class LinuxSW64CFrame extends BasicCFrame { ++ // package/class internals only ++ public LinuxSW64CFrame(LinuxDebugger dbg, Address fp, Address pc) { ++ super(dbg.getCDebugger()); ++ this.fp = fp; ++ this.pc = pc; ++ this.dbg = dbg; ++ } ++ ++ // override base class impl to avoid ELF parsing ++ public ClosestSymbol closestSymbolToPC() { ++ // try native lookup in debugger. ++ return dbg.lookup(dbg.getAddressValue(pc())); ++ } ++ ++ public Address pc() { ++ return pc; ++ } ++ ++ public Address localVariableBase() { ++ return fp; ++ } ++ ++ public CFrame sender(ThreadProxy thread) { ++ SW64ThreadContext context = (SW64ThreadContext) thread.getContext(); ++ Address rsp = context.getRegisterAsAddress(SW64ThreadContext.SP); ++ ++ if ( (fp == null) || fp.lessThan(rsp) ) { ++ return null; ++ } ++ ++ Address nextPC = fp.getAddressAt( 0 * ADDRESS_SIZE); ++ if (nextPC == null) { ++ return null; ++ } ++ Address nextFP = fp.getAddressAt( 1 * ADDRESS_SIZE); ++ if (nextFP == null) { ++ return null; ++ } ++ return new LinuxSW64CFrame(dbg, nextFP, nextPC); ++ } ++ ++ // package/class internals only ++ private static final int ADDRESS_SIZE = 8; ++ private Address pc; ++ private Address fp; ++ private LinuxDebugger dbg; ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64ThreadContext.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64ThreadContext.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64ThreadContext.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/sw64/LinuxSW64ThreadContext.java 2025-05-06 
10:53:44.815633663 +0800 +@@ -0,0 +1,46 @@ ++/* ++ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.linux.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.debugger.linux.*; ++ ++public class LinuxSW64ThreadContext extends SW64ThreadContext { ++ private LinuxDebugger debugger; ++ ++ public LinuxSW64ThreadContext(LinuxDebugger debugger) { ++ super(); ++ this.debugger = debugger; ++ } ++ ++ public void setRegisterAsAddress(int index, Address value) { ++ setRegister(index, debugger.getAddressValue(value)); ++ } ++ ++ public Address getRegisterAsAddress(int index) { ++ return debugger.newAddress(getRegister(index)); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSW64.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSW64.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSW64.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionSW64.java 2025-05-06 10:53:44.807633663 +0800 +@@ -0,0 +1,39 @@ ++/* ++ * Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger; ++ ++public class MachineDescriptionSW64 extends MachineDescriptionTwosComplement implements MachineDescription { ++ public long getAddressSize() { ++ return 8; ++ } ++ ++ public boolean isLP64() { ++ return true; ++ } ++ ++ public boolean isBigEndian() { ++ return false; ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/ProcDebuggerLocal.java 2025-05-06 10:53:44.815633663 +0800 +@@ -33,10 +33,12 @@ + import sun.jvm.hotspot.debugger.proc.amd64.*; + import sun.jvm.hotspot.debugger.proc.aarch64.*; + import sun.jvm.hotspot.debugger.proc.sparc.*; ++import sun.jvm.hotspot.debugger.proc.sw64.*; + import sun.jvm.hotspot.debugger.proc.x86.*; + import sun.jvm.hotspot.debugger.amd64.*; + import sun.jvm.hotspot.debugger.aarch64.*; + import sun.jvm.hotspot.debugger.sparc.*; ++import sun.jvm.hotspot.debugger.sw64.*; + import sun.jvm.hotspot.debugger.x86.*; + import sun.jvm.hotspot.utilities.*; + +@@ -88,6 +90,10 @@ + threadFactory = new ProcAMD64ThreadFactory(this); + pcRegIndex = AMD64ThreadContext.RIP; + fpRegIndex = AMD64ThreadContext.RBP; ++ } else if (cpu.equals("sw64") || cpu.equals("sw_64")) { ++ threadFactory = new ProcSW64ThreadFactory(this); ++ pcRegIndex = 
SW64ThreadContext.PC; ++ fpRegIndex = SW64ThreadContext.FP; + } else if (cpu.equals("aarch64")) { + threadFactory = new ProcAARCH64ThreadFactory(this); + pcRegIndex = AARCH64ThreadContext.PC; +@@ -398,8 +404,9 @@ + if (pagesize == -1) { + // return the hard coded default value. + if (PlatformInfo.getCPU().equals("sparc") || ++ PlatformInfo.getCPU().equals("sw64") || PlatformInfo.getCPU().equals("sw_64") || + PlatformInfo.getCPU().equals("amd64") ) +- pagesize = 8196; ++ pagesize = 8192; + else + pagesize = 4096; + } +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadContext.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadContext.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadContext.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadContext.java 2025-05-06 10:53:44.815633663 +0800 +@@ -0,0 +1,46 @@ ++/* ++ * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.proc.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.debugger.proc.*; ++ ++public class ProcSW64ThreadContext extends SW64ThreadContext { ++ private ProcDebugger debugger; ++ ++ public ProcSW64ThreadContext(ProcDebugger debugger) { ++ super(); ++ this.debugger = debugger; ++ } ++ ++ public void setRegisterAsAddress(int index, Address value) { ++ setRegister(index, debugger.getAddressValue(value)); ++ } ++ ++ public Address getRegisterAsAddress(int index) { ++ return debugger.newAddress(getRegister(index)); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadFactory.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadFactory.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadFactory.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64ThreadFactory.java 2025-05-06 10:53:44.815633663 +0800 +@@ -0,0 +1,44 @@ ++/* ++ * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.proc.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.proc.*; ++ ++public class ProcSW64ThreadFactory implements ProcThreadFactory { ++ private ProcDebugger debugger; ++ ++ public ProcSW64ThreadFactory(ProcDebugger debugger) { ++ this.debugger = debugger; ++ } ++ ++ public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { ++ return new ProcSW64Thread(debugger, threadIdentifierAddr); ++ } ++ ++ public ThreadProxy createThreadWrapper(long id) { ++ return new ProcSW64Thread(debugger, id); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64Thread.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64Thread.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64Thread.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/proc/sw64/ProcSW64Thread.java 2025-05-06 10:53:44.815633663 +0800 +@@ -0,0 +1,87 @@ ++/* ++ * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.proc.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.debugger.proc.*; ++import sun.jvm.hotspot.utilities.*; ++ ++public class ProcSW64Thread implements ThreadProxy { ++ private ProcDebugger debugger; ++ private int id; ++ ++ public ProcSW64Thread(ProcDebugger debugger, Address addr) { ++ this.debugger = debugger; ++ ++ // FIXME: the size here should be configurable. However, making it ++ // so would produce a dependency on the "types" package from the ++ // debugger package, which is not desired. 
++ this.id = (int) addr.getCIntegerAt(0, 4, true); ++ } ++ ++ public ProcSW64Thread(ProcDebugger debugger, long id) { ++ this.debugger = debugger; ++ this.id = (int) id; ++ } ++ ++ public ThreadContext getContext() throws IllegalThreadStateException { ++ ProcSW64ThreadContext context = new ProcSW64ThreadContext(debugger); ++ long[] regs = debugger.getThreadIntegerRegisterSet(id); ++ ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(regs.length <= SW64ThreadContext.NPRGREG, "size of register set is greater than " + SW64ThreadContext.NPRGREG); ++ } ++ for (int i = 0; i < regs.length; i++) { ++ context.setRegister(i, regs[i]); ++ } ++ return context; ++ } ++ ++ public boolean canSetContext() throws DebuggerException { ++ return false; ++ } ++ ++ public void setContext(ThreadContext context) ++ throws IllegalThreadStateException, DebuggerException { ++ throw new DebuggerException("Unimplemented"); ++ } ++ ++ public String toString() { ++ return "t@" + id; ++ } ++ ++ public boolean equals(Object obj) { ++ if ((obj == null) || !(obj instanceof ProcSW64Thread)) { ++ return false; ++ } ++ ++ return (((ProcSW64Thread) obj).id == id); ++ } ++ ++ public int hashCode() { ++ return id; ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/RemoteDebuggerClient.java 2025-05-06 10:53:44.815633663 +0800 +@@ -31,6 +31,7 @@ + import sun.jvm.hotspot.debugger.*; + import sun.jvm.hotspot.debugger.cdbg.*; + import sun.jvm.hotspot.debugger.remote.sparc.*; ++import sun.jvm.hotspot.debugger.remote.sw64.*; + import sun.jvm.hotspot.debugger.remote.x86.*; + import sun.jvm.hotspot.debugger.remote.amd64.*; + +@@ -70,6 +71,10 @@ + 
cachePageSize = 4096; + cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); + unalignedAccessesOkay = true; ++ } else if (cpu.equals("sw64") || cpu.equals("sw_64")) { ++ threadFactory = new RemoteSW64ThreadFactory(this); ++ cachePageSize = 8192; ++ cacheNumPages = parseCacheNumPagesProperty(cacheSize / cachePageSize); + } else { + try { + Class tf = Class.forName("sun.jvm.hotspot.debugger.remote." + +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadContext.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadContext.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadContext.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadContext.java 2025-05-06 10:53:44.819633663 +0800 +@@ -0,0 +1,47 @@ ++/* ++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.remote.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.debugger.remote.*; ++ ++public class RemoteSW64ThreadContext extends SW64ThreadContext { ++ private RemoteDebuggerClient debugger; ++ ++ public RemoteSW64ThreadContext(RemoteDebuggerClient debugger) { ++ super(); ++ this.debugger = debugger; ++ } ++ ++ public void setRegisterAsAddress(int index, Address value) { ++ setRegister(index, debugger.getAddressValue(value)); ++ } ++ ++ public Address getRegisterAsAddress(int index) { ++ return debugger.newAddress(getRegister(index)); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadFactory.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadFactory.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadFactory.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64ThreadFactory.java 2025-05-06 10:53:44.819633663 +0800 +@@ -0,0 +1,45 @@ ++/* ++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.remote.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.remote.*; ++ ++public class RemoteSW64ThreadFactory implements RemoteThreadFactory { ++ private RemoteDebuggerClient debugger; ++ ++ public RemoteSW64ThreadFactory(RemoteDebuggerClient debugger) { ++ this.debugger = debugger; ++ } ++ ++ public ThreadProxy createThreadWrapper(Address threadIdentifierAddr) { ++ return new RemoteSW64Thread(debugger, threadIdentifierAddr); ++ } ++ ++ public ThreadProxy createThreadWrapper(long id) { ++ return new RemoteSW64Thread(debugger, id); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64Thread.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64Thread.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64Thread.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/remote/sw64/RemoteSW64Thread.java 2025-05-06 10:53:44.819633663 +0800 +@@ -0,0 +1,54 @@ ++/* ++ * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.remote.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.debugger.remote.*; ++import sun.jvm.hotspot.utilities.*; ++ ++public class RemoteSW64Thread extends RemoteThread { ++ public RemoteSW64Thread(RemoteDebuggerClient debugger, Address addr) { ++ super(debugger, addr); ++ } ++ ++ public RemoteSW64Thread(RemoteDebuggerClient debugger, long id) { ++ super(debugger, id); ++ } ++ ++ public ThreadContext getContext() throws IllegalThreadStateException { ++ RemoteSW64ThreadContext context = new RemoteSW64ThreadContext(debugger); ++ long[] regs = (addr != null)? 
debugger.getThreadIntegerRegisterSet(addr) : ++ debugger.getThreadIntegerRegisterSet(id); ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(regs.length == SW64ThreadContext.NPRGREG, "size of register set must match"); ++ } ++ for (int i = 0; i < regs.length; i++) { ++ context.setRegister(i, regs[i]); ++ } ++ return context; ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/sw64/SW64ThreadContext.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/sw64/SW64ThreadContext.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/sw64/SW64ThreadContext.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/sw64/SW64ThreadContext.java 2025-05-06 10:53:44.819633663 +0800 +@@ -0,0 +1,124 @@ ++/* ++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++package sun.jvm.hotspot.debugger.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.cdbg.*; ++ ++/** Specifies the thread context on sw64 platforms; only a sub-portion ++ of the context is guaranteed to be present on all operating ++ systems. */ ++ ++public abstract class SW64ThreadContext implements ThreadContext { ++ // Taken from /usr/include/asm/reg.h on Linux/sw* ++ ++ // NOTE: the indices for the various registers must be maintained as ++ // listed across various operating systems. However, only a small ++ // subset of the registers' values are guaranteed to be present (and ++ // must be present for the SA's stack walking to work): EF_V0, ... ++ ++ // Exception frame offsets, copy from asm/reg.h ++ public static final int V0 = 0; ++ public static final int T0 = 1; ++ public static final int T1 = 2; ++ public static final int T2 = 3; ++ public static final int T3 = 4; ++ public static final int T4 = 5; ++ public static final int T5 = 6; ++ public static final int T6 = 7; ++ public static final int T7 = 8; ++ public static final int S0 = 9; ++ public static final int S1 = 10; ++ public static final int S2 = 11; ++ public static final int S3 = 12; ++ public static final int S4 = 13; ++ public static final int S5 = 14; ++ public static final int FP = 15; ++ public static final int A3 = 16; ++ public static final int A4 = 17; ++ public static final int A5 = 18; ++ public static final int T8 = 19; ++ public static final int T9 = 20; ++ public static final int T10 = 21; ++ public static final int T11 = 22; ++ public static final int RA = 23; ++ public static final int T12 = 24; ++ public static final int AT = 25; ++ public static final int SP = 26; ++ public static final int PS = 27; ++ public static final int PC = 28; ++ public static final int GP = 29; ++ public static final int A0 = 30; ++ public static final int A1 = 31; ++ public static final int A2 = 32; ++ ++ ++ public static final int NPRGREG = 33; ++ ++ private 
static final String[] regNames = { ++ "V0", "T0", "T1", "T2", ++ "T3", "T4", "T5", "T6", ++ "T7", "S0", "S1", "S2", ++ "S3", "S4", "S5", "FP", ++ "A3", "A4", "A5", "T8", "T9", ++ "T10", "T11", "RA", "T12", ++ "AT", "SP", "PS", "PC", ++ "GP", "A0", "A1", "A2" ++ }; ++ ++ private long[] data; ++ ++ public SW64ThreadContext() { ++ data = new long[NPRGREG]; ++ } ++ ++ public int getNumRegisters() { ++ return NPRGREG; ++ } ++ ++ public String getRegisterName(int index) { ++ return regNames[index]; ++ } ++ ++ public void setRegister(int index, long value) { ++ data[index] = value; ++ } ++ ++ public long getRegister(int index) { ++ return data[index]; ++ } ++ ++ public CFrame getTopFrame(Debugger dbg) { ++ return null; ++ } ++ ++ /** This can't be implemented in this class since we would have to ++ tie the implementation to, for example, the debugging system */ ++ public abstract void setRegisterAsAddress(int index, Address value); ++ ++ /** This can't be implemented in this class since we would have to ++ tie the implementation to, for example, the debugging system */ ++ public abstract Address getRegisterAsAddress(int index); ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeap.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeap.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeap.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeap.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,114 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-package sun.jvm.hotspot.gc_implementation.shenandoah; +- +-import sun.jvm.hotspot.gc_interface.CollectedHeap; +-import sun.jvm.hotspot.gc_interface.CollectedHeapName; +-import sun.jvm.hotspot.debugger.Address; +-import sun.jvm.hotspot.runtime.VM; +-import sun.jvm.hotspot.runtime.VMObjectFactory; +-import sun.jvm.hotspot.types.Type; +-import sun.jvm.hotspot.types.TypeDataBase; +-import sun.jvm.hotspot.memory.MemRegion; +-import sun.jvm.hotspot.memory.SpaceClosure; +-import sun.jvm.hotspot.types.AddressField; +-import sun.jvm.hotspot.types.CIntegerField; +-import sun.jvm.hotspot.types.JLongField; +-import java.io.PrintStream; +-import java.util.ArrayList; +-import java.util.List; +-import java.util.Observable; +-import java.util.Observer; +- +-public class ShenandoahHeap extends CollectedHeap { +- static private CIntegerField numRegions; +- static private JLongField used; +- static private CIntegerField committed; +- static private AddressField regions; +- +- static { +- VM.registerVMInitializedObserver(new Observer() { +- public void update(Observable o, Object data) { +- initialize(VM.getVM().getTypeDataBase()); +- } +- }); +- } +- +- static private synchronized void initialize(TypeDataBase db) { +- Type type = 
db.lookupType("ShenandoahHeap"); +- numRegions = type.getCIntegerField("_num_regions"); +- used = type.getJLongField("_used"); +- committed = type.getCIntegerField("_committed"); +- regions = type.getAddressField("_regions"); +- } +- +- @Override +- public CollectedHeapName kind() { +- return CollectedHeapName.SHENANDOAH_HEAP; +- } +- +- public long numOfRegions() { +- return numRegions.getValue(addr); +- } +- +- @Override +- public long used() { +- return used.getValue(addr); +- } +- +- public long committed() { +- return committed.getValue(addr); +- } +- +- @Override +- public void printOn(PrintStream tty) { +- MemRegion mr = reservedRegion(); +- tty.print("Shenandoah heap"); +- tty.print(" [" + mr.start() + ", " + mr.end() + "]"); +- tty.println(" region size " + ShenandoahHeapRegion.regionSizeBytes() / 1024 + " K"); +- } +- +- public ShenandoahHeap(Address addr) { +- super(addr); +- } +- +- private ShenandoahHeapRegion at(long index) { +- Address arrayAddr = regions.getValue(addr); +- // Offset of &_regions[index] +- long offset = index * VM.getVM().getAddressSize(); +- Address regionAddr = arrayAddr.getAddressAt(offset); +- return (ShenandoahHeapRegion) VMObjectFactory.newObject(ShenandoahHeapRegion.class, +- regionAddr); +- } +- +- public List/**/ getLiveRegions() { +- List res = new ArrayList(); +- for (int i = 0; i < numOfRegions(); i++) { +- ShenandoahHeapRegion r = at(i); +- res.add(new MemRegion(r.bottom(), r.top())); +- } +- return res; +- } +- +-} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeapRegion.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeapRegion.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeapRegion.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/shenandoah/ShenandoahHeapRegion.java 1970-01-01 
08:00:00.000000000 +0800 +@@ -1,80 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-package sun.jvm.hotspot.gc_implementation.shenandoah; +- +-import sun.jvm.hotspot.memory.ContiguousSpace; +-import sun.jvm.hotspot.types.AddressField; +-import sun.jvm.hotspot.types.CIntegerField; +-import sun.jvm.hotspot.runtime.VM; +-import sun.jvm.hotspot.runtime.VMObject; +-import sun.jvm.hotspot.types.Type; +-import sun.jvm.hotspot.types.TypeDataBase; +-import sun.jvm.hotspot.debugger.Address; +- +-import java.util.Observable; +-import java.util.Observer; +- +- +-public class ShenandoahHeapRegion extends VMObject { +- private static CIntegerField RegionSizeBytes; +- +- private static AddressField BottomField; +- private static AddressField TopField; +- private static AddressField EndField; +- +- static { +- VM.registerVMInitializedObserver(new Observer() { +- public void update(Observable o, Object data) { +- initialize(VM.getVM().getTypeDataBase()); +- } +- }); +- } +- +- static private synchronized void initialize(TypeDataBase db) { +- Type type = db.lookupType("ShenandoahHeapRegion"); +- RegionSizeBytes = type.getCIntegerField("RegionSizeBytes"); +- +- BottomField = type.getAddressField("_bottom"); +- TopField = type.getAddressField("_top"); +- EndField = type.getAddressField("_end"); +- } +- +- public static long regionSizeBytes() { return RegionSizeBytes.getValue(); } +- +- public ShenandoahHeapRegion(Address addr) { +- super(addr); +- } +- +- public Address bottom() { +- return BottomField.getValue(addr); +- } +- +- public Address top() { +- return TopField.getValue(addr); +- } +- +- public Address end() { +- return EndField.getValue(addr); +- } +-} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/CollectedHeapName.java 2025-05-06 10:53:44.823633663 +0800 +@@ -36,7 +36,6 @@ + public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap"); + public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap"); + public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap"); +- public static final CollectedHeapName SHENANDOAH_HEAP = new CollectedHeapName("ShenandoahHeap"); + + public String toString() { + return name; +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java 2025-05-06 10:53:44.823633663 +0800 +@@ -55,11 +55,6 @@ + _g1_inc_collection_pause ("G1 Evacuation Pause"), + _g1_humongous_allocation ("G1 Humongous Allocation"), + +- _shenandoah_allocation_failure_evac ("Allocation Failure During Evacuation"), +- _shenandoah_stop_vm ("Stopping VM"), +- _shenandoah_concurrent_gc ("Concurrent GC"), +- _shenandoah_upgrade_to_full_gc ("Upgrade to Full GC"), +- + _last_ditch_collection ("Last ditch collection"), + _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE"); + +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java 2025-05-06 10:53:44.823633663 +0800 +@@ -36,7 +36,6 @@ + G1New ("G1New"), + ConcurrentMarkSweep 
("ConcurrentMarkSweep"), + G1Old ("G1Old"), +- Shenandoah ("Shenandoah"), + GCNameEndSentinel ("GCNameEndSentinel"); + + private final String value; +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java 2025-05-06 10:53:44.803633662 +0800 +@@ -34,6 +34,7 @@ + import sun.jvm.hotspot.debugger.MachineDescription; + import sun.jvm.hotspot.debugger.MachineDescriptionAMD64; + import sun.jvm.hotspot.debugger.MachineDescriptionIA64; ++import sun.jvm.hotspot.debugger.MachineDescriptionSW64; + import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86; + import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit; + import sun.jvm.hotspot.debugger.MachineDescriptionSPARC64Bit; +@@ -588,6 +589,8 @@ + machDesc = new MachineDescriptionIA64(); + } else if (cpu.equals("amd64")) { + machDesc = new MachineDescriptionAMD64(); ++ } else if (cpu.equals("sw64")) { ++ machDesc = new MachineDescriptionSW64(); + } else if (cpu.equals("sparc")) { + if (LinuxDebuggerLocal.getAddressSize()==8) { + machDesc = new MachineDescriptionSPARC64Bit(); +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/HSDB.java 2025-05-06 10:53:44.803633662 +0800 +@@ -36,7 +36,6 @@ + import sun.jvm.hotspot.compiler.*; + import sun.jvm.hotspot.debugger.*; + import sun.jvm.hotspot.gc_implementation.parallelScavenge.*; +-import sun.jvm.hotspot.gc_implementation.shenandoah.*; + import sun.jvm.hotspot.gc_interface.*; + import sun.jvm.hotspot.interpreter.*; + import 
sun.jvm.hotspot.memory.*; +@@ -1092,10 +1091,6 @@ + anno = "PSOldGen "; + bad = false; + } +- } else if (collHeap instanceof ShenandoahHeap) { +- ShenandoahHeap heap = (ShenandoahHeap) collHeap; +- anno = "ShenandoahHeap "; +- bad = false; + } else { + // Optimistically assume the oop isn't bad + anno = "[Unknown generation] "; +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java 2025-05-06 10:53:44.831633663 +0800 +@@ -29,7 +29,6 @@ + import sun.jvm.hotspot.debugger.*; + import sun.jvm.hotspot.gc_interface.*; + import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap; +-import sun.jvm.hotspot.gc_implementation.shenandoah.ShenandoahHeap; + import sun.jvm.hotspot.gc_implementation.parallelScavenge.*; + import sun.jvm.hotspot.oops.*; + import sun.jvm.hotspot.types.*; +@@ -80,7 +79,6 @@ + heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class); + heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class); + heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class); +- heapConstructor.addMapping("ShenandoahHeap", ShenandoahHeap.class); + + mainThreadGroupField = type.getOopField("_main_thread_group"); + systemThreadGroupField = type.getOopField("_system_thread_group"); +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java 2025-05-06 10:53:44.835633664 +0800 +@@ -34,7 +34,6 @@ + import sun.jvm.hotspot.debugger.*; + 
import sun.jvm.hotspot.gc_interface.*; + import sun.jvm.hotspot.gc_implementation.g1.*; +-import sun.jvm.hotspot.gc_implementation.shenandoah.*; + import sun.jvm.hotspot.gc_implementation.parallelScavenge.*; + import sun.jvm.hotspot.memory.*; + import sun.jvm.hotspot.runtime.*; +@@ -438,13 +437,10 @@ + } else if (heap instanceof G1CollectedHeap) { + G1CollectedHeap g1h = (G1CollectedHeap) heap; + g1h.heapRegionIterate(lrc); +- } else if (heap instanceof ShenandoahHeap) { +- ShenandoahHeap sh = (ShenandoahHeap) heap; +- addLiveRegions("heap", sh.getLiveRegions(), liveRegions); + } else { + if (Assert.ASSERTS_ENABLED) { + Assert.that(false, "Expecting GenCollectedHeap, G1CollectedHeap, " + +- "SheandoahHeap or ParallelScavengeHeap, but got " + ++ "or ParallelScavengeHeap, but got " + + heap.getClass().getName()); + } + } +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_sw64/LinuxSW64JavaThreadPDAccess.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_sw64/LinuxSW64JavaThreadPDAccess.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_sw64/LinuxSW64JavaThreadPDAccess.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/linux_sw64/LinuxSW64JavaThreadPDAccess.java 2025-05-06 10:53:44.839633664 +0800 +@@ -0,0 +1,132 @@ ++/* ++ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.runtime.linux_sw64; ++ ++import java.io.*; ++import java.util.*; ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.runtime.*; ++import sun.jvm.hotspot.runtime.sw64.*; ++import sun.jvm.hotspot.types.*; ++import sun.jvm.hotspot.utilities.*; ++ ++public class LinuxSW64JavaThreadPDAccess implements JavaThreadPDAccess { ++ private static AddressField lastJavaFPField; ++ private static AddressField osThreadField; ++ ++ // Field from OSThread ++ private static CIntegerField osThreadThreadIDField; ++ ++ // This is currently unneeded but is being kept in case we change ++ // the currentFrameGuess algorithm ++ private static final long GUESS_SCAN_RANGE = 128 * 1024; ++ ++ static { ++ VM.registerVMInitializedObserver(new Observer() { ++ public void update(Observable o, Object data) { ++ initialize(VM.getVM().getTypeDataBase()); ++ } ++ }); ++ } ++ ++ private static synchronized void initialize(TypeDataBase db) { ++ Type type = db.lookupType("JavaThread"); ++ osThreadField = type.getAddressField("_osthread"); ++ ++ Type anchorType = db.lookupType("JavaFrameAnchor"); ++ lastJavaFPField = anchorType.getAddressField("_last_Java_fp"); ++ ++ Type osThreadType = db.lookupType("OSThread"); ++ osThreadThreadIDField = osThreadType.getCIntegerField("_thread_id"); ++ } ++ ++ public Address getLastJavaFP(Address addr) { ++ return 
lastJavaFPField.getValue(addr.addOffsetTo(sun.jvm.hotspot.runtime.JavaThread.getAnchorField().getOffset())); ++ } ++ ++ public Address getLastJavaPC(Address addr) { ++ return null; ++ } ++ ++ public Address getBaseOfStackPointer(Address addr) { ++ return null; ++ } ++ ++ public Frame getLastFramePD(JavaThread thread, Address addr) { ++ Address fp = thread.getLastJavaFP(); ++ if (fp == null) { ++ return null; // no information ++ } ++ return new SW64Frame(thread.getLastJavaSP(), fp); ++ } ++ ++ public RegisterMap newRegisterMap(JavaThread thread, boolean updateMap) { ++ return new SW64RegisterMap(thread, updateMap); ++ } ++ ++ public Frame getCurrentFrameGuess(JavaThread thread, Address addr) { ++ ThreadProxy t = getThreadProxy(addr); ++ SW64ThreadContext context = (SW64ThreadContext) t.getContext(); ++ SW64CurrentFrameGuess guesser = new SW64CurrentFrameGuess(context, thread); ++ if (!guesser.run(GUESS_SCAN_RANGE)) { ++ return null; ++ } ++ if (guesser.getPC() == null) { ++ return new SW64Frame(guesser.getSP(), guesser.getFP()); ++ } else { ++ return new SW64Frame(guesser.getSP(), guesser.getFP(), guesser.getPC()); ++ } ++ } ++ ++ public void printThreadIDOn(Address addr, PrintStream tty) { ++ tty.print(getThreadProxy(addr)); ++ } ++ ++ public void printInfoOn(Address threadAddr, PrintStream tty) { ++ tty.print("Thread id: "); ++ printThreadIDOn(threadAddr, tty); ++// tty.println("\nPostJavaState: " + getPostJavaState(threadAddr)); ++ } ++ ++ public Address getLastSP(Address addr) { ++ ThreadProxy t = getThreadProxy(addr); ++ SW64ThreadContext context = (SW64ThreadContext) t.getContext(); ++ return context.getRegisterAsAddress(SW64ThreadContext.SP); ++ } ++ ++ public ThreadProxy getThreadProxy(Address addr) { ++ // Addr is the address of the JavaThread. 
++ // Fetch the OSThread (for now and for simplicity, not making a ++ // separate "OSThread" class in this package) ++ Address osThreadAddr = osThreadField.getValue(addr); ++ // Get the address of the _thread_id from the OSThread ++ Address threadIdAddr = osThreadAddr.addOffsetTo(osThreadThreadIDField.getOffset()); ++ ++ JVMDebugger debugger = VM.getVM().getDebugger(); ++ return debugger.getThreadForIdentifierAddress(threadIdAddr); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64CurrentFrameGuess.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64CurrentFrameGuess.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64CurrentFrameGuess.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64CurrentFrameGuess.java 2025-05-06 10:53:44.839633664 +0800 +@@ -0,0 +1,244 @@ ++/* ++ * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.runtime.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.debugger.sw64.*; ++import sun.jvm.hotspot.code.*; ++import sun.jvm.hotspot.interpreter.*; ++import sun.jvm.hotspot.runtime.*; ++import sun.jvm.hotspot.runtime.sw64.*; ++ ++/**

Should be able to be used on all sw64 platforms we support ++ (Linux/sw64) to implement JavaThread's "currentFrameGuess()" ++ functionality. Input is an SW64ThreadContext; output is SP, FP, ++ and PC for an SW64Frame. Instantiation of the SW64Frame is ++ left to the caller, since we may need to subclass SW64Frame to ++ support signal handler frames on Unix platforms.

++ ++

Algorithm is to walk up the stack within a given range (say, ++ 512K at most) looking for a plausible PC and SP for a Java frame, ++ also considering those coming in from the context. If we find a PC ++ that belongs to the VM (i.e., in generated code like the ++ interpreter or CodeCache) then we try to find an associated FP. ++ We repeat this until we either find a complete frame or run out of ++ stack to look at.

*/ ++ ++public class SW64CurrentFrameGuess { ++ private SW64ThreadContext context; ++ private JavaThread thread; ++ private Address spFound; ++ private Address fpFound; ++ private Address pcFound; ++ ++ private static final boolean DEBUG = System.getProperty("sun.jvm.hotspot.runtime.sw64.SW64Frame.DEBUG") ++ != null; ++ ++ public SW64CurrentFrameGuess(SW64ThreadContext context, ++ JavaThread thread) { ++ this.context = context; ++ this.thread = thread; ++ } ++ ++ /** Returns false if not able to find a frame within a reasonable range. */ ++ public boolean run(long regionInBytesToSearch) { ++ Address sp = context.getRegisterAsAddress(SW64ThreadContext.SP); ++ Address pc = context.getRegisterAsAddress(SW64ThreadContext.PC); ++ Address fp = context.getRegisterAsAddress(SW64ThreadContext.FP); ++ if (sp == null) { ++ // Bail out if no last java frame either ++ if (thread.getLastJavaSP() != null) { ++ setValues(thread.getLastJavaSP(), thread.getLastJavaFP(), null); ++ return true; ++ } ++ return false; ++ } ++ Address end = sp.addOffsetTo(regionInBytesToSearch); ++ VM vm = VM.getVM(); ++ ++ setValues(null, null, null); // Assume we're not going to find anything ++ ++ if (vm.isJavaPCDbg(pc)) { ++ if (vm.isClientCompiler()) { ++ // If the topmost frame is a Java frame, we are (pretty much) ++ // guaranteed to have a viable FP. We should be more robust ++ // than this (we have the potential for losing entire threads' ++ // stack traces) but need to see how much work we really have ++ // to do here. Searching the stack for an (SP, FP) pair is ++ // hard since it's easy to misinterpret inter-frame stack ++ // pointers as base-of-frame pointers; we also don't know the ++ // sizes of C1 frames (not registered in the nmethod) so can't ++ // derive them from SP. 
++ ++ setValues(sp, fp, pc); ++ return true; ++ } else { ++ if (vm.getInterpreter().contains(pc)) { ++ if (DEBUG) { ++ System.out.println("CurrentFrameGuess: choosing interpreter frame: sp = " + ++ sp + ", fp = " + fp + ", pc = " + pc); ++ } ++ setValues(sp, fp, pc); ++ return true; ++ } ++ ++ // For the server compiler, FP is not guaranteed to be valid ++ // for compiled code. In addition, an earlier attempt at a ++ // non-searching algorithm (see below) failed because the ++ // stack pointer from the thread context was pointing ++ // (considerably) beyond the ostensible end of the stack, into ++ // garbage; walking from the topmost frame back caused a crash. ++ // ++ // This algorithm takes the current PC as a given and tries to ++ // find the correct corresponding SP by walking up the stack ++ // and repeatedly performing stackwalks (very inefficient). ++ // ++ // FIXME: there is something wrong with stackwalking across ++ // adapter frames...this is likely to be the root cause of the ++ // failure with the simpler algorithm below. ++ ++ for (long offset = 0; ++ offset < regionInBytesToSearch; ++ offset += vm.getAddressSize()) { ++ try { ++ Address curSP = sp.addOffsetTo(offset); ++ Frame frame = new SW64Frame(curSP, null, pc); ++ RegisterMap map = thread.newRegisterMap(false); ++ while (frame != null) { ++ if (frame.isEntryFrame() && frame.entryFrameIsFirst()) { ++ // We were able to traverse all the way to the ++ // bottommost Java frame. ++ // This sp looks good. Keep it. ++ if (DEBUG) { ++ System.out.println("CurrentFrameGuess: Choosing sp = " + curSP + ", pc = " + pc); ++ } ++ setValues(curSP, null, pc); ++ return true; ++ } ++ frame = frame.sender(map); ++ } ++ } catch (Exception e) { ++ if (DEBUG) { ++ System.out.println("CurrentFrameGuess: Exception " + e + " at offset " + offset); ++ } ++ // Bad SP. Try another. ++ } ++ } ++ ++ // Were not able to find a plausible SP to go with this PC. ++ // Bail out. 
++ return false; ++ ++ /* ++ // Original algorithm which does not work because SP was ++ // pointing beyond where it should have: ++ ++ // For the server compiler, FP is not guaranteed to be valid ++ // for compiled code. We see whether the PC is in the ++ // interpreter and take care of that, otherwise we run code ++ // (unfortunately) duplicated from SW64Frame.senderForCompiledFrame. ++ ++ CodeCache cc = vm.getCodeCache(); ++ if (cc.contains(pc)) { ++ CodeBlob cb = cc.findBlob(pc); ++ ++ // See if we can derive a frame pointer from SP and PC ++ // NOTE: This is the code duplicated from SW64Frame ++ Address saved_fp = null; ++ int llink_offset = cb.getLinkOffset(); ++ if (llink_offset >= 0) { ++ // Restore base-pointer, since next frame might be an interpreter frame. ++ Address fp_addr = sp.addOffsetTo(VM.getVM().getAddressSize() * llink_offset); ++ saved_fp = fp_addr.getAddressAt(0); ++ } ++ ++ setValues(sp, saved_fp, pc); ++ return true; ++ } ++ */ ++ } ++ } else { ++ // If the current program counter was not known to us as a Java ++ // PC, we currently assume that we are in the run-time system ++ // and attempt to look to thread-local storage for saved SP and ++ // FP. Note that if these are null (because we were, in fact, ++ // in Java code, i.e., vtable stubs or similar, and the SA ++ // didn't have enough insight into the target VM to understand ++ // that) then we are going to lose the entire stack trace for ++ // the thread, which is sub-optimal. FIXME. ++ ++ if (DEBUG) { ++ System.out.println("CurrentFrameGuess: choosing last Java frame: sp = " + ++ thread.getLastJavaSP() + ", fp = " + thread.getLastJavaFP()); ++ } ++ if (thread.getLastJavaSP() == null) { ++ return false; // No known Java frames on stack ++ } ++ ++ // The runtime has a nasty habit of not saving fp in the frame ++ // anchor, leaving us to grovel about in the stack to find a ++ // plausible address. 
Fortunately, this only happens in ++ // compiled code; there we always have a valid PC, and we always ++ // push LR and FP onto the stack as a pair, with FP at the lower ++ // address. ++ pc = thread.getLastJavaPC(); ++ fp = thread.getLastJavaFP(); ++ sp = thread.getLastJavaSP(); ++ ++ if (fp == null) { ++ CodeCache cc = vm.getCodeCache(); ++ if (cc.contains(pc)) { ++ CodeBlob cb = cc.findBlob(pc); ++ if (DEBUG) { ++ System.out.println("FP is null. Found blob frame size " + cb.getFrameSize()); ++ } ++ // See if we can derive a frame pointer from SP and PC ++ long link_offset = cb.getFrameSize() - 2 * VM.getVM().getAddressSize(); ++ if (link_offset >= 0) { ++ fp = sp.addOffsetTo(link_offset); ++ } ++ } ++ } ++ ++ setValues(sp, fp, null); ++ ++ return true; ++ } ++ } ++ ++ public Address getSP() { return spFound; } ++ public Address getFP() { return fpFound; } ++ /** May be null if getting values from thread-local storage; take ++ care to call the correct SW64Frame constructor to recover this if ++ necessary */ ++ public Address getPC() { return pcFound; } ++ ++ private void setValues(Address sp, Address fp, Address pc) { ++ spFound = sp; ++ fpFound = fp; ++ pcFound = pc; ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64Frame.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64Frame.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64Frame.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64Frame.java 2025-05-06 10:53:44.839633664 +0800 +@@ -0,0 +1,557 @@ ++/* ++ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.runtime.sw64; ++ ++import java.util.*; ++import sun.jvm.hotspot.code.*; ++import sun.jvm.hotspot.compiler.*; ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.oops.*; ++import sun.jvm.hotspot.runtime.*; ++import sun.jvm.hotspot.types.*; ++import sun.jvm.hotspot.utilities.*; ++ ++/** Specialization of and implementation of abstract methods of the ++ Frame class for the sw64 family of CPUs. 
*/ ++ ++public class SW64Frame extends Frame { ++ private static final boolean DEBUG; ++ static { ++ DEBUG = System.getProperty("sun.jvm.hotspot.runtime.sw64.SW64Frame.DEBUG") != null; ++ } ++ ++ // All frames ++ private static final int LINK_OFFSET = 0; ++ private static final int RETURN_ADDR_OFFSET = 1; ++ private static final int SENDER_SP_OFFSET = 2; ++ ++ // Interpreter frames ++ private static final int INTERPRETER_FRAME_MIRROR_OFFSET = 2; // for native calls only ++ private static final int INTERPRETER_FRAME_SENDER_SP_OFFSET = -1; ++ private static final int INTERPRETER_FRAME_LAST_SP_OFFSET = INTERPRETER_FRAME_SENDER_SP_OFFSET - 1; ++ private static final int INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_LAST_SP_OFFSET - 1; ++ private static int INTERPRETER_FRAME_METHOD_OFFSET; ++ private static int INTERPRETER_FRAME_MDX_OFFSET; // Non-core builds only ++ private static int INTERPRETER_FRAME_CACHE_OFFSET; ++//ZHJ private static int INTERPRETER_FRAME_LOCALS_OFFSET; ++ private static int INTERPRETER_FRAME_BCX_OFFSET; ++ private static int INTERPRETER_FRAME_INITIAL_SP_OFFSET; ++ private static int INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET; ++ private static int INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET; ++ ++ // Entry frames ++ private static int ENTRY_FRAME_CALL_WRAPPER_OFFSET = -9; ++ ++ // Native frames ++ private static final int NATIVE_FRAME_INITIAL_PARAM_OFFSET = 2; ++ ++ private static VMReg fp = new VMReg(15); ++ ++ static { ++ VM.registerVMInitializedObserver(new Observer() { ++ public void update(Observable o, Object data) { ++ initialize(VM.getVM().getTypeDataBase()); ++ } ++ }); ++ } ++ ++ private static synchronized void initialize(TypeDataBase db) { ++ INTERPRETER_FRAME_METHOD_OFFSET = INTERPRETER_FRAME_LOCALS_OFFSET - 1; ++ INTERPRETER_FRAME_MDX_OFFSET = INTERPRETER_FRAME_METHOD_OFFSET - 1; ++ INTERPRETER_FRAME_CACHE_OFFSET = INTERPRETER_FRAME_MDX_OFFSET - 1; ++//ZHJ INTERPRETER_FRAME_LOCALS_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1; 
++ INTERPRETER_FRAME_BCX_OFFSET = INTERPRETER_FRAME_CACHE_OFFSET - 1; ++ INTERPRETER_FRAME_INITIAL_SP_OFFSET = INTERPRETER_FRAME_BCX_OFFSET - 1; ++ INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET; ++ INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET = INTERPRETER_FRAME_INITIAL_SP_OFFSET; ++ } ++ ++ ++ // an additional field beyond sp and pc: ++ Address raw_fp; // frame pointer ++ private Address raw_unextendedSP; ++ ++ private SW64Frame() { ++ } ++ ++ private void adjustForDeopt() { ++ if ( pc != null) { ++ // Look for a deopt pc and if it is deopted convert to original pc ++ CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc); ++ if (cb != null && cb.isJavaMethod()) { ++ NMethod nm = (NMethod) cb; ++ if (pc.equals(nm.deoptHandlerBegin())) { ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(this.getUnextendedSP() != null, "null SP in Java frame"); ++ } ++ // adjust pc if frame is deoptimized. ++ pc = this.getUnextendedSP().getAddressAt(nm.origPCOffset()); ++ deoptimized = true; ++ } ++ } ++ } ++ } ++ ++ public SW64Frame(Address raw_sp, Address raw_fp, Address pc) { ++ this.raw_sp = raw_sp; ++ this.raw_unextendedSP = raw_sp; ++ this.raw_fp = raw_fp; ++ this.pc = pc; ++ adjustUnextendedSP(); ++ ++ // Frame must be fully constructed before this call ++ adjustForDeopt(); ++ ++ if (DEBUG) { ++ System.out.println("SW64Frame(sp, fp, pc): " + this); ++ dumpStack(); ++ } ++ } ++ ++ public SW64Frame(Address raw_sp, Address raw_fp) { ++ this.raw_sp = raw_sp; ++ this.raw_unextendedSP = raw_sp; ++ this.raw_fp = raw_fp; ++ this.pc = raw_sp.getAddressAt(-1 * VM.getVM().getAddressSize()); ++ adjustUnextendedSP(); ++ ++ // Frame must be fully constructed before this call ++ adjustForDeopt(); ++ ++ if (DEBUG) { ++ System.out.println("SW64Frame(sp, fp): " + this); ++ dumpStack(); ++ } ++ } ++ ++ public SW64Frame(Address raw_sp, Address raw_unextendedSp, Address raw_fp, Address pc) { ++ this.raw_sp = raw_sp; ++ this.raw_unextendedSP = raw_unextendedSp; ++ 
this.raw_fp = raw_fp; ++ this.pc = pc; ++ adjustUnextendedSP(); ++ ++ // Frame must be fully constructed before this call ++ adjustForDeopt(); ++ ++ if (DEBUG) { ++ System.out.println("SW64Frame(sp, unextendedSP, fp, pc): " + this); ++ dumpStack(); ++ } ++ ++ } ++ ++ public Object clone() { ++ SW64Frame frame = new SW64Frame(); ++ frame.raw_sp = raw_sp; ++ frame.raw_unextendedSP = raw_unextendedSP; ++ frame.raw_fp = raw_fp; ++ frame.pc = pc; ++ frame.deoptimized = deoptimized; ++ return frame; ++ } ++ ++ public boolean equals(Object arg) { ++ if (arg == null) { ++ return false; ++ } ++ ++ if (!(arg instanceof SW64Frame)) { ++ return false; ++ } ++ ++ SW64Frame other = (SW64Frame) arg; ++ ++ return (AddressOps.equal(getSP(), other.getSP()) && ++ AddressOps.equal(getUnextendedSP(), other.getUnextendedSP()) && ++ AddressOps.equal(getFP(), other.getFP()) && ++ AddressOps.equal(getPC(), other.getPC())); ++ } ++ ++ public int hashCode() { ++ if (raw_sp == null) { ++ return 0; ++ } ++ ++ return raw_sp.hashCode(); ++ } ++ ++ public String toString() { ++ return "sp: " + (getSP() == null? "null" : getSP().toString()) + ++ ", unextendedSP: " + (getUnextendedSP() == null? "null" : getUnextendedSP().toString()) + ++ ", fp: " + (getFP() == null? "null" : getFP().toString()) + ++ ", pc: " + (pc == null? 
"null" : pc.toString()); ++ } ++ ++ // accessors for the instance variables ++ public Address getFP() { return raw_fp; } ++ public Address getSP() { return raw_sp; } ++ public Address getID() { return raw_sp; } ++ ++ // FIXME: not implemented yet ++ public boolean isSignalHandlerFrameDbg() { return false; } ++ public int getSignalNumberDbg() { return 0; } ++ public String getSignalNameDbg() { return null; } ++ ++ public boolean isInterpretedFrameValid() { ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(isInterpretedFrame(), "Not an interpreted frame"); ++ } ++ ++ // These are reasonable sanity checks ++ if (getFP() == null || getFP().andWithMask(0x3) != null) { ++ return false; ++ } ++ ++ if (getSP() == null || getSP().andWithMask(0x3) != null) { ++ return false; ++ } ++ ++ if (getFP().addOffsetTo(INTERPRETER_FRAME_INITIAL_SP_OFFSET * VM.getVM().getAddressSize()).lessThan(getSP())) { ++ return false; ++ } ++ ++ // These are hacks to keep us out of trouble. ++ // The problem with these is that they mask other problems ++ if (getFP().lessThanOrEqual(getSP())) { ++ // this attempts to deal with unsigned comparison above ++ return false; ++ } ++ ++ if (getFP().minus(getSP()) > 8192 * VM.getVM().getAddressSize()) { ++ // stack frames shouldn't be large. ++ return false; ++ } ++ ++ return true; ++ } ++ ++ // FIXME: not applicable in current system ++ // void patch_pc(Thread* thread, address pc); ++ ++ public Frame sender(RegisterMap regMap, CodeBlob cb) { ++ SW64RegisterMap map = (SW64RegisterMap) regMap; ++ ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(map != null, "map must be set"); ++ } ++ ++ // Default is we done have to follow them. 
The sender_for_xxx will ++ // update it accordingly ++ map.setIncludeArgumentOops(false); ++ ++ if (isEntryFrame()) return senderForEntryFrame(map); ++ if (isInterpretedFrame()) return senderForInterpreterFrame(map); ++ ++ if(cb == null) { ++ cb = VM.getVM().getCodeCache().findBlob(getPC()); ++ } else { ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(cb.equals(VM.getVM().getCodeCache().findBlob(getPC())), "Must be the same"); ++ } ++ } ++ ++ if (cb != null) { ++ return senderForCompiledFrame(map, cb); ++ } ++ ++ // Must be native-compiled frame, i.e. the marshaling code for native ++ // methods that exists in the core system. ++ return new SW64Frame(getSenderSP(), getLink(), getSenderPC()); ++ } ++ ++ private Frame senderForEntryFrame(SW64RegisterMap map) { ++ if (DEBUG) { ++ System.out.println("senderForEntryFrame"); ++ } ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(map != null, "map must be set"); ++ } ++ // Java frame called from C; skip all C frames and return top C ++ // frame of that chunk as the sender ++ SW64JavaCallWrapper jcw = (SW64JavaCallWrapper) getEntryFrameCallWrapper(); ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(!entryFrameIsFirst(), "next Java fp must be non zero"); ++ Assert.that(jcw.getLastJavaSP().greaterThan(getSP()), "must be above this frame on stack"); ++ } ++ SW64Frame fr; ++ if (jcw.getLastJavaPC() != null) { ++ fr = new SW64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP(), jcw.getLastJavaPC()); ++ } else { ++ fr = new SW64Frame(jcw.getLastJavaSP(), jcw.getLastJavaFP()); ++ } ++ map.clear(); ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(map.getIncludeArgumentOops(), "should be set by clear"); ++ } ++ return fr; ++ } ++ ++ //------------------------------------------------------------------------------ ++ // frame::adjust_unextended_sp ++ private void adjustUnextendedSP() { ++ // If we are returning to a compiled MethodHandle call site, the ++ // saved_fp will in fact be a saved value of the unextended SP. 
The ++ // simplest way to tell whether we are returning to such a call site ++ // is as follows: ++ ++ CodeBlob cb = cb(); ++ NMethod senderNm = (cb == null) ? null : cb.asNMethodOrNull(); ++ if (senderNm != null) { ++ // If the sender PC is a deoptimization point, get the original ++ // PC. For MethodHandle call site the unextended_sp is stored in ++ // saved_fp. ++ if (senderNm.isDeoptMhEntry(getPC())) { ++ // DEBUG_ONLY(verifyDeoptMhOriginalPc(senderNm, getFP())); ++ raw_unextendedSP = getFP(); ++ } ++ else if (senderNm.isDeoptEntry(getPC())) { ++ // DEBUG_ONLY(verifyDeoptOriginalPc(senderNm, raw_unextendedSp)); ++ } ++ else if (senderNm.isMethodHandleReturn(getPC())) { ++ raw_unextendedSP = getFP(); ++ } ++ } ++ } ++ ++ private Frame senderForInterpreterFrame(SW64RegisterMap map) { ++ if (DEBUG) { ++ System.out.println("senderForInterpreterFrame"); ++ } ++ Address unextendedSP = addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0); ++ Address sp = addressOfStackSlot(SENDER_SP_OFFSET); ++ // We do not need to update the callee-save register mapping because above ++ // us is either another interpreter frame or a converter-frame, but never ++ // directly a compiled frame. ++ // 11/24/04 SFG. With the removal of adapter frames this is no longer true. ++ // However c2 no longer uses callee save register for java calls so there ++ // are no callee register to find. 
++ ++ if (map.getUpdateMap()) ++ updateMapWithSavedLink(map, addressOfStackSlot(LINK_OFFSET)); ++ ++ return new SW64Frame(sp, unextendedSP, getLink(), getSenderPC()); ++ } ++ ++ private void updateMapWithSavedLink(RegisterMap map, Address savedFPAddr) { ++ map.setLocation(fp, savedFPAddr); ++ } ++ ++ private Frame senderForCompiledFrame(SW64RegisterMap map, CodeBlob cb) { ++ if (DEBUG) { ++ System.out.println("senderForCompiledFrame"); ++ } ++ ++ // ++ // NOTE: some of this code is (unfortunately) duplicated SW64CurrentFrameGuess ++ // ++ ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(map != null, "map must be set"); ++ } ++ ++ // frame owned by optimizing compiler ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(cb.getFrameSize() >= 0, "must have non-zero frame size"); ++ } ++ Address senderSP = getUnextendedSP().addOffsetTo(cb.getFrameSize()); ++ ++ // The return_address is always the word on the stack ++ Address senderPC = senderSP.getAddressAt(-1 * VM.getVM().getAddressSize()); ++ ++ // This is the saved value of FP which may or may not really be an FP. ++ // It is only an FP if the sender is an interpreter frame. ++ Address savedFPAddr = senderSP.addOffsetTo(- SENDER_SP_OFFSET * VM.getVM().getAddressSize()); ++ ++ if (map.getUpdateMap()) { ++ // Tell GC to use argument oopmaps for some runtime stubs that need it. ++ // For C1, the runtime stub might not have oop maps, so set this flag ++ // outside of update_register_map. ++ map.setIncludeArgumentOops(cb.callerMustGCArguments()); ++ ++ if (cb.getOopMaps() != null) { ++ OopMapSet.updateRegisterMap(this, cb, map, true); ++ } ++ ++ // Since the prolog does the save and restore of FP there is no oopmap ++ // for it so we must fill in its location as if there was an oopmap entry ++ // since if our caller was compiled code there could be live jvm state in it. 
++ updateMapWithSavedLink(map, savedFPAddr); ++ } ++ ++ return new SW64Frame(senderSP, savedFPAddr.getAddressAt(0), senderPC); ++ } ++ ++ protected boolean hasSenderPD() { ++ return true; ++ } ++ ++ public long frameSize() { ++ return (getSenderSP().minus(getSP()) / VM.getVM().getAddressSize()); ++ } ++ ++ public Address getLink() { ++ try { ++ if (DEBUG) { ++ System.out.println("Reading link at " + addressOfStackSlot(LINK_OFFSET) ++ + " = " + addressOfStackSlot(LINK_OFFSET).getAddressAt(0)); ++ } ++ return addressOfStackSlot(LINK_OFFSET).getAddressAt(0); ++ } catch (Exception e) { ++ if (DEBUG) ++ System.out.println("Returning null"); ++ return null; ++ } ++ } ++ ++ // FIXME: not implementable yet ++ //inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; } ++ ++ public Address getUnextendedSP() { return raw_unextendedSP; } ++ ++ // Return address: ++ public Address getSenderPCAddr() { return addressOfStackSlot(RETURN_ADDR_OFFSET); } ++ public Address getSenderPC() { return getSenderPCAddr().getAddressAt(0); } ++ ++ // return address of param, zero origin index. ++ public Address getNativeParamAddr(int idx) { ++ return addressOfStackSlot(NATIVE_FRAME_INITIAL_PARAM_OFFSET + idx); ++ } ++ ++ public Address getSenderSP() { return addressOfStackSlot(SENDER_SP_OFFSET); } ++ ++ public Address addressOfInterpreterFrameLocals() { ++ return addressOfStackSlot(INTERPRETER_FRAME_LOCALS_OFFSET); ++ } ++ ++ private Address addressOfInterpreterFrameBCX() { ++ return addressOfStackSlot(INTERPRETER_FRAME_BCX_OFFSET); ++ } ++ ++ public int getInterpreterFrameBCI() { ++ // FIXME: this is not atomic with respect to GC and is unsuitable ++ // for use in a non-debugging, or reflective, system. Need to ++ // figure out how to express this. 
++ Address bcp = addressOfInterpreterFrameBCX().getAddressAt(0); ++ Address methodHandle = addressOfInterpreterFrameMethod().getAddressAt(0); ++ Method method = (Method)Metadata.instantiateWrapperFor(methodHandle); ++ return bcpToBci(bcp, method); ++ } ++ ++ public Address addressOfInterpreterFrameMDX() { ++ return addressOfStackSlot(INTERPRETER_FRAME_MDX_OFFSET); ++ } ++ ++ // FIXME ++ //inline int frame::interpreter_frame_monitor_size() { ++ // return BasicObjectLock::size(); ++ //} ++ ++ // expression stack ++ // (the max_stack arguments are used by the GC; see class FrameClosure) ++ ++ public Address addressOfInterpreterFrameExpressionStack() { ++ Address monitorEnd = interpreterFrameMonitorEnd().address(); ++ return monitorEnd.addOffsetTo(-1 * VM.getVM().getAddressSize()); ++ } ++ ++ public int getInterpreterFrameExpressionStackDirection() { return -1; } ++ ++ // top of expression stack ++ public Address addressOfInterpreterFrameTOS() { ++ return getSP(); ++ } ++ ++ /** Expression stack from top down */ ++ public Address addressOfInterpreterFrameTOSAt(int slot) { ++ return addressOfInterpreterFrameTOS().addOffsetTo(slot * VM.getVM().getAddressSize()); ++ } ++ ++ public Address getInterpreterFrameSenderSP() { ++ if (Assert.ASSERTS_ENABLED) { ++ Assert.that(isInterpretedFrame(), "interpreted frame expected"); ++ } ++ return addressOfStackSlot(INTERPRETER_FRAME_SENDER_SP_OFFSET).getAddressAt(0); ++ } ++ ++ // Monitors ++ public BasicObjectLock interpreterFrameMonitorBegin() { ++ return new BasicObjectLock(addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_BOTTOM_OFFSET)); ++ } ++ ++ public BasicObjectLock interpreterFrameMonitorEnd() { ++ Address result = addressOfStackSlot(INTERPRETER_FRAME_MONITOR_BLOCK_TOP_OFFSET).getAddressAt(0); ++ if (Assert.ASSERTS_ENABLED) { ++ // make sure the pointer points inside the frame ++ Assert.that(AddressOps.gt(getFP(), result), "result must < than frame pointer"); ++ Assert.that(AddressOps.lte(getSP(), result), "result must >= 
than stack pointer"); ++ } ++ return new BasicObjectLock(result); ++ } ++ ++ public int interpreterFrameMonitorSize() { ++ return BasicObjectLock.size(); ++ } ++ ++ // Method ++ public Address addressOfInterpreterFrameMethod() { ++ return addressOfStackSlot(INTERPRETER_FRAME_METHOD_OFFSET); ++ } ++ ++ // Constant pool cache ++ public Address addressOfInterpreterFrameCPCache() { ++ return addressOfStackSlot(INTERPRETER_FRAME_CACHE_OFFSET); ++ } ++ ++ // Entry frames ++ public JavaCallWrapper getEntryFrameCallWrapper() { ++ return new SW64JavaCallWrapper(addressOfStackSlot(ENTRY_FRAME_CALL_WRAPPER_OFFSET).getAddressAt(0)); ++ } ++ ++ protected Address addressOfSavedOopResult() { ++ // offset is 2 for compiler2 and 3 for compiler1 ++ return getSP().addOffsetTo((VM.getVM().isClientCompiler() ? 2 : 3) * ++ VM.getVM().getAddressSize()); ++ } ++ ++ protected Address addressOfSavedReceiver() { ++ return getSP().addOffsetTo(-4 * VM.getVM().getAddressSize()); ++ } ++ ++ private void dumpStack() { ++ for (Address addr = getSP().addOffsetTo(-4 * VM.getVM().getAddressSize()); ++ AddressOps.lt(addr, getSP()); ++ addr = addr.addOffsetTo(VM.getVM().getAddressSize())) { ++ System.out.println(addr + ": " + addr.getAddressAt(0)); ++ } ++ System.out.println("-----------------------"); ++ for (Address addr = getSP(); ++ AddressOps.lte(addr, getSP().addOffsetTo(20 * VM.getVM().getAddressSize())); ++ addr = addr.addOffsetTo(VM.getVM().getAddressSize())) { ++ System.out.println(addr + ": " + addr.getAddressAt(0)); ++ } ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64JavaCallWrapper.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64JavaCallWrapper.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64JavaCallWrapper.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64JavaCallWrapper.java 2025-05-06 10:53:44.839633664 +0800 +@@ 
-0,0 +1,57 @@ ++/* ++ * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++package sun.jvm.hotspot.runtime.sw64; ++ ++import java.util.*; ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.types.*; ++import sun.jvm.hotspot.runtime.*; ++ ++public class SW64JavaCallWrapper extends JavaCallWrapper { ++ private static AddressField lastJavaFPField; ++ ++ static { ++ VM.registerVMInitializedObserver(new Observer() { ++ public void update(Observable o, Object data) { ++ initialize(VM.getVM().getTypeDataBase()); ++ } ++ }); ++ } ++ ++ private static synchronized void initialize(TypeDataBase db) { ++ Type type = db.lookupType("JavaFrameAnchor"); ++ ++ lastJavaFPField = type.getAddressField("_last_Java_fp"); ++ } ++ ++ public SW64JavaCallWrapper(Address addr) { ++ super(addr); ++ } ++ ++ public Address getLastJavaFP() { ++ return lastJavaFPField.getValue(addr.addOffsetTo(anchorField.getOffset())); ++ } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64RegisterMap.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64RegisterMap.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64RegisterMap.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/sw64/SW64RegisterMap.java 2025-05-06 10:53:44.839633664 +0800 +@@ -0,0 +1,52 @@ ++/* ++ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, Red Hat Inc. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++package sun.jvm.hotspot.runtime.sw64; ++ ++import sun.jvm.hotspot.debugger.*; ++import sun.jvm.hotspot.runtime.*; ++ ++public class SW64RegisterMap extends RegisterMap { ++ ++ /** This is the only public constructor */ ++ public SW64RegisterMap(JavaThread thread, boolean updateMap) { ++ super(thread, updateMap); ++ } ++ ++ protected SW64RegisterMap(RegisterMap map) { ++ super(map); ++ } ++ ++ public Object clone() { ++ SW64RegisterMap retval = new SW64RegisterMap(this); ++ return retval; ++ } ++ ++ // no PD state to clear or copy: ++ protected void clearPD() {} ++ protected void initializePD() {} ++ protected void initializeFromPD(RegisterMap map) {} ++ protected Address getLocationPD(VMReg reg) { return null; } ++} +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Threads.java 2025-05-06 10:53:44.839633664 +0800 +@@ -35,6 +35,7 @@ + import sun.jvm.hotspot.runtime.linux_x86.LinuxX86JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.linux_amd64.LinuxAMD64JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.linux_sparc.LinuxSPARCJavaThreadPDAccess; ++import sun.jvm.hotspot.runtime.linux_sw64.LinuxSW64JavaThreadPDAccess; + 
import sun.jvm.hotspot.runtime.linux_aarch64.LinuxAARCH64JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.bsd_x86.BsdX86JavaThreadPDAccess; + import sun.jvm.hotspot.runtime.bsd_amd64.BsdAMD64JavaThreadPDAccess; +@@ -88,6 +89,8 @@ + access = new LinuxAMD64JavaThreadPDAccess(); + } else if (cpu.equals("sparc")) { + access = new LinuxSPARCJavaThreadPDAccess(); ++ } else if (cpu.equals("sw64")) { ++ access = new LinuxSW64JavaThreadPDAccess(); + } else if (cpu.equals("aarch64")) { + access = new LinuxAARCH64JavaThreadPDAccess(); + } else { +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java 2025-05-06 10:53:44.839633664 +0800 +@@ -206,37 +206,39 @@ + }; + + private static void checkVMVersion(String vmRelease) { +- if (System.getProperty("sun.jvm.hotspot.runtime.VM.disableVersionCheck") == null) { +- // read sa build version. +- String versionProp = "sun.jvm.hotspot.runtime.VM.saBuildVersion"; +- String saVersion = saProps.getProperty(versionProp); +- if (saVersion == null) +- throw new RuntimeException("Missing property " + versionProp); +- +- // Strip nonproduct VM version substring (note: saVersion doesn't have it). +- String vmVersion = vmRelease.replaceAll("(-fastdebug)|(-debug)|(-jvmg)|(-optimized)|(-profiled)",""); +- +- if (saVersion.equals(vmVersion)) { +- // Exact match +- return; +- } +- if (saVersion.indexOf('-') == saVersion.lastIndexOf('-') && +- vmVersion.indexOf('-') == vmVersion.lastIndexOf('-')) { +- // Throw exception if different release versions: +- // .-b +- throw new VMVersionMismatchException(saVersion, vmRelease); +- } else { +- // Otherwise print warning to allow mismatch not release versions +- // during development. 
+- System.err.println("WARNING: Hotspot VM version " + vmRelease + +- " does not match with SA version " + saVersion + +- "." + " You may see unexpected results. "); +- } +- } else { +- System.err.println("WARNING: You have disabled SA and VM version check. You may be " + +- "using incompatible version of SA and you may see unexpected " + +- "results."); +- } ++//ZHJ20191021 Disable checkVMVersion for all kinds of java tools in agent ++//ZHJ20191021 if (System.getProperty("sun.jvm.hotspot.runtime.VM.disableVersionCheck") == null) { ++//ZHJ20191021 // read sa build version. ++//ZHJ20191021 String versionProp = "sun.jvm.hotspot.runtime.VM.saBuildVersion"; ++//ZHJ20191021 String saVersion = saProps.getProperty(versionProp); ++//ZHJ20191021 if (saVersion == null) ++//ZHJ20191021 throw new RuntimeException("Missing property " + versionProp); ++//ZHJ20191021 ++//ZHJ20191021 // Strip nonproduct VM version substring (note: saVersion doesn't have it). ++//ZHJ20191021 String vmVersion = vmRelease.replaceAll("(-fastdebug)|(-debug)|(-jvmg)|(-optimized)|(-profiled)",""); ++//ZHJ20191021 ++//ZHJ20191021 if (saVersion.equals(vmVersion)) { ++//ZHJ20191021 // Exact match ++//ZHJ20191021 return; ++//ZHJ20191021 } ++//ZHJ20191021 if (saVersion.indexOf('-') == saVersion.lastIndexOf('-') && ++//ZHJ20191021 vmVersion.indexOf('-') == vmVersion.lastIndexOf('-')) { ++//ZHJ20191021 // Throw exception if different release versions: ++//ZHJ20191021 // .-b ++//ZHJ20191021 throw new VMVersionMismatchException(saVersion, vmRelease); ++//ZHJ20191021 } else { ++//ZHJ20191021 // Otherwise print warning to allow mismatch not release versions ++//ZHJ20191021 // during development. ++//ZHJ20191021 System.err.println("WARNING: Hotspot VM version " + vmRelease + ++//ZHJ20191021 " does not match with SA version " + saVersion + ++//ZHJ20191021 "." + " You may see unexpected results. 
"); ++//ZHJ20191021 } ++//ZHJ20191021 } else { ++//ZHJ20191021 System.err.println("WARNING: You have disabled SA and VM version check. You may be " + ++//ZHJ20191021 "using incompatible version of SA and you may see unexpected " + ++//ZHJ20191021 "results."); ++//ZHJ20191021 } ++// throw new RuntimeException("Missing property "); + } + + private static final boolean disableDerivedPointerTableCheck; +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java 2025-05-06 10:53:44.839633664 +0800 +@@ -56,7 +56,6 @@ + G1CollectFull, + G1CollectForAllocation, + G1IncCollectionPause, +- ShenandoahOperation, + EnableBiasedLocking, + RevokeBias, + BulkRevokeBias, +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java 2025-05-06 10:53:44.839633664 +0800 +@@ -27,7 +27,6 @@ + import java.util.*; + import sun.jvm.hotspot.gc_interface.*; + import sun.jvm.hotspot.gc_implementation.g1.*; +-import sun.jvm.hotspot.gc_implementation.shenandoah.*; + import sun.jvm.hotspot.gc_implementation.parallelScavenge.*; + import sun.jvm.hotspot.gc_implementation.shared.*; + import sun.jvm.hotspot.debugger.JVMDebugger; +@@ -77,11 +76,7 @@ + printValMB("MetaspaceSize = ", getFlagValue("MetaspaceSize", flagMap)); + printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap)); + printValMB("MaxMetaspaceSize = ", getFlagValue("MaxMetaspaceSize", flagMap)); +- if (heap 
instanceof ShenandoahHeap) { +- printValMB("ShenandoahRegionSize = ", ShenandoahHeapRegion.regionSizeBytes()); +- } else { +- printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes()); +- } ++ printValMB("G1HeapRegionSize = ", HeapRegion.grainBytes()); + + System.out.println(); + System.out.println("Heap Usage:"); +@@ -145,14 +140,6 @@ + printValMB("used = ", oldGen.used()); + printValMB("free = ", oldFree); + System.out.println(alignment + (double)oldGen.used() * 100.0 / oldGen.capacity() + "% used"); +- } else if (heap instanceof ShenandoahHeap) { +- ShenandoahHeap sh = (ShenandoahHeap) heap; +- long num_regions = sh.numOfRegions(); +- System.out.println("Shenandoah Heap:"); +- System.out.println(" regions = " + num_regions); +- printValMB("capacity = ", num_regions * ShenandoahHeapRegion.regionSizeBytes()); +- printValMB("used = ", sh.used()); +- printValMB("committed = ", sh.committed()); + } else { + throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass()); + } +@@ -195,14 +182,6 @@ + l = getFlagValue("ParallelGCThreads", flagMap); + System.out.println("with " + l + " thread(s)"); + return; +- } +- +- l = getFlagValue("UseShenandoahGC", flagMap); +- if (l == 1L) { +- System.out.print("Shenandoah GC "); +- l = getFlagValue("ParallelGCThreads", flagMap); +- System.out.println("with " + l + " thread(s)"); +- return; + } + + System.out.println("Mark Sweep Compact GC"); +diff -uNr openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java +--- openjdk/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java 2025-05-06 10:53:44.851633664 +0800 +@@ -61,6 +61,8 @@ + return "x86"; + } else if (cpu.equals("sparc") || cpu.equals("sparcv9")) { + return "sparc"; ++ } else if (cpu.equals("sw_64") || 
cpu.equals("sw64")) { ++ return "sw64"; + } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64")) { + return cpu; + } else if (cpu.equals("aarch64")) { +diff -uNr openjdk/hotspot/make/bsd/makefiles/gcc.make afu8u/hotspot/make/bsd/makefiles/gcc.make +--- openjdk/hotspot/make/bsd/makefiles/gcc.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/bsd/makefiles/gcc.make 2025-05-06 10:53:44.859633664 +0800 +@@ -190,7 +190,7 @@ + CFLAGS += -fno-exceptions + ifeq ($(USE_CLANG),) + CFLAGS += -pthread +- CFLAGS += -fcheck-new -fstack-protector ++ CFLAGS += -fcheck-new + # version 4 and above support fvisibility=hidden (matches jni_x86.h file) + # except 4.1.2 gives pointless warnings that can't be disabled (afaik) + ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0" +diff -uNr openjdk/hotspot/make/defs.make afu8u/hotspot/make/defs.make +--- openjdk/hotspot/make/defs.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/defs.make 2025-05-06 10:53:44.859633664 +0800 +@@ -285,7 +285,7 @@ + + # Use uname output for SRCARCH, but deal with platform differences. If ARCH + # is not explicitly listed below, it is treated as x86. 
+- SRCARCH ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 ppc64le zero aarch64,$(ARCH))) ++ SRCARCH ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 ppc64le zero aarch64 sw64,$(ARCH))) + ARCH/ = x86 + ARCH/sparc = sparc + ARCH/sparc64= sparc +@@ -295,6 +295,7 @@ + ARCH/ppc64 = ppc + ARCH/ppc64le= ppc + ARCH/ppc = ppc ++ ARCH/sw64 = sw64 + ARCH/zero = zero + ARCH/aarch64 = aarch64 + +@@ -332,9 +333,10 @@ + LIBARCH/sparcv9 = sparcv9 + LIBARCH/ia64 = ia64 + LIBARCH/ppc64 = ppc64 ++ LIBARCH/sw64 = sw64 + LIBARCH/zero = $(ZERO_LIBARCH) + +- LP64_ARCH += sparcv9 amd64 ia64 ppc64 aarch64 zero ++ LP64_ARCH += sparcv9 amd64 ia64 ppc64 aarch64 sw64 zero + endif + + # Required make macro settings for all platforms +diff -uNr openjdk/hotspot/make/excludeSrc.make afu8u/hotspot/make/excludeSrc.make +--- openjdk/hotspot/make/excludeSrc.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/excludeSrc.make 2025-05-06 10:53:44.859633664 +0800 +@@ -83,7 +83,7 @@ + + gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation + gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation +- gc_subdirs := concurrentMarkSweep g1 shenandoah shenandoah/heuristics shenandoah/mode shenandoah/c1 shenandoah/c2 parallelScavenge parNew ++ gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew + gc_exclude := $(foreach gc,$(gc_subdirs), \ + $(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp)) \ + $(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp))) +@@ -116,15 +116,7 @@ + # src/share/vm/services + Src_Files_EXCLUDE += \ + g1MemoryPool.cpp \ +- shenandoahMemoryPool.cpp \ + psMemoryPool.cpp +- +- Src_Files_EXCLUDE += \ +- shenandoahBarrierSetAssembler_x86.cpp \ +- shenandoahBarrierSetAssembler_aarch64.cpp \ +- shenandoahBarrierSetAssembler_ppc.cpp \ +- shenandoahBarrierSetAssembler_sparc.cpp \ +- shenandoahBarrierSetAssembler_zero.cpp + endif + + ifeq ($(INCLUDE_NMT), false) +diff -uNr openjdk/hotspot/make/linux/Makefile afu8u/hotspot/make/linux/Makefile +--- 
openjdk/hotspot/make/linux/Makefile 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/Makefile 2025-05-06 10:53:44.859633664 +0800 +@@ -75,6 +75,12 @@ + endif + endif + ++# C1 is not ported on sw64, so we cannot build a tiered VM: ++# SW TODO ++ifeq ($(ARCH),sw64) ++ FORCE_TIERED=0 ++endif ++ + ifdef LP64 + ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","") + _JUNK_ := $(shell echo >&2 \ +diff -uNr openjdk/hotspot/make/linux/makefiles/buildtree.make afu8u/hotspot/make/linux/makefiles/buildtree.make +--- openjdk/hotspot/make/linux/makefiles/buildtree.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/buildtree.make 2025-05-06 10:53:44.859633664 +0800 +@@ -202,6 +202,7 @@ + DATA_MODE/amd64 = 64 + DATA_MODE/ppc64 = 64 + DATA_MODE/aarch64 = 64 ++DATA_MODE/sw64 = 64 + + DATA_MODE = $(DATA_MODE/$(BUILDARCH)) + +diff -uNr openjdk/hotspot/make/linux/makefiles/defs.make afu8u/hotspot/make/linux/makefiles/defs.make +--- openjdk/hotspot/make/linux/makefiles/defs.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/defs.make 2025-05-06 10:53:44.859633664 +0800 +@@ -83,6 +83,15 @@ + HS_ARCH = sparc + endif + ++# sw64 ++ifeq ($(ARCH), sw64) ++ ARCH_DATA_MODEL = 64 ++ MAKE_ARGS += LP64=1 ++ VM_PLATFORM = linux_sw64 ++ PLATFORM = linux-sw64 ++ HS_ARCH = sw64 ++endif ++ + # i686/i586 and amd64/x86_64 + ifneq (,$(findstring $(ARCH), amd64 x86_64 i686 i586)) + ifeq ($(ARCH_DATA_MODEL), 64) +@@ -309,6 +318,8 @@ + $(EXPORT_LIB_DIR)/sa-jdi.jar + ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ + $(EXPORT_LIB_DIR)/sa-jdi.jar ++ADD_SA_BINARIES/sw64 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ ++ $(EXPORT_LIB_DIR)/sa-jdi.jar + ADD_SA_BINARIES/aarch64 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ + $(EXPORT_LIB_DIR)/sa-jdi.jar + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) +@@ -317,10 +328,12 @@ + ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz + 
ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz + ADD_SA_BINARIES/aarch64 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz ++ ADD_SA_BINARIES/sw64 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz + else + ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo + ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo + ADD_SA_BINARIES/aarch64 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo ++ ADD_SA_BINARIES/sw64 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo + endif + endif + endif +diff -uNr openjdk/hotspot/make/linux/makefiles/dtrace.make afu8u/hotspot/make/linux/makefiles/dtrace.make +--- openjdk/hotspot/make/linux/makefiles/dtrace.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/dtrace.make 2025-05-06 10:53:44.859633664 +0800 +@@ -42,7 +42,11 @@ + else + SDT_H_FILE = /usr/include/sys/sdt.h + endif ++ifeq ($(ARCH), sw64) # SW TODO ++DTRACE_ENABLED = ++else + DTRACE_ENABLED = $(shell test -f $(SDT_H_FILE) && echo $(SDT_H_FILE)) ++endif + REASON = "$(SDT_H_FILE) not found" + + ifneq ($(DTRACE_ENABLED),) +diff -uNr openjdk/hotspot/make/linux/makefiles/gcc.make afu8u/hotspot/make/linux/makefiles/gcc.make +--- openjdk/hotspot/make/linux/makefiles/gcc.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/gcc.make 2025-05-06 10:53:44.859633664 +0800 +@@ -150,7 +150,7 @@ + CFLAGS += -fno-exceptions + CFLAGS += -D_REENTRANT + ifeq ($(USE_CLANG),) +- CFLAGS += -fcheck-new -fstack-protector ++ CFLAGS += -fcheck-new + # version 4 and above support fvisibility=hidden (matches jni_x86.h file) + # except 4.1.2 gives pointless warnings that can't be disabled (afaik) + ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0" +@@ -179,6 +179,7 @@ + ARCHFLAG/sparcv9 = -m64 -mcpu=v9 + ARCHFLAG/zero = $(ZERO_ARCHFLAG) + ARCHFLAG/ppc64 = -m64 ++ARCHFLAG/sw64 = -mieee + + CFLAGS += $(ARCHFLAG) + AOUT_FLAGS += $(ARCHFLAG) 
+@@ -202,7 +203,7 @@ + endif + + # Compiler warnings are treated as errors +-WARNINGS_ARE_ERRORS = -Werror ++#WARNINGS_ARE_ERRORS = -Werror # SW TODO + + ifeq ($(USE_CLANG), true) + # However we need to clean the code up before we can unrestrictedly enable this option with Clang +@@ -240,7 +241,7 @@ + + # The flags to use for an Optimized g++ build + OPT_CFLAGS/SIZE=-Os +-OPT_CFLAGS/SPEED=-O3 ++OPT_CFLAGS/SPEED=-O2 # SW TODO -O3 + + # Hotspot uses very unstrict aliasing turn this optimization off + # This option is added to CFLAGS rather than OPT_CFLAGS +@@ -277,9 +278,6 @@ + endif + endif + +-# Need extra inlining to collapse all the templated closures into the hot loop +-OPT_CFLAGS/shenandoahConcurrentMark.o += $(OPT_CFLAGS) --param inline-unit-growth=1000 +- + # Flags for generating make dependency flags. + DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d) + ifeq ($(USE_CLANG),) +@@ -308,7 +306,7 @@ + endif + + # Enable linker optimization +-LFLAGS += -Xlinker -O1 ++LFLAGS += -Xlinker -O0 # SW TODO -O1 + + ifeq ($(USE_CLANG),) + # If this is a --hash-style=gnu system, use --hash-style=both +@@ -353,6 +351,7 @@ + ifeq ($(DEBUG_BINARIES), true) + CFLAGS += -g + else ++ DEBUG_CFLAGS/sw64 = -g -O0 -mieee # SW TODO + DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) + ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) + DEBUG_CFLAGS += -g +diff -uNr openjdk/hotspot/make/linux/makefiles/rules.make afu8u/hotspot/make/linux/makefiles/rules.make +--- openjdk/hotspot/make/linux/makefiles/rules.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/rules.make 2025-05-06 10:53:44.859633664 +0800 +@@ -163,11 +163,6 @@ + $(QUIETLY) $(REMOVE_TARGET) + $(QUIETLY) $(AS.S) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE) + +-%.o: %.S +- @echo Assembling $< +- $(QUIETLY) $(REMOVE_TARGET) +- $(COMPILE.CC) -o $@ $< $(COMPILE_DONE) +- + %.s: %.cpp + @echo Generating assembly for $< + $(QUIETLY) $(GENASM.CXX) -o $@ $< +diff -uNr openjdk/hotspot/make/linux/makefiles/sa.make 
afu8u/hotspot/make/linux/makefiles/sa.make +--- openjdk/hotspot/make/linux/makefiles/sa.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/sa.make 2025-05-06 10:53:44.859633664 +0800 +@@ -108,6 +108,7 @@ + $(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext ++ $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.sw64.SW64ThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.aarch64.AARCH64ThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.sparc.SPARCThreadContext + $(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.asm.Disassembler +diff -uNr openjdk/hotspot/make/linux/makefiles/sw64.make afu8u/hotspot/make/linux/makefiles/sw64.make +--- openjdk/hotspot/make/linux/makefiles/sw64.make 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/sw64.make 2025-05-06 10:53:44.859633664 +0800 +@@ -0,0 +1,49 @@ ++# ++# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. ++# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++# ++# This code is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License version 2 only, as ++# published by the Free Software Foundation. ++# ++# This code is distributed in the hope that it will be useful, but WITHOUT ++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++# version 2 for more details (a copy is included in the LICENSE file that ++# accompanied this code). ++# ++# You should have received a copy of the GNU General Public License version ++# 2 along with this work; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++# ++# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++# or visit www.oracle.com if you need additional information or have any ++# questions. ++# ++# ++ ++# Not included in includeDB because it has no dependencies ++Obj_Files += linux_sw64.o ++ ++# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized ++OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT) ++# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized ++OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT) ++# Must also specify if CPU is little endian ++CFLAGS += -DVM_LITTLE_ENDIAN ++ ++CFLAGS += -D_LP64=1 ++ ++# The serviceability agent relies on frame pointer (%rbp) to walk thread stack ++CFLAGS += -fno-omit-frame-pointer ++ ++OPT_CFLAGS/compactingPermGenGen.o = -O1 ++OPT_CFLAGS/cardTableExtension.o= -O1 ++#OPT_CFLAGS/subnode.o = -mieee -O3 ++#OPT_CFLAGS/mulnode.o = -mieee -O3 ++#OPT_CFLAGS/connode.o = -mieee -O3 ++#OPT_CFLAGS/addnode.o = -mieee -O3 ++#OPT_CFLAGS/parse2.o = -mieee -O3 ++#OPT_CFLAGS/heapDumper.o = -mieee -O3 ++#OPT_CFLAGS/sharedRuntime.o = -mieee -O2 ++#OPT_CFLAGS/copy.o = -O2 +diff -uNr openjdk/hotspot/make/linux/makefiles/vm.make afu8u/hotspot/make/linux/makefiles/vm.make +--- openjdk/hotspot/make/linux/makefiles/vm.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/linux/makefiles/vm.make 2025-05-06 10:53:44.859633664 +0800 +@@ -168,7 +168,6 @@ + + COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1) + COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1 +-COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/gc_implementation/shenandoah/c1 + + 
COMPILER2_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/opto) + COMPILER2_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/libadt) +@@ -187,8 +186,8 @@ + Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS) + Src_Dirs := $(Src_Dirs/$(TYPE)) + +-COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* shenandoahBarrierSetC2.cpp shenandoahSupport.cpp +-COMPILER1_SPECIFIC_FILES := c1_\* shenandoahBarrierSetC1.cpp ++COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* ++COMPILER1_SPECIFIC_FILES := c1_\* + SHARK_SPECIFIC_FILES := shark + ZERO_SPECIFIC_FILES := zero + +@@ -222,7 +221,7 @@ + # Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE. + define findsrc + $(notdir $(shell find $(1)/. ! -name . -prune \ +- -a \( -name \*.c -o -name \*.cpp -o -name \*.s -o -name \*.S \) \ ++ -a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \ + -a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \))) + endef + +diff -uNr openjdk/hotspot/make/linux/platform_sw64 afu8u/hotspot/make/linux/platform_sw64 +--- openjdk/hotspot/make/linux/platform_sw64 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/make/linux/platform_sw64 2025-05-06 10:53:44.863633664 +0800 +@@ -0,0 +1,17 @@ ++os_family = linux ++ ++arch = sw64 ++ ++arch_model = sw64 ++ ++os_arch = linux_sw64 ++ ++os_arch_model = linux_sw64 ++ ++lib_arch = sw64 ++ ++compiler = gcc ++ ++gnu_dis_arch = sw64 ++ ++sysdefs = -DLINUX -D_GNU_SOURCE -DSW64 +diff -uNr openjdk/hotspot/make/sa.files afu8u/hotspot/make/sa.files +--- openjdk/hotspot/make/sa.files 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/sa.files 2025-05-06 10:53:44.863633664 +0800 +@@ -55,6 +55,7 @@ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/x86/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/aarch64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sparc/*.java \ ++$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/sw64/*.java \ + 
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/posix/elf/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/*.java \ +@@ -62,10 +63,12 @@ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/aarch64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/sparc/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/x86/*.java \ ++$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/proc/sw64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/amd64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sparc/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/x86/*.java \ ++$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/sw64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/remote/aarch64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sparc/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/win32/coff/*.java \ +@@ -75,6 +78,7 @@ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/x86/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windows/amd64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \ ++$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/sw64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \ +@@ -96,11 +100,13 @@ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_aarch64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_x86/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_sparc/*.java \ ++$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/linux_sw64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/posix/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_amd64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_sparc/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/solaris_x86/*.java \ + 
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sparc/*.java \ ++$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/sw64/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \ + $(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \ +diff -uNr openjdk/hotspot/make/solaris/makefiles/gcc.make afu8u/hotspot/make/solaris/makefiles/gcc.make +--- openjdk/hotspot/make/solaris/makefiles/gcc.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/solaris/makefiles/gcc.make 2025-05-06 10:53:44.863633664 +0800 +@@ -75,7 +75,6 @@ + CFLAGS += -fno-exceptions + CFLAGS += -D_REENTRANT + CFLAGS += -fcheck-new +-CFLAGS += -fstack-protector + + ARCHFLAG = $(ARCHFLAG/$(BUILDARCH)) + +diff -uNr openjdk/hotspot/make/windows/create_obj_files.sh afu8u/hotspot/make/windows/create_obj_files.sh +--- openjdk/hotspot/make/windows/create_obj_files.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/windows/create_obj_files.sh 2025-05-06 10:53:44.863633664 +0800 +@@ -78,10 +78,6 @@ + fi + + BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods" +-BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/gc_implementation/shenandoah/c1" +-BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/gc_implementation/shenandoah/c2" +-BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/gc_implementation/shenandoah/heuristics" +-BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/gc_implementation/shenandoah/mode" + + # shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS. 
+ if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then +diff -uNr openjdk/hotspot/make/windows/makefiles/vm.make afu8u/hotspot/make/windows/makefiles/vm.make +--- openjdk/hotspot/make/windows/makefiles/vm.make 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/make/windows/makefiles/vm.make 2025-05-06 11:13:08.091672949 +0800 +@@ -153,11 +153,6 @@ + VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/parNew + VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/concurrentMarkSweep + VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/g1 +-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shenandoah +-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shenandoah/c1 +-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shenandoah/c2 +-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shenandoah/heuristics +-VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_implementation/shenandoah/mode + VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/gc_interface + VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/asm + VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/memory +@@ -253,21 +248,6 @@ + {$(COMMONSRC)\share\vm\gc_implementation\g1}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +-{$(COMMONSRC)\share\vm\gc_implementation\shenandoah}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- +-{$(COMMONSRC)\share\vm\gc_implementation\shenandoah\c1}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- +-{$(COMMONSRC)\share\vm\gc_implementation\shenandoah\c2}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- +-{$(COMMONSRC)\share\vm\gc_implementation\shenandoah\heuristics}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- +-{$(COMMONSRC)\share\vm\gc_implementation\shenandoah\mode}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- + {$(COMMONSRC)\share\vm\gc_interface}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +@@ -351,12 +331,6 @@ + 
{$(ALTSRC)\share\vm\gc_implementation\g1}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +-{$(ALTSRC)\share\vm\gc_implementation\shenandoah}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- +-{$(ALTSRC)\share\vm\gc_implementation\shenandoah\heuristics}.cpp.obj:: +- $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< +- + {$(ALTSRC)\share\vm\gc_interface}.cpp.obj:: + $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $< + +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/aarch64.ad afu8u/hotspot/src/cpu/aarch64/vm/aarch64.ad +--- openjdk/hotspot/src/cpu/aarch64/vm/aarch64.ad 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/aarch64.ad 2025-05-06 11:13:08.091672949 +0800 +@@ -926,9 +926,6 @@ + source_hpp %{ + + #include "opto/addnode.hpp" +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#endif + + class CallStubImpl { + +@@ -2902,15 +2899,6 @@ + %} + + +- enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{ +- MacroAssembler _masm(&cbuf); +- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); +- Register tmp = $tmp$$Register; +- __ mov(tmp, $oldval$$Register); // Must not clobber oldval. 
+- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +- /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); +- %} +- + // The only difference between aarch64_enc_cmpxchg and + // aarch64_enc_cmpxchg_acq is that we use load-acquire in the + // CompareAndSwap sequence to serve as a barrier on acquiring a +@@ -2929,16 +2917,6 @@ + Assembler::word, /*acquire*/ true, /*release*/ true); + %} + +- enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{ +- MacroAssembler _masm(&cbuf); +- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); +- Register tmp = $tmp$$Register; +- __ mov(tmp, $oldval$$Register); // Must not clobber oldval. +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, +- /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, +- $res$$Register); +- %} +- + // auxiliary used for CompareAndSwapX to set result register + enc_class aarch64_enc_cset_eq(iRegINoSp res) %{ + MacroAssembler _masm(&cbuf); +@@ -4337,8 +4315,8 @@ + operand immByteMapBase() + %{ + // Get base of card map +- predicate(!UseShenandoahGC && // TODO: Should really check for BS::is_a, see JDK-8193193 +- (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base); ++ predicate((jbyte*)n->get_ptr() == ++ ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base); + match(ConP); + + op_cost(0); +@@ -7062,53 +7040,7 @@ + ins_cost(INSN_COST); + format %{ "str $src, $mem\t# ptr" %} + +- ins_encode %{ +- int opcode = $mem->opcode(); +- Register base = as_Register($mem$$base); +- int index = $mem$$index; +- int size = $mem$$scale; +- int disp = $mem$$disp; +- Register reg = as_Register($src$$reg); +- +- // we sometimes get asked to store the stack pointer into the +- // current thread -- we cannot do that directly on 
AArch64 +- if (reg == r31_sp) { +- MacroAssembler _masm(&cbuf); +- assert(as_Register($mem$$base) == rthread, "unexpected store for sp"); +- __ mov(rscratch2, sp); +- reg = rscratch2; +- } +- Address::extend scale; +- +- // Hooboy, this is fugly. We need a way to communicate to the +- // encoder that the index needs to be sign extended, so we have to +- // enumerate all the cases. +- switch (opcode) { +- case INDINDEXSCALEDOFFSETI2L: +- case INDINDEXSCALEDI2L: +- case INDINDEXSCALEDOFFSETI2LN: +- case INDINDEXSCALEDI2LN: +- case INDINDEXOFFSETI2L: +- case INDINDEXOFFSETI2LN: +- scale = Address::sxtw(size); +- break; +- default: +- scale = Address::lsl(size); +- } +- Address adr; +- if (index == -1) { +- adr = Address(base, disp); +- } else { +- if (disp == 0) { +- adr = Address(base, as_Register(index), scale); +- } else { +- __ lea(rscratch1, Address(base, disp)); +- adr = Address(rscratch1, as_Register(index), scale); +- } +- } +- +- __ str(reg, adr); +- %} ++ ins_encode(aarch64_enc_str(src, mem)); + + ins_pipe(istore_reg_mem); + %} +@@ -8232,7 +8164,6 @@ + + instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + +- predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR); + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + ins_cost(2 * VOLATILE_REF_COST); + +@@ -8249,26 +8180,8 @@ + ins_pipe(pipe_slow); + %} + +-instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ +- +- predicate(UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypePtr::NULL_PTR); +- match(Set res (CompareAndSwapP mem (Binary oldval newval))); +- ins_cost(2 * VOLATILE_REF_COST); +- +- effect(TEMP tmp, KILL cr); +- +- format %{ +- "cmpxchg_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" +- %} +- +- 
ins_encode(aarch64_enc_cmpxchg_oop_shenandoah(mem, oldval, newval, tmp, res)); +- +- ins_pipe(pipe_slow); +-%} +- + instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{ + +- predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypeNarrowOop::NULL_PTR); + match(Set res (CompareAndSwapN mem (Binary oldval newval))); + ins_cost(2 * VOLATILE_REF_COST); + +@@ -8285,26 +8198,6 @@ + ins_pipe(pipe_slow); + %} + +-instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ +- +- predicate(UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypeNarrowOop::NULL_PTR); +- match(Set res (CompareAndSwapN mem (Binary oldval newval))); +- ins_cost(2 * VOLATILE_REF_COST); +- +- effect(TEMP tmp, KILL cr); +- +- format %{ +- "cmpxchgw_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" +- %} +- +- ins_encode %{ +- Register tmp = $tmp$$Register; +- __ mov(tmp, $oldval$$Register); // Must not clobber oldval. 
+- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); +- %} +- +- ins_pipe(pipe_slow); +-%} + + // alternative CompareAndSwapX when we are eliding barriers + +@@ -8348,7 +8241,7 @@ + + instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ + +- predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR)); ++ predicate(needs_acquiring_load_exclusive(n)); + match(Set res (CompareAndSwapP mem (Binary oldval newval))); + ins_cost(VOLATILE_REF_COST); + +@@ -8365,26 +8258,9 @@ + ins_pipe(pipe_slow); + %} + +-instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ +- +- predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypePtr::NULL_PTR); +- match(Set res (CompareAndSwapP mem (Binary oldval newval))); +- ins_cost(VOLATILE_REF_COST); +- +- effect(TEMP tmp, KILL cr); +- +- format %{ +- "cmpxchg_acq_shenandoah_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" +- %} +- +- ins_encode(aarch64_enc_cmpxchg_acq_oop_shenandoah(mem, oldval, newval, tmp, res)); +- +- ins_pipe(pipe_slow); +-%} +- + instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{ + +- predicate(needs_acquiring_load_exclusive(n) && (!UseShenandoahGC || !ShenandoahCASBarrier|| n->in(3)->in(1)->bottom_type() == TypeNarrowOop::NULL_PTR)); ++ predicate(needs_acquiring_load_exclusive(n)); + match(Set res (CompareAndSwapN mem (Binary oldval newval))); + ins_cost(VOLATILE_REF_COST); + +@@ -8401,26 +8277,6 @@ + ins_pipe(pipe_slow); + %} + +-instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN 
oldval, iRegN newval, iRegNNoSp tmp, rFlagsReg cr) %{ +- +- predicate(needs_acquiring_load_exclusive(n) && UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypeNarrowOop::NULL_PTR); +- match(Set res (CompareAndSwapN mem (Binary oldval newval))); +- ins_cost(VOLATILE_REF_COST); +- +- effect(TEMP tmp, KILL cr); +- +- format %{ +- "cmpxchgw_acq_shenandoah_narrow_oop $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval with temp $tmp" +- %} +- +- ins_encode %{ +- Register tmp = $tmp$$Register; +- __ mov(tmp, $oldval$$Register); // Must not clobber oldval. +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*weak*/ false, /*is_cae*/ false, $res$$Register); +- %} +- +- ins_pipe(pipe_slow); +-%} + + instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{ + match(Set prev (GetAndSetI mem newv)); +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/assembler_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/assembler_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/assembler_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/assembler_aarch64.cpp 2025-05-06 10:53:44.867633665 +0800 +@@ -33,7 +33,7 @@ + #include "interpreter/interpreter.hpp" + + #ifndef PRODUCT +-const unsigned long Assembler::asm_bp = 0x0000007fa8092b5c; ++const unsigned long Assembler::asm_bp = 0x00007fffee09ac88; + #endif + + #include "compiler/disassembler.hpp" +@@ -1266,10 +1266,6 @@ + __ movptr(r, (uint64_t)target()); + break; + } +- case post: { +- __ mov(r, _base); +- break; +- } + default: + ShouldNotReachHere(); + } +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp 2025-05-06 
11:13:08.095672949 +0800 +@@ -43,9 +43,6 @@ + #include "vmreg_aarch64.inline.hpp" + + +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#endif + + #ifndef PRODUCT + #define COMMENT(x) do { __ block_comment(x); } while (0) +@@ -1638,55 +1635,29 @@ + } + + +-// Return 1 in rscratch1 if the CAS fails. + void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { + assert(VM_Version::supports_cx8(), "wrong machine"); + Register addr = as_reg(op->addr()); + Register newval = as_reg(op->new_value()); + Register cmpval = as_reg(op->cmp_value()); + Label succeed, fail, around; +- Register res = op->result_opr()->as_register(); + + if (op->code() == lir_cas_obj) { +- assert(op->tmp1()->is_valid(), "must be"); +- Register t1 = op->tmp1()->as_register(); + if (UseCompressedOops) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCASBarrier) { +- __ encode_heap_oop(t1, cmpval); +- cmpval = t1; +- assert(op->tmp2()->is_valid(), "must be"); +- Register t2 = op->tmp2()->as_register(); +- __ encode_heap_oop(t2, newval); +- newval = t2; +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, res); +- } else +-#endif +- { +- __ encode_heap_oop(t1, cmpval); +- cmpval = t1; +- __ encode_heap_oop(rscratch2, newval); +- newval = rscratch2; +- casw(addr, newval, cmpval); +- __ eorw (res, r8, 1); +- } ++ Register t1 = op->tmp1()->as_register(); ++ assert(op->tmp1()->is_valid(), "must be"); ++ __ encode_heap_oop(t1, cmpval); ++ cmpval = t1; ++ __ encode_heap_oop(rscratch2, newval); ++ newval = rscratch2; ++ casw(addr, newval, cmpval); + } else { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCASBarrier) { +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, res); +- } else +-#endif +- { +- casl(addr, newval, cmpval); +- __ eorw (res, 
r8, 1); +- } ++ casl(addr, newval, cmpval); + } + } else if (op->code() == lir_cas_int) { + casw(addr, newval, cmpval); +- __ eorw (res, r8, 1); + } else { + casl(addr, newval, cmpval); +- __ eorw (res, r8, 1); + } + } + +@@ -2934,14 +2905,7 @@ + } + + +-void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && patch_code != lir_patch_none) { +- deoptimize_trap(info); +- return; +- } +-#endif +- ++void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { + __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr())); + } + +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp 2025-05-06 10:53:44.871633665 +0800 +@@ -39,10 +39,6 @@ + #include "runtime/stubRoutines.hpp" + #include "vmreg_aarch64.inline.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" +-#endif +- + #ifdef ASSERT + #define __ gen()->lir(__FILE__, __LINE__)-> + #else +@@ -834,18 +830,19 @@ + LIR_Opr result = rlock_result(x); + + LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience +- +- if (type == objectType) { ++ if (type == objectType) + __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT), + result); +- } else if (type == intType) +- __ cas_int(addr, cmp.result(), val.result(), ill, ill, result); ++ else if (type == intType) ++ __ cas_int(addr, cmp.result(), val.result(), ill, ill); + else if (type == longType) +- __ cas_long(addr, cmp.result(), val.result(), ill, ill, result); ++ __ cas_long(addr, cmp.result(), val.result(), ill, ill); + else { + ShouldNotReachHere(); + } + ++ __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result); ++ + if (type == objectType) 
{ // Write-barrier needed for Object fields. + // Seems to be precise + post_barrier(addr, val.result()); +@@ -1440,12 +1437,6 @@ + true /* do_load */, false /* patch */, NULL); + } + __ xchg(LIR_OprFact::address(addr), data, dst, tmp); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && is_obj) { +- LIR_Opr tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, dst, LIR_OprFact::addressConst(0)); +- __ move(tmp, dst); +- } +-#endif + if (is_obj) { + post_barrier(ptr, data); + } +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp 2025-05-06 10:53:44.871633665 +0800 +@@ -43,9 +43,7 @@ + #include "runtime/vframeArray.hpp" + #include "vmreg_aarch64.inline.hpp" + #if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" + #endif + + +@@ -1180,7 +1178,7 @@ + // arg0 : previous value of memory + + BarrierSet* bs = Universe::heap()->barrier_set(); +- if (bs->kind() != BarrierSet::G1SATBCTLogging && bs->kind() != BarrierSet::ShenandoahBarrierSet) { ++ if (bs->kind() != BarrierSet::G1SATBCTLogging) { + __ mov(r0, (int)id); + __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0); + __ should_not_reach_here(); +@@ -1230,13 +1228,6 @@ + Address store_addr(rfp, 2*BytesPerWord); + + BarrierSet* bs = Universe::heap()->barrier_set(); +- if (bs->kind() == BarrierSet::ShenandoahBarrierSet) { +- __ movptr(r0, (int)id); +- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0); +- __ should_not_reach_here(); +- break; +- } +- + CardTableModRefBS* ct = (CardTableModRefBS*)bs; + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust 
this code"); + +@@ -1300,25 +1291,6 @@ + + } + break; +- case shenandoah_lrb_slow_id: +- { +- StubFrame f(sasm, "shenandoah_load_reference_barrier", dont_gc_arguments); +- // arg0 : object to be resolved +- +- __ push_call_clobbered_registers(); +- f.load_argument(0, r0); +- f.load_argument(1, r1); +- if (UseCompressedOops) { +- __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow)); +- } else { +- __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)); +- } +- __ blr(lr); +- __ mov(rscratch1, r0); +- __ pop_call_clobbered_registers(); +- __ mov(r0, rscratch1); +- } +- break; + #endif + + case predicate_failed_trap_id: +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp 2025-05-06 10:53:44.875633665 +0800 +@@ -1,4 +1,5 @@ + /* ++/* + * Copyright (c) 2013, Red Hat Inc. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. + * All rights reserved. 
+@@ -32,7 +33,6 @@ + #include "interpreter/interpreter.hpp" + + #include "compiler/disassembler.hpp" +-#include "gc_interface/collectedHeap.inline.hpp" + #include "memory/resourceArea.hpp" + #include "runtime/biasedLocking.hpp" + #include "runtime/interfaceSupport.hpp" +@@ -53,7 +53,6 @@ + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" + #include "gc_implementation/g1/heapRegion.hpp" +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" + #endif + + #ifdef COMPILER2 +@@ -1645,12 +1644,6 @@ + } + } + +-void MacroAssembler::mov(Register dst, address addr) { +- assert(Universe::heap() == NULL +- || !Universe::heap()->is_in(addr), "use movptr for oop pointers"); +- mov_immediate64(dst, (uintptr_t)addr); +-} +- + // Form an address from base + offset in Rd. Rd may or may + // not actually be used: you must use the Address that is returned. + // It is up to you to ensure that the shift provided matches the size +@@ -2374,7 +2367,9 @@ + } + } + +-void MacroAssembler::push_call_clobbered_fp_registers() { ++void MacroAssembler::push_call_clobbered_registers() { ++ push(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp); ++ + // Push v0-v7, v16-v31. 
+ for (int i = 30; i >= 0; i -= 2) { + if (i <= v7->encoding() || i >= v16->encoding()) { +@@ -2384,7 +2379,7 @@ + } + } + +-void MacroAssembler::pop_call_clobbered_fp_registers() { ++void MacroAssembler::pop_call_clobbered_registers() { + + for (int i = 0; i < 32; i += 2) { + if (i <= v7->encoding() || i >= v16->encoding()) { +@@ -2392,17 +2387,6 @@ + Address(post(sp, 2 * wordSize))); + } + } +-} +- +-void MacroAssembler::push_call_clobbered_registers() { +- push(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp); +- +- push_call_clobbered_fp_registers(); +-} +- +-void MacroAssembler::pop_call_clobbered_registers() { +- +- pop_call_clobbered_fp_registers(); + + pop(RegSet::range(r0, r18) - RegSet::of(rscratch1, rscratch2), sp); + } +@@ -3477,13 +3461,6 @@ + + void MacroAssembler::load_heap_oop(Register dst, Address src) + { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierSetAssembler::bsasm()->load_heap_oop(this, dst, src); +- return; +- } +-#endif +- + if (UseCompressedOops) { + ldrw(dst, src); + decode_heap_oop(dst); +@@ -3494,13 +3471,6 @@ + + void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) + { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierSetAssembler::bsasm()->load_heap_oop(this, dst, src); +- return; +- } +-#endif +- + if (UseCompressedOops) { + ldrw(dst, src); + decode_heap_oop_not_null(dst); +@@ -3644,13 +3614,6 @@ + assert(store_addr != noreg && new_val != noreg && tmp != noreg + && tmp2 != noreg, "expecting a register"); + +- if (UseShenandoahGC) { +- // No need for this in Shenandoah. 
+- return; +- } +- +- assert(UseG1GC, "expect G1 GC"); +- + Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_index())); + Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp afu8u/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp +--- openjdk/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp 2025-05-06 11:13:08.095672949 +0800 +@@ -454,13 +454,14 @@ + // 64 bits of each vector register. + void push_call_clobbered_registers(); + void pop_call_clobbered_registers(); +- void push_call_clobbered_fp_registers(); +- void pop_call_clobbered_fp_registers(); + + // now mov instructions for loading absolute addresses and 32 or + // 64 bit integers + +- void mov(Register dst, address addr); ++ inline void mov(Register dst, address addr) ++ { ++ mov_immediate64(dst, (u_int64_t)addr); ++ } + + inline void mov(Register dst, u_int64_t imm64) + { +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp 2025-05-06 10:53:44.879633665 +0800 +@@ -1946,7 +1946,7 @@ + __ ldr(r0, Address(r0, -JNIHandles::weak_tag_value)); + __ verify_oop(r0); + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + __ g1_write_barrier_pre(noreg /* obj */, + r0 /* pre_val */, + rthread /* thread */, +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.cpp 2023-04-19 05:53:03.000000000 
+0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,328 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "c1/c1_MacroAssembler.hpp" +-#include "c1/c1_LIRAssembler.hpp" +-#include "macroAssembler_aarch64.hpp" +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" +-#include "runtime/stubCodeGenerator.hpp" +-#include "runtime/thread.hpp" +- +-ShenandoahBarrierSetAssembler* ShenandoahBarrierSetAssembler::bsasm() { +- return ShenandoahBarrierSet::barrier_set()->bsasm(); +-} +- +-#define __ masm-> +- +-void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, bool dest_uninitialized, +- Register src, Register dst, Register count) { +- if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahStoreValEnqueueBarrier || ShenandoahLoadRefBarrier) { +- +- Label done; +- +- // Avoid calling runtime if count == 0 +- __ cbz(count, done); +- +- // Is GC active? 
+- Address gc_state(rthread, in_bytes(JavaThread::gc_state_offset())); +- __ ldrb(rscratch1, gc_state); +- if (ShenandoahSATBBarrier && dest_uninitialized) { +- __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done); +- } else { +- __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING); +- __ tst(rscratch1, rscratch2); +- __ br(Assembler::EQ, done); +- } +- +- __ push_call_clobbered_registers(); +- if (UseCompressedOops) { +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count); +- } else { +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count); +- } +- __ pop_call_clobbered_registers(); +- __ bind(done); +- } +-} +- +-void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) { +- assert(ShenandoahCASBarrier, "should be enabled"); +- Label is_null; +- __ cbz(dst, is_null); +- resolve_forward_pointer_not_null(masm, dst, tmp); +- __ bind(is_null); +-} +- +-// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitely +-// passed in. +-void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) { +- assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled"); +- // The below loads the mark word, checks if the lowest two bits are +- // set, and if so, clear the lowest two bits and copy the result +- // to dst. Otherwise it leaves dst alone. +- // Implementing this is surprisingly awkward. I do it here by: +- // - Inverting the mark word +- // - Test lowest two bits == 0 +- // - If so, set the lowest two bits +- // - Invert the result back, and copy to dst +- +- bool borrow_reg = (tmp == noreg); +- if (borrow_reg) { +- // No free registers available. Make one useful. 
+- tmp = rscratch1; +- if (tmp == dst) { +- tmp = rscratch2; +- } +- __ push(RegSet::of(tmp), sp); +- } +- +- assert_different_registers(tmp, dst); +- +- Label done; +- __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes())); +- __ eon(tmp, tmp, zr); +- __ ands(zr, tmp, markOopDesc::lock_mask_in_place); +- __ br(Assembler::NE, done); +- __ orr(tmp, tmp, markOopDesc::marked_value); +- __ eon(dst, tmp, zr); +- __ bind(done); +- +- if (borrow_reg) { +- __ pop(RegSet::of(tmp), sp); +- } +-} +- +-void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr) { +- if (!ShenandoahLoadRefBarrier) { +- return; +- } +- +- assert(dst != rscratch2, "need rscratch2"); +- assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2); +- +- bool is_narrow = UseCompressedOops; +- +- Label heap_stable, not_cset; +- __ enter(); +- Address gc_state(rthread, in_bytes(JavaThread::gc_state_offset())); +- __ ldrb(rscratch2, gc_state); +- +- // Check for heap stability +- __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, heap_stable); +- +- // use r1 for load address +- Register result_dst = dst; +- if (dst == r1) { +- __ mov(rscratch1, dst); +- dst = rscratch1; +- } +- +- // Save r0 and r1, unless it is an output register +- RegSet to_save = RegSet::of(r0, r1) - result_dst; +- __ push(to_save, sp); +- __ lea(r1, load_addr); +- __ mov(r0, dst); +- +- // Test for in-cset +- __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); +- __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint()); +- __ ldrb(rscratch2, Address(rscratch2, rscratch1)); +- __ tbz(rscratch2, 0, not_cset); +- +- __ push_call_clobbered_registers(); +- if (is_narrow) { +- __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow)); +- } else { +- __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)); +- } +- __ blr(lr); +- __ mov(rscratch1, r0); +- __ 
pop_call_clobbered_registers(); +- __ mov(r0, rscratch1); +- +- __ bind(not_cset); +- +- __ mov(result_dst, r0); +- __ pop(to_save, sp); +- +- __ bind(heap_stable); +- __ leave(); +-} +- +-void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { +- if (ShenandoahStoreValEnqueueBarrier) { +- // Save possibly live regs. +- RegSet live_regs = RegSet::range(r0, r4) - dst; +- __ push(live_regs, sp); +- __ strd(v0, __ pre(sp, 2 * -wordSize)); +- +- __ g1_write_barrier_pre(noreg, dst, rthread, tmp, true, false); +- +- // Restore possibly live regs. +- __ ldrd(v0, __ post(sp, 2 * wordSize)); +- __ pop(live_regs, sp); +- } +-} +- +-void ShenandoahBarrierSetAssembler::load_heap_oop(MacroAssembler* masm, Register dst, Address src) { +- Register result_dst = dst; +- +- // Preserve src location for LRB +- if (dst == src.base() || dst == src.index()) { +- dst = rscratch1; +- } +- assert_different_registers(dst, src.base(), src.index()); +- +- if (UseCompressedOops) { +- __ ldrw(dst, src); +- __ decode_heap_oop(dst); +- } else { +- __ ldr(dst, src); +- } +- +- load_reference_barrier(masm, dst, src); +- +- if (dst != result_dst) { +- __ mov(result_dst, dst); +- dst = result_dst; +- } +-} +- +-void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val, +- bool acquire, bool release, bool weak, bool is_cae, +- Register result) { +- +- Register tmp1 = rscratch1; +- Register tmp2 = rscratch2; +- bool is_narrow = UseCompressedOops; +- Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword; +- +- assert_different_registers(addr, expected, new_val, tmp1, tmp2); +- +- Label retry, done, fail; +- +- // CAS, using LL/SC pair. 
+- __ bind(retry); +- __ load_exclusive(tmp1, addr, size, acquire); +- if (is_narrow) { +- __ cmpw(tmp1, expected); +- } else { +- __ cmp(tmp1, expected); +- } +- __ br(Assembler::NE, fail); +- __ store_exclusive(tmp2, new_val, addr, size, release); +- if (weak) { +- __ cmpw(tmp2, 0u); // If the store fails, return NE to our caller +- } else { +- __ cbnzw(tmp2, retry); +- } +- __ b(done); +- +- __ bind(fail); +- // Check if rb(expected)==rb(tmp1) +- // Shuffle registers so that we have memory value ready for next expected. +- __ mov(tmp2, expected); +- __ mov(expected, tmp1); +- if (is_narrow) { +- __ decode_heap_oop(tmp1, tmp1); +- __ decode_heap_oop(tmp2, tmp2); +- } +- resolve_forward_pointer(masm, tmp1); +- resolve_forward_pointer(masm, tmp2); +- __ cmp(tmp1, tmp2); +- // Retry with expected now being the value we just loaded from addr. +- __ br(Assembler::EQ, retry); +- if (is_cae && is_narrow) { +- // For cmp-and-exchange and narrow oops, we need to restore +- // the compressed old-value. We moved it to 'expected' a few lines up. +- __ mov(result, expected); +- } +- __ bind(done); +- +- if (is_cae) { +- __ mov(result, tmp1); +- } else { +- __ cset(result, Assembler::EQ); +- } +-} +- +-#undef __ +- +-#ifdef COMPILER1 +- +-#define __ ce->masm()-> +- +-void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) { +- __ bind(*stub->entry()); +- +- Register obj = stub->obj()->as_register(); +- Register res = stub->result()->as_register(); +- Register addr = stub->addr()->as_pointer_register(); +- Register tmp1 = stub->tmp1()->as_register(); +- Register tmp2 = stub->tmp2()->as_register(); +- +- assert(res == r0, "result must arrive in r0"); +- +- if (res != obj) { +- __ mov(res, obj); +- } +- +- // Check for null. +- __ cbz(res, *stub->continuation()); +- +- // Check for object in cset. 
+- __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr()); +- __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint()); +- __ ldrb(tmp2, Address(tmp2, tmp1)); +- __ cbz(tmp2, *stub->continuation()); +- +- // Check if object is already forwarded. +- Label slow_path; +- __ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes())); +- __ eon(tmp1, tmp1, zr); +- __ ands(zr, tmp1, markOopDesc::lock_mask_in_place); +- __ br(Assembler::NE, slow_path); +- +- // Decode forwarded object. +- __ orr(tmp1, tmp1, markOopDesc::marked_value); +- __ eon(res, tmp1, zr); +- __ b(*stub->continuation()); +- +- __ bind(slow_path); +- ce->store_parameter(res, 0); +- ce->store_parameter(addr, 1); +- __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::shenandoah_lrb_slow_id))); +- +- __ b(*stub->continuation()); +-} +- +-#undef __ +- +-#endif // COMPILER1 +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.hpp afu8u/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.hpp +--- openjdk/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/shenandoahBarrierSetAssembler_aarch64.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,63 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP +-#define CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP +- +-#include "asm/macroAssembler.hpp" +-#include "memory/allocation.hpp" +-#ifdef COMPILER1 +-class LIR_Assembler; +-class ShenandoahLoadReferenceBarrierStub; +-class StubAssembler; +-class StubCodeGenerator; +-#endif +- +-class ShenandoahBarrierSetAssembler : public CHeapObj { +-private: +- +- void resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp = noreg); +- void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg); +- +-public: +- static ShenandoahBarrierSetAssembler* bsasm(); +- +- void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp); +- +-#ifdef COMPILER1 +- void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); +-#endif +- +- void load_reference_barrier(MacroAssembler* masm, Register dst, Address src); +- +- void load_heap_oop(MacroAssembler* masm, Register dst, Address src); +- +- virtual void arraycopy_prologue(MacroAssembler* masm, bool dest_uninitialized, +- Register src, Register dst, Register count); +- virtual void cmpxchg_oop(MacroAssembler* masm, +- Register addr, Register expected, Register new_val, +- bool acquire, bool release, bool weak, bool is_cae, +- Register result); +-}; +- +-#endif // CPU_AARCH64_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_AARCH64_HPP +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp 
afu8u/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp 2025-05-06 10:53:44.879633665 +0800 +@@ -40,17 +40,10 @@ + #include "runtime/stubCodeGenerator.hpp" + #include "runtime/stubRoutines.hpp" + #include "runtime/thread.inline.hpp" +-#include "utilities/macros.hpp" + #include "utilities/top.hpp" +- +-#include "stubRoutines_aarch64.hpp" +- + #ifdef COMPILER2 + #include "opto/runtime.hpp" + #endif +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#endif + + // Declaration and definition of StubGenerator (no .hpp file). + // For a more detailed description of the stub routine structure +@@ -604,12 +597,12 @@ + // + // Destroy no registers except rscratch1 and rscratch2 + // +- void gen_write_ref_array_pre_barrier(Register src, Register addr, Register count, bool dest_uninitialized) { ++ void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { + BarrierSet* bs = Universe::heap()->barrier_set(); + switch (bs->kind()) { + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: +- // Don't generate the call if we statically know that the target is uninitialized ++ // With G1, don't generate the call if we statically know that the target in uninitialized + if (!dest_uninitialized) { + __ push_call_clobbered_registers(); + if (count == c_rarg0) { +@@ -633,11 +626,6 @@ + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: + break; +-#if INCLUDE_ALL_GCS +- case BarrierSet::ShenandoahBarrierSet: +- ShenandoahBarrierSetAssembler::bsasm()->arraycopy_prologue(_masm, dest_uninitialized, src, addr, count); +- break; +-#endif + default: + ShouldNotReachHere(); + +@@ -706,10 +694,6 @@ + __ br(Assembler::GE, L_loop); + } + break; +-#if INCLUDE_ALL_GCS +- case BarrierSet::ShenandoahBarrierSet: +- break; +-#endif + default: + 
ShouldNotReachHere(); + +@@ -1427,7 +1411,7 @@ + if (is_oop) { + __ push(RegSet::of(d, count), sp); + // no registers are destroyed by this call +- gen_write_ref_array_pre_barrier(s, d, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(d, count, dest_uninitialized); + } + copy_memory(aligned, s, d, count, rscratch1, size); + if (is_oop) { +@@ -1483,7 +1467,7 @@ + if (is_oop) { + __ push(RegSet::of(d, count), sp); + // no registers are destroyed by this call +- gen_write_ref_array_pre_barrier(s, d, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(d, count, dest_uninitialized); + } + copy_memory(aligned, s, d, count, rscratch1, -size); + if (is_oop) { +@@ -1617,7 +1601,7 @@ + // used by generate_conjoint_int_oop_copy(). + // + address generate_disjoint_int_copy(bool aligned, address *entry, +- const char *name) { ++ const char *name, bool dest_uninitialized = false) { + const bool not_oop = false; + return generate_disjoint_copy(sizeof (jint), aligned, not_oop, entry, name); + } +@@ -1825,7 +1809,7 @@ + } + #endif //ASSERT + +- gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + + // save the original count + __ mov(count_save, count); +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp 2025-05-06 10:53:44.879633665 +0800 +@@ -692,13 +692,12 @@ + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + Label slow_path; + const Register local_0 = c_rarg0; + // Check if local 0 != NULL + // If the receiver is null then it is OK 
to jump to the slow path. + __ ldr(local_0, Address(esp, 0)); +- __ mov(r19, r13); // First call-saved register + __ cbz(local_0, slow_path); + + // Load the value of the referent field. +@@ -709,18 +708,12 @@ + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. + __ enter(); // g1_write may call runtime +- if (UseShenandoahGC) { +- __ push_call_clobbered_registers(); +- } + __ g1_write_barrier_pre(noreg /* obj */, + local_0 /* pre_val */, + rthread /* thread */, + rscratch2 /* tmp */, + true /* tosca_live */, + true /* expand_call */); +- if (UseShenandoahGC) { +- __ pop_call_clobbered_registers(); +- } + __ leave(); + // areturn + __ andr(sp, r19, -16); // done with stack +@@ -1195,7 +1188,7 @@ + // Resolve jweak. + __ ldr(r0, Address(r0, -JNIHandles::weak_tag_value)); + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + __ enter(); // Barrier may call runtime. + __ g1_write_barrier_pre(noreg /* obj */, + r0 /* pre_val */, +diff -uNr openjdk/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp afu8u/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp +--- openjdk/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp 2025-05-06 10:53:44.879633665 +0800 +@@ -38,9 +38,6 @@ + #include "runtime/sharedRuntime.hpp" + #include "runtime/stubRoutines.hpp" + #include "runtime/synchronizer.hpp" +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#endif + + #ifndef CC_INTERP + +@@ -188,35 +185,6 @@ + } + + } +- break; +- case BarrierSet::ShenandoahBarrierSet: +- { +- // flatten object address if needed +- if (obj.index() == noreg && obj.offset() == 0) { +- if (obj.base() != r3) { +- __ mov(r3, obj.base()); +- } +- } else { +- __ lea(r3, obj); +- } +- if (ShenandoahSATBBarrier) { +- __ g1_write_barrier_pre(r3 /* obj */, +- r1 /* pre_val */, +- rthread 
/* thread */, +- r10 /* tmp */, +- val != noreg /* tosca_live */, +- false /* expand_call */); +- } +- if (val == noreg) { +- __ store_heap_oop_null(Address(r3, 0)); +- } else { +- if (ShenandoahStoreValEnqueueBarrier) { +- ShenandoahBarrierSetAssembler::bsasm()->storeval_barrier(_masm, val, r10); +- } +- __ store_heap_oop(Address(r3, 0), val); +- } +- +- } + break; + #endif // INCLUDE_ALL_GCS + case BarrierSet::CardTableModRef: +diff -uNr openjdk/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp afu8u/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp +--- openjdk/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp 2025-05-06 11:13:08.095672949 +0800 +@@ -683,9 +683,6 @@ + // The peephole pass fills the delay slot + } + +-void LIR_Assembler::emit_opShenandoahWriteBarrier(LIR_OpShenandoahWriteBarrier* op) { +- Unimplemented(); +-} + + void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { + Bytecodes::Code code = op->bytecode(); +@@ -3498,7 +3495,7 @@ + } + + +-void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { ++void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { + LIR_Address* addr = addr_opr->as_address_ptr(); + assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet"); + +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/assembler_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/assembler_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/assembler_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/assembler_sw64.cpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,432 @@ ++/* ++ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/assembler.hpp" ++#include "asm/assembler.inline.hpp" ++#include "gc_interface/collectedHeap.inline.hpp" ++#include "interpreter/interpreter.hpp" ++#include "memory/cardTableModRefBS.hpp" ++#include "memory/resourceArea.hpp" ++#include "prims/methodHandles.hpp" ++#include "runtime/biasedLocking.hpp" ++#include "runtime/interfaceSupport.hpp" ++#include "runtime/objectMonitor.hpp" ++#include "runtime/os.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#if INCLUDE_ALL_GCS ++#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" ++#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" ++#include "gc_implementation/g1/heapRegion.hpp" ++#endif // INCLUDE_ALL_GCS ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) ++#else ++#define BLOCK_COMMENT(str) { char line[1024]; sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); block_comment(line); } ++#define STOP(error) block_comment(error); stop(error) ++#endif ++ 
++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++// Implementation of AddressLiteral ++ ++AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) { ++ _is_lval = false; ++ _target = target; ++ _rspec = rspec_from_rtype(rtype, target); ++} ++ ++// Implementation of Address ++ ++Address Address::make_array(ArrayAddress adr) { ++ AddressLiteral base = adr.base(); ++ Address index = adr.index(); ++ assert(index._disp == 0, "must not have disp"); // maybe it can? ++ Address array(index._base, index._index, index._scale, (intptr_t) base.target()); ++ array._rspec = base._rspec; ++ return array; ++} ++ ++// exceedingly dangerous constructor ++Address::Address(address loc, RelocationHolder spec) { ++ _base = noreg; ++ _index = noreg; ++ _scale = no_scale; ++ _disp = (intptr_t) loc; ++ _rspec = spec; ++} ++ ++int Assembler::is_int_mask(int x) { ++ int xx = x; ++ int count = 0; ++ ++ while (x != 0) { ++ x &= (x - 1); ++ count++; ++ } ++ ++ if ((1<>16; ++ int16_t low = (int16_t)(imm); ++ if(is_simm16(imm)){ ++ ldi(reg, R0, imm); ++ } else { ++ ldih(reg, R0, high); ++ ldi(reg, reg, low); ++ if( ((int)high == (-32768)) && (low < 0) ) ++ addw(reg, reg, 0); ++ } ++} ++ ++void Assembler::lldw(Register rt, Address src){ ++ assert(src.index() == noreg, "bad address in Assembler::lldw"); ++ lldw(rt, src.base(), src.disp()); ++} ++ ++void Assembler::lldl(Register rt, Address src){ ++ assert(src.index() == noreg, "bad address in Assembler::lldl"); ++ lldl(rt, src.base(), src.disp()); ++} ++ ++ ++void Assembler::ldw_unsigned(Register rt, Address src){ ++ assert(src.index() == noreg, "bad address in Assembler::ldw_unsigned"); ++ ldw_unsigned(rt, src.base(), src.disp()); ++} ++ ++void Assembler::stb(Register rt, Address dst) { ++ assert(dst.index() == noreg, "bad address in Assembler::stb"); ++ stb(rt, dst.base(), dst.disp()); ++} ++ ++void Assembler::stl(Register rt, Address dst) { ++ Register src = rt; ++ Register base = dst.base(); ++ Register index 
= dst.index(); ++ ++ int scale = dst.scale(); ++ int disp = dst.disp(); ++ ++ if(index != noreg) { ++ if(is_simm16(disp)) { ++ if (scale == 0) { ++ addl(AT, base, index); ++ } else { ++ slll(AT, index, scale); ++ addl(AT, base, AT); ++ } ++ stl(src, AT, disp); ++ } else { ++ if (scale == 0) { ++ li32(AT,disp); ++ addl(AT, AT, base); ++ addl(AT, AT, index); ++ stl(src, AT, 0); ++ } else { ++ stl(T12, SP, -wordSize);//This should be T12 instead of T9. jx ++ add_simm16(SP, SP, -wordSize); ++ ++ slll(AT, index, scale); ++ addl(AT, base, AT); ++ li32(T12,disp); ++ addl(AT, AT, T12); ++ stl(src, AT, 0); ++ ++ ldl(T12, SP, 0); ++ add_simm16(SP, SP, wordSize); ++ } ++ } ++ } else { ++ if(is_simm16(disp)) { ++ stl(src, base, disp); ++ } else { ++ li32(AT,disp); ++ addl(AT, base, AT); ++ stl(src, AT, 0); ++ } ++ } ++} ++ ++void Assembler::sth(Register rt, Address dst) { ++ assert(dst.index() == noreg, "bad address in Assembler::sth"); ++ sth(rt, dst.base(), dst.disp()); ++} ++ ++void Assembler::stw(Register rt, Address dst) { ++ Register src = rt; ++ Register base = dst.base(); ++ Register index = dst.index(); ++ ++ int scale = dst.scale(); ++ int disp = dst.disp(); ++ ++ if(index != noreg) { ++ if( Assembler::is_simm16(disp) ) { ++ if (scale == 0) { ++ addl(AT, base, index); ++ } else { ++ slll(AT, index, scale); ++ addl(AT, base, AT); ++ } ++ stw(src, AT, disp); ++ } else { ++ if (scale == 0) { ++ li32(AT,disp); ++ addl(AT, AT, base); ++ addl(AT, AT, index); ++ stw(src, AT, 0); ++ } else { ++ stl(T12, SP, -wordSize); ++ add_simm16(SP, SP, -wordSize); ++ ++ slll(AT, index, scale); ++ addl(AT, base, AT); ++ li32(T12,disp); ++ addl(AT, AT, T12); ++ stw(src, AT, 0); ++ ++ ldl(T12, SP, 0); ++ add_simm16(SP, SP, wordSize); ++ } ++ } ++ } else { ++ if( Assembler::is_simm16(disp) ) { ++ stw(src, base, disp); ++ } else { ++ li32(AT,disp); ++ addl(AT, base, AT); ++ stw(src, AT, 0); ++ } ++} ++} ++ ++void Assembler::lstw(Register rt, Address dst) { ++ assert(dst.index() == noreg, 
"bad address in Assembler::lstw"); ++ lstw(rt, dst.base(), dst.disp()); ++} ++ ++void Assembler::lstl(Register rt, Address dst) { ++ assert(dst.index() == noreg, "bad address in Assembler::lstl"); ++ lstl(rt, dst.base(), dst.disp()); ++} ++ ++void Assembler::flds(FloatRegister rt, Address src) { ++ assert(src.index() == noreg, "bad address in Assembler::flds"); ++ flds(rt, src.base(), src.disp()); ++} ++ ++void Assembler::fldd(FloatRegister rt, Address src) { ++ assert(src.index() == noreg, "bad address in Assembler::fldd"); ++ fldd(rt, src.base(), src.disp()); ++} ++ ++void Assembler::fsts(FloatRegister rt, Address dst) { ++ assert(dst.index() == noreg, "bad address in Assembler::fsts"); ++ fsts(rt, dst.base(), dst.disp()); ++} ++ ++void Assembler::fstd(FloatRegister rt, Address dst) { ++ assert(dst.index() == noreg, "bad address in Assembler::fstd"); ++ fstd(rt, dst.base(), dst.disp()); ++} ++ ++////void Assembler::j(address entry) { ++//// Unimplemented(); ++////} ++//// ++////void Assembler::jal(address entry) { ++//// Unimplemented(); ++////} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/assembler_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/assembler_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/assembler_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/assembler_sw64.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,2437 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_ASSEMBLER_SW64_HPP ++#define CPU_SW64_VM_ASSEMBLER_SW64_HPP ++ ++#include "asm/register.hpp" ++ ++class BiasedLockingCounters; ++ ++// Define some macros to help SW64 Instructions' implementation. ++#define OP(x) (((x) & 0x3F) << 26) ++#define PCD(oo) (OP(oo)) ++#define OPMEM(oo) (OP(oo)) ++#define BRA(oo) (OP(oo)) ++#define OFP(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++#define FMA(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10)) ++#define MFC(oo,ff) (OP(oo) | ((ff) & 0xFFFF)) ++#define MBR(oo,h) (OP(oo) | (((h) & 3) << 14)) ++#define OPR(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++#define OPRL(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) ++#define TOPR(oo,ff) (OP(oo) | (((ff) & 0x07) << 10)) ++#define TOPRL(oo,ff) (OP(oo) | (((ff) & 0x07) << 10)) ++ ++#define ATMEM(oo,h) (OP(oo) | (((h) & 0xF) << 12)) ++#define PRIRET(oo,h) (OP(oo) | (((h) & 0x1) << 20)) ++#define EV6HWMEM(oo,ff) (OP(oo) | (((ff) & 0xF) << 12)) ++#define CSR(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8)) ++ ++#define LOGX(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10)) ++#define PSE_LOGX(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10) | (((ff) >> 0x6) << 26 ) | 0x3E0 ) ++ ++// Note: A register location is represented via a Register, not ++// via an address for efficiency & simplicity reasons. 
++ ++class ArrayAddress; ++ ++class Address VALUE_OBJ_CLASS_SPEC { ++ public: ++ enum ScaleFactor { ++ no_scale = -1, ++ times_1 = 0, ++ times_2 = 1, ++ times_4 = 2, ++ times_8 = 3, ++ times_ptr = times_8 ++ }; ++ ++ static ScaleFactor times(int size) { ++ assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size"); ++ if (size == 8) return times_8; ++ if (size == 4) return times_4; ++ if (size == 2) return times_2; ++ return times_1; ++ } ++ ++ private: ++ Register _base; ++ Register _index; ++ ScaleFactor _scale; ++ int _disp; ++ RelocationHolder _rspec; ++ ++ // Easily misused constructors make them private ++ Address(address loc, RelocationHolder spec); ++ Address(int disp, address loc, relocInfo::relocType rtype); ++ Address(int disp, address loc, RelocationHolder spec); ++ ++ public: ++ ++ // creation ++ Address() ++ : _base(noreg), ++ _index(noreg), ++ _scale(no_scale), ++ _disp(0) { ++ } ++ ++ // No default displacement otherwise Register can be implicitly ++ // converted to 0(Register) which is quite a different animal. ++ ++ Address(Register base, int disp = 0) ++ : _base(base), ++ _index(noreg), ++ _scale(no_scale), ++ _disp(disp) { ++ } ++ ++ Address(Register base, Register index, ScaleFactor scale, int disp = 0) ++ : _base (base), ++ _index(index), ++ _scale(scale), ++ _disp (disp) { ++ assert(!index->is_valid() == (scale == Address::no_scale), ++ "inconsistent address"); ++ } ++ ++ // The following two overloads are used in connection with the ++ // ByteSize type (see sizes.hpp). They simplify the use of ++ // ByteSize'd arguments in assembly code. Note that their equivalent ++ // for the optimized build are the member functions with int disp ++ // argument since ByteSize is mapped to an int type in that case. 
++ // ++ // Note: DO NOT introduce similar overloaded functions for WordSize ++ // arguments as in the optimized mode, both ByteSize and WordSize ++ // are mapped to the same type and thus the compiler cannot make a ++ // distinction anymore (=> compiler errors). ++ ++#ifdef ASSERT ++ Address(Register base, ByteSize disp) ++ : _base(base), ++ _index(noreg), ++ _scale(no_scale), ++ _disp(in_bytes(disp)) { ++ } ++ ++ Address(Register base, Register index, ScaleFactor scale, ByteSize disp) ++ : _base(base), ++ _index(index), ++ _scale(scale), ++ _disp(in_bytes(disp)) { ++ assert(!index->is_valid() == (scale == Address::no_scale), ++ "inconsistent address"); ++ } ++#endif // ASSERT ++ ++ // accessors ++ bool uses(Register reg) const { return _base == reg || _index == reg; } ++ Register base() const { return _base; } ++ Register index() const { return _index; } ++ ScaleFactor scale() const { return _scale; } ++ int disp() const { return _disp; } ++ ++ static Address make_array(ArrayAddress); ++ ++ friend class Assembler; ++ friend class MacroAssembler; ++ friend class LIR_Assembler; // base/index/scale/disp ++}; ++ ++ ++// Calling convention ++class Argument VALUE_OBJ_CLASS_SPEC { ++ private: ++ int _number; ++ public: ++ enum { ++ n_register_parameters = 6, // 6 integer registers used to pass parameters ++ n_float_register_parameters = 6 // 6 float registers used to pass parameters ++ }; ++ ++ Argument(int number):_number(number){ } ++ Argument successor() {return Argument(number() + 1);} ++ ++ int number()const {return _number;} ++ bool is_Register()const {return _number < n_register_parameters;} ++ bool is_FloatRegister()const {return _number < n_float_register_parameters;} ++ ++ Register as_Register()const { ++ assert(is_Register(), "must be a register argument"); ++ return ::as_Register(A0->encoding() + _number); ++ } ++ FloatRegister as_FloatRegister()const { ++ assert(is_FloatRegister(), "must be a float register argument"); ++ return 
::as_FloatRegister(F16->encoding() + _number); ++ } ++ ++ Address as_caller_address()const {return Address(SP, (number() LP64_ONLY( -n_register_parameters)) * wordSize);} ++}; ++ ++ ++ ++// ++// AddressLiteral has been split out from Address because operands of this type ++// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out ++// the few instructions that need to deal with address literals are unique and the ++// MacroAssembler does not have to implement every instruction in the Assembler ++// in order to search for address literals that may need special handling depending ++// on the instruction and the platform. As small step on the way to merging i486/amd64 ++// directories. ++// ++class AddressLiteral VALUE_OBJ_CLASS_SPEC { ++ friend class ArrayAddress; ++ RelocationHolder _rspec; ++ // Typically we use AddressLiterals we want to use their rval ++ // However in some situations we want the lval (effect address) of the item. ++ // We provide a special factory for making those lvals. ++ bool _is_lval; ++ ++ // If the target is far we'll need to load the ea of this to ++ // a register to reach it. Otherwise if near we can do rip ++ // relative addressing. ++ address _target; ++ ++ protected: ++ // creation ++ AddressLiteral() ++ : _is_lval(false), ++ _target(NULL) ++ {} ++ ++ public: ++ ++ AddressLiteral(address target, relocInfo::relocType rtype); ++ ++ AddressLiteral(address target, RelocationHolder const& rspec) ++ : _rspec(rspec), ++ _is_lval(false), ++ _target(target) ++ {} ++ ++ // 32-bit complains about a multiple declaration for int*. 
++ AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none) ++ : _target((address) addr), ++ _rspec(rspec_from_rtype(rtype, (address) addr)) {} ++ ++ AddressLiteral addr() { ++ AddressLiteral ret = *this; ++ ret._is_lval = true; ++ return ret; ++ } ++ ++ ++ private: ++ ++ address target() { return _target; } ++ bool is_lval() { return _is_lval; } ++ ++ relocInfo::relocType reloc() const { return _rspec.type(); } ++ const RelocationHolder& rspec() const { return _rspec; } ++ ++ friend class Assembler; ++ friend class MacroAssembler; ++ friend class Address; ++ friend class LIR_Assembler; ++ ++ RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) { ++ switch (rtype) { ++ case relocInfo::external_word_type: ++ return external_word_Relocation::spec(addr); ++ case relocInfo::internal_word_type: ++ return internal_word_Relocation::spec(addr); ++ case relocInfo::opt_virtual_call_type: ++ return opt_virtual_call_Relocation::spec(); ++ case relocInfo::static_call_type: ++ return static_call_Relocation::spec(); ++ case relocInfo::runtime_call_type: ++ return runtime_call_Relocation::spec(); ++ case relocInfo::poll_type: ++ case relocInfo::poll_return_type: ++ return Relocation::spec_simple(rtype); ++ case relocInfo::none: ++ case relocInfo::oop_type: ++ // Oops are a special case. Normally they would be their own section ++ // but in cases like icBuffer they are literals in the code stream that ++ // we don't have a section for. We use none so that we get a literal address ++ // which is always patchable. 
++ return RelocationHolder(); ++ default: ++ ShouldNotReachHere(); ++ return RelocationHolder(); ++ } ++ } ++ ++}; ++ ++// Convience classes ++class RuntimeAddress: public AddressLiteral { ++ ++ public: ++ ++ RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} ++ ++}; ++ ++class OopAddress: public AddressLiteral { ++ ++ public: ++ ++ OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){} ++ ++}; ++ ++class ExternalAddress: public AddressLiteral { ++ ++ public: ++ ++ ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){} ++ ++}; ++ ++class InternalAddress: public AddressLiteral { ++ ++ public: ++ ++ InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} ++ ++}; ++ ++// x86 can do array addressing as a single operation since disp can be an absolute ++// address amd64 can't. We create a class that expresses the concept but does extra ++// magic on amd64 to get the final result ++ ++class ArrayAddress VALUE_OBJ_CLASS_SPEC { ++ private: ++ ++ AddressLiteral _base; ++ Address _index; ++ ++ public: ++ ++ ArrayAddress() {}; ++ ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; ++ AddressLiteral base() { return _base; } ++ Address index() { return _index; } ++ ++}; ++ ++const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize); ++ ++// The SW Assembler: Pure assembler doing NO optimizations on the instruction ++// level ; i.e., what you write is what you get. The Assembler is generating code into ++// a CodeBuffer. 
++ ++class Assembler : public AbstractAssembler { ++ friend class AbstractAssembler; // for the non-virtual hack ++ friend class LIR_Assembler; // as_Address() ++ friend class StubGenerator; ++ ++ public: ++ enum Condition { ++ zero , ++ notZero , ++ equal , ++ notEqual , ++ less , ++ lessEqual , ++ greater , ++ greaterEqual , ++ below , ++ belowEqual , ++ above , ++ aboveEqual ++ }; ++ ++ static const int LogInstructionSize = 2; ++ static const int InstructionSize = 1 << LogInstructionSize; ++ ++ enum WhichOperand { ++ imm_operand = 0, // embedded 32-bit|64-bit immediate operand ++ disp32_operand = 1, // embedded 32-bit displacement or address ++ call32_operand = 2, // embedded 32-bit self-relative displacement ++ narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop ++ _WhichOperand_limit = 4 ++ }; ++ ++ static int opcode(int insn) { return (insn>>26)&0x3f; } ++ static int rs(int insn) { return (insn>>21)&0x1f; } ++ static int rt(int insn) { return (insn>>16)&0x1f; } ++ static int rd(int insn) { return (insn>>11)&0x1f; } ++ static int sa(int insn) { return (insn>>6)&0x1f; } ++ static int special(int insn) { return insn&0x3f; } ++ static int imm_off(int insn) { return (short)low16(insn); } ++ ++ static int low (int x, int l) { return bitfield(x, 0, l); } ++ static int low16(int x) { return low(x, 16); } ++ static int low26(int x) { return low(x, 26); } ++ ++/* SW64 new instrunction enum code */ ++ public: ++ enum ops_mem { ++ op_call = OPMEM(0x01), ++ op_ret = OPMEM(0x02), ++ op_jmp = OPMEM(0x03), ++ op_ldwe = OPMEM(0x09), op_fillcs = op_ldwe, ++ op_ldse = OPMEM(0x0A), op_e_fillcs = op_ldse, ++ op_ldde = OPMEM(0x0B), op_fillcs_e = op_ldde, ++ op_vlds = OPMEM(0x0C), op_e_fillde = op_vlds, ++ op_vldd = OPMEM(0x0D), ++ op_vsts = OPMEM(0x0E), ++ op_vstd = OPMEM(0x0F), ++ op_ldbu = OPMEM(0x20), op_flushd = op_ldbu, ++ op_ldhu = OPMEM(0x21), op_evictdg = op_ldhu, ++ op_ldw = OPMEM(0x22), op_s_fillcs = op_ldw, ++ op_ldl = OPMEM(0x23), op_s_fillde = op_ldl, ++ 
op_ldl_u = OPMEM(0x24), op_evictdl = op_ldl_u, ++ op_flds = OPMEM(0x26), op_fillde = op_flds, ++ op_fldd = OPMEM(0x27), op_fillde_e = op_fldd, ++ op_stb = OPMEM(0x28), ++ op_sth = OPMEM(0x29), ++ op_stw = OPMEM(0x2A), ++ op_stl = OPMEM(0x2B), ++ op_stl_u = OPMEM(0x2C), ++ op_fsts = OPMEM(0x2E), ++ op_fstd = OPMEM(0x2F), ++ op_ldi = OPMEM(0x3E), ++ op_ldih = OPMEM(0x3F) ++// unop = OPMEM(0x3F) | (30 << 16), ++ }; ++ ++ enum ops_atmem { ++ op_lldw = ATMEM(0x08, 0x0), ++ op_lldl = ATMEM(0x08, 0x1), ++ op_ldw_inc = ATMEM(0x08, 0x2), //SW2F ++ op_ldl_inc = ATMEM(0x08, 0x3), //SW2F ++ op_ldw_dec = ATMEM(0x08, 0x4), //SW2F ++ op_ldl_dec = ATMEM(0x08, 0x5), //SW2F ++ op_ldw_set = ATMEM(0x08, 0x6), //SW2F ++ op_ldl_set = ATMEM(0x08, 0x7), //SW2F ++ op_lstw = ATMEM(0x08, 0x8), ++ op_lstl = ATMEM(0x08, 0x9), ++ op_ldw_nc = ATMEM(0x08, 0xA), ++ op_ldl_nc = ATMEM(0x08, 0xB), ++ op_ldd_nc = ATMEM(0x08, 0xC), ++ op_stw_nc = ATMEM(0x08, 0xD), ++ op_stl_nc = ATMEM(0x08, 0xE), ++ op_std_nc = ATMEM(0x08, 0xF), ++ op_vldw_u = ATMEM(0x1C, 0x0), ++ op_vstw_u = ATMEM(0x1C, 0x1), ++ op_vlds_u = ATMEM(0x1C, 0x2), ++ op_vsts_u = ATMEM(0x1C, 0x3), ++ op_vldd_u = ATMEM(0x1C, 0x4), ++ op_vstd_u = ATMEM(0x1C, 0x5), ++ op_vstw_ul = ATMEM(0x1C, 0x8), ++ op_vstw_uh = ATMEM(0x1C, 0x9), ++ op_vsts_ul = ATMEM(0x1C, 0xA), ++ op_vsts_uh = ATMEM(0x1C, 0xB), ++ op_vstd_ul = ATMEM(0x1C, 0xC), ++ op_vstd_uh = ATMEM(0x1C, 0xD), ++ op_vldd_nc = ATMEM(0x1C, 0xE), ++ op_vstd_nc = ATMEM(0x1C, 0xF), ++ op_ldbu_a = ATMEM(0x1E, 0x0), //SW8A ++ op_ldhu_a = ATMEM(0x1E, 0x1), //SW8A ++ op_ldw_a = ATMEM(0x1E, 0x2), //SW8A ++ op_ldl_a = ATMEM(0x1E, 0x3), //SW8A ++ op_flds_a = ATMEM(0x1E, 0x4), //SW8A ++ op_fldd_a = ATMEM(0x1E, 0x5), //SW8A ++ op_stb_a = ATMEM(0x1E, 0x6), //SW8A ++ op_sth_a = ATMEM(0x1E, 0x7), //SW8A ++ op_stw_a = ATMEM(0x1E, 0x8), //SW8A ++ op_stl_a = ATMEM(0x1E, 0x9), //SW8A ++ op_fsts_a = ATMEM(0x1E, 0xA), //SW8A ++ op_fstd_a = ATMEM(0x1E, 0xB) //SW8A ++ }; ++ ++ enum ops_ev6hwmem { ++ op_pri_ld = 
EV6HWMEM(0x25, 0x0), ++ op_pri_st = EV6HWMEM(0x2D, 0x0), ++ }; ++ ++ enum ops_opr { ++ op_addw = OPR(0x10, 0x00), ++ op_subw = OPR(0x10, 0x01), ++ op_s4addw = OPR(0x10, 0x02), ++ op_s4subw = OPR(0x10, 0x03), ++ op_s8addw = OPR(0x10, 0x04), ++ op_s8subw = OPR(0x10, 0x05), ++ op_addl = OPR(0x10, 0x08), ++ op_subl = OPR(0x10, 0x09), ++ op_s4addl = OPR(0x10, 0x0A), ++ op_s4subl = OPR(0x10, 0x0B), ++ op_s8addl = OPR(0x10, 0x0C), ++ op_s8subl = OPR(0x10, 0x0D), ++ op_mulw = OPR(0x10, 0x10), ++ op_divw = OPR(0x10, 0x11), //SW8A ++ op_udivw = OPR(0x10, 0x12), //SW8A ++ op_remw = OPR(0x10, 0x13), //SW8A ++ op_uremw = OPR(0x10, 0x14), //SW8A ++ op_mull = OPR(0x10, 0x18), ++ op_umulh = OPR(0x10, 0x19), ++ op_divl = OPR(0x10, 0x1A), //SW8A ++ op_udivl = OPR(0x10, 0x1B), //SW8A ++ op_reml = OPR(0x10, 0x1C), //SW8A ++ op_ureml = OPR(0x10, 0x1D), //SW8A ++ op_addpi = OPR(0x10, 0x1E), //SW8A ++ op_addpis = OPR(0x10, 0x1F), //SW8A ++ op_crc32b = OPR(0x10, 0x20), //SW8A ++ op_crc32h = OPR(0x10, 0x21), //SW8A ++ op_crc32w = OPR(0x10, 0x22), //SW8A ++ op_crc32l = OPR(0x10, 0x23), //SW8A ++ op_crc32cb = OPR(0x10, 0x24), //SW8A ++ op_crc32ch = OPR(0x10, 0x25), //SW8A ++ op_crc32cw = OPR(0x10, 0x26), //SW8A ++ op_crc32cl = OPR(0x10, 0x27), //SW8A ++ op_cmpeq = OPR(0x10, 0x28), ++ op_cmplt = OPR(0x10, 0x29), ++ op_cmple = OPR(0x10, 0x2A), ++ op_cmpult = OPR(0x10, 0x2B), ++ op_cmpule = OPR(0x10, 0x2C), ++ op_sbt = OPR(0x10, 0x2D), //SW8A ++ op_cbt = OPR(0x10, 0x2E), //SW8A ++ op_and = OPR(0x10, 0x38), ++ op_bic = OPR(0x10, 0x39), ++ op_bis = OPR(0x10, 0x3A), ++ op_ornot = OPR(0x10, 0x3B), ++ op_xor = OPR(0x10, 0x3C), ++ op_eqv = OPR(0x10, 0x3D), ++ op_inslb = OPR(0x10, 0x40), //0x10.40~0x10.47 ++ op_inslh = OPR(0x10, 0x41), ++ op_inslw = OPR(0x10, 0x42), ++ op_insll = OPR(0x10, 0x43), ++ op_inshb = OPR(0x10, 0x44), ++ op_inshh = OPR(0x10, 0x45), ++ op_inshw = OPR(0x10, 0x46), ++ op_inshl = OPR(0x10, 0x47), ++ op_slll = OPR(0x10, 0x48), ++ op_srll = OPR(0x10, 0x49), ++ op_sral = OPR(0x10, 
0x4A), ++ op_roll = OPR(0x10, 0x4B), //SW8A ++ op_sllw = OPR(0x10, 0x4C), //SW8A ++ op_srlw = OPR(0x10, 0x4D), //SW8A ++ op_sraw = OPR(0x10, 0x4E), //SW8A ++ op_rolw = OPR(0x10, 0x4F), //SW8A ++ op_extlb = OPR(0x10, 0x50), //0x10.50~0x10.57 ++ op_extlh = OPR(0x10, 0x51), ++ op_extlw = OPR(0x10, 0x52), ++ op_extll = OPR(0x10, 0x53), ++ op_exthb = OPR(0x10, 0x54), ++ op_exthh = OPR(0x10, 0x55), ++ op_exthw = OPR(0x10, 0x56), ++ op_exthl = OPR(0x10, 0x57), ++ op_ctpop = OPR(0x10, 0x58), ++ op_ctlz = OPR(0x10, 0x59), ++ op_cttz = OPR(0x10, 0x5A), ++ op_revbh = OPR(0x10, 0x5B), //SW8A ++ op_revbw = OPR(0x10, 0x5C), //SW8A ++ op_revbl = OPR(0x10, 0x5D), //SW8A ++ op_casw = OPR(0x10, 0x5E), //SW8A ++ op_casl = OPR(0x10, 0x5F), //SW8A ++ op_masklb = OPR(0x10, 0x60), //0x10.60~0x10.67 ++ op_masklh = OPR(0x10, 0x61), ++ op_masklw = OPR(0x10, 0x62), ++ op_maskll = OPR(0x10, 0x63), ++ op_maskhb = OPR(0x10, 0x64), ++ op_maskhh = OPR(0x10, 0x65), ++ op_maskhw = OPR(0x10, 0x66), ++ op_maskhl = OPR(0x10, 0x67), ++ op_zap = OPR(0x10, 0x68), ++ op_zapnot = OPR(0x10, 0x69), ++ op_sextb = OPR(0x10, 0x6A), ++ op_sexth = OPR(0x10, 0x6B), ++ op_cmpgeb = OPR(0x10, 0x6C), //0x10.6C ++ op_fimovs = OPR(0x10, 0x70), ++ op_fimovd = OPR(0x10, 0x78), ++ op_cmovdl = OFP(0x10, 0x72), ++ op_cmovdl_g = OFP(0x10, 0x74), ++ op_cmovdl_p = OFP(0x10, 0x7A), ++ op_cmovdl_z = OFP(0x10, 0x7C), ++ op_cmovdl_n = OFP(0x10, 0x80), ++ op_cmovdlu = OFP(0x10, 0x81), ++ op_cmovdlu_g= OFP(0x10, 0x82), ++ op_cmovdlu_p= OFP(0x10, 0x83), ++ op_cmovdlu_z= OFP(0x10, 0x84), ++ op_cmovdlu_n= OFP(0x10, 0x85), ++ op_cmovdw = OFP(0x10, 0x8B), ++ op_cmovdw_g = OFP(0x10, 0x8C), ++ op_cmovdw_p = OFP(0x10, 0x8D), ++ op_cmovdw_z = OFP(0x10, 0x8E), ++ op_cmovdw_n = OFP(0x10, 0x8F), ++ op_cmovdwu = OFP(0x10, 0x86), ++ op_cmovdwu_g= OFP(0x10, 0x87), ++ op_cmovdwu_p= OFP(0x10, 0x88), ++ op_cmovdwu_z= OFP(0x10, 0x89), ++ op_cmovdwu_n= OFP(0x10, 0x8A), ++ op_seleq = TOPR(0x11, 0x0), ++ op_selge = TOPR(0x11, 0x1), ++ op_selgt = 
TOPR(0x11, 0x2), ++ op_selle = TOPR(0x11, 0x3), ++ op_sellt = TOPR(0x11, 0x4), ++ op_selne = TOPR(0x11, 0x5), ++ op_sellbc = TOPR(0x11, 0x6), ++ op_sellbs = TOPR(0x11, 0x7) ++ }; ++ ++ enum ops_oprl{ ++ op_addw_l = OPRL(0x12, 0x00), ++ op_subw_l = OPRL(0x12, 0x01), ++ op_s4addw_l = OPRL(0x12, 0x02), ++ op_s4subw_l = OPRL(0x12, 0x03), ++ op_s8addw_l = OPRL(0x12, 0x04), ++ op_s8subw_l = OPRL(0x12, 0x05), ++ op_addl_l = OPRL(0x12, 0x08), ++ op_subl_l = OPRL(0x12, 0x09), ++ op_s4addl_l = OPRL(0x12, 0x0A), ++ op_s4subl_l = OPRL(0x12, 0x0B), ++ op_s8addl_l = OPRL(0x12, 0x0C), ++ op_s8subl_l = OPRL(0x12, 0x0D), ++ op_mulw_l = OPRL(0x12, 0x10), ++ op_mull_l = OPRL(0x12, 0x18), ++ op_umulh_l = OPRL(0x12, 0x19), ++ op_cmpeq_l = OPRL(0x12, 0x28), ++ op_cmplt_l = OPRL(0x12, 0x29), ++ op_cmple_l = OPRL(0x12, 0x2A), ++ op_cmpult_l = OPRL(0x12, 0x2B), ++ op_cmpule_l = OPRL(0x12, 0x2C), ++ op_sbt_l = OPRL(0x12, 0x2D), //SW8A ++ op_cbt_l = OPRL(0x12, 0x2E), //SW8A ++ op_and_l = OPRL(0x12, 0x38), ++ op_bic_l = OPRL(0x12, 0x39), ++ op_bis_l = OPRL(0x12, 0x3A), ++ op_ornot_l = OPRL(0x12, 0x3B), ++ op_xor_l = OPRL(0x12, 0x3C), ++ op_eqv_l = OPRL(0x12, 0x3D), ++ op_inslb_l = OPRL(0x12, 0x40), //0x12.40~0x12.47 ++ op_inslh_l = OPRL(0x12, 0x41), ++ op_inslw_l = OPRL(0x12, 0x42), ++ op_insll_l = OPRL(0x12, 0x43), ++ op_inshb_l = OPRL(0x12, 0x44), ++ op_inshh_l = OPRL(0x12, 0x45), ++ op_inshw_l = OPRL(0x12, 0x46), ++ op_inshl_l = OPRL(0x12, 0x47), ++ op_slll_l = OPRL(0x12, 0x48), ++ op_srll_l = OPRL(0x12, 0x49), ++ op_sral_l = OPRL(0x12, 0x4A), ++ op_roll_l = OPRL(0x12, 0x4B), //SW8A ++ op_sllw_l = OPRL(0x12, 0x4C), //SW8A ++ op_srlw_l = OPRL(0x12, 0x4D), //SW8A ++ op_sraw_l = OPRL(0x12, 0x4E), //SW8A ++ op_rolw_l = OPRL(0x12, 0x4F), //SW8A ++ op_extlb_l = OPRL(0x12, 0x50), //0x12.50~0x12.57 ++ op_extlh_l = OPRL(0x12, 0x51), ++ op_extlw_l = OPRL(0x12, 0x52), ++ op_extll_l = OPRL(0x12, 0x53), ++ op_exthb_l = OPRL(0x12, 0x54), ++ op_exthh_l = OPRL(0x12, 0x55), ++ op_exthw_l = OPRL(0x12, 
0x56), ++ op_exthl_l = OPRL(0x12, 0x57), ++ op_masklb_l = OPRL(0x12, 0x60), //0x12.60~0x12.67 ++ op_masklh_l = OPRL(0x12, 0x61), ++ op_masklw_l = OPRL(0x12, 0x62), ++ op_maskll_l = OPRL(0x12, 0x63), ++ op_maskhb_l = OPRL(0x12, 0x64), ++ op_maskhh_l = OPRL(0x12, 0x65), ++ op_maskhw_l = OPRL(0x12, 0x66), ++ op_maskhl_l = OPRL(0x12, 0x67), ++ op_zap_l = OPRL(0x12, 0x68), ++ op_zapnot_l = OPRL(0x12, 0x69), ++ op_sextb_l = OPRL(0x12, 0x6A), ++ op_sexth_l = OPRL(0x12, 0x6B), ++ op_cmpgeb_l = OPRL(0x12, 0x6C), //0x12.6C ++ op_seleq_l = TOPRL(0x13, 0x0), ++ op_selge_l = TOPRL(0x13, 0x1), ++ op_selgt_l = TOPRL(0x13, 0x2), ++ op_selle_l = TOPRL(0x13, 0x3), ++ op_sellt_l = TOPRL(0x13, 0x4), ++ op_selne_l = TOPRL(0x13, 0x5), ++ op_sellbc_l = TOPRL(0x13, 0x6), ++ op_sellbs_l = TOPRL(0x13, 0x7) ++ }; ++ ++ enum ops_bra { ++ op_br = BRA(0x04), ++ op_bsr = BRA(0x05), ++ op_beq = BRA(0x30), ++ op_bne = BRA(0x31), ++ op_blt = BRA(0x32), ++ op_ble = BRA(0x33), ++ op_bgt = BRA(0x34), ++ op_bge = BRA(0x35), ++ op_blbc = BRA(0x36), ++ op_blbs = BRA(0x37), ++ op_fbeq = BRA(0x38), ++ op_fbne = BRA(0x39), ++ op_fblt = BRA(0x3A), ++ op_fble = BRA(0x3B), ++ op_fbgt = BRA(0x3C), ++ op_fbge = BRA(0x3D) ++ }; ++ ++ enum ops_fp { ++ op_fadds = OFP(0x18, 0x00), ++ op_faddd = OFP(0x18, 0x01), ++ op_fsubs = OFP(0x18, 0x02), ++ op_fsubd = OFP(0x18, 0x03), ++ op_fmuls = OFP(0x18, 0x04), ++ op_fmuld = OFP(0x18, 0x05), ++ op_fdivs = OFP(0x18, 0x06), ++ op_fdivd = OFP(0x18, 0x07), ++ op_fsqrts = OFP(0x18, 0x08), ++ op_fsqrtd = OFP(0x18, 0x09), ++ op_fcmpeq = OFP(0x18, 0x10), ++ op_fcmple = OFP(0x18, 0x11), ++ op_fcmplt = OFP(0x18, 0x12), ++ op_fcmpun = OFP(0x18, 0x13), ++ op_fcvtsd = OFP(0x18, 0x20), ++ op_fcvtds = OFP(0x18, 0x21), ++ op_fcvtdl_g = OFP(0x18, 0x22), ++ op_fcvtdl_p = OFP(0x18, 0x23), ++ op_fcvtdl_z = OFP(0x18, 0x24), ++ op_fcvtdl_n = OFP(0x18, 0x25), ++ op_fcvtdl = OFP(0x18, 0x27), ++ op_fcvtwl = OFP(0x18, 0x28), ++ op_fcvtlw = OFP(0x18, 0x29), ++ op_fcvtls = OFP(0x18, 0x2D), ++ op_fcvtld 
= OFP(0x18, 0x2F), ++ op_fcpys = OFP(0x18, 0x30), ++ op_fcpyse = OFP(0x18, 0x31), ++ op_fcpysn = OFP(0x18, 0x32), ++ op_ifmovs = OFP(0x18, 0x40), ++ op_ifmovd = OFP(0x18, 0x41), ++ op_cmovls = OFP(0x18, 0x48), ++ op_cmovld = OFP(0x18, 0x4A), ++ op_cmovuls = OFP(0x18, 0x4C), ++ op_cmovuld = OFP(0x18, 0x4E), ++ op_cmovws = OFP(0x18, 0x49), ++ op_cmovwd = OFP(0x18, 0x4B), ++ op_cmovuws = OFP(0x18, 0x4D), ++ op_cmovuwd = OFP(0x18, 0x4F), ++ op_rfpcr = OFP(0x18, 0x50), ++ op_wfpcr = OFP(0x18, 0x51), ++ op_setfpec0 = OFP(0x18, 0x54), ++ op_setfpec1 = OFP(0x18, 0x55), ++ op_setfpec2 = OFP(0x18, 0x56), ++ op_setfpec3 = OFP(0x18, 0x57), ++ op_frecs = OFP(0x18, 0x58), //SW8A ++ op_frecd = OFP(0x18, 0x59), //SW8A ++ op_fris = OFP(0x18, 0x5A), //SW8A ++ op_fris_g = OFP(0x18, 0x5B), //SW8A ++ op_fris_p = OFP(0x18, 0x5C), //SW8A ++ op_fris_z = OFP(0x18, 0x5D), //SW8A ++ op_fris_n = OFP(0x18, 0x5F), //SW8A ++ op_frid = OFP(0x18, 0x60), //SW8A ++ op_frid_g = OFP(0x18, 0x61), //SW8A ++ op_frid_p = OFP(0x18, 0x62), //SW8A ++ op_frid_z = OFP(0x18, 0x63), //SW8A ++ op_frid_n = OFP(0x18, 0x64), //SW8A ++ op_vaddw = OFP(0x1A, 0x00), ++ op_vsubw = OFP(0x1A, 0x01), ++ op_vcmpgew = OFP(0x1A, 0x02), ++ op_vcmpeqw = OFP(0x1A, 0x03), ++ op_vcmplew = OFP(0x1A, 0x04), ++ op_vcmpltw = OFP(0x1A, 0x05), ++ op_vcmpulew = OFP(0x1A, 0x06), ++ op_vcmpultw = OFP(0x1A, 0x07), ++ op_vsllw = OFP(0x1A, 0x08), ++ op_vsrlw = OFP(0x1A, 0x09), ++ op_vsraw = OFP(0x1A, 0x0A), ++ op_vrolw = OFP(0x1A, 0x0B), ++ op_sllow = OFP(0x1A, 0x0C), ++ op_srlow = OFP(0x1A, 0x0D), ++ op_vaddl = OFP(0x1A, 0x0E), ++ op_vsubl = OFP(0x1A, 0x0F), ++ op_vsllb = OFP(0x1A, 0x10), //SW8A ++ op_vsrlb = OFP(0x1A, 0x11), //SW8A ++ op_vsrab = OFP(0x1A, 0x12), //SW8A ++ op_vrolb = OFP(0x1A, 0x13), //SW8A ++ op_vsllh = OFP(0x1A, 0x14), //SW8A ++ op_vsrlh = OFP(0x1A, 0x15), //SW8A ++ op_vsrah = OFP(0x1A, 0x16), //SW8A ++ op_vrolh = OFP(0x1A, 0x17), //SW8A ++ op_ctpopow = OFP(0x1A, 0x18), ++ op_ctlzow = OFP(0x1A, 0x19), ++ op_vslll = 
OFP(0x1A, 0x1A), //SW8A ++ op_vsrll = OFP(0x1A, 0x1B), //SW8A ++ op_vsral = OFP(0x1A, 0x1C), //SW8A ++ op_vroll = OFP(0x1A, 0x1D), //SW8A ++ op_vmaxb = OFP(0x1A, 0x1E), //SW8A ++ op_vminb = OFP(0x1A, 0x1F), //SW8A ++ op_vucaddw = OFP(0x1A, 0x40), ++ op_vucsubw = OFP(0x1A, 0x41), ++ op_vucaddh = OFP(0x1A, 0x42), ++ op_vucsubh = OFP(0x1A, 0x43), ++ op_vucaddb = OFP(0x1A, 0x44), ++ op_vucsubb = OFP(0x1A, 0x45), ++ op_sraow = OFP(0x1A, 0x46), //SW8A ++ op_vsumw = OFP(0x1A, 0x47), //SW8A ++ op_vsuml = OFP(0x1A, 0x48), //SW8A ++ op_vsm4r = OFP(0x1A, 0x49), //SW8A, ENCRYPT ++ op_vbinvw = OFP(0x1A, 0x4A), //SW8A, ENCRYPT ++ op_vcmpueqb = OFP(0x1A, 0x4B), //SW8A ++ op_vcmpugtb = OFP(0x1A, 0x4C), //SW8A ++ op_vsm3msw = OFP(0x1A, 0x4D), //SW8A, ENCRYPT ++ op_vmaxh = OFP(0x1A, 0x50), //SW8A ++ op_vminh = OFP(0x1A, 0x51), //SW8A ++ op_vmaxw = OFP(0x1A, 0x52), //SW8A ++ op_vminw = OFP(0x1A, 0x53), //SW8A ++ op_vmaxl = OFP(0x1A, 0x54), //SW8A ++ op_vminl = OFP(0x1A, 0x55), //SW8A ++ op_vumaxb = OFP(0x1A, 0x56), //SW8A ++ op_vuminb = OFP(0x1A, 0x57), //SW8A ++ op_vumaxh = OFP(0x1A, 0x58), //SW8A ++ op_vuminh = OFP(0x1A, 0x59), //SW8A ++ op_vumaxw = OFP(0x1A, 0x5A), //SW8A ++ op_vuminw = OFP(0x1A, 0x5B), //SW8A ++ op_vumaxl = OFP(0x1A, 0x5C), //SW8A ++ op_vuminl = OFP(0x1A, 0x5D), //SW8A ++ op_vadds = OFP(0x1A, 0x80), ++ op_vaddd = OFP(0x1A, 0x81), ++ op_vsubs = OFP(0x1A, 0x82), ++ op_vsubd = OFP(0x1A, 0x83), ++ op_vmuls = OFP(0x1A, 0x84), ++ op_vmuld = OFP(0x1A, 0x85), ++ op_vdivs = OFP(0x1A, 0x86), ++ op_vdivd = OFP(0x1A, 0x87), ++ op_vsqrts = OFP(0x1A, 0x88), ++ op_vsqrtd = OFP(0x1A, 0x89), ++ op_vfcmpeq = OFP(0x1A, 0x8C), ++ op_vfcmple = OFP(0x1A, 0x8D), ++ op_vfcmplt = OFP(0x1A, 0x8E), ++ op_vfcmpun = OFP(0x1A, 0x8F), ++ op_vcpys = OFP(0x1A, 0x90), ++ op_vcpyse = OFP(0x1A, 0x91), ++ op_vcpysn = OFP(0x1A, 0x92), ++ op_vsums = OFP(0x1A, 0x93), //SW8A ++ op_vsumd = OFP(0x1A, 0x94), //SW8A ++ op_vfcvtsd = OFP(0x1A, 0x95), //SW8A ++ op_vfcvtds = OFP(0x1A, 0x96), //SW8A ++ 
op_vfcvtls = OFP(0x1A, 0x99), //SW8A ++ op_vfcvtld = OFP(0x1A, 0x9A), //SW8A ++ op_vfcvtdl = OFP(0x1A, 0x9B), //SW8A ++ op_vfcvtdl_g = OFP(0x1A, 0x9C), //SW8A ++ op_vfcvtdl_p = OFP(0x1A, 0x9D), //SW8A ++ op_vfcvtdl_z = OFP(0x1A, 0x9E), //SW8A ++ op_vfcvtdl_n = OFP(0x1A, 0x9F), //SW8A ++ op_vfris = OFP(0x1A, 0xA0), //SW8A ++ op_vfris_g = OFP(0x1A, 0xA1), //SW8A ++ op_vfris_p = OFP(0x1A, 0xA2), //SW8A ++ op_vfris_z = OFP(0x1A, 0xA3), //SW8A ++ op_vfris_n = OFP(0x1A, 0xA4), //SW8A ++ op_vfrid = OFP(0x1A, 0xA5), //SW8A ++ op_vfrid_g = OFP(0x1A, 0xA6), //SW8A ++ op_vfrid_p = OFP(0x1A, 0xA7), //SW8A ++ op_vfrid_z = OFP(0x1A, 0xA8), //SW8A ++ op_vfrid_n = OFP(0x1A, 0xA9), //SW8A ++ op_vfrecs = OFP(0x1A, 0xAA), //SW8A ++ op_vfrecd = OFP(0x1A, 0xAB), //SW8A ++ op_vmaxs = OFP(0x1A, 0xAC), //SW8A ++ op_vmins = OFP(0x1A, 0xAD), //SW8A ++ op_vmaxd = OFP(0x1A, 0xAE), //SW8A ++ op_vmind = OFP(0x1A, 0xAF), //SW8A ++ }; ++ ++ enum ops_fpl { ++ op_vaddw_l = OFP(0x1A, 0x20), ++ op_vsubw_l = OFP(0x1A, 0x21), ++ op_vcmpgew_l = OFP(0x1A, 0x22), ++ op_vcmpeqw_l = OFP(0x1A, 0x23), ++ op_vcmplew_l = OFP(0x1A, 0x24), ++ op_vcmpltw_l = OFP(0x1A, 0x25), ++ op_vcmpulew_l = OFP(0x1A, 0x26), ++ op_vcmpultw_l = OFP(0x1A, 0x27), ++ op_vsllw_l = OFP(0x1A, 0x28), ++ op_vsrlw_l = OFP(0x1A, 0x29), ++ op_vsraw_l = OFP(0x1A, 0x2A), ++ op_vrolw_l = OFP(0x1A, 0x2B), ++ op_sllow_l = OFP(0x1A, 0x2C), ++ op_srlow_l = OFP(0x1A, 0x2D), ++ op_vaddl_l = OFP(0x1A, 0x2E), ++ op_vsubl_l = OFP(0x1A, 0x2F), ++ op_vsllb_l = OFP(0x1A, 0x30), //SW8A ++ op_vsrlb_l = OFP(0x1A, 0x31), //SW8A ++ op_vsrab_l = OFP(0x1A, 0x32), //SW8A ++ op_vrolb_l = OFP(0x1A, 0x33), //SW8A ++ op_vsllh_l = OFP(0x1A, 0x34), //SW8A ++ op_vsrlh_l = OFP(0x1A, 0x35), //SW8A ++ op_vsrah_l = OFP(0x1A, 0x36), //SW8A ++ op_vrolh_l = OFP(0x1A, 0x37), //SW8A ++ op_vslll_l = OFP(0x1A, 0x3A), //SW8A ++ op_vsrll_l = OFP(0x1A, 0x3B), //SW8A ++ op_vsral_l = OFP(0x1A, 0x3C), //SW8A ++ op_vroll_l = OFP(0x1A, 0x3D), //SW8A ++ op_vucaddw_l = OFP(0x1A, 0x60), ++ 
op_vucsubw_l = OFP(0x1A, 0x61), ++ op_vucaddh_l = OFP(0x1A, 0x62), ++ op_vucsubh_l = OFP(0x1A, 0x63), ++ op_vucaddb_l = OFP(0x1A, 0x64), ++ op_vucsubb_l = OFP(0x1A, 0x65), ++ op_sraow_l = OFP(0x1A, 0x66), //SW8A ++ op_vsm4key_l = OFP(0x1A, 0x68), //SW8A, ENCRYPT ++ op_vcmpueqb_l = OFP(0x1A, 0x6B), //SW8A ++ op_vcmpugtb_l = OFP(0x1A, 0x6C), //SW8A ++ op_vfcvtsh_l = OFP(0x1B, 0x35), //SW8A ++ op_vfcvths_l = OFP(0x1B, 0x36) //SW8A ++ }; ++ ++ enum ops_fma { ++ op_fmas = FMA(0x19, 0x00), ++ op_fmad = FMA(0x19, 0x01), ++ op_fmss = FMA(0x19, 0x02), ++ op_fmsd = FMA(0x19, 0x03), ++ op_fnmas = FMA(0x19, 0x04), ++ op_fnmad = FMA(0x19, 0x05), ++ op_fnmss = FMA(0x19, 0x06), ++ op_fnmsd = FMA(0x19, 0x07), ++ op_fseleq = FMA(0x19, 0x10), ++ op_fselne = FMA(0x19, 0x11), ++ op_fsellt = FMA(0x19, 0x12), ++ op_fselle = FMA(0x19, 0x13), ++ op_fselgt = FMA(0x19, 0x14), ++ op_fselge = FMA(0x19, 0x15), ++ op_vmas = FMA(0x1B, 0x00), ++ op_vmad = FMA(0x1B, 0x01), ++ op_vmss = FMA(0x1B, 0x02), ++ op_vmsd = FMA(0x1B, 0x03), ++ op_vnmas = FMA(0x1B, 0x04), ++ op_vnmad = FMA(0x1B, 0x05), ++ op_vnmss = FMA(0x1B, 0x06), ++ op_vnmsd = FMA(0x1B, 0x07), ++ op_vfseleq = FMA(0x1B, 0x10), ++ op_vfsellt = FMA(0x1B, 0x12), ++ op_vfselle = FMA(0x1B, 0x13), ++ op_vseleqw = FMA(0x1B, 0x18), ++ op_vsellbcw = FMA(0x1B, 0x19), ++ op_vselltw = FMA(0x1B, 0x1A), ++ op_vsellew = FMA(0x1B, 0x1B), ++ op_vcpyw = FMA(0x1B, 0x24), ++ op_vcpyf = FMA(0x1B, 0x25), ++ op_vconw = FMA(0x1B, 0x26), ++ op_vshfw = FMA(0x1B, 0x27), ++ op_vcons = FMA(0x1B, 0x28), ++ op_vcond = FMA(0x1B, 0x29), ++ op_vinsectlh = FMA(0x1B, 0x2C), //SW8A ++ op_vinsectlw = FMA(0x1B, 0x2D), //SW8A ++ op_vinsectll = FMA(0x1B, 0x2E), //SW8A ++ op_vinsectlb = FMA(0x1B, 0x2F), //SW8A ++ op_vshfqb = FMA(0x1B, 0x31), //SW8A ++ op_vcpyb = FMA(0x1B, 0x32), //SW8A ++ op_vcpyh = FMA(0x1B, 0x33) //SW8A ++ }; ++ ++ enum ops_fmal { ++ op_vinsw_l = FMA(0x1B, 0x20), ++ op_vinsf_l = FMA(0x1B, 0x21), ++ op_vextw_l = FMA(0x1B, 0x22), ++ op_vextf_l = FMA(0x1B, 0x23), 
++ op_vinsb_l = FMA(0x1B, 0x2A), //SW8A ++ op_vinsh_l = FMA(0x1B, 0x2B), //SW8A ++ op_vshfq_l = FMA(0x1B, 0x30), //SW8A ++ op_vsm3r_l = FMA(0x1B, 0x34), //SW8A, ENCRYPT ++ op_vseleqw_l = FMA(0x1B, 0x38), ++ op_vsellbcw_l = FMA(0x1B, 0x39), ++ op_vselltw_l = FMA(0x1B, 0x3A), ++ op_vsellew_l = FMA(0x1B, 0x3B) ++ }; ++ ++ enum ops_extra { ++ op_sys_call = PCD(0x00), ++ op_memb = MFC(0x06, 0x0000), ++ op_imemb = MFC(0x06, 0x0001), //SW8A ++ op_wmemb = MFC(0x06, 0x0002), //SW8A ++ op_rtc = MFC(0x06, 0x0020), ++ op_rcid = MFC(0x06, 0x0040), ++ op_halt = MFC(0x06, 0x0080), ++ op_rd_f = MFC(0x06, 0x1000), //SW2F ++ op_wr_f = MFC(0x06, 0x1020), //SW2F ++ op_rtid = MFC(0x06, 0x1040), ++ op_csrws = CSR(0x06, 0xFC), //SW8A ++ op_csrwc = CSR(0x06, 0xFD), //SW8A ++ op_csrr = CSR(0x06, 0xFE), ++ op_csrw = CSR(0x06, 0xFF), ++ op_pri_ret = PRIRET(0x07, 0x0), ++ op_vlog = LOGX(0x14, 0x00), ++ op_vbisw = PSE_LOGX(0x14, 0x30), ++ op_vxorw = PSE_LOGX(0x14, 0x3c), ++ op_vandw = PSE_LOGX(0x14, 0xc0), ++ op_veqvw = PSE_LOGX(0x14, 0xc3), ++ op_vornotw = PSE_LOGX(0x14, 0xf3), ++ op_vbicw = PSE_LOGX(0x14, 0xfc), ++ op_lbr = PCD(0x1D), //SW8A ++ op_dpfhr = ATMEM(0x1E, 0xE), //SW8A ++ op_dpfhw = ATMEM(0x1E, 0xF), //SW8A ++ }; ++ ++ static int sw2_op(int inst) {return (int)(inst & OP(-1)); } ++ static int sw2_arith_op(int inst) {return (int)(inst & OPR(-1, -1)); } ++ static int sw2_bra_op(int inst) {return (int)(inst & BRA(-1)); } ++ static int sw2_mfc_op(int inst) {return (int)(inst & MFC(-1, -1)); } ++ ++ static Register sw2_ra( int x ) { return as_Register(inv_u_field(x, 25, 21)); } ++ static Register sw2_rb( int x ) { return as_Register(inv_u_field(x, 20, 16)); } ++ static Register sw2_rc( int x ) { return as_Register(inv_u_field(x, 4, 0)); } ++ static int sw2_lit( int x ) { return inv_u_field(x, 20, 13); } ++ static int sw2_bdisp( int x ) { return inv_wdisp(x, 0, 21); } ++ static int sw2_mdisp( int x ) { return inv_simm16(x); } ++ ++ protected: ++ ++ static int insn_ORRI(int op, int rs, 
int rt, int imm) { assert(is_simm16(imm), "not a signed 16-bit int"); return (op<<26) | (rs<<21) | (rt<<16) | low16(imm); } ++ ++ static int insn_RRRO(int rs, int rt, int rd, int op) { return (rs<<21) | (rt<<16) | (rd<<11) | op; } ++ static int insn_RRSO(int rt, int rd, int sa, int op) { return (rt<<16) | (rd<<11) | (sa<<6) | op; } ++ static int insn_RRCO(int rs, int rt, int code, int op) { return (rs<<21) | (rt<<16) | (code<<6) | op; } ++ ++ //get the offset field of jump/branch instruction ++ int offset(address entry) { ++ assert(is_simm21((entry - pc() - 4) / 4), "change this code"); ++ if (!is_simm21((entry - pc() - 4) / 4)) { ++ tty->print_cr("!!! is_simm21: %x", (entry - pc() - 4) / 4); ++ } ++ return (entry - pc() - 4) / 4; ++ } ++ ++ // x is supposed to fit in a field "nbits" wide ++ // and be sign-extended. Check the range. ++ ++ static void assert_signed_range(intptr_t x, int nbits) { ++ assert(nbits == 32 || (-(1 << nbits-1) <= x && x < ( 1 << nbits-1)), ++ err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits)); ++ } ++ ++ static void assert_unsigned_range(intptr_t x, int nbits) { ++ assert( nbits == 32 ++ || 0 <= x && x < ( 1 << nbits), ++ err_msg("value out of range: x=" INTPTR_FORMAT ", nbits=%d", x, nbits)); ++ } ++ ++ static void assert_signed_word_disp_range(intptr_t x, int nbits) { ++ assert( (x & 3) == 0, "not word aligned"); ++ assert_signed_range(x, nbits + 2); ++ } ++ ++ static void assert_unsigned_const(int x, int nbits) { ++ assert( juint(x) < juint(1 << nbits), "unsigned constant out of range"); ++ } ++ ++ // fields: note bits numbered from LSB = 0, ++ // fields known by inclusive bit range ++ ++ static int fmask(juint hi_bit, juint lo_bit) { ++ assert( hi_bit >= lo_bit && 0 <= lo_bit && hi_bit < 32, "bad bits"); ++ return (1 << ( hi_bit-lo_bit + 1 )) - 1; ++ } ++ ++ // inverse of u_field ++ ++ static int inv_u_field(int x, int hi_bit, int lo_bit) { ++ juint r = juint(x) >> lo_bit; ++ r &= fmask( hi_bit, lo_bit); ++ return 
int(r); ++ } ++ ++#ifdef ASSERT ++ static int u_field(int x, int hi_bit, int lo_bit) { ++ assert( ( x & ~fmask(hi_bit, lo_bit)) == 0, ++ "value out of range"); ++ int r = x << lo_bit; ++ assert( inv_u_field(r, hi_bit, lo_bit) == x, "just checking"); ++ return r; ++ } ++#else ++ // make sure this is inlined as it will reduce code size significantly ++ #define u_field(x, hi_bit, lo_bit) ((x) << (lo_bit)) ++#endif ++ ++ static intptr_t inv_wdisp( int x, intptr_t pos, int nbits ) { ++ int pre_sign_extend = x & (( 1 << nbits ) - 1); ++ int r = pre_sign_extend >= ( 1 << (nbits-1) ) ++ ? pre_sign_extend | ~(( 1 << nbits ) - 1) ++ : pre_sign_extend; ++ return (r << 2) + pos; ++ } ++ ++ static int wdisp( intptr_t x, intptr_t off, int nbits ) { ++ intptr_t xx = x - off; ++ assert_signed_word_disp_range(xx, nbits); ++ int r = (xx >> 2) & (( 1 << nbits ) - 1); ++ assert( inv_wdisp( r, off, nbits ) == x, "inverse not inverse"); ++ return r; ++ } ++ ++ // compute inverse of simm ++ static int inv_simm(int x, int nbits) { ++ return (int)(x << (32 - nbits)) >> (32 - nbits); ++ } ++ ++ static int inv_simm16( int x ) { return inv_simm(x, 16); } //ZHJ20110307 modified ++ ++ // signed immediate, in low bits, nbits long ++ static int simm(int x, int nbits) { ++ assert_signed_range(x, nbits); ++ return x & (( 1 << nbits ) - 1); ++ } ++ ++ // unsigned immediate, in low bits, nbits long //ZHJ20110307 added. 
++ static int uimm(int x, int nbits) { ++ assert_unsigned_range(x, nbits); ++ return x & (( 1 << nbits ) - 1); ++ } ++ ++ // instruction only in sw2, including sw2f, sw4a, sw6a ++ static void sw2_only() { assert( VM_Version::sw2only(), "This instruction only works on sw2f, sw4a or sw6a"); } ++ // instruction only in sw3, including sw6b ++ static void sw3_only() { assert( VM_Version::sw3only(), "This instruction only works on sw6b"); } ++ // instruction only in sw3, including sw8A ++ static void sw4_only() { assert( VM_Version::is_sw8a(), "This instruction only works on sw8a"); } ++public: ++ using AbstractAssembler::offset; ++ ++ //sign expand with the sign bit is h ++ static int expand(int x, int h) { return -(x & (1<encoding(), 25, 21 ); }; ++ static int is_rb (Register rb) { return u_field ( rb->encoding(), 20, 16 ); }; ++ static int is_rc (Register rc) { return u_field ( rc->encoding(), 4, 0 ); }; ++ /* for the third operand of ternary operands integer insn. */ ++ static int is_r3 (Register r3) { return u_field ( r3->encoding(), 9, 5 ); }; ++ /* th th fields for dpfhr and dpfhw instructions */ ++ static int is_th (int th) { return u_field ( th, 25, 21 ); }; ++ ++ /* the plain fp register fields. */ ++ static int is_fa (FloatRegister fa) { return u_field ( fa->encoding() ,25, 21 ); }; ++ static int is_fb (FloatRegister fb) { return u_field ( fb->encoding() ,20, 16 ); }; ++ static int is_fc (FloatRegister fc) { return u_field ( fc->encoding(), 4, 0 ); }; ++ /* the plain fp register fields */ ++ static int is_f3 (FloatRegister f3) { return u_field ( f3->encoding(), 9, 5 ); }; ++ ++ /* the integer registers when they are zero. */ ++ static int is_za (void ) { return u_field ( 31 ,25, 21 ); }; ++ static int is_zb (void ) { return u_field ( 31, 20, 16 ); }; ++ static int is_zc (void ) { return u_field ( 31, 4, 0 ); }; ++ ++ /* the rb field when it needs parentheses. 
*/ ++ static int is_prb (Register prb) { return u_field ( prb->encoding(), 20, 16 ); }; ++ ++ /* the rb field when it needs parentheses _and_ a preceding comma. */ ++ static int is_cprb (Register cprb) { return u_field ( cprb->encoding(),20, 16); }; ++ ++ /* the unsigned 8-bit literal of operate format insns. */ ++ static int is_lit (int lit) { return u_field ( lit ,20, 13 ); }; ++ ++ /* the unsigned 13-bit literal of operate format insns. */ ++ static int is_apint (int apint) { return u_field ( apint ,25, 13 ); }; ++ ++ /* the signed 16-bit displacement of memory format insns. from here ++ we can't tell what relocation should be used, so don't use a default. */ ++ static int is_mdisp (int mdisp) { return simm ( mdisp ,16 ); }; ++ ++ /* the signed "23-bit" aligned displacement of branch format insns. */ ++ static int is_bdisp (int bdisp) { return simm ( bdisp ,21 ); }; ++ ++ /* the 26-bit palcode function */ ++ static int is_palfn (int palfn) { return simm ( palfn, 26 ); }; ++ /* the optional signed "16-bit" aligned displacement of the jmp/jsr hint */ ++ static int is_jmphint (int jmphint) { return simm ( jmphint, 16); }; ++ ++ /* the optional hint to ret/jsr_coroutine */ ++ static int is_rethint (int rethint) { return simm ( rethint, 16); }; ++ /* the 12-bit displacement for the ev[46] hw_{ return u_field (ld,st} (pal1b/pal1f) insns. */ ++ static int is_ev6hwdisp (int ev6hwdisp) { return simm ( ev6hwdisp, 12 ); }; ++ ++ /* the 16-bit combined index/scoreboard mask for the ev6 ++ hw_m[ft]pr (pal19/pal1d) insns. */ ++ static int is_ev6hwindex (int ev6hwindex) { return simm ( ev6hwindex,16 ); }; ++ ++ /* the 13-bit branch hint for the ev6 hw_jmp/jsr (pal1e) insn. 
*/ ++ static int is_ev6hwjmphint (int ev6wjmphint) { return simm ( ev6wjmphint,8); }; ++ /* sw2 simd settle instruction lit */ ++ static int is_fmalit (int fmalit) { return u_field ( fmalit ,9 ,5 ); };//v1.1 ++ /*for pal to check disp which must be plus sign and less than 0x8000,wch20080901*/ ++ static int is_lmdisp (int lmdisp ) { return u_field ( lmdisp, 14, 0 ); }; ++ static int is_rpiindex (int rpiindex) { return u_field ( rpiindex ,7, 0 ); }; ++ ++ static int is_atmdisp ( int atmdisp ) { return u_field ( atmdisp, 10, 0 ); }; ++ ++ static int is_vlog_h ( int vlog ) { return u_field ( (vlog & 0xff) >>6 , 27, 26 ); }; ++ static int is_vlog_l ( int vlog ) { return u_field ( vlog & 0x3f , 15, 10 ); }; ++ ++public: ++ ++ void flush() { ++ AbstractAssembler::flush(); ++ } ++ ++ inline void emit_long(int); // shadows AbstractAssembler::emit_long ++ inline void emit_data(int x) { emit_long(x); } ++ inline void emit_data(int, RelocationHolder const&); ++ inline void emit_data(int, relocInfo::relocType rtype); ++ ++ inline void emit_sw2_long(int); // shadows AbstractAssembler::emit_long ++ inline void emit_sw2_data(int x) { emit_sw2_long(x); } ++ inline void emit_sw2_data(int, RelocationHolder const&); ++ inline void emit_sw2_data(int, relocInfo::relocType rtype); ++ ++ ++ // Generic instructions ++ // Does 32bit or 64bit as needed for the platform. 
In some sense these ++ // belong in macro assembler but there is no need for both varieties to exist ++ ++public: // arithmetic instructions ++ ++ void add_simm16 (Register rt, Register rs, int imm) ++ { ++ ldi(rt, rs, imm); ++ } ++ void add_s(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fadds(F28, fs, ft); ++ fmovs(fd, F28); ++ }else if (fs == fd){ ++ fmovs(F28, fs); ++ fadds(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovs(F28, ft); ++ fadds(fd, fs, F28); ++ }else{ ++ fadds(fd, fs, ft); ++ } ++ } else ++ fadds(fd, fs, ft); ++ } ++ void sub_s(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fsubs(F28, fs, ft); ++ fmovs(fd, F28); ++ }else if (fs == fd){ ++ fmovs(F28, fs); ++ fsubs(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovs(F28, ft); ++ fsubs(fd, fs, F28); ++ }else{ ++ fsubs(fd, fs, ft); ++ } ++ } else ++ fsubs(fd, fs, ft); ++ } ++ void mul_s(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fmuls(F28, fs, ft); ++ fmovs(fd, F28); ++ }else if (fs == fd){ ++ fmovs(F28, fs); ++ fmuls(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovs(F28, ft); ++ fmuls(fd, fs, F28); ++ }else{ ++ fmuls(fd, fs, ft); ++ } ++ } else ++ fmuls(fd, fs, ft); ++ } ++ void div_s(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fdivs(F28, fs, ft); ++ fmovs(fd, F28); ++ }else if (fs == fd){ ++ fmovs(F28, fs); ++ fdivs(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovs(F28, ft); ++ fdivs(fd, fs, F28); ++ }else{ ++ 
fdivs(fd, fs, ft); ++ } ++ } else ++ fdivs(fd, fs, ft); ++ } ++ void idiv_sw(Register rs, Register rt, Register rd){ ++ if(rt == R0){ ++ ShouldNotReachHere(); ++ }else{ ++ FloatRegister fsrc1 = f22; ++ FloatRegister fsrc2 = f23; ++ FloatRegister fdest = f24; ++ ifmovd(fsrc1, rs); ++ ifmovd(fsrc2, rt); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fdest, fsrc1, fsrc2); ++ fcvtdl_z(fdest, fdest); ++ fcvtlw(fsrc1, fdest); ++ fimovs(rd, fsrc1); ++ } ++ } ++ void irem_sw(Register rs, Register rt, Register rd){ ++ if(rt == R0){ ++ ShouldNotReachHere(); ++ }else{ ++ FloatRegister fsrc1 = f22; ++ FloatRegister fsrc2 = f23; ++ FloatRegister fdest = f24; ++ Register tem1 = AT; ++ Register tem2 = GP; ++ ifmovd(fsrc1, rs); ++ ifmovd(fsrc2, rt); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fdest, fsrc1, fsrc2); ++ fcvtdl_z(fdest, fdest); ++ fimovd(tem1, fdest); ++ mulw(tem2, tem1, rt); ++ subw(rd, rs, tem2); ++ } ++ } ++ void ldiv_sw(Register rs, Register rt, Register rd){ ++ if(rt == R0){ ++ ShouldNotReachHere(); ++ }else{ ++ FloatRegister fsrc1 = f23; ++ FloatRegister fsrc2 = f24; ++ FloatRegister fdest = f25; ++ ifmovd(fsrc1, rs); ++ ifmovd(fsrc2, rt); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fdest, fsrc1, fsrc2); ++ fcvtdl_z(fdest, fdest); ++ fimovd(rd, fdest); ++ } ++ } ++ void lrem_sw(Register rs, Register rt, Register rd){ ++ if(rt == R0){ ++ ShouldNotReachHere(); ++ }else{ ++ FloatRegister fsrc1 = f23; ++ FloatRegister fsrc2 = f24; ++ FloatRegister fdest = f25; ++ Register tem1 = AT; ++ Register tem2 = GP; ++ ifmovd(fsrc1, rs); ++ ifmovd(fsrc2, rt); ++ fcvtld(fsrc1, fsrc1); ++ fcvtld(fsrc2, fsrc2); ++ fdivd(fdest, fsrc1, fsrc2); ++ fcvtdl_z(fdest, fdest); ++ fimovd(tem1, fdest); ++ mull(tem1, rt, tem2); ++ subl(rd, rs, tem2); ++ } ++ } ++ void sqrt_s(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ if(fs == fd){ ++ fmovs(F28, fs); ++ fsqrts(fd, F28); ++ } else ++ fsqrts(fd, fs); ++ } else ++ fsqrts(fd, 
fs); ++ } ++ void sllw_signed (Register rd, Register rt, int sa){ ++ slll(rd, rt, sa&0x1f); addw(rd, rd, 0); ++ } ++ ++ void corrected_idivw(Register ra, Register rb, Register rc) { ++ Label special_case, done; ++ Register tem1 = GP; ++ ++ if(rb == R0) { ++ ShouldNotReachHere(); ++ } else { ++ // load -1 in register ++ ldi(tem1, R0, -1); ++ ++ // check for special case, e.g. rb = -1 ++ cmpeq(tem1, rb, tem1); ++ bne(tem1, special_case); ++ ++ // handle normal case ++ divw(ra, rb, rc); ++ beq(R0, done); ++ ++ // handle special case ++ bind(special_case); ++ subw(rc, R0, ra); ++ ++ // normal and special case exit ++ bind(done); ++ } ++ } ++ ++ void corrected_idivl(Register ra, Register rb, Register rc) { ++ Label special_case, done; ++ Register tem1 = GP; ++ ++ if (rb == R0) { ++ ShouldNotReachHere(); ++ } else { ++ // load -1 in register ++ ldi(tem1, R0, -1); ++ ++ // check for special case, e.g. rb = -1l ++ cmpeq(tem1, rb, tem1); ++ bne(tem1, special_case); ++ ++ // handle normal case ++ divl(ra, rb, rc); ++ beq(R0, done); ++ ++ // handle special case ++ bind(special_case); ++ subl(rc, R0, ra); ++ ++ // normal and special case exit ++ bind(done); ++ } ++ } ++ ++public: //load&store instructions ++ ++ void load( int width, Register ra, int mdisp, Register rb ){ ++ if(width == 0) ldbu(ra, rb , mdisp); ++ else if(width == 1) ldhu(ra, rb, mdisp); ++ else if(width == 2) ldw( ra, rb , mdisp); ++ else ldl( ra,rb ,mdisp); ++ } ++ void store( int width, Register ra, int mdisp, Register rb ){ ++ if(width == 0) stb(ra, rb, mdisp); ++ else if(width == 1) sth(ra, rb, mdisp); ++ else if(width == 2) stw( ra, rb, mdisp); ++ else stl( ra, rb , mdisp); ++ } ++ void ldb_signed(Register rt, Register base, int off) { ldbu(rt, base, off); sextb (rt, rt); } ++ void ldbu(Register rt, Register base, int off); ++ void ldh_signed(Register rt, Register base, int off) { ldhu(rt, base, off); sexth(rt, rt); } ++ void ldhu( Register rt, Register rs, int off ); ++ void ldw(Register rt, Register rs, 
int off); ++ void ldw_unsigned(Register rt, Register base, int off) { ldw(rt, base, off); zapnot(rt, rt, 0xF); } ++ void ldl(Register rt, Register base, int off); ++ void ldl_unaligned (Register rt, Register addr/*, int off*/, Register tmp1) { ++ assert_different_registers(addr, tmp1, rt); ++ ldl_u(tmp1, addr, 0) ; ++ ldl_u(rt, addr, 7) ; ++ extll(tmp1, tmp1, addr); ++ exthl(rt, rt, addr); ++ or_ins(rt, tmp1, rt); ++ } ++ void ldb_signed(Register rt, Address src); ++ void ldbu(Register rt, Address src); ++ void ldh_signed(Register rt, Address src); ++ void ldh_unsigned(Register rt, Address src); ++ void ldw_signed(Register rt, Address src); ++ void ldw_unsigned(Register rt, Address src); ++ void ldw (Register rt, Address src); ++ void ldl (Register rt, Address src); ++ void lea (Register rt, Address src); ++ void li32(Register rt, int imm); ++ void lldw(Register rt, Address src); ++ void lldl(Register rt, Address src); ++ void stw (Register rt, Register base, int off); ++ void stl (Register rt, Register base, int off); ++ void stb (Register rt, Address dst); ++ void sth (Register rt, Address dst); ++ void stw (Register rt, Address dst); ++ void stl (Register rt, Address dst); ++ void lstw(Register rt, Address dst); ++ void lstl(Register rt, Address dst); ++ ++ ++public: //branch instructions ++ void beq (Register rs, Register rt, int off, Register cc = GP) { ++ if ( rt == R0 ) { ++ beq(rs, off); ++ } else if (rs == R0) { ++ beq(rt, off); ++ } else { ++ cmpeq(cc, rs, rt); ++ bne(cc, off); ++ } ++ } ++ void beq (Register rs, Register rt, Label& L, Register cc = GP) { ++ if ( rt == R0 ) { ++ beq(rs, offset(target(L))); ++ } else if (rs == R0) { ++ beq(rt, offset(target(L))); ++ } else { ++ cmpeq(cc, rs, rt); ++ bne(cc, offset(target(L))); ++ } ++ } ++ void beq (Register rs, Register rt, address entry, Register cc = GP) { ++ if ( rt == R0 ) { ++ beq(rs, offset(entry)); ++ } else if (rs == R0) { ++ beq(rt, offset(entry)); ++ } else { ++ cmpeq(cc, rs, rt); ++ bne(cc, 
offset(entry)); ++ } ++ } ++ void bne (Register rs, Register rt, int off, Register cc = GP) { ++ if ( rt == R0 ) { ++ bne(rs, off); ++ } else if (rs == R0) { ++ bne(rt, off); ++ } else { ++ cmpeq(cc, rs, rt); ++ beq(cc, off); ++ } ++ } ++ void bne (Register rs, Register rt, Label& L, Register cc = GP){ ++ if ( rt == R0 ) { ++ bne(rs, offset(target(L))); ++ } else if (rs == R0) { ++ bne(rt, offset(target(L))); ++ } else { ++ cmpeq(cc, rs, rt); ++ beq(cc, offset(target(L))); ++ } ++ } ++ void bne (Register rs, Register rt, address entry, Register cc = GP) { ++ if ( rt == R0 ) { ++ bne(rs, offset(entry)); ++ } else if (rs == R0) { ++ bne(rt, offset(entry)); ++ } else { ++ cmpeq(cc, rs, rt); ++ beq(cc, offset(entry)); ++ } ++ } ++ void blt(Register rs, Register rt, Label& L, Register cc = GP) { ++ cmpult(cc, rs, rt); ++ bne(cc, offset(target(L))); ++ } ++ void bge(Register rs, Register rt, Label& L, Register cc = GP) { ++ cmpule(cc, rt, rs); ++ bne(cc, offset(target(L))); ++ } ++ void bge(Register rs, Register rt, address entry, Register cc = GP) { ++ cmpule(cc, rt, rs); ++ bne(cc, offset(entry)); ++ } ++ void bge(Register rs, address entry) { bge (rs, offset(entry)); } ++ void bgt(Register rs, address entry) { bgt (rs, offset(entry)); } ++ void ble(Register rs, address entry) { ble (rs, offset(entry)); } ++ void blt(Register rs, address entry) { blt (rs, offset(entry)); } ++ void beq( Register a, Label& L ) { beq( a, offset(target(L))); } ++ void beq_a (Register a, address entry) { beq(a, offset(entry)); } ++ void bne( Register a, Label& L ) { bne( a, offset(target(L))); } ++ void blt( Register a, Label& L ) { blt( a, offset(target(L))); } ++ void ble( Register a, Label& L ) { ble( a, offset(target(L))); } ++ void bgt( Register a, Label& L ) { bgt( a, offset(target(L))); } ++ void bge( Register a, Label& L ) { bge( a, offset(target(L))); } ++ ++ ++public: //float arithmrtic instructions ++ ++ void fabs(FloatRegister fd, FloatRegister fs) { fcpys(fd, F31, fs); } ++ 
void fmovs(FloatRegister fd, FloatRegister fs); ++ void fmovd(FloatRegister fd, FloatRegister fs); ++ void fneg (FloatRegister fd, FloatRegister fs){ fcpysn(fd, fs, fs); } ++ ++ void add_d(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ faddd(F28, fs, ft); ++ fmovd(fd, F28); ++ }else if (fs == fd){ ++ fmovd(F28, fs); ++ faddd(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovd(F28, ft); ++ faddd(fd, fs, F28); ++ }else{ ++ faddd(fd, fs, ft); ++ } ++ } else ++ faddd(fd, fs, ft); ++ } ++ void sub_d(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fsubd(F28, fs, ft); ++ fmovd(fd, F28); ++ }else if (fs == fd){ ++ fmovd(F28, fs); ++ fsubd(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovd(F28, ft); ++ fsubd(fd, fs, F28); ++ }else{ ++ fsubd(fd, fs, ft); ++ } ++ } else ++ fsubd(fd, fs, ft); ++ } ++ void mul_d(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fmuld(F28, fs, ft); ++ fmovd(fd, F28); ++ }else if (fs == fd){ ++ fmovd(F28, fs); ++ fmuld(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovd(F28, ft); ++ fmuld(fd, fs, F28); ++ }else{ ++ fmuld(fd, fs, ft); ++ } ++ } else ++ fmuld(fd, fs, ft); ++ } ++ void div_d(FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(ft, F28); ++ if (fs == ft && ft == fd){ ++ fdivd(F28, fs, ft); ++ fmovd(fd, F28); ++ }else if (fs == fd){ ++ fmovd(F28, fs); ++ fdivd(fd, F28, ft); ++ }else if (ft == fd){ ++ fmovd(F28, ft); ++ fdivd(fd, fs, F28); ++ }else{ ++ fdivd(fd, fs, ft); ++ } ++ } else ++ fdivd(fd, fs, ft); ++ } 
++ void sqrt_d(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ if (fs == fd) { ++ fmovd(F28, fs); ++ fsqrtd(fd, F28); ++ } else ++ fsqrtd(fd, fs); ++ } else ++ fsqrtd(fd, fs); ++ } ++ ++ ++public: //float load&store instructions ++ void fldd(FloatRegister ft, Register rs, int off); ++ void flds(FloatRegister ft, Register rs, int off); ++ void fldd(FloatRegister ft, Address src); ++ void flds(FloatRegister ft, Address src); ++ ++ void fstd(FloatRegister ft, Register base, int off); ++ void fstd(FloatRegister ft, Address dst); ++ void fsts(FloatRegister ft, Register base, int off); ++ void fsts(FloatRegister ft, Address dst); ++ ++ ++public: //float compare instructions ++ // return true if fs lt ft, or either of fs & ft is NaN ++ void fcmplt_un (FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ fcmpun(fd, fs, ft); ++ fbne(fd, 1); ++ fcmplt(fd, fs, ft); ++ } ++ // return true if fs le ft, or either of fs & ft is NaN ++ void fcmple_un (FloatRegister fd, FloatRegister fs, FloatRegister ft) { ++ fcmpun(fd, fs, ft); ++ fbne(fd, 1); ++ fcmple(fd, fs, ft); ++ } ++ ++ ++public: //float branch instructions ++ void fbne(FloatRegister rs, Label& L) { fbne(rs, offset(target(L))); } ++ void fbeq(FloatRegister rs, Label& L) { fbeq(rs, offset(target(L))); } ++ void fblt(FloatRegister a, Label& L) { fblt( a, offset(target(L))); } ++ void fble(FloatRegister a, Label& L) { fble( a, offset(target(L))); } ++ void fbgt(FloatRegister a, Label& L) { fbgt( a, offset(target(L))); } ++ void fbge(FloatRegister a, Label& L) { fbge( a, offset(target(L))); } ++ ++ ++public: //fcvt instructions ++ void trunc_l_s(FloatRegister fd, FloatRegister fs) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ fcvtsd(F28, fd); ++ fcvtdl_z(fd, F28); ++ } ++ void trunc_l_d(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ if(fs == fd){ ++ fmovd(F28, 
fs); ++ fcvtdl_z(fd, F28); ++ } else ++ fcvtdl_z(fd, fs); ++ } else ++ fcvtdl_z(fd, fs); ++ } ++ void fcvtS2D(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ if (fs == fd){ ++ fmovd(F28, fs); ++ fcvtds(fd, F28); ++ } else ++ fcvtds(fd, fs); ++ } else ++ fcvtds(fd, fs); ++ } ++ void fcvtS2W(FloatRegister fd, FloatRegister fs) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ fcvtwl(F28, fs); ++ fcvtls(fd, F28); ++ } ++ void fcvtS2L(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ if (fs == fd){ ++ fmovd(F28, fs); ++ fcvtls(fd, F28); ++ }else{ ++ fcvtls(fd, fs); ++ } ++ } else ++ fcvtls(fd, fs); ++ } ++ void fcvtD2S(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ if (fs == fd){ ++ fmovs(F28, fs); ++ fcvtsd(fd, F28); ++ }else{ ++ fcvtsd(fd, fs); ++ } ++ } else ++ fcvtsd(fd, fs); ++ } ++ void fcvtD2W(FloatRegister fd, FloatRegister fs) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ fcvtwl(F28, fs); ++ fcvtld(fd, F28); ++ } ++ void fcvtD2L(FloatRegister fd, FloatRegister fs) { ++ if (FRegisterConflict) { ++ assert_different_registers(fs, F28); ++ assert_different_registers(fd, F28); ++ if (fs == fd){ ++ fmovd(F28, fs); ++ fcvtld(fd, F28); ++ }else{ ++ fcvtld(fd, fs); ++ } ++ } else ++ fcvtld(fd, fs); ++ } ++ ++ void ret() { ret(R0, RA, 0); } ++ //void sync () { memb(); } ++ void syscall(int code) { sys_call(code); } ++ void brk (int code) { sys_call(0x80); } ++ ++ void nop() { emit_sw2_long( op_ldi | is_ra(R0) ); } ++ ++ void int3(); ++ static void print_instruction(int); ++ int patched_branch(int dest_pos, int inst, int inst_pos); ++ int branch_destination(int inst, int pos); ++ ++// SW64 extension ++ 
++ public: ++ // SW64 Generic instructions ++ inline void sys_call_b( int palfn ); ++ inline void sys_call ( int palfn ); ++ //jump ++ inline void call ( Register ra, Register rb, int jmphint ); ++ inline void ret ( Register ra, Register rb, int rethint ); ++ inline void jmp ( Register ra, Register rb, int jmphint ); ++ //arithmetic ++ inline void addw ( Register rd, Register rs, Register rt ); ++ inline void addw ( Register rd, Register rs, int lit ); ++ inline void subw ( Register rd, Register rs, Register rt ); ++ inline void subw ( Register rd, Register rs, int lit ); ++ inline void s4addw ( Register rd, Register rs, Register rt ); ++ inline void s4addw ( Register rd, Register rs, int lit ); ++ inline void s4subw ( Register rd, Register rs, Register rt ); ++ inline void s4subw ( Register rd, Register rs, int lit ); ++ inline void s8addw ( Register rd, Register rs, Register rt ); ++ inline void s8addw ( Register rd, Register rs, int lit ); ++ inline void s8subw ( Register rd, Register rs, Register rt ); ++ inline void s8subw ( Register rd, Register rs, int lit ); ++ inline void addl ( Register rd, Register rs, Register rt ); ++ inline void addl ( Register rd, Register rs, int lit ); ++ inline void subl ( Register rd, Register rs, Register rt ); ++ inline void subl ( Register rd, Register rs, int lit ); ++ inline void s4addl ( Register rd, Register rs, Register rt ); ++ inline void s4addl ( Register rd, Register rs, int lit ); ++ inline void s4subl ( Register rd, Register rs, Register rt ); ++ inline void s4subl ( Register rd, Register rs, int lit ); ++ inline void s8addl ( Register rd, Register rs, Register rt ); ++ inline void s8addl ( Register rd, Register rs, int lit ); ++ inline void s8subl ( Register rd, Register rs, Register rt ); ++ inline void s8subl ( Register rd, Register rs, int lit ); ++ inline void mulw ( Register rd, Register rs, Register rt ); ++ inline void mulw ( Register rd, Register rs, int lit ); ++ inline void and_reg ( Register rd, Register 
rs, Register rt ); ++ inline void and_imm8 ( Register rd, Register rs, int lit ); ++ inline void bic ( Register rd, Register rs, Register rt ); ++ inline void bic ( Register rd, Register rs, int lit ); ++ inline void andnot ( Register rd, Register rs, Register rt ) { ++ bic( rd, rs, rt ); ++ } ++ inline void andnot ( Register rd, Register rs, int lit ) { ++ bic( rd, rs, lit ); ++ } ++ inline void bis ( Register rd, Register rs, Register rt ); ++ inline void bis ( Register rd, Register rs, int lit ); ++ inline void or_ins ( Register rd, Register rs, Register rt ) { ++ bis( rd, rs, rt ); ++ } ++ inline void or_ins ( Register rd, Register rs, int lit ) { ++ bis( rd, rs, lit ); ++ } ++ inline void ornot ( Register rd, Register rs, Register rt ); ++ inline void ornot ( Register rd, Register rs, int lit ); ++ inline void xor_ins ( Register rd, Register rs, Register rt ); ++ inline void xor_ins ( Register rd, Register rs, int lit ); ++ inline void eqv ( Register rd, Register rs, Register rt ); ++ inline void eqv ( Register rd, Register rs, int lit ); ++ inline void inslb ( Register rd, Register rs, Register rt ); ++ inline void inslb ( Register rd, Register rs, int lit ); ++ inline void inslh ( Register rd, Register rs, Register rt ); ++ inline void inslh ( Register rd, Register rs, int lit ); ++ inline void inslw ( Register rd, Register rs, Register rt ); ++ inline void inslw ( Register rd, Register rs, int lit ); ++ inline void insll ( Register rd, Register rs, Register rt ); ++ inline void insll ( Register rd, Register rs, int lit ); ++ inline void inshb ( Register rd, Register rs, Register rt ); ++ inline void inshb ( Register rd, Register rs, int lit ); ++ inline void inshh ( Register rd, Register rs, Register rt ); ++ inline void inshh ( Register rd, Register rs, int lit ); ++ inline void inshw ( Register rd, Register rs, Register rt ); ++ inline void inshw ( Register rd, Register rs, int lit ); ++ inline void inshl ( Register rd, Register rs, Register rt ); ++ 
inline void inshl ( Register rd, Register rs, int lit ); ++ inline void slll ( Register rd, Register rs, Register rt ); ++ inline void slll ( Register rt, Register rs, int lit ); ++ inline void srll ( Register rd, Register rt, Register rs ); ++ inline void srll ( Register rt, Register rs, int lit ); ++ inline void sral ( Register rd, Register rt, Register rs ); ++ inline void sral ( Register rt, Register rs, int lit ); ++ inline void extlb ( Register rd, Register rs, Register rt ); ++ inline void extlb ( Register rd, Register rs, int lit ); ++ inline void extlh ( Register rd, Register rs, Register rt ); ++ inline void extlh ( Register rd, Register rs, int lit ); ++ inline void extlw ( Register rd, Register rt, Register rs ); ++ inline void extlw ( Register rd, Register rs, int lit ); ++ inline void extll ( Register rd, Register rs, Register rt ); ++ inline void extll ( Register rd, Register rs, int lit ); ++ inline void exthb ( Register rd, Register rs, Register rt ); ++ inline void exthb ( Register rd, Register rs, int lit ); ++ inline void exthh ( Register rd, Register rs, Register rt ); ++ inline void exthh ( Register rd, Register rs, int lit ); ++ inline void exthw ( Register rd, Register rs, Register rt ); ++ inline void exthw ( Register rd, Register rs, int lit ); ++ inline void exthl ( Register rd, Register rs, Register rt ); ++ inline void exthl ( Register rd, Register rs, int lit ); ++ inline void ctpop ( Register rc, Register rb ); ++ inline void ctlz ( Register rc, Register rb ); ++ inline void cttz ( Register rc, Register rb ); ++ inline void masklb ( Register rd, Register rs, Register rt ); ++ inline void masklb ( Register rd, Register rs, int lit ); ++ inline void masklh ( Register rd, Register rs, Register rt ); ++ inline void masklh ( Register rd, Register rs, int lit ); ++ inline void masklw ( Register rd, Register rs, Register rt ); ++ inline void masklw ( Register rd, Register rs, int lit ); ++ inline void maskll ( Register rd, Register rs, 
Register rt ); ++ inline void maskll ( Register rd, Register rs, int lit ); ++ inline void maskhb ( Register rd, Register rs, Register rt ); ++ inline void maskhb ( Register rd, Register rs, int lit ); ++ inline void maskhh ( Register rd, Register rs, Register rt ); ++ inline void maskhh ( Register rd, Register rs, int lit ); ++ inline void maskhw ( Register rd, Register rs, Register rt ); ++ inline void maskhw ( Register rd, Register rs, int lit ); ++ inline void maskhl ( Register rd, Register rs, Register rt ); ++ inline void maskhl ( Register rd, Register rs, int lit ); ++ inline void zap ( Register rd, Register rs, Register rt ); ++ inline void zap ( Register rd, Register rs, int lit ); ++ inline void zapnot ( Register rd, Register rs, Register rt ); ++ inline void zapnot ( Register rd, Register rs, int lit ); ++ inline void sextb ( Register rc, Register rb ); ++ inline void sextb ( Register rc, int lit ); ++ inline void sexth ( Register rc, Register rb ); ++ inline void sexth ( Register rc, int lit ); ++ //load&store ++ inline void ldl_u ( Register ra, Register rb, int mdisp ); ++ inline void stb ( Register ra, Register rb, int mdisp ); ++ inline void sth ( Register ra, Register rb, int mdisp ); ++ inline void stl_u ( Register ra, Register rb, int mdisp ); ++ inline void ldi ( Register ra, Register rb, int mdisp ); ++ inline void ldih ( Register ra, Register rb, int mdisp ); ++ inline void ldw_nc ( Register rt, Register rs, int atmdisp ); ++ inline void ldl_nc ( Register rt, Register rs, int atmdisp ); ++ inline void ldd_nc ( Register rt, Register rs, int atmdisp ); ++ inline void stw_nc ( Register rt, Register rs, int atmdisp ); ++ inline void stl_nc ( Register rt, Register rs, int atmdisp ); ++ inline void std_nc ( Register rt, Register rs, int atmdisp ); ++ inline void lldw ( Register rt, Register rs, int atmdisp );//lock ++ inline void lldl ( Register rt, Register rs, int atmdisp ); ++ inline void lstw ( Register ra, Register rb, int atmdisp ); ++ inline 
void lstl ( Register ra, Register rb, int atmdisp ); ++ inline void rd_f ( Register ra ); //SW2F ++ inline void wr_f ( Register ra ); //SW2F ++ inline void ldw_inc ( Register rt, Register rs, int atmdisp );//atom ++ inline void ldl_inc ( Register rt, Register rs, int atmdisp ); ++ inline void ldw_dec ( Register rt, Register rs, int atmdisp ); ++ inline void ldl_dec ( Register rt, Register rs, int atmdisp ); ++ inline void ldw_set ( Register rt, Register rs, int atmdisp ); ++ inline void ldl_set ( Register rt, Register rs, int atmdisp ); ++ //compare ++ inline void cmpeq ( Register rd, Register rs, Register rt ); ++ inline void cmpeq ( Register rd, Register rs, int lit ); ++ inline void cmplt ( Register rd, Register rs, Register rt ); ++ inline void cmplt ( Register rd, Register rs, int lit ); ++ inline void cmple ( Register rd, Register rs, Register rt ); ++ inline void cmple ( Register rd, Register rs, int lit ); ++ inline void cmpult ( Register rd, Register rs, Register rt ); ++ inline void cmpult ( Register rd, Register rs, int lit ); ++ inline void cmpule ( Register rd, Register rs, Register rt ); ++ inline void cmpule ( Register rd, Register rs, int lit ); ++ inline void cmpgeb ( Register rc, Register ra, Register rb ); ++ inline void cmpgeb ( Register rc, Register ra, int lit ); ++ //branch ++ inline void br ( Register ra, int bdisp ); ++ inline void bsr ( Register ra, int bdisp ); ++ inline void beq ( Register ra, int bdisp ); ++ inline void bne ( Register ra, int bdisp ); ++ inline void blt ( Register ra, int bdisp ); ++ inline void ble ( Register ra, int bdisp ); ++ inline void bgt ( Register ra, int bdisp ); ++ inline void bge ( Register ra, int bdisp ); ++ inline void blbc ( Register ra, int bdisp ); ++ inline void blbs ( Register ra, int bdisp ); ++ //select ++ inline void seleq ( Register ra, Register rb,Register r3, Register rc ); ++ inline void seleq ( Register ra, int lit, Register r3, Register rc ); ++ inline void selge ( Register ra, Register 
rb,Register r3, Register rc ); ++ inline void selge ( Register ra, int lit, Register r3, Register rc ); ++ inline void selgt ( Register ra, Register rb,Register r3, Register rc ); ++ inline void selgt ( Register ra, int lit, Register r3, Register rc ); ++ inline void selle ( Register ra, Register rb,Register r3, Register rc ); ++ inline void selle ( Register ra, int lit, Register r3, Register rc ); ++ inline void sellt ( Register ra, Register rb,Register r3, Register rc ); ++ inline void sellt ( Register ra, int lit, Register r3, Register rc ); ++ inline void selne ( Register ra, Register rb,Register r3, Register rc ); ++ inline void selne ( Register ra, int lit, Register r3, Register rc ); ++ inline void sellbc ( Register ra, Register rb,Register r3, Register rc ); ++ inline void sellbc ( Register ra, int lit, Register r3, Register rc ); ++ inline void sellbs ( Register ra, Register rb,Register r3, Register rc ); ++ inline void sellbs ( Register ra, int lit, Register r3, Register rc ); ++ //mov ++ inline void fimovs ( Register rc, FloatRegister fa); // For sw4a SQData ++ inline void fimovd ( Register rc, FloatRegister fa); // For sw4a SQData ++ ++ //float ++ //arithmetic instrctions ++ inline void fadds ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void faddd ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fsubs ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fsubd ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fmuls ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fmuld ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fdivs ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fdivd ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void fsqrts ( FloatRegister fc, FloatRegister fb ); ++ inline void fsqrtd ( FloatRegister fc, FloatRegister fb ); ++ inline void fmas ( FloatRegister fa, 
FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fnmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fnmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fnmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fnmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ //compare ++ inline void fcmpeq ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void fcmple ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void fcmplt ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void fcmpun ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ //branch ++ inline void fbeq ( FloatRegister fa, int bdisp ); ++ inline void fbne ( FloatRegister fa, int bdisp ); ++ inline void fblt ( FloatRegister fa, int bdisp ); ++ inline void fble ( FloatRegister fa, int bdisp ); ++ inline void fbgt ( FloatRegister fa, int bdisp ); ++ inline void fbge ( FloatRegister fa, int bdisp ); ++ //select ++ inline void fseleq ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fselne ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fsellt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fselle ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fselgt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void fselge ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ 
//mov ++ inline void fcpys ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void fcpyse ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void fcpysn ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void ifmovs ( FloatRegister fc, Register ra ); // For sw4a SQData ++ inline void ifmovd ( FloatRegister fc, Register ra ); // For sw4a SQData ++ //cmov ++ inline void cmovdl ( Register rc, FloatRegister fb ); ++ inline void cmovdl_g ( Register rc, FloatRegister fb ); ++ inline void cmovdl_p ( Register rc, FloatRegister fb ); ++ inline void cmovdl_z ( Register rc, FloatRegister fb ); ++ inline void cmovdl_n ( Register rc, FloatRegister fb ); ++ ++ inline void cmovdlu ( Register rc, FloatRegister fb ); ++ inline void cmovdlu_g ( Register rc, FloatRegister fb ); ++ inline void cmovdlu_p ( Register rc, FloatRegister fb ); ++ inline void cmovdlu_z ( Register rc, FloatRegister fb ); ++ inline void cmovdlu_n ( Register rc, FloatRegister fb ); ++ ++ inline void cmovdw ( Register rc, FloatRegister fb ); ++ inline void cmovdw_g ( Register rc, FloatRegister fb ); ++ inline void cmovdw_p ( Register rc, FloatRegister fb ); ++ inline void cmovdw_z ( Register rc, FloatRegister fb ); ++ inline void cmovdw_n ( Register rc, FloatRegister fb ); ++ ++ inline void cmovdwu ( Register rc, FloatRegister fb ); ++ inline void cmovdwu_g ( Register rc, FloatRegister fb ); ++ inline void cmovdwu_p ( Register rc, FloatRegister fb ); ++ inline void cmovdwu_z ( Register rc, FloatRegister fb ); ++ inline void cmovdwu_n ( Register rc, FloatRegister fb ); ++ ++ inline void cmovls ( FloatRegister fc, Register rb ); ++ inline void cmovld ( FloatRegister fc, Register rb ); ++ inline void cmovuls ( FloatRegister fc, Register rb ); ++ inline void cmovuld ( FloatRegister fc, Register rb ); ++ inline void cmovws ( FloatRegister fc, Register rb ); ++ inline void cmovwd ( FloatRegister fc, Register rb ); ++ inline void cmovuws ( FloatRegister fc, Register rb 
); ++ inline void cmovuwd ( FloatRegister fc, Register rb ); ++ //fcvt ++ inline void fcvtsd ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtds ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtdl_g ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtdl_p ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtdl_z ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtdl_n ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtdl ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtwl ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtlw ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtls ( FloatRegister fc, FloatRegister fb ); ++ inline void fcvtld ( FloatRegister fc, FloatRegister fb ); ++ //set FPCR ++ inline void rfpcr ( FloatRegister fa); ++ inline void wfpcr ( FloatRegister fa); ++ inline void setfpec0 (); ++ inline void setfpec1 (); ++ inline void setfpec2 (); ++ inline void setfpec3 (); ++ //SIMD ++ inline void ldwe ( FloatRegister fa, Register rb, int mdisp ); ++ inline void ldse ( FloatRegister fa, Register rb, int mdisp ); ++ inline void ldde ( FloatRegister fa, Register rb, int mdisp ); ++ inline void vlds ( FloatRegister fa, Register rb, int mdisp ); ++ inline void vldd ( FloatRegister fa, Register rb, int mdisp ); ++ inline void vsts ( FloatRegister fa, Register rb, int mdisp ); ++ inline void vstd ( FloatRegister fa, Register rb, int mdisp ); ++ inline void vaddw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vaddw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vsubw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vsubw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vcmpgew ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcmpgew ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vcmpeqw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcmpeqw ( 
FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vcmplew ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcmplew ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vcmpltw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcmpltw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vcmpulew ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcmpulew ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vcmpultw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcmpultw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vsllw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vsllw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vsrlw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vsrlw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vsraw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vsraw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vrolw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vrolw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void sllow ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void sllow ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void srlow ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void srlow ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vaddl ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vaddl ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vsubl ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vsubl ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void ctpopow ( FloatRegister fc, FloatRegister fa ); ++ inline void ctlzow ( FloatRegister fc, FloatRegister fa ); ++ ++ inline void 
vucaddw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vucaddw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vucsubw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vucsubw ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vucaddh ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vucaddh ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vucsubh ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vucsubh ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vucaddb ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vucaddb ( FloatRegister fc, FloatRegister fa, int lit ); ++ inline void vucsubb ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vucsubb ( FloatRegister fc, FloatRegister fa, int lit ); ++ ++ inline void vseleqw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vseleqw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ inline void vsellbcw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vsellbcw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ inline void vselltw ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vselltw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ inline void vsellew ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vsellew ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ ++ inline void vadds ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vaddd ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vsubs ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vsubd ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ 
inline void vmuls ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vmuld ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vdivs ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vdivd ( FloatRegister fd, FloatRegister fs, FloatRegister ft ); ++ inline void vsqrts ( FloatRegister fc, FloatRegister fb ); ++ inline void vsqrtd ( FloatRegister fc, FloatRegister fb ); ++ inline void vfcmpeq ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vfcmple ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vfcmplt ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vfcmpun ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcpys ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vfmov ( FloatRegister fc, FloatRegister fa ); ++ inline void vcpyse ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vcpysn ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ ++ inline void vmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vnmas ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vnmad ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vnmss ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vnmsd ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ ++ inline void vfseleq ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vfsellt ( FloatRegister fa, FloatRegister fb, 
FloatRegister f3, FloatRegister fc ); ++ inline void vfselle ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ ++ inline void vinsw ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ inline void vinsf ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); ++ inline void vextw ( FloatRegister fa, int fmalit, FloatRegister fc); ++ inline void vextf ( FloatRegister fa, int fmalit, FloatRegister fc); ++ inline void vcpyw ( FloatRegister fa, FloatRegister fc); ++ inline void vcpyf ( FloatRegister fa, FloatRegister fc); ++ inline void vconw ( FloatRegister va, FloatRegister vb, FloatRegister fc, FloatRegister vd ); ++ inline void vshfw ( FloatRegister va, FloatRegister vb, FloatRegister fc, FloatRegister vd ); ++ inline void vcons ( FloatRegister va, FloatRegister vb, FloatRegister fc, FloatRegister vd ); ++ inline void vcond ( FloatRegister va, FloatRegister vb, FloatRegister fc, FloatRegister vd ); ++ inline void vldw_u ( FloatRegister fa, Register rb, int atmdisp );//load&store ++ inline void vstw_u ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vlds_u ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vsts_u ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vldd_u ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vstd_u ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vstw_ul ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vstw_uh ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vsts_ul ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vsts_uh ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vstd_ul ( FloatRegister fa, Register rb, int atmdisp ); ++ inline void vstd_uh ( FloatRegister fa, Register rb, int atmdisp ); ++ ++ inline void vlog ( int vlog, FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ); ++ inline void vbisw ( FloatRegister fc, FloatRegister fa, 
FloatRegister fb ); ++ inline void vxorw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vandw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void veqvw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vornotw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ inline void vbicw ( FloatRegister fc, FloatRegister fa, FloatRegister fb ); ++ /* ++ inline void vseleq ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vselne ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vsellt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vselle ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vselgt ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ inline void vselge ( FloatRegister fa, FloatRegister fb, FloatRegister f3, FloatRegister fc ); ++ */ ++ ++ //priority instructions ++ inline void pri_ret ( Register ra ); ++ inline void pri_ld ( Register ra, Register rb, int ev6hwdisp ); ++ inline void pri_st ( Register ra, Register rb, int ev6hwdisp ); ++ // cache control instructions ++ inline void s_fillcs ( Register rb, int mdisp ); ++ inline void s_fillde ( Register rb, int mdisp ); ++ inline void fillde ( Register rb, int mdisp ); ++ inline void fillde_e ( Register rb, int mdisp ); ++ inline void fillcs ( Register rb, int mdisp ); ++ inline void fillcs_e ( Register rb, int mdisp ); ++ inline void e_fillcs ( Register rb, int mdisp ); ++ inline void e_fillde ( Register rb, int mdisp ); ++ inline void flushd ( Register rb, int mdisp ); ++ inline void evictdl ( Register rb, int mdisp ); ++ inline void evictdg ( Register rb, int mdisp ); ++ //others ++ inline void memb ( void ); ++ inline void rtc ( Register ra, Register rb ); ++ inline void rcid ( Register ra); ++ inline void halt ( void); ++ inline void rtid ( 
Register ra); ++ inline void csrr ( Register ra, int rpiindex ); ++ inline void csrw ( Register ra, int rpiindex ); ++ //SW8A instructions ++ inline void imemb ( void ); //SW8A ++ inline void wmemb ( void ); //SW8A ++ inline void csrws ( Register ra, int rpiindex ); //SW8A ++ inline void csrwc ( Register ra, int rpiindex ); //SW8A ++ inline void divw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void udivw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void remw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void uremw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void mull ( Register ra, Register rb, Register rc ); //SW8A ++ inline void mull ( Register ra, int lit, Register rc ); //SW8A ++ inline void umulh ( Register ra, Register rb, Register rc ); //SW8A ++ inline void umulh ( Register ra, int lit, Register rc ); //SW8A ++ inline void divl ( Register ra, Register rb, Register rc ); //SW8A ++ inline void udivl ( Register ra, Register rb, Register rc ); //SW8A ++ inline void reml ( Register ra, Register rb, Register rc ); //SW8A ++ inline void ureml ( Register ra, Register rb, Register rc ); //SW8A ++ inline void addpi ( int apint, Register rc ); //SW8A ++ inline void addpis ( int apint, Register rc ); //SW8A ++ inline void sbt ( Register ra, Register rb, Register rc ); //SW8A ++ inline void sbt ( Register ra, int lit, Register rc ); //SW8A ++ inline void cbt ( Register ra, Register rb, Register rc ); //SW8A ++ inline void cbt ( Register ra, int lit, Register rc ); //SW8A ++ inline void roll ( Register ra, Register rb, Register rc ); //SW8A ++ inline void roll ( Register ra, int lit, Register rc ); //SW8A ++ inline void sllw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void sllw ( Register ra, int lit, Register rc ); //SW8A ++ inline void srlw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void srlw ( Register ra, int lit, Register rc ); //SW8A ++ inline void sraw ( Register ra, 
Register rb, Register rc ); //SW8A ++ inline void sraw ( Register ra, int lit, Register rc ); //SW8A ++ inline void rolw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void rolw ( Register ra, int lit, Register rc ); //SW8A ++ inline void revbh ( Register rb, Register rc ); //SW8A ++ inline void revbw ( Register rb, Register rc ); //SW8A ++ inline void revbl ( Register rb, Register rc ); //SW8A ++ inline void casw ( Register ra, Register rb, Register rc ); //SW8A ++ inline void casl ( Register ra, Register rb, Register rc ); //SW8A ++ inline void frecs ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void frecd ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void fris ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void fris_g ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void fris_p ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void fris_z ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void fris_n ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void frid ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void frid_g ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void frid_p ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void frid_z ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void frid_n ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsllb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsllb ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsrlb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsrlb ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsrab ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsrab ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vrolb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vrolb ( FloatRegister fa, int lit, 
FloatRegister fc ); //SW8A ++ inline void vsllh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsllh ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsrlh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsrlh ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsrah ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsrah ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vrolh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vrolh ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vslll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vslll ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsrll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsrll ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsral ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsral ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vroll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vroll ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vmaxb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vminb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void sraow ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void sraow ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsumw ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vsuml ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vcmpueqb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vcmpueqb ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vcmpugtb ( 
FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vcmpugtb ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vmaxh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vminh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vmaxw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vminw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vmaxl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vminl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsm3msw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsm4key ( FloatRegister fa, int lit, FloatRegister fc ); //SW8A ++ inline void vsm4r ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vbinvw ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vumaxb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vuminb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vumaxh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vuminh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vumaxw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vuminw ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vumaxl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vuminl ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vsums ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vsumd ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vfrecs ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vfrecd ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vfcvtsd ( FloatRegister fb, 
FloatRegister fc ); //SW8A ++ inline void vfcvtds ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtls ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtld ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtdl ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtdl_g ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtdl_p ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtdl_z ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfcvtdl_n ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfris ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfris_g ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfris_p ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfris_z ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfris_n ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfrid ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfrid_g ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfrid_p ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfrid_z ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vfrid_n ( FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vmaxs ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vmins ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vmaxd ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vmind ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vinsb ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW8A ++ inline void vinsh ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW8A ++ inline void vinsectlh ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vinsectlw ( FloatRegister fa, FloatRegister fb, FloatRegister 
fc ); //SW8A ++ inline void vinsectll ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vinsectlb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vshfq ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW8A ++ inline void vshfqb ( FloatRegister fa, FloatRegister fb, FloatRegister fc ); //SW8A ++ inline void vcpyb ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vcpyh ( FloatRegister fa, FloatRegister fc ); //SW8A ++ inline void vsm3r ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW8A ++ inline void vfcvtsh ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW8A ++ inline void vfcvths ( FloatRegister fa, FloatRegister fb, int fmalit, FloatRegister fc ); //SW8A ++ inline void lbr ( int palfn ); //SW8A ++ inline void ldbu_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void ldhu_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void ldw_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void ldl_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void stb_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void sth_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void stw_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void stl_a ( Register ra, int atmdisp, Register rb ); //SW8A ++ inline void flds_a ( FloatRegister fa, int atmdisp, Register rb ); //SW8A ++ inline void fldd_a ( FloatRegister fa, int atmdisp, Register rb ); //SW8A ++ inline void fsts_a ( FloatRegister fa, int atmdisp, Register rb ); //SW8A ++ inline void fstd_a ( FloatRegister fa, int atmdisp, Register rb ); //SW8A ++ inline void dpfhr ( int th, int atmdisp, Register rb ); //SW8A ++ inline void dpfhw ( int th, int atmdisp, Register rb ); //SW8A ++ inline void crc32b ( Register rd, Register rs, Register rt ); //SW8A ++ inline void crc32h ( Register rd, Register rs, Register rt ); 
//SW8A ++ inline void crc32w ( Register rd, Register rs, Register rt ); //SW8A ++ inline void crc32l ( Register rd, Register rs, Register rt ); //SW8A ++ inline void crc32cb ( Register rd, Register rs, Register rt ); //SW8A ++ inline void crc32ch ( Register rd, Register rs, Register rt ); //SW8A ++ inline void crc32cw ( Register rd, Register rs, Register rt ); //SW8A ++ inline void crc32cl ( Register rd, Register rs, Register rt ); //SW8A ++ ++public: ++ // Creation ++ Assembler(CodeBuffer* code) : AbstractAssembler(code) { ++ } ++}; ++ ++ ++#endif // CPU_SW64_VM_ASSEMBLER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/assembler_sw64.inline.hpp afu8u/hotspot/src/cpu/sw64/vm/assembler_sw64.inline.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/assembler_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/assembler_sw64.inline.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,1292 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, 2018, Wuxi Institute of Advanced Technology. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_ASSEMBLER_SW64_INLINE_HPP ++#define CPU_SW64_VM_ASSEMBLER_SW64_INLINE_HPP ++ ++#include "asm/assembler.inline.hpp" ++#include "asm/codeBuffer.hpp" ++#include "code/codeCache.hpp" ++ ++ ++inline void Assembler::emit_long(int x) { ++ Unimplemented(); ++ AbstractAssembler::emit_int32(x); ++} ++ ++inline void Assembler::emit_data(int x, relocInfo::relocType rtype) { ++ Unimplemented(); ++ relocate(rtype); ++ emit_long(x); ++} ++ ++inline void Assembler::emit_data(int x, RelocationHolder const& rspec) { ++ Unimplemented(); ++ relocate(rspec); ++ emit_long(x); ++} ++ ++inline void Assembler::emit_sw2_long(int x) { ++ AbstractAssembler::emit_int32(x); ++} ++ ++inline void Assembler::emit_sw2_data(int x, relocInfo::relocType rtype) { ++ relocate(rtype); ++ emit_sw2_long(x); ++} ++ ++inline void Assembler::emit_sw2_data(int x, RelocationHolder const& rspec) { ++ relocate(rspec); ++ emit_sw2_long(x); ++} ++ ++ inline void Assembler::sys_call_b( int palfn ) ++ { emit_sw2_long( op_sys_call | is_palfn(palfn) ); } ++ inline void Assembler::sys_call( int palfn ) ++ { sys_call_b(palfn); /* emit_sw2_long( op_sys_call | ( 0x1 << 25 ) | is_palfn(palfn) );*/ } ++ ++//jump instructions ++ inline void Assembler::call( Register ra, Register rb, int jmphint ) ++ { emit_sw2_long( op_call | is_ra(ra) | is_rb(rb) | is_jmphint(jmphint) ); } ++ inline void Assembler::ret( Register ra, Register rb, int rethint ) ++ { emit_sw2_long( op_ret | is_ra(ra) | is_rb(rb) | is_rethint(rethint) ); } ++ inline void Assembler::jmp( Register ra, Register rb, int jmphint ) ++#ifdef YJDEBUG ++ { emit_sw2_long( op_call | is_ra(GP) | is_rb(rb) | is_jmphint(jmphint) ); } ++#else ++ { emit_sw2_long( op_jmp | is_ra(ra) | is_rb(rb) | is_jmphint(jmphint) ); } ++#endif ++ ++ //arithmetic ++ inline void 
Assembler::addw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_addw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::addw(Register rd, Register rs , int lit) ++ { emit_sw2_long( op_addw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::subw(Register rd, Register rs, Register rt) ++ { emit_sw2_long( op_subw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::subw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_subw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s4addw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s4addw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s4addw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s4addw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s4subw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s4subw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s4subw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s4subw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s8addw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s8addw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s8addw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s8addw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s8subw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s8subw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s8subw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s8subw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ ++ inline void Assembler::addl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_addl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::addl(Register rd, Register rs, int lit) ++ { emit_sw2_long( op_addl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline 
void Assembler::subl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_subl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::subl(Register rd, Register rs, int lit) ++ { emit_sw2_long( op_subl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s4addl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s4addl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s4addl( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s4addl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s4subl( Register ra, Register rb, Register rc ) ++ { emit_sw2_long( op_s4subl | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::s4subl( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s4subl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s8addl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s8addl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s8addl( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s8addl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::s8subl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_s8subl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::s8subl( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_s8subl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ ++ inline void Assembler::mulw(Register rd, Register rs, Register rt) ++ { emit_sw2_long( op_mulw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::mulw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_mulw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ ++ inline void Assembler::mull( Register ra, Register rb, Register rc ) ++ { emit_sw2_long( op_mull | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::mull( Register ra, int lit, Register rc ) ++ { emit_sw2_long( op_mull_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } 
++ inline void Assembler::umulh( Register ra, Register rb, Register rc ) ++ { emit_sw2_long( op_umulh | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::umulh( Register ra, int lit, Register rc ) ++ { emit_sw2_long( op_umulh_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ ++ inline void Assembler::and_reg(Register rd, Register rs, Register rt) ++ { emit_sw2_long( op_and | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::and_imm8(Register rd, Register rs, int lit) ++ { emit_sw2_long( op_and_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::bic( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_bic | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::bic( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_bic_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::bis(Register rd, Register rs, Register rt) ++ { emit_sw2_long( op_bis | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::bis( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_bis_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::ornot( Register rd, Register rs, Register rt) ++ { emit_sw2_long( op_ornot | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::ornot( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_ornot_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::xor_ins(Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_xor | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::xor_ins( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_xor_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::eqv( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_eqv | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::eqv( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_eqv_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ ++ inline void 
Assembler::inslb( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inslb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inslb( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inslb_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::inslh( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inslh | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inslh( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inslh_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::inslw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inslw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inslw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inslw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::insll( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_insll | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::insll( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_insll_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::inshb( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inshb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inshb( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inshb_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::inshh( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inshh | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inshh( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inshh_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::inshw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inshw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inshw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inshw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline 
void Assembler::inshl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_inshl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::inshl( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_inshl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::slll( Register rd, Register rt, Register rs ) ++ { emit_sw2_long( op_slll | is_ra(rt) | is_rb(rs) | is_rc(rd) ); } ++ inline void Assembler::slll( Register rt, Register rs, int lit ) ++ { emit_sw2_long( op_slll_l | is_ra(rs) | is_lit(lit) | is_rc(rt) ); } ++ inline void Assembler::srll(Register rd, Register rt, Register rs) ++ { emit_sw2_long( op_srll | is_ra(rt) | is_rb(rs) | is_rc(rd) ); } ++ inline void Assembler::srll(Register rd, Register rs, int lit) ++ { emit_sw2_long( op_srll_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::sral(Register rd, Register rt, Register rs) ++ { emit_sw2_long( op_sral | is_ra(rt) | is_rb(rs) | is_rc(rd) ); } ++ inline void Assembler::sral(Register rt, Register rs, int lit) ++ { emit_sw2_long( op_sral_l | is_ra(rs) | is_lit(lit) | is_rc(rt) ); } ++ ++ inline void Assembler::extlb( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_extlb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::extlb( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_extlb_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::extlh( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_extlh | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::extlh( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_extlh_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::extlw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_extlw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::extlw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_extlw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void 
Assembler::extll( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_extll | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::extll( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_extll_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::exthb( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_exthb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::exthb( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_exthb_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::exthh( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_exthh | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::exthh( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_exthh_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::exthw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_exthw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::exthw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_exthw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::exthl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_exthl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::exthl( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_exthl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ ++ inline void Assembler::ctpop( Register rc, Register rb ) ++ { emit_sw2_long( op_ctpop | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::ctlz( Register rc, Register rb ) ++ { emit_sw2_long( op_ctlz | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::cttz( Register rc, Register rb ) ++ { emit_sw2_long( op_cttz | is_rb(rb) | is_rc(rc) ); } ++ ++ inline void Assembler::masklb( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_masklb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::masklb( Register rd, Register rs, int lit ) ++ { 
emit_sw2_long( op_masklb_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::masklh( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_masklh | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::masklh( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_masklh_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::masklw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_masklw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::masklw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_masklw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::maskll( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_maskll | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::maskll( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_maskll_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::maskhb( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_maskhb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::maskhb( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_maskhb_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::maskhh( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_maskhh | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::maskhh( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_maskhh_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::maskhw( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_maskhw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::maskhw( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_maskhw_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::maskhl( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_maskhl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::maskhl( Register 
rd, Register rs, int lit ) ++ { emit_sw2_long( op_maskhl_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ ++ inline void Assembler::zap( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_zap | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::zap( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_zap_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::zapnot( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_zapnot | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::zapnot( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_zapnot_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::sextb( Register rc, Register rb) ++ { emit_sw2_long( op_sextb | is_ra(R0) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::sextb( Register rc, int lit ) ++ { emit_sw2_long( op_sextb_l | is_ra(R0) | is_lit(lit) | is_rc(rc) ); } ++ inline void Assembler::sexth( Register rc, Register rb ) ++ { emit_sw2_long( op_sexth | is_ra(R0) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::sexth( Register rc, int lit ) ++ { emit_sw2_long( op_sexth_l | is_ra(R0) | is_lit(lit) | is_rc(rc) ); } ++ //load&store ++ inline void Assembler::ldbu( Register rt, Register rs, int mdisp ) ++ { emit_sw2_long( op_ldbu | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::ldhu( Register rt, Register rs, int mdisp ) ++ { emit_sw2_long( op_ldhu | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::ldw( Register rt, Register rs, int mdisp ) ++ { emit_sw2_long( op_ldw | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::ldl( Register rt, Register rs, int mdisp ) ++ { emit_sw2_long( op_ldl | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::ldl_u( Register ra, Register rb, int mdisp ) ++ { emit_sw2_long( op_ldl_u | is_ra(ra) | is_mdisp(mdisp) | is_rb(rb) ); } ++ ++ inline void Assembler::stb( Register rt, Register rs, int 
mdisp ) ++ { emit_sw2_long( op_stb | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::sth(Register ra, Register rb , int mdisp) ++ { emit_sw2_long( op_sth | is_ra(ra) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::stw( Register rt, Register rs, int mdisp ) ++ { emit_sw2_long( op_stw | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::stl( Register rt, Register rs, int mdisp ) ++ { emit_sw2_long( op_stl | is_ra(rt) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::stl_u( Register ra, Register rb, int mdisp ) ++ { emit_sw2_long( op_stl_u | is_ra(ra) | is_mdisp(mdisp) | is_rb(rb) ); } ++ ++ inline void Assembler::ldi( Register ra, Register rb, int mdisp ) ++ { emit_sw2_long( op_ldi | is_ra(ra) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::ldih( Register ra, Register rb, int mdisp ) ++ { emit_sw2_long( op_ldih | is_ra(ra) | is_mdisp(mdisp) | is_rb(rb) ); } ++ ++ inline void Assembler::ldw_nc( Register rd, Register rs , int atmdisp ) ++ { emit_sw2_long( op_ldw_nc | is_ra(rd) | is_atmdisp(atmdisp) | is_rc(rs) ); } ++ inline void Assembler::ldl_nc( Register rd, Register rs , int atmdisp ) ++ { emit_sw2_long( op_ldl_nc | is_ra(rd) | is_atmdisp(atmdisp) | is_rc(rs) ); } ++ inline void Assembler::ldd_nc( Register rd, Register rs , int atmdisp ) ++ { emit_sw2_long( op_ldd_nc | is_ra(rd) | is_atmdisp(atmdisp) | is_rc(rs) ); } ++ inline void Assembler::stw_nc( Register rd, Register rs , int atmdisp ) ++ { emit_sw2_long( op_stw_nc | is_ra(rd) | is_atmdisp(atmdisp) | is_rc(rs) ); } ++ inline void Assembler::stl_nc( Register rd, Register rs , int atmdisp ) ++ { emit_sw2_long( op_stl_nc | is_ra(rd) | is_atmdisp(atmdisp) | is_rc(rs) ); } ++ inline void Assembler::std_nc( Register rd, Register rs , int atmdisp ) ++ { emit_sw2_long( op_std_nc | is_ra(rd) | is_atmdisp(atmdisp) | is_rc(rs) ); } ++ ++ inline void Assembler::lldw( Register rt, Register rs, int atmdisp )//lock ++ { emit_sw2_long( op_lldw | 
is_ra(rt) | is_atmdisp(atmdisp) | is_rb(rs) ); } ++ inline void Assembler::lldl( Register rt, Register rs, int atmdisp ) ++ { emit_sw2_long( op_lldl | is_ra(rt) | is_atmdisp(atmdisp) | is_rb(rs) ); } ++ inline void Assembler::lstw( Register rt, Register rs, int atmdisp ) ++ { emit_sw2_long( op_lstw | is_ra(rt) | is_atmdisp(atmdisp) | is_rb(rs) ); } ++ inline void Assembler::lstl( Register rt, Register rs, int atmdisp ) ++ { emit_sw2_long( op_lstl | is_ra(rt) | is_atmdisp(atmdisp) | is_rb(rs) ); } ++ inline void Assembler::rd_f( Register ra ) { ++ if (UseSW8A) { nop(); } else { sw2_only(); emit_sw2_long( op_rd_f | is_ra(ra) | is_rb(R0) ); } ++ } ++ inline void Assembler::wr_f( Register ra ) { ++ if (UseSW8A) { nop(); } else { sw2_only(); emit_sw2_long( op_wr_f | is_ra(ra) | is_rb(R0) ); } ++ } ++ ++ inline void Assembler::ldw_inc( Register rd, Register rs , int atmdisp )//atom ++ { sw2_only(); emit_sw2_long( op_ldw_inc | is_ra(rs) | is_atmdisp(atmdisp) | is_rc(rd) ); } ++ inline void Assembler::ldl_inc( Register rd, Register rs , int atmdisp ) ++ { sw2_only(); emit_sw2_long( op_ldl_inc | is_ra(rs) | is_atmdisp(atmdisp) | is_rc(rd) ); } ++ inline void Assembler::ldw_dec( Register rd, Register rs , int atmdisp ) ++ { sw2_only(); emit_sw2_long( op_ldw_dec | is_ra(rs) | is_atmdisp(atmdisp) | is_rc(rd) ); } ++ inline void Assembler::ldl_dec( Register rd, Register rs , int atmdisp ) ++ { sw2_only(); emit_sw2_long( op_ldl_dec | is_ra(rs) | is_atmdisp(atmdisp) | is_rc(rd) ); } ++ inline void Assembler::ldw_set( Register rd, Register rs , int atmdisp ) ++ { sw2_only(); emit_sw2_long( op_ldw_set | is_ra(rs) | is_atmdisp(atmdisp) | is_rc(rd) ); } ++ inline void Assembler::ldl_set( Register rd, Register rs , int atmdisp ) ++ { sw2_only(); emit_sw2_long( op_ldl_set | is_ra(rs) | is_atmdisp(atmdisp) | is_rc(rd) ); } ++ //compare ++ inline void Assembler::cmpeq( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_cmpeq | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline 
void Assembler::cmpeq( Register rd, Register rs, int lit) ++ { emit_sw2_long( op_cmpeq_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::cmplt(Register rd, Register rs, Register rt) ++ { emit_sw2_long( op_cmplt | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::cmplt( Register rd, Register rs, int lit) ++ { emit_sw2_long( op_cmplt_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::cmple( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_cmpule | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::cmple( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_cmple_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::cmpult( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_cmpult | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::cmpult(Register rd, Register rs, int lit) ++ { emit_sw2_long( op_cmpult_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::cmpule( Register rd, Register rs, Register rt ) ++ { emit_sw2_long( op_cmpule | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::cmpule( Register rd, Register rs, int lit ) ++ { emit_sw2_long( op_cmpule_l | is_ra(rs) | is_lit(lit) | is_rc(rd) ); } ++ inline void Assembler::cmpgeb( Register rc, Register ra, Register rb ) ++ { emit_sw2_long( op_cmpgeb | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::cmpgeb( Register rc, Register ra, int lit ) ++ { emit_sw2_long( op_cmpgeb_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ //branch ++ inline void Assembler::br( Register ra, int bdisp ) ++ { emit_sw2_long( op_br | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::bsr( Register ra, int bdisp ) ++ { emit_sw2_long( op_bsr | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::beq( Register ra, int bdisp ) ++ { emit_sw2_long( op_beq | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::bne( Register ra, int bdisp ) ++ { 
emit_sw2_long( op_bne | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::blt( Register ra, int bdisp ) ++ { emit_sw2_long( op_blt | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::ble( Register ra, int bdisp ) ++ { emit_sw2_long( op_ble | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::bgt( Register ra, int bdisp ) ++ { emit_sw2_long( op_bgt | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::bge( Register ra, int bdisp ) ++ { emit_sw2_long( op_bge | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::blbc( Register ra, int bdisp ) ++ { emit_sw2_long( op_blbc | is_ra(ra) | is_bdisp(bdisp) ); } ++ inline void Assembler::blbs( Register ra, int bdisp ) ++ { emit_sw2_long( op_blbs | is_ra(ra) | is_bdisp(bdisp) ); } ++ //select ++ inline void Assembler::seleq( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_seleq | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::seleq( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_seleq_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selge( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_selge | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selge( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_selge_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selgt( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_selgt | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selgt( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_selgt_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selle( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_selle | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selle( Register ra, int 
lit, Register r3,Register rc ) ++ { emit_sw2_long( op_selle_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::sellt( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_sellt | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::sellt( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_sellt_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selne( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_selne | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::selne( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_selne_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::sellbc( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_sellbc | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::sellbc( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_sellbc_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::sellbs( Register ra, Register rb,Register r3, Register rc ) ++ { emit_sw2_long( op_sellbs | is_ra(ra) | is_rb(rb) | is_r3(r3) | is_rc(rc) ); } ++ inline void Assembler::sellbs( Register ra, int lit, Register r3,Register rc ) ++ { emit_sw2_long( op_sellbs_l | is_ra(ra) | is_lit(lit) | is_r3(r3) | is_rc(rc) ); } ++ ++ //mov ++ inline void Assembler::fimovs( Register rc, FloatRegister fa ) // For sw4a SQData ++ { emit_sw2_long( op_fimovs | is_fa(fa) | is_rc(rc) ); } ++ inline void Assembler::fimovd( Register rt, FloatRegister fs ) // For sw4a SQData ++ { emit_sw2_long( op_fimovd | is_fa(fs) |is_rc(rt) ); } ++ //priority instructions ++ inline void Assembler::pri_ret( Register ra ) ++ { emit_sw2_long( op_pri_ret | is_ra(ra) ); } ++ inline void Assembler::pri_ld( Register ra, Register rb, int ev6hwdisp ) ++ { emit_sw2_long( op_pri_ld | 
is_ra(ra) | is_ev6hwdisp(ev6hwdisp) | is_rb(rb) ); } ++ inline void Assembler::pri_st( Register ra, Register rb, int ev6hwdisp ) ++ { emit_sw2_long( op_pri_st | is_ra(ra) | is_ev6hwdisp(ev6hwdisp) | is_rb(rb) ); } ++ // cache control instruction ++ inline void Assembler::s_fillcs( Register rb, int mdisp ) ++ { ldw( R0, rb, mdisp); } ++ inline void Assembler::s_fillde( Register rb, int mdisp ) ++ { ldl( R0, rb, mdisp); } ++ inline void Assembler::fillde( Register rb, int mdisp ) ++ { flds( F31, rb, mdisp); } ++ inline void Assembler::fillde_e( Register rb, int mdisp ) ++ { fldd( F31, rb, mdisp); } ++ inline void Assembler::fillcs( Register rb, int mdisp ) ++ { ldwe( F31, rb, mdisp); } ++ inline void Assembler::fillcs_e( Register rb, int mdisp ) ++ { ldde( F31, rb, mdisp); } ++ inline void Assembler::e_fillcs( Register rb, int mdisp ) ++ { ldse( F31, rb, mdisp); } ++ inline void Assembler::e_fillde( Register rb, int mdisp ) ++ { vlds( F31/*V31*/, rb, mdisp); } ++ inline void Assembler::flushd( Register rb, int mdisp ) ++ { ldbu( R0, rb, mdisp); } ++ inline void Assembler::evictdl( Register rb, int mdisp ) ++ { ldl_u( R0, rb, mdisp); } ++ inline void Assembler::evictdg( Register rb, int mdisp ) ++ { ldhu( R0, rb, mdisp); } ++ ++ //other ++ inline void Assembler::memb( void ) ++ { emit_sw2_long( op_memb); } ++ inline void Assembler::rtc( Register ra, Register rb ) ++ { emit_sw2_long( op_rtc | is_ra(ra) | is_rb(rb) ); } ++ inline void Assembler::rcid( Register ra ) ++ { emit_sw2_long( op_rcid | is_ra(ra) ); } ++ inline void Assembler::halt( void ) ++ { emit_sw2_long( op_halt ); } ++ inline void Assembler::rtid( Register ra ) ++ { emit_sw2_long( op_rtid | is_ra(ra) ); } ++ inline void Assembler::csrr( Register ra, int rpiindex ) ++ { emit_sw2_long( op_csrr | is_ra(ra) | is_rpiindex(rpiindex) ); } ++ inline void Assembler::csrw( Register ra, int rpiindex ) ++ { emit_sw2_long( op_csrw | is_ra(ra) | is_rpiindex(rpiindex) ); } ++ ++ //float ++ //arithmetic instrctions ++ 
inline void Assembler::fadds( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fadds | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::faddd( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_faddd | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fsubs( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fsubs | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fsubd( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fsubd | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fmuls( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fmuls | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fmuld( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fmuld | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fdivs( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fdivs | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fdivd( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_fdivd | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::fsqrts( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fsqrts | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fsqrtd( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fsqrtd | is_fb(fb) | is_fc(fc) ); } ++ //load&store ++ inline void Assembler::flds( FloatRegister ft, Register rs, int mdisp ) ++ { emit_sw2_long( op_flds | is_fa(ft) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::fldd( FloatRegister ft, Register rs, int mdisp ) ++ { emit_sw2_long( op_fldd | is_fa(ft) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline void Assembler::fsts( FloatRegister ft, Register rs, int mdisp ) ++ { emit_sw2_long( op_fsts | is_fa(ft) | is_mdisp(mdisp) | is_rb(rs) ); } ++ inline 
void Assembler::fstd( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_fstd | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::fmas( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fmas | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fmad( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fmad | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fmss( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fmss | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fmsd( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fmsd | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fnmas( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fnmas | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fnmad( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fnmad | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fnmss( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fnmss | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fnmsd( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fnmsd | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ //compare ++ // return true if fa eq fb, either flase (including NaN) ++ inline void Assembler::fcmpeq( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_fcmpeq | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ // return true if fa le fb, either flase (including NaN) ++ inline void Assembler::fcmple( FloatRegister fc, FloatRegister fa, FloatRegister fb 
) ++ { emit_sw2_long( op_fcmple | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ // return true if fa lt fb, either flase (including NaN) ++ inline void Assembler::fcmplt( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_fcmplt | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ // return true if either of fa & fb is NaN ++ inline void Assembler::fcmpun( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_fcmpun | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ //branch ++ inline void Assembler::fbeq( FloatRegister fa, int bdisp ) ++ { emit_sw2_long( op_fbeq | is_fa(fa) | is_bdisp(bdisp) ); } ++ inline void Assembler::fbne( FloatRegister fa, int bdisp ) ++ { emit_sw2_long( op_fbne | is_fa(fa) | is_bdisp(bdisp) ); } ++ inline void Assembler::fblt( FloatRegister fa, int bdisp ) ++ { emit_sw2_long( op_fblt | is_fa(fa) | is_bdisp(bdisp) ); } ++ inline void Assembler::fble( FloatRegister fa, int bdisp ) ++ { emit_sw2_long( op_fble | is_fa(fa) | is_bdisp(bdisp) ); } ++ inline void Assembler::fbgt( FloatRegister fa, int bdisp ) ++ { emit_sw2_long( op_fbgt | is_fa(fa) | is_bdisp(bdisp) ); } ++ inline void Assembler::fbge( FloatRegister fa, int bdisp ) ++ { emit_sw2_long( op_fbge | is_fa(fa) | is_bdisp(bdisp) ); } ++ //select ++ inline void Assembler::fseleq( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fseleq | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fselne( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fselne | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fsellt( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fsellt | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fselle( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fselle | is_fa(fa) | is_fb(fb) | 
is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fselgt( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fselgt | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::fselge( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_fselge | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ //mov ++ inline void Assembler::fcpys( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_fcpys | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fmovd( FloatRegister fd, FloatRegister fs ) ++ { emit_sw2_long( op_fcpys | is_fa(fs) | is_fb(fs) | is_fc(fd) ); } ++ inline void Assembler::fmovs( FloatRegister fd, FloatRegister fs ) ++ { emit_sw2_long( op_fcpys | is_fa(fs) | is_fb(fs) | is_fc(fd) ); } ++ inline void Assembler::fcpyse( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_fcpyse | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcpysn( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_fcpysn | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::ifmovs( FloatRegister fc, Register ra ) ++ { emit_sw2_long( op_ifmovs | is_ra(ra) | is_fc(fc) ); } ++ inline void Assembler::ifmovd( FloatRegister fc, Register ra ) ++ { emit_sw2_long( op_ifmovd | is_ra(ra) | is_fc(fc) ); } ++ //cmov ++ inline void Assembler::cmovdl( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdl | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdl_g( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdl_g | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdl_p( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdl_p | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdl_z( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdl_z | is_fb(fb) | is_rc(rc) ); } ++ inline 
void Assembler::cmovdl_n( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdl_n | is_fb(fb) | is_rc(rc) ); } ++ ++ inline void Assembler::cmovdlu( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdlu | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdlu_g( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdlu_g | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdlu_p( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdlu_p | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdlu_z( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdlu_z | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdlu_n( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdlu_n | is_fb(fb) | is_rc(rc) ); } ++ ++ inline void Assembler::cmovdw( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdw | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdw_g( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdw_g | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdw_p( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdw_p | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdw_z( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdw_z | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdw_n( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdw_n | is_fb(fb) | is_rc(rc) ); } ++ ++ inline void Assembler::cmovdwu( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdwu | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdwu_g( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdwu_g | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdwu_p( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdwu_p | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdwu_z( 
Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdwu_z | is_fb(fb) | is_rc(rc) ); } ++ inline void Assembler::cmovdwu_n( Register rc, FloatRegister fb ) ++ {sw4_only(); emit_sw2_long( op_cmovdwu_n | is_fb(fb) | is_rc(rc) ); } ++ ++ inline void Assembler::cmovls( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovls | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovld( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovld | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovuls( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovuls | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovuld( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovuld | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovws( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovws | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovwd( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovwd | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovuws( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovuws | is_rb(rb) | is_fc(fc) ); } ++ inline void Assembler::cmovuwd( FloatRegister fc, Register rb ) ++ {sw4_only(); emit_sw2_long( op_cmovuwd | is_rb(rb) | is_fc(fc) ); } ++ //fcvt ++ inline void Assembler::fcvtsd( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtsd | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtds( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtds | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtdl_g( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtdl_g | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtdl_p( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtdl_p | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtdl_z( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtdl_z | 
is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtdl_n( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtdl_n | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtdl( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtdl | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtwl( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtwl | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtlw( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtlw | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtls( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtls | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fcvtld( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_fcvtld | is_fb(fb) | is_fc(fc) ); } ++ //set FPCR ++ inline void Assembler::rfpcr( FloatRegister fa) ++ { emit_sw2_long( op_rfpcr | is_fa(fa) ); } ++ inline void Assembler::wfpcr( FloatRegister fa) ++ { emit_sw2_long( op_wfpcr | is_fa(fa) ); } ++ inline void Assembler::setfpec0() { emit_sw2_long( op_setfpec0 ); } ++ inline void Assembler::setfpec1() { emit_sw2_long( op_setfpec1 ); } ++ inline void Assembler::setfpec2() { emit_sw2_long( op_setfpec2 ); } ++ inline void Assembler::setfpec3() { emit_sw2_long( op_setfpec3 ); } ++ ++ //SIMD ++ inline void Assembler::ldwe( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_ldwe | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::ldse( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_ldse | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::ldde( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_ldde | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::vlds( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_vlds | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::vldd( FloatRegister fa, Register rb, int mdisp ) ++ { 
emit_sw2_long( op_vldd | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::vsts( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_vsts | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::vstd( FloatRegister fa, Register rb, int mdisp ) ++ { emit_sw2_long( op_vstd | is_fa(fa) | is_mdisp(mdisp) | is_rb(rb) ); } ++ inline void Assembler::vaddw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vaddw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vaddw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vaddw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsubw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vsubw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsubw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vsubw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ ++ inline void Assembler::vcmpgew( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcmpgew | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpgew( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vcmpgew_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vcmpeqw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcmpeqw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpeqw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vcmpeqw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vcmplew( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcmplew | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmplew( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vcmplew_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vcmpltw( 
FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcmpltw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpltw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vcmpltw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vcmpulew( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcmpulew | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpulew( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vcmpulew_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vcmpultw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcmpultw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpultw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vcmpultw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ ++ inline void Assembler::vsllw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vsllw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsllw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vsllw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsrlw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vsrlw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsrlw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vsrlw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsraw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vsraw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsraw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vsraw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vrolw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vrolw | is_fa(fa) | 
is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vrolw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vrolw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::sllow( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_sllow | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::sllow( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_sllow_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::srlow( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_srlow | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::srlow( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_srlow_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ ++ inline void Assembler::vaddl( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vaddl | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vaddl( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vaddl_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsubl( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vsubl | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsubl( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vsubl_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ ++ ++ inline void Assembler::ctpopow( FloatRegister fc, FloatRegister fa ) ++ { emit_sw2_long( op_ctpopow | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::ctlzow( FloatRegister fc, FloatRegister fa ) ++ { emit_sw2_long( op_ctlzow | is_fa(fa) | is_fc(fc) ); } ++ ++ inline void Assembler::vucaddw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vucaddw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vucaddw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vucaddw_l | is_fa(fa) | is_lit(lit) 
| is_fc(fc) ); } ++ inline void Assembler::vucsubw( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vucsubw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vucsubw( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vucsubw_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vucaddh( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vucaddh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vucaddh( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vucaddh_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vucsubh( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vucsubh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vucsubh( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vucsubh_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vucaddb( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vucaddb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vucaddb( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vucaddb_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vucsubb( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vucsubb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vucsubb( FloatRegister fc, FloatRegister fa, int lit ) ++ { emit_sw2_long( op_vucsubb_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ ++ inline void Assembler::vseleqw( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vseleqw | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vseleqw( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { emit_sw2_long( op_vseleqw_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } 
++ inline void Assembler::vsellbcw( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vsellbcw | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vsellbcw( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { emit_sw2_long( op_vsellbcw_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vselltw( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vselltw | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vselltw( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { emit_sw2_long( op_vselltw_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vsellew( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vsellew | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vsellew( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { emit_sw2_long( op_vsellew_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ ++ inline void Assembler::vadds( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vadds | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vaddd( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vaddd | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vsubs( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vsubs | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vsubd( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vsubd | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vmuls( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vmuls | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vmuld( FloatRegister 
fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vmuld | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vdivs( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vdivs | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vdivd( FloatRegister fd, FloatRegister fs, FloatRegister ft ) ++ { emit_sw2_long( op_vdivd | is_fa(fs) | is_fb(ft) | is_fc(fd) ); } ++ inline void Assembler::vsqrts( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_vsqrts | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsqrtd( FloatRegister fc, FloatRegister fb ) ++ { emit_sw2_long( op_vsqrtd | is_fb(fb) | is_fc(fc) ); } ++ ++ inline void Assembler::vfcmpeq( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vfcmpeq | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcmple( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vfcmple | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcmplt( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vfcmplt | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcmpun( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vfcmpun | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcpys( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcpys | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfmov( FloatRegister fc, FloatRegister fa ) ++ { emit_sw2_long( op_vcpys | is_fa(fa) | is_fb(fa) | is_fc(fc) ); } ++ inline void Assembler::vcpyse( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcpyse | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcpysn( FloatRegister fc, FloatRegister fa, FloatRegister fb ) ++ { emit_sw2_long( op_vcpysn | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ ++ inline void Assembler::vmas( 
FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vmas | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vmad( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vmad | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vmss( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vmss | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vmsd( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vmsd | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vnmas( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vnmas | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vnmad( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vnmad | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vnmss( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vnmss | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vnmsd( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vnmsd | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ ++ inline void Assembler::vfseleq( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vfseleq | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vfsellt( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vfsellt | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vfselle( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vfselle | is_fa(fa) | is_fb(fb) | 
is_f3(f3) | is_fc(fc) ); } ++ ++ inline void Assembler::vinsw( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { emit_sw2_long( op_vinsw_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vinsf( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { emit_sw2_long( op_vinsf_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vextw( FloatRegister fa, int fmalit, FloatRegister fc) ++ { emit_sw2_long( op_vextw_l | is_fa(fa) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vextf( FloatRegister fa, int fmalit, FloatRegister fc) ++ { emit_sw2_long( op_vextf_l | is_fa(fa) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vcpyw( FloatRegister fa, FloatRegister fc) ++ { emit_sw2_long( op_vcpyw | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vcpyf( FloatRegister fa, FloatRegister fc) ++ { emit_sw2_long( op_vcpyf | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vconw( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vconw | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vshfw( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vshfw | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vcons( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vcons | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vcond( FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vcond | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vldw_u( FloatRegister fa, Register rb, int atmdisp )//load&store ++ { emit_sw2_long( op_vldw_u | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vstw_u( FloatRegister fa, Register rb, int atmdisp ) ++ { 
emit_sw2_long( op_vstw_u | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vlds_u( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vlds_u | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vsts_u( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vsts_u | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vldd_u( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vldd_u | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vstd_u( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vstd_u | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vstw_ul( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vstw_ul | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vstw_uh( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vstw_uh | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vsts_ul( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vsts_ul | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vsts_uh( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vsts_uh | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vstd_ul( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vstd_ul | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::vstd_uh( FloatRegister fa, Register rb, int atmdisp ) ++ { emit_sw2_long( op_vstd_uh | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ ++ inline void Assembler::vlog( int vlog ,FloatRegister fa,FloatRegister fb,FloatRegister f3, FloatRegister fc ) ++ { emit_sw2_long( op_vlog | is_vlog_h(vlog) | is_vlog_l(vlog) | is_fa(fa) | is_fb(fb) | is_f3(f3) | is_fc(fc) ); } ++ inline void Assembler::vbisw( FloatRegister fc , FloatRegister fa , 
FloatRegister fb ) ++ { emit_sw2_long( op_vbisw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vxorw( FloatRegister fc , FloatRegister fa , FloatRegister fb ) ++ { emit_sw2_long( op_vxorw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vandw( FloatRegister fc , FloatRegister fa , FloatRegister fb ) ++ { emit_sw2_long( op_vandw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::veqvw( FloatRegister fc , FloatRegister fa , FloatRegister fb ) ++ { emit_sw2_long( op_veqvw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vornotw( FloatRegister fc , FloatRegister fa , FloatRegister fb ) ++ { emit_sw2_long( op_vornotw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vbicw( FloatRegister fc , FloatRegister fa , FloatRegister fb ) ++ { emit_sw2_long( op_vbicw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ ++ ++ // SW8A instructions ++ inline void Assembler::imemb( void ) ++ { sw3_only(); emit_sw2_long( op_imemb); } ++ inline void Assembler::wmemb( void ) ++ { sw4_only(); emit_sw2_long( op_wmemb); } ++ inline void Assembler::csrws( Register ra, int rpiindex ) ++ { sw4_only(); emit_sw2_long( op_csrws | is_ra(ra) | is_rpiindex(rpiindex) ); } ++ inline void Assembler::csrwc( Register ra, int rpiindex ) ++ { sw4_only(); emit_sw2_long( op_csrwc | is_ra(ra) | is_rpiindex(rpiindex) ); } ++ inline void Assembler::divw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_divw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::udivw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_udivw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::remw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_remw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::uremw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_uremw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ 
inline void Assembler::divl( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_divl | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::udivl( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_udivl | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::reml( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_reml | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::ureml( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_ureml | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ ++ inline void Assembler::addpi( int apint, Register rc ) ++ { sw4_only(); emit_sw2_long( op_addpi | is_apint(apint) | is_rc(rc) ); } ++ inline void Assembler::addpis( int apint, Register rc ) ++ { sw4_only(); emit_sw2_long( op_addpis | is_apint(apint) | is_rc(rc) ); } ++ inline void Assembler::sbt( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_sbt | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::sbt( Register ra, int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_sbt_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ inline void Assembler::cbt( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_cbt | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::cbt( Register ra, int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_cbt_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ ++ inline void Assembler::roll( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_roll | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::roll( Register ra, int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_roll_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ inline void Assembler::sllw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_sllw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::sllw( Register ra, 
int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_sllw_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ inline void Assembler::srlw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_srlw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::srlw( Register ra, int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_srlw_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ inline void Assembler::sraw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_sraw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::sraw( Register ra, int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_sraw_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ inline void Assembler::rolw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_rolw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::rolw( Register ra, int lit, Register rc ) ++ { sw4_only(); emit_sw2_long( op_rolw_l | is_ra(ra) | is_lit(lit) | is_rc(rc) ); } ++ ++ inline void Assembler::crc32b( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32b | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::crc32h( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32h | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::crc32w( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32w | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::crc32l( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32l | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::crc32cb( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32cb | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::crc32ch( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32ch | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void 
Assembler::crc32cw( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32cw | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ inline void Assembler::crc32cl( Register rd, Register rs, Register rt ) ++ { sw4_only(); emit_sw2_long( op_crc32cl | is_ra(rs) | is_rb(rt) | is_rc(rd) ); } ++ ++ inline void Assembler::revbh( Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_revbh | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::revbw( Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_revbw | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::revbl( Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_revbl | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::casw( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_casw | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::casl( Register ra, Register rb, Register rc ) ++ { sw4_only(); emit_sw2_long( op_casl | is_ra(ra) | is_rb(rb) | is_rc(rc) ); } ++ inline void Assembler::frecs( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frecs | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::frecd( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frecd | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::fris( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_fris | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fris_g( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_fris_g | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fris_p( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_fris_p | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fris_z( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_fris_z | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::fris_n( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_fris_n | is_fb(fb) | is_fc(fc) ); } ++ inline 
void Assembler::frid( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frid | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::frid_g( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frid_g | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::frid_p( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frid_p | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::frid_z( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frid_z | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::frid_n( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_frid_n | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsllb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsllb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsllb( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsllb_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsrlb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrlb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsrlb( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrlb_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsrab( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrab | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsrab( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrab_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vrolb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vrolb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vrolb( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vrolb_l | 
is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsllh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsllh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsllh( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsllh_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsrlh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrlh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsrlh( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrlh_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsrah( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrah | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsrah( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrah_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vrolh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vrolh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vrolh( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vrolh_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ ++ inline void Assembler::vslll( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vslll | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vslll( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vslll_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsrll( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsrll | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsrll( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( 
op_vsrll_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsral( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsral | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsral( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsral_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vroll( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vroll | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vroll( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vroll_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vmaxb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmaxb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vminb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vminb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::sraow( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_sraow | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::sraow( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long(op_sraow_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsumw( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsumw | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vsuml( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsuml | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vcmpueqb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vcmpueqb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpueqb( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vcmpueqb_l | is_fa(fa) | 
is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vcmpugtb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vcmpugtb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcmpugtb( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vcmpugtb_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vmaxh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmaxh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vminh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vminh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vmaxw( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmaxw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vminw( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vminw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vmaxl( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmaxl | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vminl( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vminl | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsm3msw( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsm3msw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsm4key( FloatRegister fa, int lit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsm4key_l | is_fa(fa) | is_lit(lit) | is_fc(fc) ); } ++ inline void Assembler::vsm4r( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsm4r | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vbinvw( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); 
emit_sw2_long( op_vbinvw | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vumaxb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vumaxb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vuminb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vuminb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vumaxh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vumaxh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vuminh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vuminh | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vumaxw( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vumaxw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vuminw( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vuminw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vumaxl( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vumaxl | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vuminl( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vuminl | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vsums( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsums | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vsumd( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsumd | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vfcvtsd( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtsd | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtds( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtds | is_fb(fb) | is_fc(fc) ); } 
++ inline void Assembler::vfcvtls( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtls | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtld( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtld | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtdl( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtdl | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtdl_g( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtdl_g | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtdl_p( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtdl_p | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtdl_z( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtdl_z | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfcvtdl_n( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtdl_n | is_fb(fb) | is_fc(fc) ); } ++ ++ inline void Assembler::vfris( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfris | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfris_g( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfris_g | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfris_p( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfris_p | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfris_z( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfris_z | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfris_n( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfris_n | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfrid( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrid | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfrid_g( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrid_g | 
is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfrid_p( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrid_p | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfrid_z( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrid_z | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfrid_n( FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrid_n | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vfrecs( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrecs | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vfrecd( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfrecd | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vmaxs( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmaxs | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vmins( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmins | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vmaxd( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmaxd | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vmind( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vmind | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ ++ inline void Assembler::vinsb( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vinsb_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vinsh( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vinsh_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vinsectlh( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vinsectlh | is_fa(fa) | is_fb(fb) | 
is_fc(fc) ); } ++ inline void Assembler::vinsectlw( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vinsectlw | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vinsectll( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vinsectll | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vinsectlb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vinsectlb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vshfq( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vshfq_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vshfqb( FloatRegister fa, FloatRegister fb, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vshfqb | is_fa(fa) | is_fb(fb) | is_fc(fc) ); } ++ inline void Assembler::vcpyb( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vcpyb | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vcpyh( FloatRegister fa, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vcpyh | is_fa(fa) | is_fc(fc) ); } ++ inline void Assembler::vsm3r( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vsm3r_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vfcvtsh( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvtsh_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::vfcvths( FloatRegister fa,FloatRegister fb, int fmalit, FloatRegister fc ) ++ { sw4_only(); emit_sw2_long( op_vfcvths_l | is_fa(fa) | is_fb(fb) | is_fmalit(fmalit) | is_fc(fc) ); } ++ inline void Assembler::lbr( int palfn ) ++ { sw4_only(); emit_sw2_long( op_lbr | is_palfn(palfn) ); } ++ ++ inline void Assembler::ldbu_a( Register ra, int atmdisp, Register 
rb ) ++ { sw4_only(); emit_sw2_long( op_ldbu_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::ldhu_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_ldhu_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::ldw_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_ldw_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::ldl_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_ldl_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::stb_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_stb_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::sth_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_sth_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::stw_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_stw_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::stl_a( Register ra, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_stl_a | is_ra(ra) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::flds_a( FloatRegister fa, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_flds_a | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::fldd_a( FloatRegister fa, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_fldd_a | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::fsts_a( FloatRegister fa, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_fsts_a | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::fstd_a( FloatRegister fa, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_fstd_a | is_fa(fa) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::dpfhr( int th, int atmdisp, 
Register rb ) ++ { sw4_only(); emit_sw2_long( op_dpfhr | is_th(th) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ inline void Assembler::dpfhw( int th, int atmdisp, Register rb ) ++ { sw4_only(); emit_sw2_long( op_dpfhw | is_th(th) | is_atmdisp(atmdisp) | is_rb(rb) ); } ++ ++ ++ ++ ++ ++ ++ ++ ++#endif // CPU_SW64_VM_ASSEMBLER_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.cpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,47 @@ ++/* ++ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/assembler.hpp" ++#include "interpreter/bytecodeInterpreter.hpp" ++#include "interpreter/bytecodeInterpreter.inline.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "oops/methodData.hpp" ++#include "oops/method.hpp" ++#include "oops/oop.inline.hpp" ++#include "prims/jvmtiExport.hpp" ++#include "prims/jvmtiThreadState.hpp" ++#include "runtime/deoptimization.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/synchronizer.hpp" ++#include "runtime/vframeArray.hpp" ++#include "utilities/debug.hpp" ++#include "interp_masm_sw64.hpp" ++ ++#ifdef CC_INTERP ++ ++#endif // CC_INTERP (all) +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,110 @@ ++/* ++ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_BYTECODEINTERPRETER_SW64_HPP ++#define CPU_SW64_VM_BYTECODEINTERPRETER_SW64_HPP ++ ++// Platform specific for C++ based Interpreter ++#define LOTS_OF_REGS /* Lets interpreter use plenty of registers */ ++ ++private: ++ ++ // save the bottom of the stack after frame manager setup. For ease of restoration after return ++ // from recursive interpreter call ++ intptr_t* _frame_bottom; /* saved bottom of frame manager frame */ ++ intptr_t* _last_Java_pc; /* pc to return to in frame manager */ ++ intptr_t* _sender_sp; /* sender's sp before stack (locals) extension */ ++ interpreterState _self_link; /* Previous interpreter state */ /* sometimes points to self??? */ ++ double _native_fresult; /* save result of native calls that might return floats */ ++ intptr_t _native_lresult; /* save result of native calls that might return handle/longs */ ++ ++public: ++ ++ static void pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp); ++ inline intptr_t* sender_sp() { ++ return _sender_sp; ++ } ++ ++ ++#define SET_LAST_JAVA_FRAME() ++ ++#define RESET_LAST_JAVA_FRAME() THREAD->frame_anchor()->set_flags(0); ++ ++/* ++ * Macros for accessing the stack. 
++ */ ++#undef STACK_INT ++#undef STACK_FLOAT ++#undef STACK_ADDR ++#undef STACK_OBJECT ++#undef STACK_DOUBLE ++#undef STACK_LONG ++ ++// JavaStack Implementation ++ ++#define GET_STACK_SLOT(offset) (*((intptr_t*) &topOfStack[-(offset)])) ++#define STACK_SLOT(offset) ((address) &topOfStack[-(offset)]) ++#define STACK_ADDR(offset) (*((address *) &topOfStack[-(offset)])) ++#define STACK_INT(offset) (*((jint*) &topOfStack[-(offset)])) ++#define STACK_FLOAT(offset) (*((jfloat *) &topOfStack[-(offset)])) ++#define STACK_OBJECT(offset) (*((oop *) &topOfStack [-(offset)])) ++#define STACK_DOUBLE(offset) (((VMJavaVal64*) &topOfStack[-(offset)])->d) ++#define STACK_LONG(offset) (((VMJavaVal64 *) &topOfStack[-(offset)])->l) ++ ++#define SET_STACK_SLOT(value, offset) (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value)) ++#define SET_STACK_ADDR(value, offset) (*((address *)&topOfStack[-(offset)]) = (value)) ++#define SET_STACK_INT(value, offset) (*((jint *)&topOfStack[-(offset)]) = (value)) ++#define SET_STACK_FLOAT(value, offset) (*((jfloat *)&topOfStack[-(offset)]) = (value)) ++#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value)) ++#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value)) ++#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = \ ++ ((VMJavaVal64*)(addr))->d) ++#define SET_STACK_LONG(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value)) ++#define SET_STACK_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->l = \ ++ ((VMJavaVal64*)(addr))->l) ++// JavaLocals implementation ++ ++#define LOCALS_SLOT(offset) ((intptr_t*)&locals[-(offset)]) ++#define LOCALS_ADDR(offset) ((address)locals[-(offset)]) ++#define LOCALS_INT(offset) (*((jint*)&locals[-(offset)])) ++#define LOCALS_FLOAT(offset) (*((jfloat*)&locals[-(offset)])) ++#define LOCALS_OBJECT(offset) (cast_to_oop(locals[-(offset)])) ++#define LOCALS_DOUBLE(offset) 
(((VMJavaVal64*)&locals[-((offset) + 1)])->d) ++#define LOCALS_LONG(offset) (((VMJavaVal64*)&locals[-((offset) + 1)])->l) ++#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)])) ++#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)])) ++ ++#define SET_LOCALS_SLOT(value, offset) (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value)) ++#define SET_LOCALS_ADDR(value, offset) (*((address *)&locals[-(offset)]) = (value)) ++#define SET_LOCALS_INT(value, offset) (*((jint *)&locals[-(offset)]) = (value)) ++#define SET_LOCALS_FLOAT(value, offset) (*((jfloat *)&locals[-(offset)]) = (value)) ++#define SET_LOCALS_OBJECT(value, offset) (*((oop *)&locals[-(offset)]) = (value)) ++#define SET_LOCALS_DOUBLE(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value)) ++#define SET_LOCALS_LONG(value, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value)) ++#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \ ++ ((VMJavaVal64*)(addr))->d) ++#define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \ ++ ((VMJavaVal64*)(addr))->l) ++ ++#endif // CPU_SW64_VM_BYTECODEINTERPRETER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.inline.hpp afu8u/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.inline.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/bytecodeInterpreter_sw64.inline.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,282 @@ ++/* ++ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_BYTECODEINTERPRETER_SW64_INLINE_HPP ++#define CPU_SW64_VM_BYTECODEINTERPRETER_SW64_INLINE_HPP ++ ++// Inline interpreter functions for SW64 ++ ++inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; } ++inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; } ++inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; } ++inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; } ++inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return fmod(op1, op2); } ++ ++inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; } ++ ++inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) { ++ return ( op1 < op2 ? -1 : ++ op1 > op2 ? 1 : ++ op1 == op2 ? 0 : ++ (direction == -1 || direction == 1) ? 
 direction : 0); ++ ++} ++ ++inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) { ++ // copy 64 bits as two 32-bit words; avoids unaligned 64-bit accesses ++ to[0] = from[0]; to[1] = from[1]; ++} ++ ++// The long operations depend on compiler support for "long long" on sw64 ++ ++inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) { ++ return op1 + op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) { ++ return op1 & op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) { ++ return op1 / op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) { ++ return op1 * op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) { ++ return op1 | op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) { ++ return op1 - op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) { ++ return op1 ^ op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) { ++ return op1 % op2; ++} ++ ++inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) { ++ return ((unsigned long long) op1) >> (op2 & 0x3F); ++} ++ ++inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) { ++ return op1 >> (op2 & 0x3F); ++} ++ ++inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) { ++ return op1 << (op2 & 0x3F); ++} ++ ++inline jlong BytecodeInterpreter::VMlongNeg(jlong op) { ++ return -op; ++} ++ ++inline jlong BytecodeInterpreter::VMlongNot(jlong op) { ++ return ~op; ++} ++ ++inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) { ++ return (op <= 0); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongGez(jlong op) { ++ return (op >= 0); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) { ++ return (op == 0); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) { ++ return (op1 == op2); ++} ++ ++inline int32_t 
BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) { ++ return (op1 != op2); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) { ++ return (op1 >= op2); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) { ++ return (op1 <= op2); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) { ++ return (op1 < op2); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) { ++ return (op1 > op2); ++} ++ ++inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) { ++ return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0); ++} ++ ++// Long conversions ++ ++inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) { ++ return (jdouble) val; ++} ++ ++inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) { ++ return (jfloat) val; ++} ++ ++inline jint BytecodeInterpreter::VMlong2Int(jlong val) { ++ return (jint) val; ++} ++ ++// Double Arithmetic ++ ++inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) { ++ return op1 + op2; ++} ++ ++inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) { ++ return op1 / op2; ++} ++ ++inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) { ++ return op1 * op2; ++} ++ ++inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) { ++ return -op; ++} ++ ++inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) { ++ return fmod(op1, op2); ++} ++ ++inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) { ++ return op1 - op2; ++} ++ ++inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) { ++ return ( op1 < op2 ? -1 : ++ op1 > op2 ? 1 : ++ op1 == op2 ? 0 : ++ (direction == -1 || direction == 1) ? 
direction : 0); ++} ++ ++// Double Conversions ++ ++inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) { ++ return (jfloat) val; ++} ++ ++// Float Conversions ++ ++inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) { ++ return (jdouble) op; ++} ++ ++// Integer Arithmetic ++ ++inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) { ++ return op1 + op2; ++} ++ ++inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) { ++ return op1 & op2; ++} ++ ++inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) { ++ // it's possible we could catch this special case implicitly ++ if ((juint)op1 == 0x80000000 && op2 == -1) return op1; ++ else return op1 / op2; ++} ++ ++inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) { ++ return op1 * op2; ++} ++ ++inline jint BytecodeInterpreter::VMintNeg(jint op) { ++ return -op; ++} ++ ++inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) { ++ return op1 | op2; ++} ++ ++inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) { ++ // it's possible we could catch this special case implicitly ++ if ((juint)op1 == 0x80000000 && op2 == -1) return 0; ++ else return op1 % op2; ++} ++ ++inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) { ++ return op1 << op2; ++} ++ ++inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) { ++ return op1 >> (op2 & 0x1f); // QQ op2 & 0x1f?? ++} ++ ++inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) { ++ return op1 - op2; ++} ++ ++inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) { ++ return ((juint) op1) >> (op2 & 0x1f); // QQ op2 & 0x1f?? 
++} ++ ++inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) { ++ return op1 ^ op2; ++} ++ ++inline jdouble BytecodeInterpreter::VMint2Double(jint val) { ++ return (jdouble) val; ++} ++ ++inline jfloat BytecodeInterpreter::VMint2Float(jint val) { ++ return (jfloat) val; ++} ++ ++inline jlong BytecodeInterpreter::VMint2Long(jint val) { ++ return (jlong) val; ++} ++ ++inline jchar BytecodeInterpreter::VMint2Char(jint val) { ++ return (jchar) val; ++} ++ ++inline jshort BytecodeInterpreter::VMint2Short(jint val) { ++ return (jshort) val; ++} ++ ++inline jbyte BytecodeInterpreter::VMint2Byte(jint val) { ++ return (jbyte) val; ++} ++ ++#endif // CPU_SW64_VM_BYTECODEINTERPRETER_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/bytecodes_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/bytecodes_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/bytecodes_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/bytecodes_sw64.cpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,43 @@ ++/* ++ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "interpreter/bytecodes.hpp" ++ ++ ++void Bytecodes::pd_initialize() { ++ // sw64-specific: SafePatch mode uses longer native call/jump sequences ++ if (SafePatch) { ++ NativeCall::instruction_size = 6 * BytesPerInstWord; ++ NativeCall::return_address_offset = 6 * BytesPerInstWord; ++ NativeCall::return_address_offset_long = 6 * BytesPerInstWord; ++ NativeGeneralJump::instruction_size = 6 * BytesPerInstWord; ++ } ++} ++ ++ ++Bytecodes::Code Bytecodes::pd_base_code_for(Code code) { ++ // No sw64 specific bytecodes ++ return code; ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/bytecodes_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/bytecodes_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/bytecodes_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/bytecodes_sw64.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_BYTECODES_SW64_HPP ++#define CPU_SW64_VM_BYTECODES_SW64_HPP ++ ++// No Sw64 specific bytecodes ++ ++#endif // CPU_SW64_VM_BYTECODES_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/bytes_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/bytes_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/bytes_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/bytes_sw64.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,265 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_BYTES_SW64_HPP ++#define CPU_SW64_VM_BYTES_SW64_HPP ++ ++#include "memory/allocation.hpp" ++ ++class Bytes: AllStatic { ++ private: ++ // Helper function for swap_u8, not used in SW. 
++ static inline u8 swap_u8_base(u4 x, u4 y) {} // compiler-dependent implementation ++ ++ public: ++ // Returns true if the byte ordering used by Java is different from the native byte ordering ++ // of the underlying machine. For example, this is true for Intel x86, but false for Solaris ++ // on Sparc. ++ static inline bool is_Java_byte_ordering_different(){ return true; } ++ ++ ++ // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering ++ // (no special code is needed since x86 CPUs can access unaligned data) ++ static inline u2 get_native_u2(address p) { ++ if ((intptr_t)p & 0x1) { ++ return ((u2)p[1] << 8) | (u2)p[0]; ++ } else { ++ return *(u2*)p; ++ } ++ } ++ ++ static inline u4 get_native_u4(address p) { ++// return *(u4*)p; ++ switch (intptr_t(p) & 3) { ++ case 0: return *(u4*)p; ++ ++ case 2: return ( u4( ((u2*)p)[1] ) << 16 ) ++ | ( u4( ((u2*)p)[0] ) ); ++ ++ default: return ( u4(p[3]) << 24 ) ++ | ( u4(p[2]) << 16 ) ++ | ( u4(p[1]) << 8 ) ++ | u4(p[0]); ++ } ++ } ++ ++ static inline u8 get_native_u8(address p) { ++// return *(u8*)p; ++ switch (intptr_t(p) & 7) { ++ case 0: return *(u8*)p; ++ ++ case 4: return ( u8( ((u4*)p)[1] ) << 32 ) ++ | ( u8( ((u4*)p)[0] ) ); ++ ++ case 2: return ( u8( ((u2*)p)[3] ) << 48 ) ++ | ( u8( ((u2*)p)[2] ) << 32 ) ++ | ( u8( ((u2*)p)[1] ) << 16 ) ++ | ( u8( ((u2*)p)[0] ) ); ++ ++ default: return ( u8(p[7]) << 56 ) ++ | ( u8(p[6]) << 48 ) ++ | ( u8(p[5]) << 40 ) ++ | ( u8(p[4]) << 32 ) ++ | ( u8(p[3]) << 24 ) ++ | ( u8(p[2]) << 16 ) ++ | ( u8(p[1]) << 8 ) ++ | u8(p[0]); ++ } ++ } ++ ++ static inline void put_native_u2(address p, u2 x) { ++ if((intptr_t)p & 0x1) { ++ p[0] = (u_char)(x); ++ p[1] = (u_char)(x>>8); ++ } else { ++ *(u2*)p = x; ++ } ++ } ++ ++ static inline void put_native_u4(address p, u4 x) { ++ // refer to sparc implementation. 
++ switch ( intptr_t(p) & 3 ) { ++ case 0: *(u4*)p = x; ++ break; ++ ++ case 2: ((u2*)p)[1] = x >> 16; ++ ((u2*)p)[0] = x; ++ break; ++ ++ default: ((u1*)p)[3] = x >> 24; ++ ((u1*)p)[2] = x >> 16; ++ ((u1*)p)[1] = x >> 8; ++ ((u1*)p)[0] = x; ++ break; ++ } ++ } ++ ++ static inline void put_native_u8(address p, u8 x) { ++ // refer to sparc implementation. ++ // Note that sparc is big-endian, while sw64 is little-endian ++ switch ( intptr_t(p) & 7 ) { ++ case 0: *(u8*)p = x; ++ break; ++ ++ case 4: ((u4*)p)[1] = x >> 32; ++ ((u4*)p)[0] = x; ++ break; ++ ++ case 2: ((u2*)p)[3] = x >> 48; ++ ((u2*)p)[2] = x >> 32; ++ ((u2*)p)[1] = x >> 16; ++ ((u2*)p)[0] = x; ++ break; ++ ++ default: ((u1*)p)[7] = x >> 56; ++ ((u1*)p)[6] = x >> 48; ++ ((u1*)p)[5] = x >> 40; ++ ((u1*)p)[4] = x >> 32; ++ ((u1*)p)[3] = x >> 24; ++ ((u1*)p)[2] = x >> 16; ++ ((u1*)p)[1] = x >> 8; ++ ((u1*)p)[0] = x; ++ } ++ } ++ ++ ++ // Efficient reading and writing of unaligned unsigned data in Java ++ // byte ordering (i.e. big-endian ordering). Byte-order reversal is ++ // needed since SW64 CPUs use little-endian format. 
++ static inline u2 get_Java_u2(address p) { ++// return (p[0] << 8) | p[1]; ++ if((intptr_t)p & 0x1) { ++ return (p[0] << 8) | p[1]; ++ } else { ++ return swap_u2(*(u2*)p); ++ } ++ } ++ static inline u4 get_Java_u4(address p) { ++// return swap_u4(get_native_u4(p)); ++ switch (intptr_t(p) & 3) { ++ case 0: return swap_u4(*(u4*)p); ++ ++ case 2: return swap_u4(( u4( ((u2*)p)[1] << 16)) ++ | ( u4( ((u2*)p)[0] ) )); ++ ++ default: return ( u4(p[3]) ) ++ | ( u4(p[2]) << 8 ) ++ | ( u4(p[1]) << 16 ) ++ | ( u4(p[0]) << 24 ); ++ } ++ } ++ static inline u8 get_Java_u8(address p) { ++// return swap_u8(get_native_u8(p)); ++ switch (intptr_t(p) & 7) { ++ case 0: return swap_u8(*(u8*)p); ++ ++ case 4: return swap_u8(( u8( ((u4*)p)[1] ) << 32 ) ++ | ( u8( ((u4*)p)[0] ) )); ++ ++ case 2: return swap_u8(( u8( ((u2*)p)[3] ) << 48 ) ++ | ( u8( ((u2*)p)[2] ) << 32 ) ++ | ( u8( ((u2*)p)[1] ) << 16 ) ++ | ( u8( ((u2*)p)[0] ) )); ++ ++ default: return ( u8(p[7]) ) ++ | ( u8(p[6]) << 8 ) ++ | ( u8(p[5]) << 16 ) ++ | ( u8(p[4]) << 24 ) ++ | ( u8(p[3]) << 32 ) ++ | ( u8(p[2]) << 40 ) ++ | ( u8(p[1]) << 48 ) ++ | ( u8(p[0]) << 56 ); ++ } ++ } ++ ++ static inline void put_Java_u2(address p, u2 x) { ++ if((intptr_t)p & 0x1) { ++ p[0] = (u_char)(x >> 8); ++ p[1] = (u_char)(x); ++ } else { ++ *(u2*)p = swap_u2(x); ++ } ++ } ++ ++ static inline void put_Java_u4(address p, u4 x) { ++// put_native_u4(p, swap_u4(x)); ++ switch ( intptr_t(p) & 3 ) { ++ case 0: *(u4*)p = swap_u4(x); ++ break; ++ ++ case 2: x = swap_u4(x); ++ ((u2*)p)[1] = x >> 16; ++ ((u2*)p)[0] = x; ++ break; ++ ++ default: ((u1*)p)[0] = x >> 24; ++ ((u1*)p)[1] = x >> 16; ++ ((u1*)p)[2] = x >> 8; ++ ((u1*)p)[3] = x; ++ break; ++ } ++ } ++ static inline void put_Java_u8(address p, u8 x) { ++// put_native_u8(p, swap_u8(x)); ++ switch ( intptr_t(p) & 7 ) { ++ case 0: *(u8*)p = swap_u8(x); ++ break; ++ ++ case 4: x = swap_u8(x); ++ ((u4*)p)[1] = x >> 32; ++ ((u4*)p)[0] = x; ++ break; ++ ++ case 2: x = swap_u8(x); ++ ((u2*)p)[3] = x >> 
48; ++ ((u2*)p)[2] = x >> 32; ++ ((u2*)p)[1] = x >> 16; ++ ((u2*)p)[0] = x; ++ break; ++ ++ default: ((u1*)p)[0] = x >> 56; ++ ((u1*)p)[1] = x >> 48; ++ ((u1*)p)[2] = x >> 40; ++ ((u1*)p)[3] = x >> 32; ++ ((u1*)p)[4] = x >> 24; ++ ((u1*)p)[5] = x >> 16; ++ ((u1*)p)[6] = x >> 8; ++ ((u1*)p)[7] = x; ++ } ++ } ++ ++ ++ // Efficient swapping of byte ordering ++ static inline u2 swap_u2(u2 x); // compiler-dependent implementation ++ static inline u4 swap_u4(u4 x); // compiler-dependent implementation ++ static inline u8 swap_u8(u8 x); ++}; ++ ++ ++// The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base] ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "bytes_linux_sw64.inline.hpp" ++#endif ++ ++#endif // CPU_SW64_VM_BYTES_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/c2_globals_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/c2_globals_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/c2_globals_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/c2_globals_sw64.hpp 2025-05-06 10:53:44.903633666 +0800 +@@ -0,0 +1,87 @@ ++/* ++ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_C2_GLOBALS_SW64_HPP ++#define CPU_SW64_VM_C2_GLOBALS_SW64_HPP ++ ++#include "utilities/globalDefinitions.hpp" ++#include "utilities/macros.hpp" ++ ++// Sets the default values for platform dependent flags used by the server compiler. ++// (see c2_globals.hpp). Alpha-sorted. ++define_pd_global(bool, BackgroundCompilation, true); ++define_pd_global(bool, UseTLAB, true); ++define_pd_global(bool, ResizeTLAB, true); ++define_pd_global(bool, CICompileOSR, true); ++define_pd_global(bool, InlineIntrinsics, true); ++define_pd_global(bool, PreferInterpreterNativeStubs, false); ++define_pd_global(bool, ProfileTraps, true); ++define_pd_global(bool, UseOnStackReplacement, true); ++#ifdef CC_INTERP ++define_pd_global(bool, ProfileInterpreter, false); ++#else ++define_pd_global(bool, ProfileInterpreter, true); ++#endif // CC_INTERP ++define_pd_global(bool, TieredCompilation, false); // Disable C1 in server JIT ++define_pd_global(intx, CompileThreshold, 10000); ++define_pd_global(intx, BackEdgeThreshold, 100000); ++ ++define_pd_global(intx, OnStackReplacePercentage, 140); ++define_pd_global(intx, ConditionalMoveLimit, 3); ++define_pd_global(intx, FLOATPRESSURE, 12); //TODO check lsp set 32 caused SPECjvm2008 sunflow failed ++define_pd_global(intx, FreqInlineSize, 325); ++define_pd_global(intx, MinJumpTableSize, 10); ++define_pd_global(intx, INTPRESSURE, 22); //TODO check lsp ++define_pd_global(intx, InteriorEntryAlignment, 16); ++define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K)); ++define_pd_global(intx, LoopUnrollLimit, 60); ++ ++// 
InitialCodeCacheSize derived from specjbb2000 run. ++define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize ++define_pd_global(intx, CodeCacheExpansionSize, 64*K); ++ ++// Ergonomics related flags ++define_pd_global(uint64_t,MaxRAM, 128ULL*G); ++define_pd_global(intx, RegisterCostAreaRatio, 16000); ++ ++// Peephole and CISC spilling both break the graph, and so makes the ++// scheduler sick. ++define_pd_global(bool, OptoPeephole, false); ++define_pd_global(bool, UseCISCSpill, false); ++define_pd_global(bool, OptoScheduling, false); ++define_pd_global(bool, OptoBundling, false); ++ ++define_pd_global(intx, ReservedCodeCacheSize, 240*M); ++define_pd_global(uintx, CodeCacheMinBlockLength, 4); ++define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K /*8*M*/); ++ ++define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86. ++ ++// Heap related flags ++define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M)); ++ ++// Ergonomics related flags ++define_pd_global(bool, NeverActAsServerClassMachine, false); ++ ++#endif // CPU_SW64_VM_C2_GLOBALS_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/c2_init_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/c2_init_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/c2_init_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/c2_init_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,33 @@ ++/* ++ * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "opto/compile.hpp" ++#include "opto/node.hpp" ++ ++// processor dependent initialization for SW64 ++ ++void Compile::pd_compiler2_init() { ++ guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" ); ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/codeBuffer_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/codeBuffer_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/codeBuffer_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/codeBuffer_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,34 @@ ++/* ++ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
#ifndef CPU_SW64_VM_CODEBUFFER_SW64_HPP
#define CPU_SW64_VM_CODEBUFFER_SW64_HPP

private:
  // No platform-specific CodeBuffer state on SW64 — nothing to initialize.
  void pd_initialize() {}

public:
  // Bundle flushing is a no-op: this hook exists for platforms that group
  // instructions into bundles; the empty body shows SW64 does not.
  void flush_bundle(bool start_new_bundle) {}

#endif // CPU_SW64_VM_CODEBUFFER_SW64_HPP
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

// Release the CompiledICHolder* associated with this call site if there is one.
// The holder cannot be freed immediately (another thread may be dispatching
// through it), so it is queued on the InlineCacheBuffer for deferred release.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}

// Returns true if the call at this site currently dispatches through a
// CompiledICHolder (i.e. the inline cache is in the "megamorphic/holder" state).
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}

// ----------------------------------------------------------------------------

#define __ _masm.
// Emit the compiled-to-interpreter stub for a static call: a patchable
// 48-bit constant load of the Method* into S3 followed by a patchable jump.
// Both are emitted in the "unresolved" state (0 / (address)-1) and are fixed
// up later by set_to_interpreted().  Returns NULL if the stub section could
// not be expanded.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return NULL;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  // Rmethod contains methodOop, it should be relocated for GC

  // static stub relocation also tags the methodOop in the code-stream.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;
  __ patchable_jump(call_pc);
  __ align(16);
  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
  return base;
}
#undef __

// Worst-case size of the stub emitted above, rounded to the same 16-byte
// alignment that emit_to_interp_stub() pads to.
int CompiledStaticCall::to_interp_stub_size() {
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}

// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
  return 16;
}

// Retarget this static call at the interpreter entry for 'callee':
// store the Method* into the stub's mov-const, point the stub's jump at
// 'entry', then atomically redirect the call itself at the stub.
// The asserts tolerate exactly one concurrent transition from the clean
// state (0 / -1); any other concurrent value is an MT-unsafe modification.
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
         "b) MT-unsafe modification of inline cache");

  // Update stub.
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call.
  set_destination_mt_safe(stub);
}

// Return the stub to its clean (unresolved) state: Method* slot = 0,
// jump destination = -1.  Caller must hold CompiledIC_lock or be at a
// safepoint, as the leading assert enforces.
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub.
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

// Sanity-check the call instruction, its alignment (on MP systems), the
// presence of the stub, and that the IC is in one of its three legal states.
void CompiledStaticCall::verify() {
  // Verify call.
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub.
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());


  // Verify state.
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif // !PRODUCT
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_COPY_SW64_HPP ++#define CPU_SW64_VM_COPY_SW64_HPP ++ ++// Inline functions for memory copy and fill. ++// ++// // Contains inline asm implementations ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "copy_linux_sw64.inline.hpp" ++#endif ++#ifdef TARGET_OS_ARCH_solaris_sw64 ++# include "copy_solaris_sw64.inline.hpp" ++#endif ++#ifdef TARGET_OS_ARCH_windows_sw64 ++# include "copy_windows_sw64.inline.hpp" ++#endif ++#ifdef TARGET_OS_ARCH_bsd_sw64 ++# include "copy_bsd_sw64.inline.hpp" ++#endif ++// Inline functions for memory copy and fill. 
++ ++// Contains inline asm implementations ++ ++static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { ++ julong* to = (julong*) tohw; ++ julong v = ((julong) value << 32) | value; ++ while (count-- > 0) { ++ *to++ = v; ++ } ++} ++ ++static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) { ++ pd_fill_to_words(tohw, count, value); ++} ++ ++static void pd_fill_to_bytes(void* to, size_t count, jubyte value) { ++ (void)memset(to, value, count); ++} ++ ++static void pd_zero_to_words(HeapWord* tohw, size_t count) { ++ pd_fill_to_words(tohw, count, 0); ++} ++ ++static void pd_zero_to_bytes(void* to, size_t count) { ++ (void)memset(to, 0, count); ++} ++ ++#endif //CPU_SW64_VM_COPY_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/cppInterpreterGenerator_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/cppInterpreterGenerator_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/cppInterpreterGenerator_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/cppInterpreterGenerator_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,37 @@ ++/* ++ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
#ifndef CPU_SW64_VM_CPPINTERPRETERGENERATOR_SW64_HPP
#define CPU_SW64_VM_CPPINTERPRETERGENERATOR_SW64_HPP

 protected:
  // Code-generator hooks used only by the C++ (bytecode) interpreter build
  // (CC_INTERP); the template interpreter never calls these.
  void generate_more_monitors();
  void generate_deopt_handling();
  address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
  // NOTE(review): this declaration takes a sender_sp register, but the
  // definition in cppInterpreter_sw64.cpp omits it (state, prev_state,
  // native) — the two signatures disagree; confirm which one is linked.
  void generate_compute_interpreter_state(const Register state,
                                          const Register prev_state,
                                          const Register sender_sp,
                                          bool native); // C++ interpreter only

#endif // CPU_SW64_VM_CPPINTERPRETERGENERATOR_SW64_HPP
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "interpreter/bytecodeHistogram.hpp" ++#include "interpreter/cppInterpreter.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterGenerator.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "oops/arrayOop.hpp" ++#include "oops/methodData.hpp" ++#include "oops/method.hpp" ++#include "oops/oop.inline.hpp" ++#include "prims/jvmtiExport.hpp" ++#include "prims/jvmtiThreadState.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/deoptimization.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/interfaceSupport.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/synchronizer.hpp" ++#include "runtime/timer.hpp" ++#include "runtime/vframeArray.hpp" ++#include "utilities/debug.hpp" ++#ifdef SHARK ++#include "shark/shark_globals.hpp" ++#endif ++ ++#ifdef CC_INTERP ++ ++// Routine exists to make tracebacks look decent in debugger ++// while "shadow" interpreter frames are on stack. It is also ++// used to distinguish interpreter frames. 
// C++ interpreter support for SW64.  The entire file is compiled only under
// CC_INTERP; this port uses the template interpreter, so every generator
// below is an Unimplemented() stub.
// NOTE(review): several identifiers here are SPARC leftovers (G1_scratch,
// Lstate) and the layout functions still use the pre-metaspace methodOop
// type while the file includes oops/method.hpp — this looks like an
// unported SPARC copy kept only to satisfy the CC_INTERP build; confirm
// CC_INTERP is never enabled for SW64.
// NOTE(review): non-void stubs fall off the end after Unimplemented();
// Unimplemented() aborts the VM, but some compilers warn about the missing
// return — confirm the build flags tolerate this.

// Routine exists to make tracebacks look decent in the debugger; it is also
// used to distinguish interpreter frames.  Must never actually run.
extern "C" void RecursiveInterpreterActivation(interpreterState istate) {
  ShouldNotReachHere();
}

bool CppInterpreter::contains(address pc) {
  Unimplemented();
}

#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->

Label frame_manager_entry;
Label fast_accessor_slow_entry_path;  // fast accessor methods need to be able to jmp to unsynchronized
                                      // c++ interpreter entry point this holds that entry point label.

static address unctrap_frame_manager_entry  = NULL;

// Deopt re-entry points, one per tos state; all left NULL by the stubs below.
static address interpreter_return_address  = NULL;
static address deopt_frame_manager_return_atos  = NULL;
static address deopt_frame_manager_return_btos  = NULL;
static address deopt_frame_manager_return_itos  = NULL;
static address deopt_frame_manager_return_ltos  = NULL;
static address deopt_frame_manager_return_ftos  = NULL;
static address deopt_frame_manager_return_dtos  = NULL;
static address deopt_frame_manager_return_vtos  = NULL;

const Register prevState = G1_scratch;  // NOTE(review): SPARC register name

void InterpreterGenerator::save_native_result(void) {
  Unimplemented();
}

void InterpreterGenerator::restore_native_result(void) {
  Unimplemented();
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
  Unimplemented();
}

address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
  Unimplemented();
}

address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
  Unimplemented();
}

address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
  Unimplemented();
}

address CppInterpreter::return_entry(TosState state, int length) {
  Unimplemented();
}

address CppInterpreter::deopt_entry(TosState state, int length) {
  Unimplemented();
}

void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Unimplemented();
}

address InterpreterGenerator::generate_empty_entry(void) {
  Unimplemented();
}

address InterpreterGenerator::generate_accessor_entry(void) {
  Unimplemented();
}

address InterpreterGenerator::generate_native_entry(bool synchronized) {
  Unimplemented();
}

void CppInterpreterGenerator::generate_compute_interpreter_state(const Register state,
                                                                 const Register prev_state,
                                                                 bool native) {
  Unimplemented();
}

void InterpreterGenerator::lock_method(void) {
  Unimplemented();
}

void CppInterpreterGenerator::generate_deopt_handling() {
  Unimplemented();
}

void CppInterpreterGenerator::generate_more_monitors() {
  Unimplemented();
}


static address interpreter_frame_manager = NULL;

void CppInterpreterGenerator::adjust_callers_stack(Register args) {
  Unimplemented();
}

address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  Unimplemented();
}

InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : CppInterpreterGenerator(code) {
  Unimplemented();
}


static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
  Unimplemented();
}

int AbstractInterpreter::size_top_interpreter_activation(methodOop method) {
  Unimplemented();
}

void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
                                                  frame* caller,
                                                  frame* current,
                                                  methodOop method,
                                                  intptr_t* locals,
                                                  intptr_t* stack,
                                                  intptr_t* stack_base,
                                                  intptr_t* monitor_base,
                                                  intptr_t* frame_bottom,
                                                  bool is_top_frame
                                                  )
{
  Unimplemented();
}

void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp) {
  Unimplemented();
}


int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount, // Number of slots on java expression stack in use
                                           int popframe_extra_args,
                                           int moncount, // Number of active monitors
                                           int callee_param_size,
                                           int callee_locals_size,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  Unimplemented();
}

#endif // CC_INTERP
#ifndef CPU_SW64_VM_CPPINTERPRETER_SW64_HPP
#define CPU_SW64_VM_CPPINTERPRETER_SW64_HPP
  // Size of interpreter code. Increase if too small. Interpreter will
  // fail with a guarantee ("not enough space for interpreter generation");
  // if too small.
  // Run with +PrintInterpreter to get the VM to print out the size.
  // Max size with JVMTI and TaggedStackInterpreter

  // QQQ this is probably way too large for c++ interpreter

  // The sethi() instruction generates lots more instructions when shell
  // stack limit is unlimited, so that's why this is much bigger.
  // NOTE(review): "sethi" is a SPARC instruction — this comment was
  // inherited from the SPARC port; verify the 210K budget against an
  // actual +PrintInterpreter run on SW64.
  const static int InterpreterCodeSize = 210 * K;

#endif // CPU_SW64_VM_CPPINTERPRETER_SW64_HPP
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "code/codeCache.hpp" ++#include "code/nmethod.hpp" ++#include "runtime/frame.hpp" ++#include "runtime/init.hpp" ++#include "runtime/os.hpp" ++#include "utilities/debug.hpp" ++#include "utilities/top.hpp" ++ ++#ifndef PRODUCT ++ ++void pd_ps(frame f) { ++ intptr_t* sp = f.sp(); ++ intptr_t* prev_sp = sp - 1; ++ intptr_t *pc = NULL; ++ intptr_t *next_pc = NULL; ++ int count = 0; ++ tty->print("register window backtrace from %#x:\n", sp); ++} ++ ++// This function is used to add platform specific info ++// to the error reporting code. ++ ++void pd_obfuscate_location(char *buf,int buflen) {} ++ ++#endif // PRODUCT +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/depChecker_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/depChecker_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/depChecker_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/depChecker_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,29 @@ ++/* ++ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "compiler/disassembler.hpp" ++#include "depChecker_sw64.hpp" ++ ++// Nothing to do on sw64 +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/depChecker_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/depChecker_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/depChecker_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/depChecker_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_DEPCHECKER_SW64_HPP ++#define CPU_SW64_VM_DEPCHECKER_SW64_HPP ++ ++// Nothing to do on SW64 ++ ++#endif // CPU_SW64_VM_DEPCHECKER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/disassembler_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/disassembler_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/disassembler_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/disassembler_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,23 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ +\ 文件尾没有换行符 +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/disassembler_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/disassembler_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/disassembler_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/disassembler_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,36 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
#ifndef CPU_SW64_VM_DISASSEMBLER_SW64_HPP
#define CPU_SW64_VM_DISASSEMBLER_SW64_HPP

  // SW64 instructions are fixed-width 32-bit words, so the disassembler may
  // assume 4-byte alignment between instructions.
  static int pd_instruction_alignment() {
    return (int) sizeof(int);
  }

  // CPU-option string handed to the disassembler back end to select the
  // SW64 decoder.
  static const char* pd_cpu_opts() {
    return "sw64only";
  }

#endif // CPU_SW64_VM_DISASSEMBLER_SW64_HPP
#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_sw64.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif

#ifdef ASSERT
// No SW64-specific location checks; the shared RegisterMap asserts suffice.
void RegisterMap::check_location_valid() {
}
#endif


// Profiling/safepoint support
// for Profiling - acting on another frame. walks sender frames
// if valid.
// frame profile_find_Java_sender_frame(JavaThread *thread);

// Conservatively decide whether it is safe to walk from this frame to its
// sender on behalf of another thread (profiler/safepoint).  Both sp and fp
// must lie within the thread's stack; additionally adapter blobs and blobs
// with an incomplete frame are rejected.
bool frame::safe_for_sender(JavaThread *thread) {
  address sp = (address)_sp;
  address fp = (address)_fp;
  bool sp_safe = (sp != NULL &&
                 (sp <= thread->stack_base()) &&
                 (sp >= thread->stack_base() - thread->stack_size()));
  bool fp_safe = (fp != NULL &&
                 (fp <= thread->stack_base()) &&
                 (fp >= thread->stack_base() - thread->stack_size()));

  // init_sender_for_c_frame can only be called when fp is guaranteed to be safe.
  // NOTE(review): _is_c_frame / init_sender_for_c_frame are SW64-specific
  // frame members not visible here — presumably declared in frame_sw64.hpp.
  if(_is_c_frame && !_is_sender_for_c_frame_initialized && fp_safe)init_sender_for_c_frame();

  if (sp_safe && fp_safe) {
    CodeBlob *cb = CodeCache::find_blob_unsafe(_pc);
    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.
    if (cb != NULL){
      // `is_frame_complete_at` always return true for `AdapterBlob`?
      if(cb->is_adapter_blob())return false;
      // no `fp` and `ra` saved.
      if(!is_interpreted_frame())
        if(0 == cb->frame_size())return false;

      if (!cb->is_frame_complete_at(_pc) && (cb->is_nmethod() || cb->is_runtime_stub())) {
        return false;
      }
    }
    return true;
  }
  // Note: fp == NULL is not really a prerequisite for this to be safe to
  // walk for c2. However we've modified the code such that if we get
  // a failure with fp != NULL that we then try with FP == NULL.
  // This is basically to mimic what a last_frame would look like if
  // c2 had generated it.
  if (sp_safe && fp == NULL) {
    CodeBlob *cb = CodeCache::find_blob_unsafe(_pc);
    // frame must be complete if fp == NULL as fp == NULL is only sensible
    // if we are looking at a nmethod and frame complete assures us of that.
    if (cb != NULL && cb->is_frame_complete_at(_pc) && cb->is_compiled_by_c2()) {
      return true;
    }
  }
  return false;
}


// Overwrite the saved return pc of this frame with 'pc'.  If this frame is
// not the thread's last frame, walk down from the last frame to find the
// frame whose saved pc slot corresponds to this frame, then patch either
// the interpreter slot (fp+1) or the compiled slot (sp[-1]).  Also records
// the deopt state if the new pc is a deopt pc.
// NOTE(review): the TracePcPatching message uses "0x%x" for 64-bit
// addresses, which truncates on LP64; the string is runtime output and is
// left unchanged here — flag for a separate fix.
void frame::patch_pc(Thread* thread, address pc) {
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", &((address *)_sp)[-1], ((address *)_sp)[-1], pc);
  }

  RegisterMap map((JavaThread *)thread, false);
  frame check = ((JavaThread *)thread)->last_frame();
  if (id() != check.id())
  {
    while (id() != check.sender(&map).id()) {
      check = check.sender(&map);
    }
    if (check.is_interpreted_frame())
      *(check.fp() + 1) = (intptr_t)pc;
    else
      ((address *)_sp)[-1] = pc;
  }

  _cb = CodeCache::find_blob(pc);
  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
    address orig = (((nmethod*)_cb)->get_original_pc(this));
    assert(orig == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}

// A frame is an interpreter frame iff its pc lies in the interpreter's
// generated code region.
bool frame::is_interpreted_frame() const {
  return Interpreter::contains(pc());
}

// Frame size measured in stack words: distance between this frame's sp and
// its sender's sp.
int frame::frame_size(RegisterMap* map) const {
  frame sender = this->sender(map);
  return sender.sp() - sp();
}

intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
  // Entry frame's arguments are always in relation to unextended_sp()
  return &unextended_sp()[index];
}

// sender_sp
#ifdef CC_INTERP
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // QQQ why does this specialize method exist if frame::sender_sp() does same thing?
  // seems odd and if we always know interpreted vs. non then sender_sp() is really
  // doing too much work.
  return get_interpreterState()->sender_sp();
}

// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return get_interpreterState()->monitor_base();
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  return (BasicObjectLock*) get_interpreterState()->stack_base();
}

#else // CC_INTERP

intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}

void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  int_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}


// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert((intptr_t) fp() > (intptr_t) result, "result must < than frame pointer");
  assert((intptr_t) sp() <= (intptr_t) result, "result
must >= than stack pointer"); ++ return result; ++} ++ ++void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) { ++ *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value; ++} ++ ++// Used by template based interpreter deoptimization ++void frame::interpreter_frame_set_last_sp(intptr_t* sp) { ++ *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp; ++} ++#endif // CC_INTERP ++ ++frame frame::sender_for_entry_frame(RegisterMap* map) const { ++ assert(map != NULL, "map must be set"); ++ // Java frame called from C; skip all C frames and return top C ++ // frame of that chunk as the sender ++ JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor(); ++ assert(!entry_frame_is_first(), "next Java fp must be non zero"); ++ assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack"); ++ map->clear(); ++ assert(map->include_argument_oops(), "should be set by clear"); ++ if (jfa->last_Java_pc() != NULL ) { ++ frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc()); ++ return fr; ++ } ++ frame fr(jfa->last_Java_sp(), jfa->last_Java_fp()); ++ return fr; ++} ++ ++frame frame::sender_for_interpreter_frame(RegisterMap* map) const { ++ // sp is the raw sp from the sender after adapter or interpreter extension ++ intptr_t* sp = (intptr_t*) at(interpreter_frame_sender_sp_offset); ++ ++ // This is the sp before any possible extension (adapter/locals). ++ //intptr_t* unextended_sp = interpreter_frame_sender_sp(); ++ ++ // The interpreter and compiler(s) always save FP in a known ++ // location on entry. We must record where that location is ++ // so this if FP was live on callout from c2 we can find ++ // the saved copy no matter what it called. ++ ++ // Since the interpreter always saves FP if we record where it is then ++ // we don't have to always save FP on entry and exit to c2 compiled ++ // code, on entry will be enough. 
++#ifdef COMPILER2 ++ if (map->update_map()) { ++ map->set_location(FP->as_VMReg(), (address) addr_at(link_offset)); ++ } ++#endif /* COMPILER2 */ ++ return frame(sp, link(), sender_pc()); ++} ++ ++ ++//------------------------------------------------------------------------------ ++// frame::verify_deopt_original_pc ++// ++// Verifies the calculated original PC of a deoptimization PC for the ++// given unextended SP. The unextended SP might also be the saved SP ++// for MethodHandle call sites. ++#ifdef ASSERT ++void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) { ++ frame fr; ++ ++ // This is ugly but it's better than to change {get,set}_original_pc ++ // to take an SP value as argument. And it's only a debugging ++ // method anyway. ++ fr._unextended_sp = unextended_sp; ++ ++ address original_pc = nm->get_original_pc(&fr); ++ assert(nm->insts_contains(original_pc), "original PC must be in nmethod"); ++ assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be"); ++} ++#endif ++ ++ ++//------------------------------------------------------------------------------ ++// frame::adjust_unextended_sp ++void frame::adjust_unextended_sp() { ++ // On SW64, sites calling method handle intrinsics and lambda forms are treated ++ // as any other call site. Therefore, no special action is needed when we are ++ // returning to any of these call sites. ++ ++ nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null(); ++ if (sender_nm != NULL) { ++ // If the sender PC is a deoptimization point, get the original PC. 
++ if (sender_nm->is_deopt_entry(_pc) || ++ sender_nm->is_deopt_mh_entry(_pc)) { ++ DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp)); ++ } ++ } ++} ++ ++//------------------------------sender_for_compiled_frame----------------------- ++frame frame::sender_for_compiled_frame(RegisterMap* map) const { ++ assert(map != NULL, "map must be set"); ++ ++ // frame owned by optimizing compiler ++ assert(_cb->frame_size() >= 0, "must have non-zero frame size"); ++ ++ intptr_t* sender_sp = sender_sp = sp() + _cb->frame_size(); ++#ifdef ASSERT ++ const bool c1_compiled = _cb->is_compiled_by_c1(); ++ bool native = _cb->is_nmethod() && ((nmethod*)_cb)->is_native_method(); ++ if (c1_compiled && native) { ++ assert(sender_sp == fp() + frame::sender_sp_offset, "incorrect frame size"); ++ } ++#endif // ASSERT ++ // On Intel the return_address is always the word on the stack ++ // the fp in compiler points to sender fp, but in interpreter, fp points to return address, ++ // so getting sender for compiled frame is not same as interpreter frame. ++ // we hard code here temporarily ++ // spark ++ address sender_pc = (address) *(sender_sp-1); ++ ++ intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset); ++ ++ if (map->update_map()) { ++ // Tell GC to use argument oopmaps for some runtime stubs that need it. ++ // For C1, the runtime stub might not have oop maps, so set this flag ++ // outside of update_register_map. ++ map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread())); ++ if (_cb->oop_maps() != NULL) { ++ OopMapSet::update_register_map(this, map); ++ } ++ ++ // Since the prolog does the save and restore of epb there is no oopmap ++ // for it so we must fill in its location as if there was an oopmap entry ++ // since if our caller was compiled code there could be live jvm state in it. 
++ map->set_location(FP->as_VMReg(), (address) saved_fp_addr); ++ } ++ assert(sender_sp != sp(), "must have changed"); ++ return frame(sender_sp, *saved_fp_addr, sender_pc); ++} ++ ++frame frame::sender(RegisterMap* map) const { ++ // Default is we done have to follow them. The sender_for_xxx will ++ // update it accordingly ++ map->set_include_argument_oops(false); ++ ++ if (is_entry_frame()) return sender_for_entry_frame(map); ++ if (is_interpreted_frame()) return sender_for_interpreter_frame(map); ++ assert(_cb == CodeCache::find_blob(pc()),"Must be the same"); ++ ++ if (_cb != NULL) { ++ return sender_for_compiled_frame(map); ++ } ++ // Must be native-compiled frame, i.e. the marshaling code for native ++ // methods that exists in the core system. ++ frame ret(sender_sp(), link(), sender_pc()); ++ ++ // For compiled sender frame with locals, sp and fp should never equal, but if ++ // the callee is native frame, it will set sp == fp, this is definitly a mistake. ++ // So fix sp here according to the frame size. 
++ if(_is_c_frame)ret.fix_sp(); ++ ++ return ret; ++} ++ ++void frame::fix_sp(){ ++ if(is_compiled_frame() && _sp == _fp && 2 < _cb->frame_size()){ ++ assert(NULL != _sp, "sp can't be NULL."); ++ _sp = _sp - (_cb->frame_size() - 2); ++ _unextended_sp = _sp; ++ } ++} ++ ++ ++bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) { ++ assert(is_interpreted_frame(), "must be interpreter frame"); ++ Method* method = interpreter_frame_method(); ++ // When unpacking an optimized frame the frame pointer is ++ // adjusted with: ++ int diff = (method->max_locals() - method->size_of_parameters()) * ++ Interpreter::stackElementWords; ++ printf("^^^^^^^^^^^^^^^adjust fp in deopt fp = 0%x \n", (intptr_t)(fp - diff)); ++ return _fp == (fp - diff); ++} ++ ++void frame::pd_gc_epilog() { ++ // nothing done here now ++} ++ ++bool frame::is_interpreted_frame_valid(JavaThread* thread) const { ++// QQQ ++#ifdef CC_INTERP ++#else ++ assert(is_interpreted_frame(), "Not an interpreted frame"); ++ // These are reasonable sanity checks ++ if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) { ++ return false; ++ } ++ if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) { ++ return false; ++ } ++ if (fp() + interpreter_frame_initial_sp_offset < sp()) { ++ return false; ++ } ++ // These are hacks to keep us out of trouble. ++ // The problem with these is that they mask other problems ++ if (fp() <= sp()) { // this attempts to deal with unsigned comparison above ++ return false; ++ } ++ ++ // do some validation of frame elements ++ ++ // first the method ++ ++ Method* m = *interpreter_frame_method_addr(); ++ ++ // validate the method we'd find in this potential sender ++ if (!m->is_valid_method()) return false; ++ ++ // stack frames shouldn't be much larger than max_stack elements ++ ++ //if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize()) { ++ if (fp() - sp() > 4096) { // stack frames shouldn't be large. 
++ return false; ++ } ++ ++ // validate bci/bcx ++ ++ intptr_t bcx = interpreter_frame_bcx(); ++ if (m->validate_bci_from_bcx(bcx) < 0) { ++ return false; ++ } ++ ++ // validate ConstantPoolCache* ++ ++ ConstantPoolCache* cp = *interpreter_frame_cache_addr(); ++ ++ if (cp == NULL || !cp->is_metaspace_object()) return false; ++ ++ // validate locals ++ ++ address locals = (address) *interpreter_frame_locals_addr(); ++ ++ if (locals > thread->stack_base() || locals < (address) fp()) return false; ++ ++ // We'd have to be pretty unlucky to be mislead at this point ++ ++#endif // CC_INTERP ++ return true; ++} ++ ++BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) { ++#ifdef CC_INTERP ++ // Needed for JVMTI. The result should always be in the interpreterState object ++ assert(false, "NYI"); ++ interpreterState istate = get_interpreterState(); ++#endif // CC_INTERP ++ assert(is_interpreted_frame(), "interpreted frame expected"); ++ Method* method = interpreter_frame_method(); ++ BasicType type = method->result_type(); ++ ++ intptr_t* tos_addr; ++ if (method->is_native()) { ++ // Prior to calling into the runtime to report the method_exit the possible ++ // return value is pushed to the native stack. If the result is a jfloat/jdouble ++ // then ST0 is saved. See the note in generate_native_result ++ tos_addr = (intptr_t*)sp(); ++ if (type == T_FLOAT || type == T_DOUBLE) { ++ tos_addr += 2; ++ } ++ } else { ++ tos_addr = (intptr_t*)interpreter_frame_tos_address(); ++ } ++ ++ switch (type) { ++ case T_OBJECT : ++ case T_ARRAY : { ++ oop obj; ++ if (method->is_native()) { ++#ifdef CC_INTERP ++ obj = istate->_oop_temp; ++#else ++ obj = cast_to_oop(at(interpreter_frame_oop_temp_offset)); ++#endif // CC_INTERP ++ } else { ++ oop* obj_p = (oop*)tos_addr; ++ obj = (obj_p == NULL) ? 
(oop)NULL : *obj_p; ++ } ++ assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check"); ++ *oop_result = obj; ++ break; ++ } ++ case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break; ++ case T_BYTE : value_result->b = *(jbyte*)tos_addr; break; ++ case T_CHAR : value_result->c = *(jchar*)tos_addr; break; ++ case T_SHORT : value_result->s = *(jshort*)tos_addr; break; ++ case T_INT : value_result->i = *(jint*)tos_addr; break; ++ case T_LONG : value_result->j = *(jlong*)tos_addr; break; ++ case T_FLOAT : value_result->f = *(jfloat*)tos_addr; break; ++// yj not sure ++// case T_FLOAT : { ++// if (method->is_native()) { ++// jdouble d = *(jdouble*)tos_addr; // Result was in ST0 so need to convert to jfloat ++// value_result->f = (jfloat)d; ++// } else { ++// value_result->f = *(jfloat*)tos_addr; ++// } ++// break; ++// } ++ case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break; ++ case T_VOID : /* Nothing to do */ break; ++ default : ShouldNotReachHere(); ++ } ++ ++ return type; ++} ++ ++ ++intptr_t* frame::interpreter_frame_tos_at(jint offset) const { ++ int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize); ++ return &interpreter_frame_tos_address()[index]; ++} ++ ++#ifndef PRODUCT ++ ++#define DESCRIBE_FP_OFFSET(name) \ ++ values.describe(frame_no, fp() + frame::name##_offset, #name) ++ ++void frame::describe_pd(FrameValues& values, int frame_no) { ++ if (is_interpreted_frame()) { ++ DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp); ++ DESCRIBE_FP_OFFSET(interpreter_frame_last_sp); ++ DESCRIBE_FP_OFFSET(interpreter_frame_method); ++ DESCRIBE_FP_OFFSET(interpreter_frame_mdx); ++ DESCRIBE_FP_OFFSET(interpreter_frame_cache); ++ DESCRIBE_FP_OFFSET(interpreter_frame_locals); ++ DESCRIBE_FP_OFFSET(interpreter_frame_bcx); ++ DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp); ++ } ++} ++#endif ++ ++intptr_t *frame::initial_deoptimization_info() { ++ // used to reset the saved FP ++ return fp(); ++} ++ ++intptr_t* frame::real_fp() const { ++ 
if (_cb != NULL) { ++ // use the frame size if valid ++ int size = _cb->frame_size(); ++ if (size > 0) { ++ return unextended_sp() + size; ++ } ++ } ++ // else rely on fp() ++ assert(! is_compiled_frame(), "unknown compiled frame size"); ++ return fp(); ++} ++ ++ ++void frame::init(intptr_t* sp, intptr_t* fp, address pc) { ++ _sp = sp; ++ _unextended_sp = sp; ++ _fp = fp; ++ _pc = pc; ++ assert(pc != NULL, "no pc?"); ++ _cb = CodeCache::find_blob(pc); ++ adjust_unextended_sp(); ++ ++ address original_pc = nmethod::get_deopt_original_pc(this); ++ if (original_pc != NULL) { ++ _pc = original_pc; ++ _deopt_state = is_deoptimized; ++ } else { ++ _deopt_state = not_deoptimized; ++ } ++ ++ _sender_fp_for_c_frame = NULL; ++ _sender_address_for_c_frame = NULL; ++ ++ if (is_java_frame() || ++ is_native_frame() || ++ is_runtime_frame() || ++ is_stub_frame()) { ++ _is_c_frame = false; ++ _is_sender_for_c_frame_initialized = true; ++ }else{ ++ _is_c_frame = true; ++ _is_sender_for_c_frame_initialized = false; ++ } ++} ++ ++void frame::init_sender_for_c_frame() { ++ _is_sender_for_c_frame_initialized = true; ++ if(!_is_c_frame)return; ++ if (_fp == NULL) return; ++ if ((long)_pc < 0x8000000 || (long)_pc > 0x4000000000000000) return; ++ bool stop_flag = false; ++ address pinsn = _pc ; ++ while ((_sender_fp_for_c_frame == NULL || _sender_address_for_c_frame == NULL) && (*((int *) pinsn)) && !stop_flag) { ++ int insn = *((int *) pinsn); ++ if (_sender_fp_for_c_frame == NULL && (insn & 0xffff0000) == 0xadfe0000) { // stl fp,yy(sp) ++ int yy = (insn & 0x0000ffff) / 8; ++ _sender_fp_for_c_frame = (intptr_t *) (*(_fp + yy)); ++ } else if ( _sender_address_for_c_frame == NULL && (insn & 0xffff0000) == 0xaf5e0000) { // stl ra,xx(sp) ++ int xx = (insn & 0x0000ffff) / 8; ++ _sender_address_for_c_frame = (address) (*(_fp + xx)); ++ } else if ((insn & 0xffff0000) == 0xffbb0000){ // ldih gp,zz(t12) ++ stop_flag = true; ++ } ++ pinsn -= 4; ++ // scan function to _pc ++ } ++} ++void 
frame::init_sender_for_c_frame(address f_start_pc) { ++ do{ ++ int insn = *((int *) f_start_pc); ++ if ( _sender_address_for_c_frame == NULL && (insn & 0xffff0000) == 0xaf5e0000) { // stl ra,xx(sp) ++ int xx = (insn & 0x0000ffff) / 8; ++ _sender_address_for_c_frame = (address) (*(_sp + xx)); ++ } else if (_sender_fp_for_c_frame == NULL && (insn & 0xffff0000) == 0xadfe0000) { // stl fp,yy(sp) ++ int yy = (insn & 0x0000ffff) / 8; ++ _sender_fp_for_c_frame = (intptr_t *) (*(_sp + yy)); ++ } ++ f_start_pc += 4; ++ // scan function to _pc ++ } while ((_sender_fp_for_c_frame == NULL || _sender_address_for_c_frame == NULL) && (*((int *) f_start_pc))); ++} ++// when thread stop before stl ra at stack ++void frame::fixRa(const void* ucVoid) { ++ if (!_is_c_frame) return; ++ if (_sender_address_for_c_frame != NULL) { ++ return; ++ } else { ++ const ucontext_t *uc = (const ucontext_t *) ucVoid; ++ if (uc != NULL) { ++ _sender_address_for_c_frame = os::ucontext_get_ra(uc); ++ } else { ++ _sender_address_for_c_frame = NULL; ++ } ++ } ++} ++ ++intptr_t* frame::sender_sp() const { ++ if (_is_c_frame) { ++ assert(_is_sender_for_c_frame_initialized, "safe_for_sender must be called first."); ++ return _sender_fp_for_c_frame;// for sw C frame, sp is always the same as fp ++ } else { ++ return addr_at(sender_sp_offset); ++ } ++} ++ ++intptr_t* frame::link() const { ++ if (_is_c_frame) ++ { ++ assert(_is_sender_for_c_frame_initialized, "safe_for_sender must be called first."); ++ return _sender_fp_for_c_frame; ++ } ++ else ++ return (intptr_t*) *(intptr_t **)addr_at(link_offset); ++} ++ ++address frame::sender_pc() const { ++ if (_is_c_frame) ++ { ++ assert(_is_sender_for_c_frame_initialized, "safe_for_sender must be called first."); ++ return _sender_address_for_c_frame; ++ } ++ else { ++ return *sender_pc_addr(); ++ } ++} ++ ++#ifndef PRODUCT ++// This is a generic constructor which is only used by pns() in debug.cpp. 
++frame::frame(void* sp, void* fp, void* pc) { ++ init((intptr_t*)sp, (intptr_t*)fp, (address)pc); ++} ++#endif +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/frame_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/frame_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/frame_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/frame_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,233 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_FRAME_SW64_HPP ++#define CPU_SW64_VM_FRAME_SW64_HPP ++ ++#include "runtime/synchronizer.hpp" ++#include "utilities/top.hpp" ++ ++// A frame represents a physical stack frame (an activation). Frames can be ++// C or Java frames, and the Java frames can be interpreted or compiled. ++// In contrast, vframes represent source-level activations, so that one physical frame ++// can correspond to multiple source level frames because of inlining. 
++// A frame is comprised of {pc, fp, sp} ++// ------------------------------ Asm interpreter ---------------------------------------- ++// Layout of asm interpreter frame: ++// [expression stack ] * <- sp ++// [monitors ] \ ++// ... | monitor block size ++// [monitors ] / ++// [monitor block size ] ++// [byte code index/pointr] = bcx() bcx_offset ++// [pointer to locals ] = locals() locals_offset ++// [constant pool cache ] = cache() cache_offset ++// [methodData ] = mdp() mdx_offset ++// [methodOop ] = method() method_offset ++// [last sp ] = last_sp() last_sp_offset ++// [old stack pointer ] (sender_sp) sender_sp_offset ++// [old frame pointer ] <- fp = link() ++// [return pc ] ++// [oop temp ] (only for native calls) ++// [locals and parameters ] ++// <- sender sp ++// ------------------------------ Asm interpreter ---------------------------------------- ++ ++// ------------------------------ C++ interpreter ---------------------------------------- ++// ++// Layout of C++ interpreter frame: (While executing in BytecodeInterpreter::run) ++// ++// <- SP (current sp) ++// [local variables ] BytecodeInterpreter::run local variables ++// ... BytecodeInterpreter::run local variables ++// [local variables ] BytecodeInterpreter::run local variables ++// [old frame pointer ] fp [ BytecodeInterpreter::run's fp ] ++// [return pc ] (return to frame manager) ++// [interpreter_state* ] (arg to BytecodeInterpreter::run) -------------- ++// [expression stack ] <- last_Java_sp | ++// [... ] * <- interpreter_state.stack | ++// [expression stack ] * <- interpreter_state.stack_base | ++// [monitors ] \ | ++// ... 
| monitor block size | ++// [monitors ] / <- interpreter_state.monitor_base | ++// [struct interpretState ] <-----------------------------------------| ++// [return pc ] (return to callee of frame manager [1] ++// [locals and parameters ] ++// <- sender sp ++ ++// [1] When the c++ interpreter calls a new method it returns to the frame ++// manager which allocates a new frame on the stack. In that case there ++// is no real callee of this newly allocated frame. The frame manager is ++// aware of the additional frame(s) and will pop them as nested calls ++// complete. Howevers tTo make it look good in the debugger the frame ++// manager actually installs a dummy pc pointing to RecursiveInterpreterActivation ++// with a fake interpreter_state* parameter to make it easy to debug ++// nested calls. ++ ++// Note that contrary to the layout for the assembly interpreter the ++// expression stack allocated for the C++ interpreter is full sized. ++// However this is not as bad as it seems as the interpreter frame_manager ++// will truncate the unused space on succesive method calls. ++// ++// ------------------------------ C++ interpreter ---------------------------------------- ++ ++// Layout of interpreter frame: ++// ++// [ monitor entry ] <--- sp ++// ... ++// [ monitor entry ] ++// -7 [ monitor block top ] ( the top monitor entry ) ++// -6 [ byte code pointer ] (if native, bcp = 0) ++// -5 [ constant pool cache ] ++// -4 [ methodData ] mdx_offset(not core only) ++// -3 [ methodOop ] ++// -2 [ locals offset ] ++// -1 [ sender's sp ] ++// 0 [ sender's fp ] <--fp ++// 1 [ return address ] ++// 2 [ oop temp offset ] (only for native calls) ++// 3 [ result handler offset ] (only for native calls) ++// 4 [ result type info ] (only for native calls) ++// [ local var m-1 ] ++// ... ++// [ local var 0 ] ++// [ argumnet word n-1 ] <--- ( sender's sp ) ++// ... 
++// [ argument word 0 ] <--- S1 ++ ++ public: ++ enum { ++ pc_return_offset = 0, ++ // All frames ++ link_offset = 0, ++ return_addr_offset = 1, ++ // non-interpreter frames ++ sender_sp_offset = 2, ++ ++#ifndef CC_INTERP ++ ++ // Interpreter frames ++ interpreter_frame_return_addr_offset = 1, ++ interpreter_frame_result_handler_offset = 3, // for native calls only ++ interpreter_frame_oop_temp_offset = 2, // for native calls only ++ ++ interpreter_frame_sender_fp_offset = 0, ++ interpreter_frame_sender_sp_offset = -1, ++ // outgoing sp before a call to an invoked method ++ interpreter_frame_last_sp_offset = interpreter_frame_sender_sp_offset - 1, ++ interpreter_frame_locals_offset = interpreter_frame_last_sp_offset - 1, ++ interpreter_frame_method_offset = interpreter_frame_locals_offset - 1, ++ interpreter_frame_mdx_offset = interpreter_frame_method_offset - 1, ++ interpreter_frame_cache_offset = interpreter_frame_mdx_offset - 1, ++ interpreter_frame_bcx_offset = interpreter_frame_cache_offset - 1, ++ interpreter_frame_initial_sp_offset = interpreter_frame_bcx_offset - 1, ++ ++ interpreter_frame_monitor_block_top_offset = interpreter_frame_initial_sp_offset, ++ interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset, ++ ++#endif // CC_INTERP ++ ++ // Entry frames ++ entry_frame_call_wrapper_offset = -17, ++ ++ // Native frames ++ ++ native_frame_initial_param_offset = 2 ++ ++ }; ++ ++ intptr_t ptr_at(int offset) const { ++ return *ptr_at_addr(offset); ++ } ++ ++ void ptr_at_put(int offset, intptr_t value) { ++ *ptr_at_addr(offset) = value; ++ } ++ ++ void fix_sp(); ++ ++private: ++ // an additional field beyond _sp and _pc: ++ intptr_t* _fp; // frame pointer ++ // The interpreter and adapters will extend the frame of the caller. ++ // Since oopMaps are based on the sp of the caller before extension ++ // we need to know that value. However in order to compute the address ++ // of the return address we need the real "raw" sp. 
Since sparc already ++ // uses sp() to mean "raw" sp and unextended_sp() to mean the caller's ++ // original sp we use that convention. ++ bool _is_c_frame; ++ bool _is_sender_for_c_frame_initialized; ++ intptr_t* _sender_fp_for_c_frame; ++ address _sender_address_for_c_frame; ++ // The caller must guarantee fp is safe. ++ void init_sender_for_c_frame(); ++ ++ intptr_t* _unextended_sp; ++ void adjust_unextended_sp(); ++ ++ intptr_t* ptr_at_addr(int offset) const { ++ return (intptr_t*) addr_at(offset); ++ } ++#ifdef ASSERT ++ // Used in frame::sender_for_{interpreter,compiled}_frame ++ static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false); ++ static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) { ++ verify_deopt_original_pc(nm, unextended_sp, true); ++ } ++#endif ++ ++ public: ++ // Constructors ++ ++ frame(intptr_t* sp, intptr_t* fp, address pc); ++ ++ frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc); ++ ++ frame(intptr_t* sp, intptr_t* fp); ++ ++ void init(intptr_t* sp, intptr_t* fp, address pc); ++ ++ // accessors for the instance variables ++ intptr_t* fp() const { return _fp; } ++ ++ inline address* sender_pc_addr() const; ++ ++ // return address of param, zero origin index. 
++ inline address* native_param_addr(int idx) const; ++ ++ // expression stack tos if we are nested in a java call ++ intptr_t* interpreter_frame_last_sp() const; ++ void fixRa(const void* ucVoid); ++ void init_sender_for_c_frame(address f_start_pc); ++ ++#ifndef CC_INTERP ++ // deoptimization support ++ void interpreter_frame_set_last_sp(intptr_t* sp); ++#endif // CC_INTERP ++ ++#ifdef CC_INTERP ++ inline interpreterState get_interpreterState() const; ++#endif // CC_INTERP ++ ++#endif // CPU_SW64_VM_FRAME_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/frame_sw64.inline.hpp afu8u/hotspot/src/cpu/sw64/vm/frame_sw64.inline.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/frame_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/frame_sw64.inline.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,319 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_FRAME_SW64_INLINE_HPP ++#define CPU_SW64_VM_FRAME_SW64_INLINE_HPP ++ ++#include "code/codeCache.hpp" ++ ++// Inline functions for SW frames: ++ ++// Constructors: ++ ++inline frame::frame() { ++ _pc = NULL; ++ _sp = NULL; ++ _unextended_sp = NULL; ++ _fp = NULL; ++ _cb = NULL; ++ _deopt_state = unknown; ++} ++ ++//inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) { ++// _sp = sp; ++// _unextended_sp = sp; ++// _fp = fp; ++// _pc = pc; ++// assert(pc != NULL, "no pc?"); ++// _cb = CodeCache::find_blob(pc); ++// adjust_unextended_sp(); ++// ++// address original_pc = nmethod::get_deopt_original_pc(this); ++// if (original_pc != NULL) { ++// _pc = original_pc; ++// _deopt_state = is_deoptimized; ++// } else { ++// _deopt_state = not_deoptimized; ++// } ++//} ++ ++inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) { ++ init(sp, fp, pc); ++} ++ ++inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) { ++ _sp = sp; ++ _unextended_sp = unextended_sp; ++ _fp = fp; ++ _pc = pc; ++ assert(pc != NULL, "no pc?"); ++ _cb = CodeCache::find_blob(pc); ++ adjust_unextended_sp(); ++ ++ address original_pc = nmethod::get_deopt_original_pc(this); ++ if (original_pc != NULL) { ++ _pc = original_pc; ++ _deopt_state = is_deoptimized; ++ } else { ++ if (_cb->is_deoptimization_stub()) { ++ _deopt_state = is_deoptimized; ++ } else { ++ _deopt_state = not_deoptimized; ++ } ++ } ++ _is_c_frame = false; ++} ++ ++inline frame::frame(intptr_t* sp, intptr_t* fp) { ++ _sp = sp; ++ _unextended_sp = sp; ++ _fp = fp; ++ _pc = (address)(sp[-1]); ++ Unimplemented(); //ZHJ20170712 ++ ++ // Here's a sticky one. This constructor can be called via AsyncGetCallTrace ++ // when last_Java_sp is non-null but the pc fetched is junk. If we are truly ++ // unlucky the junk value could be to a zombied method and we'll die on the ++ // find_blob call. 
This is also why we can have no asserts on the validity ++ // of the pc we find here. AsyncGetCallTrace -> pd_get_top_frame_for_signal_handler ++ // -> pd_last_frame should use a specialized version of pd_last_frame which could ++ // call a specilaized frame constructor instead of this one. ++ // Then we could use the assert below. However this assert is of somewhat dubious ++ // value. ++ // assert(_pc != NULL, "no pc?"); ++ ++ _cb = CodeCache::find_blob(_pc); ++ adjust_unextended_sp(); ++ address original_pc = nmethod::get_deopt_original_pc(this); ++ if (original_pc != NULL) { ++ _pc = original_pc; ++ _deopt_state = is_deoptimized; ++ } else { ++ _deopt_state = not_deoptimized; ++ } ++} ++ ++// Accessors ++ ++inline bool frame::equal(frame other) const { ++ bool ret = sp() == other.sp() ++ && unextended_sp() == other.unextended_sp() ++ && fp() == other.fp() ++ && pc() == other.pc(); ++ assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction"); ++ return ret; ++} ++ ++// Return unique id for this frame. The id must have a value where we can distinguish ++// identity and younger/older relationship. NULL represents an invalid (incomparable) ++// frame. 
++inline intptr_t* frame::id(void) const { return unextended_sp(); } ++ ++// Relationals on frames based ++// Return true if the frame is younger (more recent activation) than the frame represented by id ++inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); ++ return this->id() < id ; } ++ ++// Return true if the frame is older (less recent activation) than the frame represented by id ++inline bool frame::is_older(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id"); ++ return this->id() > id ; } ++ ++ ++ ++//inline intptr_t* frame::link() const { return (intptr_t*) *(intptr_t **)addr_at(link_offset); } ++inline void frame::set_link(intptr_t* addr) { *(intptr_t **)addr_at(link_offset) = addr; } ++ ++ ++inline intptr_t* frame::unextended_sp() const { return _unextended_sp; } ++ ++// Return address: ++ ++inline address* frame::sender_pc_addr() const { return (address*) addr_at( return_addr_offset); } ++//inline address frame::sender_pc() const { return *sender_pc_addr(); } ++ ++// return address of param, zero origin index. ++inline address* frame::native_param_addr(int idx) const { return (address*) addr_at( native_frame_initial_param_offset+idx); } ++ ++#ifdef CC_INTERP ++ ++inline interpreterState frame::get_interpreterState() const { ++ return ((interpreterState)addr_at( -sizeof(BytecodeInterpreter)/wordSize )); ++} ++ ++inline intptr_t* frame::sender_sp() const { ++ // Hmm this seems awfully expensive QQQ, is this really called with interpreted frames? 
++ if (is_interpreted_frame()) { ++ assert(false, "should never happen"); ++ return get_interpreterState()->sender_sp(); ++ } else { ++ return addr_at(sender_sp_offset); ++ } ++} ++ ++inline intptr_t** frame::interpreter_frame_locals_addr() const { ++ assert(is_interpreted_frame(), "must be interpreted"); ++ return &(get_interpreterState()->_locals); ++} ++ ++inline intptr_t* frame::interpreter_frame_bcx_addr() const { ++ assert(is_interpreted_frame(), "must be interpreted"); ++ return (intptr_t*) &(get_interpreterState()->_bcp); ++} ++ ++ ++// Constant pool cache ++ ++inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const { ++ assert(is_interpreted_frame(), "must be interpreted"); ++ return &(get_interpreterState()->_constants); ++} ++ ++// Method ++ ++inline Method** frame::interpreter_frame_method_addr() const { ++ assert(is_interpreted_frame(), "must be interpreted"); ++ return &(get_interpreterState()->_method); ++} ++ ++inline intptr_t* frame::interpreter_frame_mdx_addr() const { ++ assert(is_interpreted_frame(), "must be interpreted"); ++ return (intptr_t*) &(get_interpreterState()->_mdx); ++} ++ ++// top of expression stack ++inline intptr_t* frame::interpreter_frame_tos_address() const { ++ assert(is_interpreted_frame(), "wrong frame type"); ++ return get_interpreterState()->_stack + 1; ++} ++ ++#else // asm interpreter ++//inline intptr_t* frame::sender_sp() const { return addr_at(sender_sp_offset); } ++ ++inline intptr_t** frame::interpreter_frame_locals_addr() const { ++ return (intptr_t**)addr_at(interpreter_frame_locals_offset); ++} ++ ++inline intptr_t* frame::interpreter_frame_last_sp() const { ++ return *(intptr_t**)addr_at(interpreter_frame_last_sp_offset); ++} ++ ++inline intptr_t* frame::interpreter_frame_bcx_addr() const { ++ return (intptr_t*)addr_at(interpreter_frame_bcx_offset); ++} ++ ++ ++inline intptr_t* frame::interpreter_frame_mdx_addr() const { ++ return (intptr_t*)addr_at(interpreter_frame_mdx_offset); ++} ++ ++ ++ 
++// Constant pool cache ++ ++inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const { ++ return (ConstantPoolCache**)addr_at(interpreter_frame_cache_offset); ++} ++ ++// Method ++ ++inline Method** frame::interpreter_frame_method_addr() const { ++ static Method* invalid_method = NULL; ++ if(NULL == _fp)return &invalid_method; ++ return (Method**)addr_at(interpreter_frame_method_offset); ++} ++ ++// top of expression stack ++inline intptr_t* frame::interpreter_frame_tos_address() const { ++ intptr_t* last_sp = interpreter_frame_last_sp(); ++ if (last_sp == NULL ) { ++ return sp(); ++ } else { ++ // sp() may have been extended by an adapter ++ assert(last_sp <= (intptr_t*)interpreter_frame_monitor_end(), "bad tos"); ++ return last_sp; ++ } ++} ++ ++inline oop* frame::interpreter_frame_temp_oop_addr() const { ++ return (oop *)(fp() + interpreter_frame_oop_temp_offset); ++} ++ ++#endif // CC_INTERP ++ ++inline int frame::pd_oop_map_offset_adjustment() const { ++ return 0; ++} ++ ++inline int frame::interpreter_frame_monitor_size() { ++ return BasicObjectLock::size(); ++} ++ ++ ++// expression stack ++// (the max_stack arguments are used by the GC; see class FrameClosure) ++ ++inline intptr_t* frame::interpreter_frame_expression_stack() const { ++ intptr_t* monitor_end = (intptr_t*) interpreter_frame_monitor_end(); ++ return monitor_end-1; ++} ++ ++ ++inline jint frame::interpreter_frame_expression_stack_direction() { return -1; } ++ ++ ++// Entry frames ++ ++inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const { ++ return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset); ++} ++ ++// Compiled frames ++ ++inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) { ++ return (nof_args - local_index + (local_index < nof_args ? 
1: -1)); ++} ++ ++inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) { ++ return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors); ++} ++ ++inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) { ++ return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1); ++} ++ ++inline bool frame::volatile_across_calls(Register reg) { ++ return true; ++} ++ ++ ++ ++inline oop frame::saved_oop_result(RegisterMap* map) const { ++ return *((oop*) map->location(V0->as_VMReg())); ++} ++ ++inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) { ++ *((oop*) map->location(V0->as_VMReg())) = obj; ++} ++ ++#endif // CPU_SW64_VM_FRAME_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/globalDefinitions_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/globalDefinitions_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/globalDefinitions_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/globalDefinitions_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,41 @@ ++/* ++ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_GLOBALDEFINITIONS_SW64_HPP ++#define CPU_SW64_VM_GLOBALDEFINITIONS_SW64_HPP ++const int BytesPerInstWord = 4; ++ ++const int StackAlignmentInBytes = (2*wordSize); ++ ++#define SUPPORTS_NATIVE_CX8 ++// Indicates whether the C calling conventions require that ++// 32-bit integer argument values are properly extended to 64 bits. ++// If set, SharedRuntime::c_calling_convention() must adapt ++// signatures accordingly. ++const bool CCallingConventionRequiresIntsAsLongs = false; ++ ++#define SUPPORT_NATIVE_CX8 ++#define SUPPORTS_NATIVE_CX8 ++ ++#endif // CPU_SW64_VM_GLOBALDEFINITIONS_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/globals_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/globals_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/globals_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/globals_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,155 @@ ++/* ++ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_GLOBALS_SW64_HPP ++#define CPU_SW64_VM_GLOBALS_SW64_HPP ++ ++#include "utilities/globalDefinitions.hpp" ++#include "utilities/macros.hpp" ++ ++// Sets the default values for platform dependent flags used by the runtime system. ++// (see globals.hpp) ++ ++#ifdef CORE ++define_pd_global(bool, UseSSE, 0); ++#endif /* CORE */ ++define_pd_global(bool, ConvertSleepToYield, true); ++define_pd_global(bool, ShareVtableStubs, true); ++define_pd_global(bool, CountInterpCalls, true); ++ ++define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks ++define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86. ++define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast ++define_pd_global(bool, NeedsDeoptSuspend, false); // only register window machines need this ++ ++// See 4827828 for this change. There is no globals_core_i486.hpp. I can't ++// assign a different value for C2 without touching a number of files. Use ++// #ifdef to minimize the change as it's late in Mantis. -- FIXME. ++// c1 doesn't have this problem because the fix to 4858033 assures us ++// the the vep is aligned at CodeEntryAlignment whereas c2 only aligns ++// the uep and the vep doesn't get real alignment but just slops on by ++// only assured that the entry instruction meets the 5 byte size requirement. 
++define_pd_global(intx, CodeEntryAlignment, 16); ++define_pd_global(intx, OptoLoopAlignment, 16); ++define_pd_global(intx, InlineFrequencyCount, 100); ++define_pd_global(intx, InlineSmallCode, 4000); ++ ++define_pd_global(uintx, TLABSize, 0); ++define_pd_global(uintx, NewSize, 1024 * K); ++define_pd_global(intx, PreInflateSpin, 10); ++ ++//define_pd_global(intx, PrefetchCopyIntervalInBytes, 256); ++//define_pd_global(intx, PrefetchScanIntervalInBytes, 256); ++//define_pd_global(intx, PrefetchFieldsAhead, -1); ++ ++define_pd_global(intx, StackYellowPages, 2); ++define_pd_global(intx, StackRedPages, 1); ++define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1)); ++ ++define_pd_global(bool, RewriteBytecodes, true); ++define_pd_global(bool, RewriteFrequentPairs, true); ++#ifdef _ALLBSD_SOURCE ++define_pd_global(bool, UseMembar, true); ++#else ++define_pd_global(bool, UseMembar, false); ++#endif ++// GC Ergo Flags ++define_pd_global(intx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread ++ ++define_pd_global(uintx, TypeProfileLevel, 111); ++ ++define_pd_global(bool, PreserveFramePointer, false); ++// Only c2 cares about this at the moment ++//define_pd_global(intx, AllocatePrefetchStyle, 2); ++//define_pd_global(intx, AllocatePrefetchDistance, 256); ++ ++#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \ ++ \ ++ product(bool, UseSW6B, false, \ ++ "Use SW6B on Shenwei CPUs") \ ++ \ ++ product(bool, UseSW8A, false, \ ++ "Use SW8A on Shenwei CPUs") \ ++ \ ++ product(bool, UseAddpi, false, \ ++ "Use addpi of SW8A's instructions") \ ++ \ ++ product(bool, UseCAS, false, \ ++ "Use CASx of SW8A's instructions") \ ++ \ ++ product(bool, UseWmemb, false, \ ++ "Use wmemb on SW8A CPU") \ ++ product(bool, UseNecessaryMembar, true, \ ++ "Use necessary Membar") \ ++ product(bool, UseCRC32, false, \ ++ "Use CRC32 instructions for CRC32 computation") \ ++ product(bool, SolveAlignment, false, \ ++ "solve alignment when 
SolveAlignment is true, otherwise ignore") \ ++ product(bool, UseUnsafeCopyIntrinsic, true, \ ++ "Use unsafe_arraycopy Intrinsic") \ ++ product(bool, UseSimdForward, false, \ ++ "arraycopy disjoint stubs with SIMD instructions") \ ++ product(bool, UseSimdBackward, false, \ ++ "arraycopy conjoint stubs with SIMD instructions") \ ++ product(bool, UseSimdLongOop, false, \ ++ "conjoint oop copy with SIMD instructions") \ ++ /* product(bool, UseCodeCacheAllocOpt, true, */ \ ++ /* "Allocate code cache within 32-bit memory address space") */ \ ++ \ ++ product(bool, UseCountLeadingZerosInstruction, true, \ ++ "Use count leading zeros instruction") \ ++ \ ++ product(bool, UseCountTrailingZerosInstruction, false, \ ++ "Use count trailing zeros instruction") \ ++ \ ++ product(bool, FastIntDiv, false, \ ++ "make Integer division faster") \ ++ \ ++ product(bool, FastLongDiv, false, \ ++ "make Long division faster") \ ++ \ ++ product(bool, FastIntRem, false, \ ++ "make Integer remainder faster") \ ++ \ ++ product(bool, FastLongRem, false, \ ++ "make Long remainder faster") \ ++ \ ++ product(bool, SafePatch, true, \ ++ "make patch operations safer, for SPECjvm2008 performance lost about 2%") \ ++ \ ++ product(bool, FRegisterConflict, true, \ ++ "When FRegisterConflict is true, prevent source and destination FloatRegisters from being the same. 
" \ ++ "When FRegisterConflict is false, ignore the conflict") \ ++ \ ++ product(bool, UseGetLongIntrinsic, false, \ ++ "Use Unsafe.getLong intrinsic") \ ++ \ ++ product(intx, PrefetchUnsafeCopyInBytes, -1, \ ++ "How far ahead to prefetch destination area (<= 0 means off)") \ ++ \ ++ product(bool, Usesetfpec1, true, \ ++ "When Usesetfpec1 is true, which is used for 9906" \ ++ "When Usesetfpec1 is false, which is used for 9916") ++ ++#endif // CPU_SW64_VM_GLOBALS_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/icache_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/icache_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/icache_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/icache_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,57 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "runtime/icache.hpp" ++ ++ #define CACHE_OPT 1 ++ ++//no need, we just call cacheflush system call to flush cache ++//flush cache is a very frequent operation, flush all the cache decrease the performance sharply, so i modify it. ++void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {}; ++ ++void ICache::call_flush_stub(address start, int lines) { ++ //in fact, the current os implementation simply flush all ICACHE&DCACHE ++// __asm__ __volatile__ ("ldi $0,266"); ++// __asm__ __volatile__ ("sys_call 0x83"); ++} ++ ++void ICache::invalidate_word(address addr) { ++ //cacheflush(addr, 4, ICACHE); ++ ++// __asm__ __volatile__ ("ldi $0,266"); ++// __asm__ __volatile__ ("sys_call 0x83"); ++} ++ ++void ICache::invalidate_range(address start, int nbytes) { ++// __asm__ __volatile__ ("ldi $0,266"); ++// __asm__ __volatile__ ("sys_call 0x83"); ++} ++ ++void ICache::invalidate_all() { ++// __asm__ __volatile__ ("ldi $0,266"); ++// __asm__ __volatile__ ("sys_call 0x83"); ++} ++ +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/icache_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/icache_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/icache_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/icache_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,62 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_ICACHE_SW64_HPP ++#define CPU_SW64_VM_ICACHE_SW64_HPP ++ ++// Interface for updating the instruction cache. Whenever the VM modifies ++// code, part of the processor instruction cache potentially has to be flushed. ++ ++// On the x86, this is a no-op -- the I-cache is guaranteed to be consistent ++// after the next jump, and the VM never modifies instructions directly ahead ++// of the instruction fetch path. ++ ++// [phh] It's not clear that the above comment is correct, because on an MP ++// system where the dcaches are not snooped, only the thread doing the invalidate ++// will see the update. Even in the snooped case, a memory fence would be ++// necessary if stores weren't ordered. Fortunately, they are on all known ++// x86 implementations. 
++ ++class ICache : public AbstractICache { ++ public: ++ enum { ++ stub_size = 0, // Size of the icache flush stub in bytes ++ line_size = 32, // flush instruction affects a dword ++ log2_line_size = 5 // log2(line_size) ++ }; ++ ++ //nothing to do ++ static void initialize() {} ++ ++ static void call_flush_stub(address start, int lines); ++ ++ static void invalidate_word(address addr); ++ ++ static void invalidate_range(address start, int nbytes); ++ ++ static void invalidate_all(); ++ ++}; ++ ++#endif // CPU_SW64_VM_ICACHE_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/icBuffer_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/icBuffer_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/icBuffer_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/icBuffer_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,81 @@ ++/* ++ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "code/icBuffer.hpp" ++#include "gc_interface/collectedHeap.inline.hpp" ++#include "interpreter/bytecodes.hpp" ++#include "memory/resourceArea.hpp" ++#include "nativeInst_sw64.hpp" ++#include "oops/oop.inline.hpp" ++#include "oops/oop.inline2.hpp" ++ ++int InlineCacheBuffer::ic_stub_code_size() { ++ return NativeMovConstReg::instruction_size + ++ NativeGeneralJump::instruction_size + ++ 1; ++ // so that code_end can be set in CodeBuffer ++ // 64bit 15 = 6 + 8 bytes + 1 byte ++ // 32bit 7 = 2 + 4 bytes + 1 byte ++} ++ ++ ++// we use T1 as cached oop(klass) now. this is the target of virtual call, ++// when reach here, the receiver in A1 (not T0) ++// refer to shareRuntime_sw64.cpp,gen_i2c2i_adapters ++void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) { ++ ResourceMark rm; ++ CodeBuffer code(code_begin, ic_stub_code_size()); ++ MacroAssembler* masm = new MacroAssembler(&code); ++ // note: even though the code contains an embedded oop, we do not need reloc info ++ // because ++ // (1) the oop is old (i.e., doesn't matter for scavenges) ++ // (2) these ICStubs are removed *before* a GC happens, so the roots disappear ++// assert(cached_oop == NULL || cached_oop->is_perm(), "must be perm oop"); ++#define __ masm-> ++ __ patchable_set48(T1, (long)cached_value); ++ ++ __ patchable_jump(entry_point); ++ __ flush(); ++#undef __ ++} ++ ++ ++address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) { ++ NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object ++ NativeGeneralJump* jump = nativeGeneralJump_at(move->next_instruction_address()); ++ return jump->jump_destination(); ++} ++ ++ ++void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) { ++ // creation also verifies the object ++ NativeMovConstReg* move = 
nativeMovConstReg_at(code_begin); ++ // Verifies the jump ++ NativeGeneralJump* jump = nativeGeneralJump_at(move->next_instruction_address()); ++ void* o= (void*)move->data(); ++ return o; ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interp_masm_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/interp_masm_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/interp_masm_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interp_masm_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,2032 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "interp_masm_sw64.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "oops/arrayOop.hpp" ++#include "oops/markOop.hpp" ++#include "oops/methodData.hpp" ++#include "oops/method.hpp" ++#include "prims/jvmtiExport.hpp" ++#include "prims/jvmtiRedefineClassesTrace.hpp" ++#include "prims/jvmtiThreadState.hpp" ++#include "runtime/basicLock.hpp" ++#include "runtime/biasedLocking.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/thread.inline.hpp" ++ ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) ++#else ++#define BLOCK_COMMENT(str) { char line[1024]; \ ++ sprintf(line, "%s:%s:%d",str, __FILE__, __LINE__); \ ++ block_comment(line); } ++#define STOP(error) block_comment(error); stop(error) ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++// Implementation of InterpreterMacroAssembler ++ ++#ifdef CC_INTERP ++void InterpreterMacroAssembler::get_method(Register reg) { ++} ++#endif // CC_INTERP ++ ++void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(Register reg, Register tmp, int offset) { ++ // The runtime address of BCP may be unaligned. ++ // Refer to the SPARC implementation. ++ ldbu(reg, BCP, offset+1); ++ ldbu(tmp, BCP, offset); ++ slll(reg, reg, 8); ++ addl(reg, tmp, reg); ++} ++ ++void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(Register reg, Register tmp, int offset) { ++ assert(reg != tmp, "need separate temp register"); ++ if (offset & 3) { // Offset unaligned? 
++ ldbu(reg, BCP, offset+3); ++ ldbu(tmp, BCP, offset+2); ++#ifdef _LP64 ++ slll(reg, reg, 8); ++ addl(reg, tmp, reg); ++ ldbu(tmp, BCP, offset+1); ++ slll(reg, reg, 8); ++ addl(reg, tmp, reg); ++ ldbu(tmp, BCP, offset); ++ slll(reg, reg, 8); ++ addl(reg, tmp, reg); ++#endif ++ } else { ++ ldw_unsigned(reg, BCP, offset); ++ } ++} ++ ++#ifndef CC_INTERP ++ ++void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, ++ int number_of_arguments) { ++ // interpreter specific ++ // ++ // Note: No need to save/restore bcp & locals (r13 & r14) pointer ++ // since these are callee saved registers and no blocking/ ++ // GC can happen in leaf calls. ++ // Further Note: DO NOT save/restore bcp/locals. If a caller has ++ // already saved them so that it can use BCP/LVP as temporaries ++ // then a save/restore here will DESTROY the copy the caller ++ // saved! There used to be a save_bcp() that only happened in ++ // the ASSERT path (no restore_bcp). Which caused bizarre failures ++ // when jvm built with ASSERTs. ++#ifdef ASSERT ++ save_bcp(); ++ { ++ Label L; ++ ldl(AT,FP,frame::interpreter_frame_last_sp_offset * wordSize); ++ beq(AT, L); ++ stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL"); ++ BIND(L); ++ } ++#endif ++ // super call ++ MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); ++ // interpreter specific ++ // LP64: Used to ASSERT that BCP/LVP were equal to frame's bcp/locals ++ // but since they may not have been saved (and we don't want to ++ // save them here (see note above) the assert is invalid. ++} ++ ++void InterpreterMacroAssembler::call_VM_base(Register oop_result, ++ Register java_thread, ++ Register last_java_sp, ++ address entry_point, ++ int number_of_arguments, ++ bool check_exceptions) { ++ // interpreter specific ++ // ++ // Note: Could avoid restoring locals ptr (callee saved) - however doesn't ++ // really make a difference for these runtime calls, since they are ++ // slow anyway. 
Btw., bcp must be saved/restored since it may change ++ // due to GC. ++ assert(java_thread == noreg , "not expecting a precomputed java thread"); ++ save_bcp(); ++#ifdef ASSERT ++ { ++ Label L; ++ ldl(AT, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ beq(AT, L); ++ stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL"); ++ BIND(L); ++ } ++#endif /* ASSERT */ ++ // super call ++ MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, ++ entry_point, number_of_arguments, ++ check_exceptions); ++ // interpreter specific ++ restore_bcp(); ++ restore_locals(); ++} ++ ++ ++void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) { ++ if (JvmtiExport::can_pop_frame()) { ++ Label L; ++ // Initiate popframe handling only if it is not already being ++ // processed. If the flag has the popframe_processing bit set, it ++ // means that this code is called *during* popframe handling - we ++ // don't want to reenter. ++ // This method is only called just after the call into the vm in ++ // call_VM_base, so the arg registers are available. 
++ // Not clear if any other register is available, so load AT twice ++ assert(AT != java_thread, "check"); ++ ldw(AT, java_thread, in_bytes(JavaThread::popframe_condition_offset())); ++ and_imm8(AT, AT, JavaThread::popframe_pending_bit); ++ beq(AT, L); ++ ldw(AT, java_thread, in_bytes(JavaThread::popframe_condition_offset())); ++ and_imm8(AT, AT, JavaThread::popframe_processing_bit); ++ bne(AT, L); ++ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); ++ jmp(V0); ++ BIND(L); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::load_earlyret_value(TosState state) { ++ Register thread = T11; ++ move(T11, S2thread); ++ ld_ptr(thread, thread, in_bytes(JavaThread::jvmti_thread_state_offset())); ++ const Address tos_addr (thread, in_bytes(JvmtiThreadState::earlyret_tos_offset())); ++ const Address oop_addr (thread, in_bytes(JvmtiThreadState::earlyret_oop_offset())); ++ const Address val_addr (thread, in_bytes(JvmtiThreadState::earlyret_value_offset())); ++ //V0, oop_addr,V1,val_addr ++ switch (state) { ++ case atos: ++ ld_ptr(V0, oop_addr); ++ st_ptr(R0, oop_addr); ++ verify_oop(V0, state); ++ break; ++ case ltos: ++ ld_ptr(V0, val_addr); // fall through ++ break; ++ case btos: // fall through ++ case ztos: // fall through ++ case ctos: // fall through ++ case stos: // fall through ++ case itos: ++ ldw_signed(V0, val_addr); ++ break; ++ case ftos: ++ flds(F0, thread, in_bytes(JvmtiThreadState::earlyret_value_offset())); ++ break; ++ case dtos: ++ fldd(F0, thread, in_bytes(JvmtiThreadState::earlyret_value_offset())); ++ break; ++ case vtos: /* nothing to do */ break; ++ default : ShouldNotReachHere(); ++ } ++ // Clean up tos value in the thread object ++ move(AT, (int)ilgl); ++ stw(AT, tos_addr); ++ stw(R0, thread, in_bytes(JvmtiThreadState::earlyret_value_offset())); ++} ++ ++ ++void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) { ++ if (JvmtiExport::can_force_early_return()) { ++ Label L; ++ Register 
tmp = T12; ++ ++ assert(java_thread != AT, "check"); ++ assert(java_thread != tmp, "check"); ++ ld_ptr(AT,java_thread, in_bytes(JavaThread::jvmti_thread_state_offset())); ++ beq(AT, L); ++ ++ // Initiate earlyret handling only if it is not already being processed. ++ // If the flag has the earlyret_processing bit set, it means that this code ++ // is called *during* earlyret handling - we don't want to reenter. ++ ldw(AT, AT, in_bytes(JvmtiThreadState::earlyret_state_offset())); ++ move(tmp, JvmtiThreadState::earlyret_pending); ++ bne(tmp, AT, L); ++ ++ // Call Interpreter::remove_activation_early_entry() to get the address of the ++ // same-named entrypoint in the generated interpreter code. ++ ld_ptr(tmp,java_thread, in_bytes(JavaThread::jvmti_thread_state_offset())); ++ ldw(AT,tmp, in_bytes(JvmtiThreadState::earlyret_tos_offset())); ++ move(A0, AT); ++ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), A0); ++ jmp(V0); ++ BIND(L); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, ++ int bcp_offset) { ++ assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode"); ++#if 1 //ZHJ20180716 ++ ldbu(AT, BCP, bcp_offset); ++ ldbu(reg, BCP, bcp_offset + 1); ++ slll(AT, AT, 8); ++ or_ins(reg, reg, AT); ++#else // it is also OK! 
++ get_2_byte_integer_at_bcp(reg, AT, bcp_offset); ++ huswap(reg); //ZHJ20180716 hswap(reg); ++#endif ++} ++ ++ ++void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, ++ int bcp_offset, ++ size_t index_size) { ++ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); ++ if (index_size == sizeof(u2)) { ++ get_2_byte_integer_at_bcp(index, AT, bcp_offset); ++ } else if (index_size == sizeof(u4)) { ++ assert(EnableInvokeDynamic, "giant index used only for JSR 292"); ++ get_4_byte_integer_at_bcp(index, AT, bcp_offset); ++ // Check if the secondary index definition is still ~x, otherwise ++ // we have to change the following assembler code to calculate the ++ // plain index. ++ assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); ++ ornot(index, R0, index); ++ addw(index, index, 0); ++ } else if (index_size == sizeof(u1)) { ++ ldbu(index, BCP, bcp_offset); ++ } else { ++ ShouldNotReachHere(); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, ++ Register index, ++ int bcp_offset, ++ size_t index_size) { ++ assert_different_registers(cache, index); ++ get_cache_index_at_bcp(index, bcp_offset, index_size); ++ ldl(cache, FP, frame::interpreter_frame_cache_offset * wordSize); ++ assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); ++ assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line"); ++ shl(index, 2); ++} ++ ++ ++void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, ++ Register index, ++ Register bytecode, ++ int byte_no, ++ int bcp_offset, ++ size_t index_size) { ++ get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size); ++ // We use a 32-bit load here since the layout of 64-bit words on ++ // little-endian machines allow us that. 
++ slll(AT, index, Address::times_ptr); ++ addl(AT, cache, AT); ++ ldw(bytecode, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())); ++ ++ const int shift_count = (1 + byte_no) * BitsPerByte; ++ assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || ++ (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), ++ "correct shift count"); ++ srll(bytecode, bytecode, shift_count); ++ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); ++ move(AT, ConstantPoolCacheEntry::bytecode_1_mask); ++ and_reg(bytecode, bytecode, AT); ++} ++ ++void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, ++ Register tmp, ++ int bcp_offset, ++ size_t index_size) { ++ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); ++ assert(cache != tmp, "must use different register"); ++ get_cache_index_at_bcp(tmp, bcp_offset, index_size); ++ assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below"); ++ // convert from field index to ConstantPoolCacheEntry index ++ // and from word offset to byte offset ++ assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line"); ++ shl(tmp, 2 + LogBytesPerWord); ++ ldl(cache, FP, frame::interpreter_frame_cache_offset * wordSize); ++ // skip past the header ++ add_simm16(cache, cache, in_bytes(ConstantPoolCache::base_offset())); ++ addl(cache, cache, tmp); ++} ++ ++void InterpreterMacroAssembler::get_method_counters(Register method, ++ Register mcs, Label& skip) { ++ Label has_counters; ++ ldl(mcs, method, in_bytes(Method::method_counters_offset())); ++ bne(mcs, has_counters); ++ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::build_method_counters), method); ++ ldl(mcs, method, in_bytes(Method::method_counters_offset())); ++ beq(mcs, skip); // No 
MethodCounters allocated, OutOfMemory ++ BIND(has_counters); ++} ++ ++// Load object from cpool->resolved_references(index) ++void InterpreterMacroAssembler::load_resolved_reference_at_index( ++ Register result, Register index) { ++ assert_different_registers(result, index); ++ // convert from field index to resolved_references() index and from ++ // word index to byte offset. Since this is a java object, it can be compressed ++ Register tmp = index; // reuse ++ shl(tmp, LogBytesPerHeapOop); ++ ++ get_constant_pool(result); ++ // load pointer for resolved_references[] objArray ++ ldl(result, result, ConstantPool::resolved_references_offset_in_bytes()); ++ // JNIHandles::resolve(obj); ++ ldl(result, result, 0); //? is needed? ++ // Add in the index ++ addl(result, result, tmp); ++ load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); ++} ++ ++// Resets LVP to locals. Register sub_klass cannot be any of the above. ++void InterpreterMacroAssembler::gen_subtype_check( Register Rsup_klass, Register Rsub_klass, Label &ok_is_subtype ) { ++ assert( Rsub_klass != Rsup_klass, "Rsup_klass holds superklass" ); ++ assert( Rsub_klass != T1, "T1 holds 2ndary super array length" ); ++ assert( Rsub_klass != T0, "T0 holds 2ndary super array scan ptr" ); ++ // Profile the not-null value's klass. ++ // Here T12 and T1 are used as temporary registers. ++ profile_typecheck(T12, Rsub_klass, T1); // blows T12, reloads T1 ++ ++// Do the check. ++ check_klass_subtype(Rsub_klass, Rsup_klass, T1, ok_is_subtype); // blows T1 ++ ++// Profile the failure of the check. 
++ profile_typecheck_failed(T12); // blows T12 ++} ++ ++ ++ ++// Java Expression Stack ++ ++void InterpreterMacroAssembler::pop_ptr(Register r) { ++ if (UseSW8A) { ++ ldl_a(r, Interpreter::stackElementSize, SP); ++ } else { ++ ldl(r, SP, 0); ++ addl(SP, SP, Interpreter::stackElementSize); ++ } ++} ++ ++void InterpreterMacroAssembler::pop_i(Register r) { ++ if (UseSW8A) { ++ ldw_a(r, Interpreter::stackElementSize, SP); ++ } else { ++ ldw(r, SP, 0); ++ addl(SP, SP, Interpreter::stackElementSize); ++ } ++} ++ ++void InterpreterMacroAssembler::pop_l(Register r) { ++ if (UseSW8A) { ++ ldl_a(r, 2 * Interpreter::stackElementSize, SP); ++ } else { ++ ldl(r, SP, 0); ++ addl(SP, SP, 2 * Interpreter::stackElementSize); ++ } ++} ++ ++void InterpreterMacroAssembler::pop_f(FloatRegister r) { ++ if (UseSW8A) { ++ flds_a(r, Interpreter::stackElementSize, SP); ++ } else { ++ flds(r, SP, 0); ++ addl(SP, SP, Interpreter::stackElementSize); ++ } ++} ++ ++void InterpreterMacroAssembler::pop_d(FloatRegister r) { ++ if (UseSW8A) { ++ fldd_a(r, 2 * Interpreter::stackElementSize, SP); ++ } else { ++ fldd(r, SP, 0); ++ addl(SP, SP, 2 * Interpreter::stackElementSize); ++ } ++} ++ ++void InterpreterMacroAssembler::push_ptr(Register r) { ++ subl(SP, SP, Interpreter::stackElementSize); ++ stl(r, SP, 0); ++} ++ ++void InterpreterMacroAssembler::push_i(Register r) { ++ subl(SP, SP, Interpreter::stackElementSize); ++ // For compatibility reason, don't change to sw. 
++ stl(r, SP, 0); ++} ++ ++void InterpreterMacroAssembler::push_l(Register r) { ++ subl(SP, SP, 2 * Interpreter::stackElementSize); ++ stl(r, SP, 0); ++} ++ ++void InterpreterMacroAssembler::push_f(FloatRegister r) { ++ subl(SP, SP, Interpreter::stackElementSize); ++ fsts(r, SP, 0); ++} ++ ++void InterpreterMacroAssembler::push_d(FloatRegister r) { ++ subl(SP, SP, 2 * Interpreter::stackElementSize); ++ fstd(r, SP, 0); ++} ++ ++void InterpreterMacroAssembler::pop(TosState state) { ++ switch (state) { ++ case atos: pop_ptr(); break; ++ case btos: ++ case ztos: ++ case ctos: ++ case stos: ++ case itos: pop_i(); break; ++ case ltos: pop_l(); break; ++ case ftos: pop_f(); break; ++ case dtos: pop_d(); break; ++ case vtos: /* nothing to do */ break; ++ default: ShouldNotReachHere(); ++ } ++ verify_oop(FSR, state); ++} ++ ++//FSR=V0,SSR=T4 ++void InterpreterMacroAssembler::push(TosState state) { ++ verify_oop(FSR, state); ++ switch (state) { ++ case atos: push_ptr(); break; ++ case btos: ++ case ztos: ++ case ctos: ++ case stos: ++ case itos: push_i(); break; ++ case ltos: push_l(); break; ++ case ftos: push_f(); break; ++ case dtos: push_d(); break; ++ case vtos: /* nothing to do */ break; ++ default : ShouldNotReachHere(); ++ } ++} ++ ++ ++ ++void InterpreterMacroAssembler::load_ptr(int n, Register val) { ++ ldl(val, SP, Interpreter::expr_offset_in_bytes(n)); ++} ++ ++void InterpreterMacroAssembler::store_ptr(int n, Register val) { ++ stl(val, SP, Interpreter::expr_offset_in_bytes(n)); ++} ++ ++// Jump to from_interpreted entry of a call unless single stepping is possible ++// in this thread in which case we must call the i2i entry ++void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) { ++ // record last_sp ++ move(Rsender, SP); ++ stl(SP, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ ++ if (JvmtiExport::can_post_interpreter_events()) { ++ Label run_compiled_code; ++ // JVMTI events, such as single-stepping, are 
implemented partly by avoiding running ++ // compiled code in threads for which the event is enabled. Check here for ++ // interp_only_mode if these events CAN be enabled. ++ move(temp, S2thread); ++ // interp_only is an int, on little endian it is sufficient to test the byte only ++ // Is a cmpl faster? ++ ldw(AT, temp, in_bytes(JavaThread::interp_only_mode_offset())); ++ beq(AT, run_compiled_code); ++ ldl(AT, method, in_bytes(Method::interpreter_entry_offset())); ++ jmp(AT); ++ BIND(run_compiled_code); ++ } ++ ++ ldl(AT, method, in_bytes(Method::from_interpreted_offset())); ++ jmp(AT); ++} ++ ++ ++// The following two routines provide a hook so that an implementation ++// can schedule the dispatch in two parts. sw64 does not do this. ++void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { ++ // Nothing sw64 specific to be done here ++} ++ ++void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) { ++ dispatch_next(state, step); ++} ++ ++// assume the next bytecode in T11. 
++void InterpreterMacroAssembler::dispatch_base(TosState state, ++ address* table, ++ bool verifyoop) { ++ if (VerifyActivationFrameSize) { ++ Label L; ++ ++ subl(T2, FP, SP); ++ int min_frame_size = (frame::link_offset - ++ frame::interpreter_frame_initial_sp_offset) * wordSize; ++ add_simm16(T2, T2,- min_frame_size); ++ bge(T2, L); ++ stop("broken stack frame"); ++ BIND(L); ++ } ++ // FIXME: I do not know which register should pass to verify_oop ++ if (verifyoop) verify_oop(FSR, state); ++ slll(T2, Rnext, LogBytesPerWord); ++ ++ li(GP, (long)table); ++ addl(T3, T2, GP); ++ ldl(T3, T3, 0); ++ jmp(T3); ++} ++ ++void InterpreterMacroAssembler::dispatch_only(TosState state) { ++ dispatch_base(state, Interpreter::dispatch_table(state)); ++} ++ ++void InterpreterMacroAssembler::dispatch_only_normal(TosState state) { ++ dispatch_base(state, Interpreter::normal_table(state)); ++} ++ ++void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) { ++ dispatch_base(state, Interpreter::normal_table(state), false); ++} ++ ++ ++void InterpreterMacroAssembler::dispatch_next(TosState state, int step) { ++ // load next bytecode (load before advancing r13 to prevent AGI) ++ ldbu(Rnext, BCP, step); ++ increment(BCP, step); ++ dispatch_base(state, Interpreter::dispatch_table(state)); ++} ++ ++void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { ++ // load current bytecode ++ ldbu(Rnext, BCP, 0); ++ dispatch_base(state, table); ++} ++ ++// remove activation ++// ++// Unlock the receiver if this is a synchronized method. ++// Unlock any Java monitors from syncronized blocks. ++// Remove the activation from the stack. 
++// ++// If there are locked Java monitors ++// If throw_monitor_exception ++// throws IllegalMonitorStateException ++// Else if install_monitor_exception ++// installs IllegalMonitorStateException ++// Else ++// no error processing ++// used registers : T1, T2, T3, T11 ++// T1 : thread, method access flags ++// T2 : monitor entry pointer ++// T3 : method, monitor top ++// T11 : unlock flag ++void InterpreterMacroAssembler::remove_activation( ++ TosState state, ++ Register ret_addr, ++ bool throw_monitor_exception, ++ bool install_monitor_exception, ++ bool notify_jvmdi) { ++ // Note: Registers V0, T4 and F0, F1 may be in use for the result ++ // check if synchronized method ++ Label unlocked, unlock, no_unlock; ++ ++ // get the value of _do_not_unlock_if_synchronized into T11 ++ Register thread = S2thread; ++ ldbu(T11, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); ++ // reset the flag ++ stb(R0, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); ++ // get method access flags ++ ldl(T3, FP, frame::interpreter_frame_method_offset * wordSize); ++ ldw(T1, T3, in_bytes(Method::access_flags_offset())); ++ and_imm8(T1, T1, JVM_ACC_SYNCHRONIZED); ++ beq(T1, unlocked); ++ ++ // Don't unlock anything if the _do_not_unlock_if_synchronized flag is set. ++ bne(T11, no_unlock); ++ // unlock monitor ++ push(state); // save result ++ ++ // BasicObjectLock will be first in list, since this is a ++ // synchronized method. However, need to check that the object has ++ // not been unlocked by an explicit monitorexit bytecode. 
++ add_simm16(c_rarg0, FP, frame::interpreter_frame_initial_sp_offset * wordSize ++ - (int)sizeof(BasicObjectLock)); ++ // address of first monitor ++ ldl(T1, c_rarg0, BasicObjectLock::obj_offset_in_bytes()); ++ bne(T1, unlock); ++ pop(state); ++ if (throw_monitor_exception) { ++ empty_FPU_stack(); ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_illegal_monitor_state_exception)); ++ should_not_reach_here(); ++ } else { ++ // Monitor already unlocked during a stack unroll. If requested, ++ // install an illegal_monitor_state_exception. Continue with ++ // stack unrolling. ++ if (install_monitor_exception) { ++ // remove possible return value from FPU-stack, ++ // otherwise stack could overflow ++ empty_FPU_stack(); ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::new_illegal_monitor_state_exception)); ++ ++ } ++ ++ beq(R0, unlocked); ++ } ++ ++ BIND(unlock); ++ unlock_object(c_rarg0); ++ pop(state); ++ ++ // Check that for block-structured locking (i.e., that all locked ++ // objects has been unlocked) ++ BIND(unlocked); ++ ++ // V0, T4: Might contain return value ++ ++ // Check that all monitors are unlocked ++ { ++ Label loop, exception, entry, restart; ++ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; ++ const Address monitor_block_top(FP, ++ frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ ++ BIND(restart); ++ // points to current entry, starting with top-most entry ++ ldl(c_rarg0, monitor_block_top); ++ // points to word before bottom of monitor block ++ add_simm16(T3, FP, frame::interpreter_frame_initial_sp_offset * wordSize); ++ beq(R0, entry); ++ ++ // Entry already locked, need to throw exception ++ BIND(exception); ++ ++ if (throw_monitor_exception) { ++ // Throw exception ++ // remove possible return value from FPU-stack, ++ // otherwise stack could overflow ++ empty_FPU_stack(); ++ MacroAssembler::call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ 
InterpreterRuntime::throw_illegal_monitor_state_exception)); ++ should_not_reach_here(); ++ } else { ++ // Stack unrolling. Unlock object and install illegal_monitor_exception ++ // Unlock does not block, so don't have to worry about the frame ++ // We don't have to preserve c_rarg0, since we are going to ++ // throw an exception ++ unlock_object(c_rarg0); ++ if (install_monitor_exception) { ++ empty_FPU_stack(); ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::new_illegal_monitor_state_exception)); ++ } ++ ++ beq(R0, restart); ++ } ++ ++ BIND(loop); ++ ldl(T1, c_rarg0, BasicObjectLock::obj_offset_in_bytes()); ++ bne(T1, exception);// check if current entry is used ++ ++ ++ add_simm16(c_rarg0, c_rarg0, entry_size);// otherwise advance to next entry ++ BIND(entry); ++ bne(c_rarg0, T3, loop); // check if bottom reached ++ } ++ ++ BIND(no_unlock); ++ ++ // jvmpi support (jvmdi does not generate MethodExit on exception / popFrame) ++ if (notify_jvmdi) { ++ notify_method_exit(false,state,NotifyJVMTI); // preserve TOSCA ++ } else { ++ notify_method_exit(false,state,SkipNotifyJVMTI);// preserve TOSCA ++ } ++ ++ // remove activation ++ // rewind sp to where the old sp saved on the stack, and coincidentally the ++ // stack contents we care about are all above this point. ++ ldi(SP, FP, frame::interpreter_frame_sender_sp_offset * wordSize); ++ ldl(ret_addr, FP, frame::interpreter_frame_return_addr_offset * wordSize); ++ ldl(FP, FP, frame::interpreter_frame_sender_fp_offset * wordSize); ++ // restore the old sp. ++ ldl(SP, SP, 0); ++} ++ ++#endif // C_INTERP ++ ++// Lock object ++// ++// Args: ++// c_rarg1: BasicObjectLock to be used for locking ++// ++// Kills: ++// c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs) ++// rscratch1, rscratch2 (scratch regs) ++void InterpreterMacroAssembler::lock_object(Register lock_reg) { ++ assert(lock_reg == c_rarg0, "The argument is only for looks. 
It must be c_rarg0"); ++ ++ if (UseHeavyMonitors) { ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), ++ lock_reg); ++ } else { ++ Label done; ++ ++ const Register swap_reg = T2; // Must use T2 for cmpxchg instruction ++ const Register obj_reg = T1; // Will contain the oop ++ ++ const int obj_offset = BasicObjectLock::obj_offset_in_bytes(); ++ const int lock_offset = BasicObjectLock::lock_offset_in_bytes (); ++ const int mark_offset = lock_offset + ++ BasicLock::displaced_header_offset_in_bytes(); ++ ++ Label slow_case; ++ ++ // Load object pointer into obj_reg %T1 ++ ldl(obj_reg, lock_reg, obj_offset); ++ ++ if (UseBiasedLocking) { ++ // Note: we use noreg for the temporary register since it's hard ++ // to come up with a free register on all incoming code paths ++ biased_locking_enter(lock_reg, obj_reg, swap_reg, noreg, false, done, &slow_case); ++ } ++ ++ ++ // Load (object->mark() | 1) into swap_reg %T2 ++ ldl(AT, obj_reg, 0); ++ or_ins( swap_reg, AT, 1); ++ ++ ++ // Save (object->mark() | 1) into BasicLock's displaced header ++ stl(swap_reg, lock_reg, mark_offset); ++ ++ assert(lock_offset == 0, "displached header must be first word in BasicObjectLock"); ++ //if (os::is_MP()) { ++ // lock(); ++ //} ++ cmpxchg(lock_reg, Address(obj_reg, 0), swap_reg); ++ ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ beq(AT, L); ++ push(T0); ++ push(T1); ++ atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1); ++ pop(T1); ++ pop(T0); ++ BIND(L); ++ } ++ ++ bne(AT, done); ++ ++ // Test if the oopMark is an obvious stack pointer, i.e., ++ // 1) (mark & 3) == 0, and ++ // 2) SP <= mark < SP + os::pagesize() ++ // ++ // These 3 tests can be done by evaluating the following ++ // expression: ((mark - sp) & (3 - os::vm_page_size())), ++ // assuming both stack pointer and pagesize have their ++ // least significant 2 bits clear. 
++ // NOTE: the oopMark is in swap_reg %T2 as the result of cmpxchg ++ ++ subl(swap_reg, swap_reg, SP); ++ move(AT, 3 - os::vm_page_size()); ++ and_reg(swap_reg, swap_reg, AT); ++ // Save the test result, for recursive case, the result is zero ++ stl(swap_reg, lock_reg, mark_offset); ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ bne(swap_reg, L); ++ push(T0); ++ push(T1); ++ atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1); ++ pop(T1); ++ pop(T0); ++ BIND(L); ++ } ++ ++ beq(swap_reg, done); ++ BIND(slow_case); ++ // Call the runtime routine for slow case ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); ++ ++ BIND(done); ++ } ++} ++ ++ ++// Unlocks an object. Used in monitorexit bytecode and ++// remove_activation. Throws an IllegalMonitorException if object is ++// not locked by current thread. ++// ++// Args: ++// c_rarg1: BasicObjectLock for lock ++// ++// Kills: ++// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs) ++// rscratch1, rscratch2 (scratch regs) ++// Argument: T6 : Points to BasicObjectLock structure for lock ++// Argument: c_rarg0 : Points to BasicObjectLock structure for lock ++// Throw an IllegalMonitorException if object is not locked by current thread ++void InterpreterMacroAssembler::unlock_object(Register lock_reg) { ++ assert(lock_reg == c_rarg0, "The argument is only for looks. 
It must be c_rarg0"); ++ ++ if (UseHeavyMonitors) { ++ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); ++ } else { ++ Label done; ++ ++ const Register swap_reg = T2; // Must use T2 for cmpxchg instruction ++ const Register header_reg = T3; // Will contain the old oopMark ++ const Register obj_reg = T1; // Will contain the oop ++ ++ save_bcp(); // Save in case of exception ++ ++ // Convert from BasicObjectLock structure to object and BasicLock structure ++ // Store the BasicLock address into %T2 ++ add_simm16(swap_reg, lock_reg, BasicObjectLock::lock_offset_in_bytes()); ++ ++ // Load oop into obj_reg(%T1) ++ ldl(obj_reg, lock_reg, BasicObjectLock::obj_offset_in_bytes ()); ++ //free entry ++ stl(R0, lock_reg, BasicObjectLock::obj_offset_in_bytes()); ++ if (UseBiasedLocking) { ++ biased_locking_exit(obj_reg, header_reg, done); ++ } ++ ++ // Load the old header from BasicLock structure ++ ldl(header_reg, swap_reg, BasicLock::displaced_header_offset_in_bytes()); ++ // zero for recursive case ++ beq(header_reg, done); ++ ++ // Atomic swap back the old header ++ if (os::is_MP()); //lock(); ++ cmpxchg(header_reg, Address(obj_reg, 0), swap_reg); ++ ++ // zero for recursive case ++ bne(AT, done); ++ ++ // Call the runtime routine for slow case. ++ stl(obj_reg, lock_reg, BasicObjectLock::obj_offset_in_bytes()); // restore obj ++ call_VM(NOREG, ++ CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), ++ lock_reg); ++ ++ BIND(done); ++ ++ restore_bcp(); ++ } ++} ++ ++#ifndef CC_INTERP ++ ++void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, ++ Label& zero_continue) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ ldl(mdp, Address(FP, frame::interpreter_frame_mdx_offset * wordSize)); ++ beq(mdp, zero_continue); ++} ++ ++ ++// Set the method data pointer for the current bcp. 
++void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ Label set_mdp; ++ ++ // V0 and T0 will be used as two temporary registers. ++ stl(V0, SP, (-1) * wordSize); ++ stl(T0, SP, (-2) * wordSize); ++ add_simm16(SP, SP, (-2) * wordSize); ++ ++ get_method(T0); ++ // Test MDO to avoid the call if it is NULL. ++ ldl(V0, T0, in_bytes(Method::method_data_offset())); ++ beq(V0, set_mdp); ++ ++ // method: T0 ++ // bcp: BCP --> S0 ++ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), T0, BCP); ++ // mdi: V0 ++ // mdo is guaranteed to be non-zero here, we checked for it before the call. ++ get_method(T0); ++ ldl(T0, T0, in_bytes(Method::method_data_offset())); ++ add_simm16(T0, T0, in_bytes(MethodData::data_offset())); ++ addl(V0, T0, V0); ++ BIND(set_mdp); ++ stl(V0, FP, frame::interpreter_frame_mdx_offset * wordSize); ++ add_simm16(SP, SP, 2 * wordSize); ++ ldl(V0, SP, (-1) * wordSize); ++ ldl(T0, SP, (-2) * wordSize); ++} ++ ++void InterpreterMacroAssembler::verify_method_data_pointer() { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++#ifdef ASSERT ++ Label verify_continue; ++ Register method = V0; ++ Register mdp = T4; ++ Register tmp = A0; ++ push(method); ++ push(mdp); ++ push(tmp); ++ test_method_data_pointer(mdp, verify_continue); // If mdp is zero, continue ++ get_method(method); ++ ++ // If the mdp is valid, it will point to a DataLayout header which is ++ // consistent with the bcp. The converse is highly probable also. 
++ ldhu(tmp, mdp, in_bytes(DataLayout::bci_offset())); ++ ldl(AT, method, in_bytes(Method::const_offset())); ++ addl(tmp, tmp, AT); ++ add_simm16(tmp, tmp, in_bytes(ConstMethod::codes_offset())); ++ beq(tmp, BCP, verify_continue); ++ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), method, BCP, mdp); ++ BIND(verify_continue); ++ pop(tmp); ++ pop(mdp); ++ pop(method); ++#endif // ASSERT ++} ++ ++ ++void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, ++ int constant, ++ Register value) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ Address data(mdp_in, constant); ++ stl(value, data); ++} ++ ++ ++void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, ++ int constant, ++ bool decrement) { ++ // Counter address ++ Address data(mdp_in, constant); ++ ++ increment_mdp_data_at(data, decrement); ++} ++ ++void InterpreterMacroAssembler::increment_mdp_data_at(Address data, ++ bool decrement) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ // %%% this does 64bit counters at best it is wasting space ++ // at worst it is a rare bug when counters overflow ++ Register tmp = S0; ++ push(tmp); ++ if (decrement) { ++ // Decrement the register. ++ ldl(AT, data); ++ add_simm16(tmp, AT, (int32_t) -DataLayout::counter_increment); ++ // If the decrement causes the counter to overflow, stay negative ++ Label L; ++ cmplt(AT, tmp, R0); ++ bne(AT, L); ++ add_simm16(tmp, tmp, (int32_t) DataLayout::counter_increment); ++ BIND(L); ++ stl(tmp, data); ++ } else { ++ assert(DataLayout::counter_increment == 1, ++ "flow-free idiom only works with 1"); ++ ldl(AT, data); ++ // Increment the register. ++ add_simm16(tmp, AT, DataLayout::counter_increment); ++ // If the increment causes the counter to overflow, pull back by 1. 
++ cmplt(AT, tmp, R0); ++ subl(tmp, tmp, AT); ++ stl(tmp, data); ++ } ++ pop(tmp); ++} ++ ++ ++void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, ++ Register reg, ++ int constant, ++ bool decrement) { ++ Register tmp = S0; ++ push(S0); ++ if (decrement) { ++ // Decrement the register. ++ addl(AT, mdp_in, reg); ++ assert(Assembler::is_simm16(constant), "constant is not a simm16 !"); ++ ldl(AT, AT, constant); ++ ++ add_simm16(tmp, AT, (int32_t) -DataLayout::counter_increment); ++ // If the decrement causes the counter to overflow, stay negative ++ Label L; ++ cmplt(AT, tmp, R0); ++ bne(AT, L); ++ add_simm16(tmp, tmp, (int32_t) DataLayout::counter_increment); ++ BIND(L); ++ ++ addl(AT, mdp_in, reg); ++ stl(tmp, AT, constant); ++ } else { ++ addl(AT, mdp_in, reg); ++ assert(Assembler::is_simm16(constant), "constant is not a simm16 !"); ++ ldl(AT, AT, constant); ++ ++ // Increment the register. ++ add_simm16(tmp, AT, DataLayout::counter_increment); ++ // If the increment causes the counter to overflow, pull back by 1. 
++ cmplt(AT, tmp, R0); ++ subl(tmp, tmp, AT); ++ ++ addl(AT, mdp_in, reg); ++ stl(tmp, AT, constant); ++ } ++ pop(S0); ++} ++ ++void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, ++ int flag_byte_constant) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ int header_offset = in_bytes(DataLayout::header_offset()); ++ int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant); ++ // Set the flag ++ ldw_signed(AT, Address(mdp_in, header_offset)); ++ if(Assembler::is_uimm8(header_bits)) { ++ or_ins( AT, AT, header_bits); ++ } else { ++ ldi(GP, R0, header_bits); ++ or_ins(AT, AT, GP); ++ } ++ stw(AT, Address(mdp_in, header_offset)); ++} ++ ++ ++ ++void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in, ++ int offset, ++ Register value, ++ Register test_value_out, ++ Label& not_equal_continue) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ if (test_value_out == noreg) { ++ ldl(AT, Address(mdp_in, offset)); ++ bne(AT, value, not_equal_continue); ++ } else { ++ // Put the test value into a register, so caller can use it: ++ ldl(test_value_out, Address(mdp_in, offset)); ++ bne(value, test_value_out, not_equal_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, ++ int offset_of_disp) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ assert(Assembler::is_simm16(offset_of_disp), "offset is not an simm16"); ++ ldl(AT, mdp_in, offset_of_disp); ++ addl(mdp_in, mdp_in, AT); ++ stl(mdp_in, Address(FP, frame::interpreter_frame_mdx_offset * wordSize)); ++} ++ ++ ++void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, ++ Register reg, ++ int offset_of_disp) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ addl(AT, reg, mdp_in); ++ assert(Assembler::is_simm16(offset_of_disp), "offset is not an simm16"); ++ ldl(AT, AT, offset_of_disp); ++ addl(mdp_in, mdp_in, AT); ++ stl(mdp_in, Address(FP, 
frame::interpreter_frame_mdx_offset * wordSize)); ++} ++ ++ ++void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, ++ int constant) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ if(Assembler::is_simm16(constant)) { ++ add_simm16(mdp_in, mdp_in, constant); ++ } else { ++ move(AT, constant); ++ addl(mdp_in, mdp_in, AT); ++ } ++ stl(mdp_in, Address(FP, frame::interpreter_frame_mdx_offset * wordSize)); ++} ++ ++ ++void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) { ++ assert(ProfileInterpreter, "must be profiling interpreter"); ++ push(return_bci); // save/restore across call_VM ++ call_VM(noreg, ++ CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), ++ return_bci); ++ pop(return_bci); ++} ++ ++ ++void InterpreterMacroAssembler::profile_taken_branch(Register mdp, ++ Register bumped_count) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ // Otherwise, assign to mdp ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // We are taking a branch. Increment the taken count. ++ // We inline increment_mdp_data_at to return bumped_count in a register ++ // increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset())); ++ ldl(bumped_count, mdp, in_bytes(JumpData::taken_offset())); ++ assert(DataLayout::counter_increment == 1, ++ "flow-free idiom only works with 1"); ++ push(T11); ++ // T11 is used as a temporary register. ++ add_simm16(T11, bumped_count, DataLayout::counter_increment); ++ cmplt(AT, T11, R0); ++ subl(bumped_count, T11, AT); ++ pop(T11); ++ stl(bumped_count, mdp, in_bytes(JumpData::taken_offset())); // Store back out ++ // The method data pointer needs to be updated to reflect the new target. 
++ update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // We are taking a branch. Increment the not taken count. ++ increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset())); ++ ++ // The method data pointer needs to be updated to correspond to ++ // the next bytecode ++ update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size())); ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_call(Register mdp) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // We are making a call. Increment the count. ++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); ++ ++ // The method data pointer needs to be updated to reflect the new target. ++ update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size())); ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_final_call(Register mdp) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // We are making a call. Increment the count. ++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); ++ ++ // The method data pointer needs to be updated to reflect the new target. 
++ update_mdp_by_constant(mdp, ++ in_bytes(VirtualCallData:: ++ virtual_call_data_size())); ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_virtual_call(Register receiver, ++ Register mdp, ++ Register reg2, ++ bool receiver_can_be_null) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ Label skip_receiver_profile; ++ if (receiver_can_be_null) { ++ Label not_null; ++ bne(receiver, not_null); ++ // We are making a call. Increment the count. ++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); ++ beq(R0, skip_receiver_profile); ++ BIND(not_null); ++ } ++ ++ // Record the receiver type. ++ record_klass_in_profile(receiver, mdp, reg2, true); ++ BIND(skip_receiver_profile); ++ ++ // The method data pointer needs to be updated to reflect the new target. ++ update_mdp_by_constant(mdp, ++ in_bytes(VirtualCallData:: ++ virtual_call_data_size())); ++ BIND(profile_continue); ++ } ++} ++ ++// This routine creates a state machine for updating the multi-row ++// type profile at a virtual call site (or other type-sensitive bytecode). ++// The machine visits each row (of receiver/count) until the receiver type ++// is found, or until it runs out of rows. At the same time, it remembers ++// the location of the first empty row. (An empty row records null for its ++// receiver, and can be allocated for a newly-observed receiver type.) ++// Because there are two degrees of freedom in the state, a simple linear ++// search will not work; it must be a decision tree. Hence this helper ++// function is recursive, to generate the required tree structured code. ++// It's the interpreter, so we are trading off code space for speed. ++// See below for example code. 
++void InterpreterMacroAssembler::record_klass_in_profile_helper( ++ Register receiver, Register mdp, ++ Register reg2, int start_row, ++ Label& done, bool is_virtual_call) { ++ if (TypeProfileWidth == 0) { ++ if (is_virtual_call) { ++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); ++ } ++ return; ++ } ++ ++ int last_row = VirtualCallData::row_limit() - 1; ++ assert(start_row <= last_row, "must be work left to do"); ++ // Test this row for both the receiver and for null. ++ // Take any of three different outcomes: ++ // 1. found receiver => increment count and goto done ++ // 2. found null => keep looking for case 1, maybe allocate this cell ++ // 3. found something else => keep looking for cases 1 and 2 ++ // Case 3 is handled by a recursive call. ++ for (int row = start_row; row <= last_row; row++) { ++ Label next_test; ++ bool test_for_null_also = (row == start_row); ++ ++ // See if the receiver is receiver[n]. ++ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row)); ++ test_mdp_data_at(mdp, recvr_offset, receiver, ++ (test_for_null_also ? reg2 : noreg), ++ next_test); ++ // (Reg2 now contains the receiver from the CallData.) ++ ++ // The receiver is receiver[n]. Increment count[n]. ++ int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row)); ++ increment_mdp_data_at(mdp, count_offset); ++ beq(R0, done); ++ BIND(next_test); ++ ++ if (test_for_null_also) { ++ Label found_null; ++ // Failed the equality check on receiver[n]... Test for null. ++ if (start_row == last_row) { ++ // The only thing left to do is handle the null case. ++ if (is_virtual_call) { ++ beq(reg2, found_null); ++ // Receiver did not match any saved receiver and there is no empty row for it. ++ // Increment total counter to indicate polymorphic case. 
++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); ++ beq(R0, done); ++ BIND(found_null); ++ } else { ++ bne(reg2, done); ++ } ++ break; ++ } ++ // Since null is rare, make it be the branch-taken case. ++ beq(reg2, found_null); ++ ++ // Put all the "Case 3" tests here. ++ record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call); ++ ++ // Found a null. Keep searching for a matching receiver, ++ // but remember that this is an empty (unused) slot. ++ BIND(found_null); ++ } ++ } ++ ++ // In the fall-through case, we found no matching receiver, but we ++ // observed the receiver[start_row] is NULL. ++ ++ // Fill in the receiver field and increment the count. ++ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); ++ set_mdp_data_at(mdp, recvr_offset, receiver); ++ int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); ++ move(reg2, DataLayout::counter_increment); ++ set_mdp_data_at(mdp, count_offset, reg2); ++ if (start_row > 0) { ++ beq(R0, done); ++ } ++} ++ ++// Example state machine code for three profile rows: ++// // main copy of decision tree, rooted at row[1] ++// if (row[0].rec == rec) { row[0].incr(); goto done; } ++// if (row[0].rec != NULL) { ++// // inner copy of decision tree, rooted at row[1] ++// if (row[1].rec == rec) { row[1].incr(); goto done; } ++// if (row[1].rec != NULL) { ++// // degenerate decision tree, rooted at row[2] ++// if (row[2].rec == rec) { row[2].incr(); goto done; } ++// if (row[2].rec != NULL) { goto done; } // overflow ++// row[2].init(rec); goto done; ++// } else { ++// // remember row[1] is empty ++// if (row[2].rec == rec) { row[2].incr(); goto done; } ++// row[1].init(rec); goto done; ++// } ++// } else { ++// // remember row[0] is empty ++// if (row[1].rec == rec) { row[1].incr(); goto done; } ++// if (row[2].rec == rec) { row[2].incr(); goto done; } ++// row[0].init(rec); goto done; ++// } ++// done: ++ ++void 
InterpreterMacroAssembler::record_klass_in_profile(Register receiver, ++ Register mdp, Register reg2, ++ bool is_virtual_call) { ++ assert(ProfileInterpreter, "must be profiling"); ++ Label done; ++ ++ record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call); ++ ++ BIND (done); ++} ++ ++void InterpreterMacroAssembler::profile_ret(Register return_bci, ++ Register mdp) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ uint row; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // Update the total ret count. ++ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset())); ++ ++ for (row = 0; row < RetData::row_limit(); row++) { ++ Label next_test; ++ ++ // See if return_bci is equal to bci[n]: ++ test_mdp_data_at(mdp, ++ in_bytes(RetData::bci_offset(row)), ++ return_bci, noreg, ++ next_test); ++ ++ // return_bci is equal to bci[n]. Increment the count. ++ increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row))); ++ ++ // The method data pointer needs to be updated to reflect the new target. ++ update_mdp_by_offset(mdp, ++ in_bytes(RetData::bci_displacement_offset(row))); ++ beq(R0, profile_continue); ++ BIND(next_test); ++ } ++ ++ update_mdp_for_ret(return_bci); ++ ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_null_seen(Register mdp) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ set_mdp_flag_at(mdp, BitData::null_seen_byte_constant()); ++ ++ // The method data pointer needs to be updated. 
++ int mdp_delta = in_bytes(BitData::bit_data_size()); ++ if (TypeProfileCasts) { ++ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); ++ } ++ update_mdp_by_constant(mdp, mdp_delta); ++ ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) { ++ if (ProfileInterpreter && TypeProfileCasts) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ int count_offset = in_bytes(CounterData::count_offset()); ++ // Back up the address, since we have already bumped the mdp. ++ count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); ++ ++ // *Decrement* the counter. We expect to see zero or small negatives. ++ increment_mdp_data_at(mdp, count_offset, true); ++ ++ BIND (profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // The method data pointer needs to be updated. ++ int mdp_delta = in_bytes(BitData::bit_data_size()); ++ if (TypeProfileCasts) { ++ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); ++ ++ // Record the object type. ++ record_klass_in_profile(klass, mdp, reg2, false); ++ } ++ update_mdp_by_constant(mdp, mdp_delta); ++ ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_switch_default(Register mdp) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // Update the default case count ++ increment_mdp_data_at(mdp, ++ in_bytes(MultiBranchData::default_count_offset())); ++ ++ // The method data pointer needs to be updated. 
++ update_mdp_by_offset(mdp, ++ in_bytes(MultiBranchData:: ++ default_displacement_offset())); ++ ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::profile_switch_case(Register index, ++ Register mdp, ++ Register reg2) { ++ if (ProfileInterpreter) { ++ Label profile_continue; ++ ++ // If no method data exists, go to profile_continue. ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // Build the base (index * per_case_size_in_bytes()) + ++ // case_array_offset_in_bytes() ++ move(reg2, in_bytes(MultiBranchData::per_case_size())); ++ mull(index, reg2, index); ++ add_simm16(index, index, in_bytes(MultiBranchData::case_array_offset())); ++ ++ // Update the case count ++ increment_mdp_data_at(mdp, ++ index, ++ in_bytes(MultiBranchData::relative_count_offset())); ++ ++ // The method data pointer needs to be updated. ++ update_mdp_by_offset(mdp, ++ index, ++ in_bytes(MultiBranchData:: ++ relative_displacement_offset())); ++ ++ BIND(profile_continue); ++ } ++} ++ ++ ++void InterpreterMacroAssembler::narrow(Register result) { ++ ++ // Get method->_constMethod->_result_type ++ ldl(T9, FP, frame::interpreter_frame_method_offset * wordSize); ++ ldl(T9, T9, in_bytes(Method::const_offset())); ++ ldbu(T9, T9, in_bytes(ConstMethod::result_type_offset())); ++ ++ Label done, notBool, notByte, notChar; ++ ++ // common case first ++ subl(AT, T9, T_INT); ++ beq(AT, done); ++ ++ // mask integer result to narrower return type. 
++ subl(AT, T9, T_BOOLEAN); ++ bne(AT, notBool); ++ and_imm8(result, result, 0x1); ++ beq(R0, done); ++ ++ BIND(notBool); ++ subl(AT, T9, T_BYTE); ++ bne(AT, notByte); ++ sextb(result, result); ++ beq(R0, done); ++ ++ BIND(notByte); ++ subl(AT, T9, T_CHAR); ++ bne(AT, notChar); ++ zapnot(result, result, 0x3); ++ beq(R0, done); ++ ++ BIND(notChar); ++ sexth(result, result); ++ ++ // Nothing to do for T_INT ++ BIND(done); ++} ++ ++ ++void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) { ++ Label update, next, none; ++ ++ verify_oop(obj); ++ ++ bne(obj, update); ++ ++ push(T1); ++ if (mdo_addr.index() == noreg) { ++ ldl(T1, mdo_addr.base(), mdo_addr.disp()); ++ } else { ++ guarantee(T1 != mdo_addr.base(), "The base register will be corrupted !"); ++ guarantee(T1 != mdo_addr.index(), "The index register will be corrupted !"); ++ ++ slll(AT, mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, mdo_addr.base(), AT); ++ ldl(T1, AT, mdo_addr.disp()); ++ } ++ or_ins( AT, T1, TypeEntries::null_seen); ++ if (mdo_addr.index() == noreg) { ++ stl(AT, mdo_addr); ++ } else { ++ guarantee(T1 != mdo_addr.base(), "The base register will be corrupted !"); ++ guarantee(T1 != mdo_addr.index(), "The index register will be corrupted !"); ++ ++ slll(T1, mdo_addr.index(), mdo_addr.scale()); ++ addl(T1, T1, mdo_addr.base()); ++ stl(AT, T1, mdo_addr.disp()); ++ } ++ pop(T1); ++ ++ beq(R0, next); ++ ++ BIND(update); ++ load_klass(obj, obj); ++ ++ if (mdo_addr.index() == noreg) { ++ ldl(AT, mdo_addr.base(), mdo_addr.disp()); ++ } else { ++ slll(AT, mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, AT, mdo_addr.base()); ++ ldl(AT, AT, mdo_addr.disp()); ++ } ++ xor_ins(obj, obj, AT); ++ ++ ldi(AT, R0, TypeEntries::type_klass_mask); ++ and_reg(AT, obj, AT); ++ beq(AT, next); ++ ++ and_imm8(AT, obj, TypeEntries::type_unknown); ++ bne(AT, next); ++ ++ if (mdo_addr.index() == noreg) { ++ ldl(AT, mdo_addr.base(), mdo_addr.disp()); ++ } else { ++ slll(AT, 
mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, AT, mdo_addr.base()); ++ ldl(AT, AT, mdo_addr.disp()); ++ } ++ beq(AT, none); ++ ++ push(T1); ++ if (mdo_addr.index() == noreg) { ++ ldl(T1, mdo_addr.base(), mdo_addr.disp()); ++ } else { ++ guarantee(T1 != mdo_addr.base(), "The base register will be corrupted !"); ++ guarantee(T1 != mdo_addr.index(), "The index register will be corrupted !"); ++ ++ slll(AT, mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, AT, mdo_addr.base()); ++ ldl(T1, AT, mdo_addr.disp()); ++ } ++ subl(T1, AT, TypeEntries::null_seen); ++ pop(T1); ++ beq(AT, none); ++ ++ // There is a chance that the checks above (re-reading profiling ++ // data from memory) fail if another thread has just set the ++ // profiling to this obj's klass ++ if (mdo_addr.index() == noreg) { ++ ldl(AT, mdo_addr.base(), mdo_addr.disp()); ++ } else { ++ slll(AT, mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, AT, mdo_addr.base()); ++ ldl(AT, AT, mdo_addr.disp()); ++ } ++ xor_ins(obj, obj, AT); ++ ldi(AT, R0, TypeEntries::type_klass_mask); ++ and_reg(AT, obj, AT); ++ beq(AT, next); ++ ++ // different than before. Cannot keep accurate profile. 
++ push(T1); ++ if (mdo_addr.index() == noreg) { ++ ldl(T1, mdo_addr.base(), mdo_addr.disp()); ++ } else { ++ guarantee(T1 != mdo_addr.base(), "The base register will be corrupted !"); ++ guarantee(T1 != mdo_addr.index(), "The index register will be corrupted !"); ++ ++ slll(AT, mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, AT, mdo_addr.base()); ++ ldl(T1, AT, mdo_addr.disp()); ++ } ++ or_ins( AT, T1, TypeEntries::type_unknown); ++ if (mdo_addr.index() == noreg) { ++ stl(AT, mdo_addr); ++ } else { ++ guarantee(T1 != mdo_addr.base(), "The base register will be corrupted !"); ++ guarantee(T1 != mdo_addr.index(), "The index register will be corrupted !"); ++ ++ slll(T1, mdo_addr.index(), mdo_addr.scale()); ++ addl(T1, T1, mdo_addr.base()); ++ stl(AT, T1, mdo_addr.disp()); ++ } ++ pop(T1); ++ beq(R0, next); ++ ++ ++ BIND(none); ++ // first time here. Set profile type. ++ if (mdo_addr.index() == noreg) { ++ stl(obj, mdo_addr); ++ } else { ++ slll(AT, mdo_addr.index(), mdo_addr.scale()); ++ addl(AT, AT, mdo_addr.base()); ++ stl(obj, AT, mdo_addr.disp()); ++ } ++ ++ BIND(next); ++} ++ ++void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) { ++ if (!ProfileInterpreter) { ++ return; ++ } ++ ++ if (MethodData::profile_arguments() || MethodData::profile_return()) { ++ Label profile_continue; ++ ++ test_method_data_pointer(mdp, profile_continue); ++ ++ int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size()); ++ ++ ldbu(AT, mdp, in_bytes(DataLayout::tag_offset()) - off_to_start); ++ cmpeq(GP, AT, is_virtual ? 
DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag); ++ beq(GP, profile_continue); ++ ++ ++ if (MethodData::profile_arguments()) { ++ Label done; ++ int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset()); ++ if (Assembler::is_simm16(off_to_args)) { ++ add_simm16(mdp, mdp, off_to_args); ++ } else { ++ move(AT, off_to_args); ++ addl(mdp, mdp, AT); ++ } ++ ++ ++ for (int i = 0; i < TypeProfileArgsLimit; i++) { ++ if (i > 0 || MethodData::profile_return()) { ++ // If return value type is profiled we may have no argument to profile ++ ldl(tmp, mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args); ++ ++ if (Assembler::is_simm16(i * TypeStackSlotEntries::per_arg_count())) { ++ ldi(tmp, tmp, -1 * i * TypeStackSlotEntries::per_arg_count()); ++ } else { ++ li(AT, i*TypeStackSlotEntries::per_arg_count()); ++ subl(tmp, tmp, AT); ++ } ++ ++ cmplt(AT, tmp, TypeStackSlotEntries::per_arg_count()); ++ bne(AT, done); ++ } ++ ldl(tmp, callee, in_bytes(Method::const_offset())); ++ ++ ldhu(tmp, tmp, in_bytes(ConstMethod::size_of_parameters_offset())); ++ ++ // stack offset o (zero based) from the start of the argument ++ // list, for n arguments translates into offset n - o - 1 from ++ // the end of the argument list ++ ldl(AT, mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args); ++ subl(tmp, tmp, AT); ++ ++ subl(tmp, tmp, 1); ++ ++ Address arg_addr = argument_address(tmp); ++ ldl(tmp, arg_addr.base(), arg_addr.disp()); ++ ++ Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args); ++ profile_obj_type(tmp, mdo_arg_addr); ++ ++ int to_add = in_bytes(TypeStackSlotEntries::per_arg_size()); ++ if (Assembler::is_simm16(to_add)) { ++ add_simm16(mdp, mdp, to_add); ++ } else { ++ move(AT, to_add); ++ addl(mdp, mdp, AT); ++ } ++ ++ off_to_args += to_add; ++ } ++ ++ if (MethodData::profile_return()) { ++ ldl(tmp, mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args); ++ ++ int 
tmp_arg_counts = TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(); ++ if (Assembler::is_simm16(-1 * tmp_arg_counts)) { ++ subl(tmp, tmp, 1 * tmp_arg_counts); ++ } else { ++ move(AT, tmp_arg_counts); ++ subw(mdp, mdp, AT); ++ } ++ } ++ ++ BIND(done); ++ ++ if (MethodData::profile_return()) { ++ // We're right after the type profile for the last ++ // argument. tmp is the number of cells left in the ++ // CallTypeData/VirtualCallTypeData to reach its end. Non null ++ // if there's a return to profile. ++ assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type"); ++ sllw_signed(tmp, tmp, exact_log2(DataLayout::cell_size)); ++ addl(mdp, mdp, tmp); ++ } ++ stl(mdp, FP, frame::interpreter_frame_mdx_offset * wordSize); ++ } else { ++ assert(MethodData::profile_return(), "either profile call args or call ret"); ++ update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size())); ++ } ++ ++ // mdp points right after the end of the ++ // CallTypeData/VirtualCallTypeData, right after the cells for the ++ // return value type if there's one ++ ++ BIND(profile_continue); ++ } ++} ++ ++void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) { ++ assert_different_registers(mdp, ret, tmp, _bcp_register); ++ if (ProfileInterpreter && MethodData::profile_return()) { ++ Label profile_continue, done; ++ ++ test_method_data_pointer(mdp, profile_continue); ++ ++ if (MethodData::profile_return_jsr292_only()) { ++ // If we don't profile all invoke bytecodes we must make sure ++ // it's a bytecode we indeed profile. 
We can't go back to the ++ // begining of the ProfileData we intend to update to check its ++ // type because we're right after it and we don't known its ++ // length ++ Label do_profile; ++ ldbu(AT, _bcp_register, 0); ++ add_simm16(AT, AT, -1 * Bytecodes::_invokedynamic); ++ beq(AT, do_profile); ++ ++ ldbu(AT, _bcp_register, 0); ++ add_simm16(AT, AT, -1 * Bytecodes::_invokehandle); ++ beq(AT, do_profile); ++ ++ get_method(tmp); ++ ldbu(tmp, tmp, Method::intrinsic_id_offset_in_bytes()); ++ cmpeq(AT, tmp, vmIntrinsics::_compiledLambdaForm); ++ beq(AT, offset(target(profile_continue))); ++ ++ BIND(do_profile); ++ } ++ ++ Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size())); ++ addl(tmp, ret, R0); ++ profile_obj_type(tmp, mdo_ret_addr); ++ ++ BIND(profile_continue); ++ } ++} ++ ++void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) { ++ ++ if (ProfileInterpreter && MethodData::profile_parameters()) { ++ Label profile_continue, done; ++ ++ test_method_data_pointer(mdp, profile_continue); ++ ++ // Load the offset of the area within the MDO used for ++ // parameters. If it's negative we're not profiling any parameters ++ ldw(tmp1, mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())); ++ blt(tmp1, profile_continue); ++ ++ // Compute a pointer to the area for parameters from the offset ++ // and move the pointer to the slot for the last ++ // parameters. Collect profiling from last parameter down. 
++ // mdo start + parameters offset + array length - 1 ++ addl(mdp, mdp, tmp1); ++ ldl(tmp1, mdp, in_bytes(ArrayData::array_len_offset())); ++ decrement(tmp1, TypeStackSlotEntries::per_arg_count()); ++ ++ ++ Label loop; ++ BIND(loop); ++ ++ int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0)); ++ int type_base = in_bytes(ParametersTypeData::type_offset(0)); ++ Address::ScaleFactor per_arg_scale = Address::times(DataLayout::cell_size); ++ Address arg_type(mdp, tmp1, per_arg_scale, type_base); ++ ++ // load offset on the stack from the slot for this parameter ++ slll(AT, tmp1, per_arg_scale); ++ addl(AT, AT, mdp); ++ ldl(tmp2, AT, off_base); ++ ++ subl(tmp2, R0, tmp2); ++ ++ // read the parameter from the local area ++ slll(AT, tmp2, Interpreter::stackElementScale()); ++ addl(AT, AT, _locals_register); ++ ldl(tmp2, AT, 0); ++ ++ // profile the parameter ++ profile_obj_type(tmp2, arg_type); ++ ++ // go to next parameter ++ decrement(tmp1, TypeStackSlotEntries::per_arg_count()); ++ bgt(tmp1, loop); ++ ++ BIND(profile_continue); ++ } ++} ++ ++void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) { ++ if (state == atos) { ++ MacroAssembler::verify_oop(reg); ++ } ++} ++ ++void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ++} ++#endif // !CC_INTERP ++ ++ ++void InterpreterMacroAssembler::notify_method_entry() { ++ // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to ++ // track stack depth. If it is possible to enter interp_only_mode we add ++ // the code to check if the event should be sent. 
++ Register tempreg = T0; ++ move(T11, S2thread); ++ if (JvmtiExport::can_post_interpreter_events()) { ++ Label L; ++ ldw(tempreg, T11, in_bytes(JavaThread::interp_only_mode_offset())); ++ beq(tempreg, L); ++ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::post_method_entry)); ++ BIND(L); ++ } ++ ++ { ++ SkipIfEqual skip_if(this, &DTraceMethodProbes, 0); ++ get_method(S3); ++ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), ++ //Rthread, ++ T11, ++ //Rmethod); ++ S3); ++ } ++ ++} ++ ++void InterpreterMacroAssembler::notify_method_exit( ++ //TosState state, NotifyMethodExitMode mode) { ++ bool is_native_method, TosState state, NotifyMethodExitMode mode) { ++ // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to ++ // track stack depth. If it is possible to enter interp_only_mode we add ++ // the code to check if the event should be sent. ++ Register tempreg = T0; ++ move(T11, S2thread); ++ if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { ++ Label skip; ++ ldw(tempreg, T11, in_bytes(JavaThread::interp_only_mode_offset())); ++ beq(tempreg, skip); ++ // Note: frame::interpreter_frame_result has a dependency on how the ++ // method result is saved across the call to post_method_exit. If this ++ // is changed then the interpreter_frame_result implementation will ++ // need to be updated too. ++ ++ // For c++ interpreter the result is always stored at a known location in the frame ++ // template interpreter will leave it on the top of the stack. 
++ save_return_value(state, is_native_method); ++ call_VM(noreg, ++ CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); ++ restore_return_value(state, is_native_method); ++ BIND(skip); ++ } ++ ++ { ++ // Dtrace notification ++ SkipIfEqual skip_if(this, &DTraceMethodProbes, 0); ++ save_return_value(state, is_native_method); ++ get_method(S3); ++ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), ++ //Rthread, Rmethod); ++ T11, S3); ++ restore_return_value(state, is_native_method); ++ } ++} ++ ++void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { ++ if (is_native_call) { ++ // save any potential method result value ++ stw(V0, FP, (-9) * wordSize); ++ fsts(F0, FP, (-10) * wordSize); ++ } else { ++ push(state); ++ } ++} ++ ++void InterpreterMacroAssembler::restore_return_value(TosState state, bool is_native_call) { ++ if (is_native_call) { ++ // Restore any method result value ++ ldw(V0, FP, (-9) * wordSize); ++ flds(F0, FP, (-10) * wordSize); ++ } else { ++ pop(state); ++ } ++} ++ ++// Jump if ((*counter_addr += increment) & mask) satisfies the condition. 
++void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, ++ int increment, int mask, ++ Register scratch, bool preloaded, ++ Condition cond, Label* where) { ++ assert_different_registers(scratch, AT); ++ ++ if (!preloaded) { ++ ldw_signed(scratch, counter_addr); ++ } ++ addl(scratch, scratch, increment); ++ stw(scratch, counter_addr); ++ ++ move(AT, mask); ++ and_reg(scratch, scratch, AT); ++ ++ if (cond == Assembler::zero) { ++ beq(scratch, *where); ++ } else { ++ unimplemented(); ++ } ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interp_masm_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/interp_masm_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/interp_masm_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interp_masm_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,270 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_INTERP_MASM_SW64_HPP ++#define CPU_SW64_VM_INTERP_MASM_SW64_HPP ++ ++#include "asm/assembler.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "interpreter/invocationCounter.hpp" ++#include "runtime/frame.hpp" ++ ++// This file specializes the assember with interpreter-specific macros ++ ++ ++class InterpreterMacroAssembler: public MacroAssembler { ++#ifndef CC_INTERP ++ private: ++ ++ Register _locals_register; // register that contains the pointer to the locals ++ Register _bcp_register; // register that contains the bcp ++ ++ protected: ++ // Interpreter specific version of call_VM_base ++ virtual void call_VM_leaf_base(address entry_point, ++ int number_of_arguments); ++ ++ virtual void call_VM_base(Register oop_result, ++ Register java_thread, ++ Register last_java_sp, ++ address entry_point, ++ int number_of_arguments, ++ bool check_exceptions); ++ ++ virtual void check_and_handle_popframe(Register java_thread); ++ virtual void check_and_handle_earlyret(Register java_thread); ++ ++ // base routine for all dispatches ++ void dispatch_base(TosState state, address* table, bool verifyoop = true); ++#endif // CC_INTERP ++ ++ public: ++ // narrow int return value ++ void narrow(Register result); ++ ++ InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code), _locals_register(LVP), _bcp_register(BCP) {} ++ ++ void get_2_byte_integer_at_bcp(Register reg, Register tmp, int offset); ++ void get_4_byte_integer_at_bcp(Register reg, Register tmp, int offset); ++ ++ void load_earlyret_value(TosState state); ++ ++#ifdef CC_INTERP ++ void save_bcp() { /* not needed in c++ interpreter and harmless */ } ++ void restore_bcp() { /* not needed in c++ interpreter and harmless */ } ++ ++ // Helpers for runtime call arguments/results ++ void get_method(Register reg); ++ ++#else ++ ++ // Interpreter-specific registers ++ void save_bcp() { ++ stl(BCP, FP, frame::interpreter_frame_bcx_offset * 
wordSize); ++ } ++ ++ void restore_bcp() { ++ ldl(BCP, FP, frame::interpreter_frame_bcx_offset * wordSize); ++ } ++ ++ void restore_locals() { ++ ldl(LVP, FP, frame::interpreter_frame_locals_offset * wordSize); ++ } ++ ++ // Helpers for runtime call arguments/results ++ void get_method(Register reg) { ++ ldl(reg, FP, frame::interpreter_frame_method_offset * wordSize); ++ } ++ ++ void get_const(Register reg){ ++ get_method(reg); ++ ldl(reg, reg, in_bytes(Method::const_offset())); ++ } ++ ++ void get_constant_pool(Register reg) { ++ get_const(reg); ++ ldl(reg, reg, in_bytes(ConstMethod::constants_offset())); ++ } ++ ++ void get_constant_pool_cache(Register reg) { ++ get_constant_pool(reg); ++ ldl(reg, reg, ConstantPool::cache_offset_in_bytes()); ++ } ++ ++ void get_cpool_and_tags(Register cpool, Register tags) { ++ get_constant_pool(cpool); ++ ldl(tags, cpool, ConstantPool::tags_offset_in_bytes()); ++ } ++ ++ void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset); ++ void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size = sizeof(u2)); ++ void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); ++ void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); ++ void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); ++ void get_method_counters(Register method, Register mcs, Label& skip); ++ ++ // load cpool->resolved_references(index); ++ void load_resolved_reference_at_index(Register result, Register index); ++ ++ void pop_ptr( Register r = FSR); ++ void pop_i( Register r = FSR); ++ void pop_l( Register r = FSR); ++ void pop_f(FloatRegister r = FSF); ++ void pop_d(FloatRegister r = FSF); ++ ++ void push_ptr( Register r = FSR); ++ void push_i( Register r = FSR); ++ void push_l( Register r = FSR); ++ void push_f(FloatRegister 
r = FSF); ++ void push_d(FloatRegister r = FSF); ++ ++ void pop(Register r) { ((MacroAssembler*)this)->pop(r); } ++ ++ void push(Register r) { ((MacroAssembler*)this)->push(r); } ++ ++ void pop(TosState state); // transition vtos -> state ++ void push(TosState state); // transition state -> vtos ++ ++ void empty_expression_stack() { ++ ldl(SP, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ // NULL last_sp until next java call ++ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ } ++ ++ // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls ++ void load_ptr(int n, Register val); ++ void store_ptr(int n, Register val); ++ ++ // Generate a subtype check: branch to ok_is_subtype if sub_klass is ++ // a subtype of super_klass. ++ //void gen_subtype_check( Register sub_klass, Label &ok_is_subtype ); ++ void gen_subtype_check( Register Rsup_klass, Register sub_klass, Label &ok_is_subtype ); ++ ++ // Dispatching ++ void dispatch_prolog(TosState state, int step = 0); ++ void dispatch_epilog(TosState state, int step = 0); ++ void dispatch_only(TosState state); ++ void dispatch_only_normal(TosState state); ++ void dispatch_only_noverify(TosState state); ++ void dispatch_next(TosState state, int step = 0); ++ void dispatch_via (TosState state, address* table); ++ ++ // jump to an invoked target ++ void prepare_to_jump_from_interpreted(); ++ void jump_from_interpreted(Register method, Register temp); ++ ++ ++ // Returning from interpreted functions ++ // ++ // Removes the current activation (incl. unlocking of monitors) ++ // and sets up the return address. This code is also used for ++ // exception unwindwing. In that case, we do not want to throw ++ // IllegalMonitorStateExceptions, since that might get us into an ++ // infinite rethrow exception loop. ++ // Additionally this code is used for popFrame and earlyReturn. 
++ // In popFrame case we want to skip throwing an exception, ++ // installing an exception, and notifying jvmdi. ++ // In earlyReturn case we only want to skip throwing an exception ++ // and installing an exception. ++ void remove_activation(TosState state, Register ret_addr, ++ bool throw_monitor_exception = true, ++ bool install_monitor_exception = true, ++ bool notify_jvmdi = true); ++#endif // CC_INTERP ++ ++ // Object locking ++ void lock_object (Register lock_reg); ++ void unlock_object(Register lock_reg); ++ ++#ifndef CC_INTERP ++ ++ // Interpreter profiling operations ++ void set_method_data_pointer_for_bcp(); ++ void test_method_data_pointer(Register mdp, Label& zero_continue); ++ void verify_method_data_pointer(); ++ ++ void set_mdp_data_at(Register mdp_in, int constant, Register value); ++ void increment_mdp_data_at(Address data, bool decrement = false); ++ void increment_mdp_data_at(Register mdp_in, int constant, ++ bool decrement = false); ++ void increment_mdp_data_at(Register mdp_in, Register reg, int constant, ++ bool decrement = false); ++ void increment_mask_and_jump(Address counter_addr, ++ int increment, int mask, ++ Register scratch, bool preloaded, ++ Condition cond, Label* where); ++ void set_mdp_flag_at(Register mdp_in, int flag_constant); ++ void test_mdp_data_at(Register mdp_in, int offset, Register value, ++ Register test_value_out, ++ Label& not_equal_continue); ++ ++ void record_klass_in_profile(Register receiver, Register mdp, ++ Register reg2, bool is_virtual_call); ++ void record_klass_in_profile_helper(Register receiver, Register mdp, ++ Register reg2, int start_row, ++ Label& done, bool is_virtual_call); ++ ++ void update_mdp_by_offset(Register mdp_in, int offset_of_offset); ++ void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp); ++ void update_mdp_by_constant(Register mdp_in, int constant); ++ void update_mdp_for_ret(Register return_bci); ++ ++ void profile_taken_branch(Register mdp, Register 
bumped_count); ++ void profile_not_taken_branch(Register mdp); ++ void profile_call(Register mdp); ++ void profile_final_call(Register mdp); ++ void profile_virtual_call(Register receiver, Register mdp, ++ Register scratch2, ++ bool receiver_can_be_null = false); ++ void profile_ret(Register return_bci, Register mdp); ++ void profile_null_seen(Register mdp); ++ void profile_typecheck(Register mdp, Register klass, Register scratch); ++ void profile_typecheck_failed(Register mdp); ++ void profile_switch_default(Register mdp); ++ void profile_switch_case(Register index_in_scratch, Register mdp, ++ Register scratch2); ++ ++ // Debugging ++ // only if +VerifyOops && state == atos ++ void verify_oop(Register reg, TosState state = atos); ++ // only if +VerifyFPU && (state == ftos || state == dtos) ++ void verify_FPU(int stack_depth, TosState state = ftos); ++ ++ void profile_obj_type(Register obj, const Address& mdo_addr); ++ void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual); ++ void profile_return_type(Register mdp, Register ret, Register tmp); ++ void profile_parameters_type(Register mdp, Register tmp1, Register tmp2); ++#endif // !CC_INTERP ++ ++ typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode; ++ ++ // support for jvmti/dtrace ++ void notify_method_entry(); ++ void notify_method_exit(bool is_native_method, TosState state, NotifyMethodExitMode mode); ++ void save_return_value(TosState state, bool is_native_call); ++ void restore_return_value(TosState state, bool is_native_call); ++}; ++ ++#endif // CPU_SW64_VM_INTERP_MASM_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interpreterGenerator_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/interpreterGenerator_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/interpreterGenerator_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interpreterGenerator_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,51 @@ ++/* ++ * Copyright (c) 1997, 
2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_INTERPRETERGENERATOR_SW64_HPP ++#define CPU_SW64_VM_INTERPRETERGENERATOR_SW64_HPP ++ ++ ++// Generation of Interpreter ++// ++ friend class AbstractInterpreterGenerator; ++ ++ private: ++ ++ address generate_normal_entry(bool synchronized); ++ address generate_native_entry(bool synchronized); ++ address generate_abstract_entry(void); ++ address generate_math_entry(AbstractInterpreter::MethodKind kind); ++ address generate_empty_entry(void); ++ address generate_accessor_entry(void); ++ address generate_Reference_get_entry(); ++ address generate_CRC32_update_entry(); ++ address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind); ++ ++ void lock_method(void); ++ void generate_stack_overflow_check(void); ++ ++ void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue); ++ void generate_counter_overflow(Label* do_continue); ++ ++#endif // CPU_SW64_VM_INTERPRETERGENERATOR_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,240 @@ ++/* ++ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "memory/allocation.inline.hpp" ++#include "memory/universe.inline.hpp" ++#include "oops/method.hpp" ++#include "oops/oop.inline.hpp" ++#include "runtime/handles.inline.hpp" ++#include "runtime/icache.hpp" ++#include "runtime/interfaceSupport.hpp" ++#include "runtime/signature.hpp" ++ ++#define __ _masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++// Implementation of SignatureHandlerGenerator ++ ++void InterpreterRuntime::SignatureHandlerGenerator::move(int from_offset, int to_offset) { ++ __ ldl(temp(), from(), Interpreter::local_offset_in_bytes(from_offset)); ++ __ stl(temp(), to(), to_offset * longSize); ++} ++ ++void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) { ++ __ add_simm16(temp(), from(), Interpreter::local_offset_in_bytes(from_offset) ); ++ __ ldw(AT, from(), Interpreter::local_offset_in_bytes(from_offset) ); ++ ++ Label L; ++ __ bne(AT, L); ++ __ move(temp(), R0); ++ __ BIND(L); ++ __ stw(temp(), to(), to_offset * wordSize); ++} ++ ++void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) { ++ 
// generate code to handle arguments ++ iterate(fingerprint); ++ // return result handler ++ __ li(V0, AbstractInterpreter::result_handler(method()->result_type())); ++ // return ++ __ ret(); ++ ++ __ flush(); ++} ++ ++void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { ++ Argument jni_arg(jni_offset()); ++ __ ldw(temp(), from(), Interpreter::local_offset_in_bytes(offset())); ++ __ store_int_argument(temp(), jni_arg); ++} ++ ++void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { ++ Argument jni_arg(jni_offset()); ++ ++ Register Rtmp1 = temp(); ++ ++ // the handle for a receiver will never be null ++ bool do_NULL_check = offset() != 0 || is_static(); ++ __ ldl(Rtmp1, from(), Interpreter::local_offset_in_bytes(offset())); ++ ++ Label L; ++ __ beq(Rtmp1, L); ++ __ add_simm16(Rtmp1, from(), Interpreter::local_offset_in_bytes(offset())); ++ __ BIND(L); ++ ++ __ store_ptr_argument(Rtmp1, jni_arg); ++} ++ ++//the jvm specifies that long type takes 2 stack spaces, so in do_long(), _offset += 2. ++void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { ++ Argument jni_arg(jni_offset()); ++ __ ldl(temp(), from(), Interpreter::local_offset_in_bytes(offset() + 1)); ++ if(jni_arg.is_Register()) { ++ __ move(jni_arg.as_Register(), temp()); ++ } else { ++ __ stl(temp(), jni_arg.as_caller_address()); ++ } ++} ++ ++//not sure ++////#if (defined _LP64) || (defined N32) ++void InterpreterRuntime::SignatureHandlerGenerator::pass_float() { ++ Argument jni_arg(jni_offset()); ++ __ flds(F16, from(), Interpreter::local_offset_in_bytes(offset())); ++ __ store_float_argument(F16, jni_arg); ++} ++ ++ ++//the jvm specifies that double type takes 2 stack spaces, so in do_double(), _offset += 2. 
++void InterpreterRuntime::SignatureHandlerGenerator::pass_double() { ++ Argument jni_arg(jni_offset()); ++ __ fldd(F16, from(), Interpreter::local_offset_in_bytes(offset() + 1)); ++ __ store_double_argument(F16, jni_arg); ++} ++////#endif ++ ++ ++Register InterpreterRuntime::SignatureHandlerGenerator::from() { return LVP; } ++Register InterpreterRuntime::SignatureHandlerGenerator::to() { return SP; } ++Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return RT4; } ++ ++// Implementation of SignatureHandlerLibrary ++ ++void SignatureHandlerLibrary::pd_set_handler(address handler) {} ++ ++ ++class SlowSignatureHandler ++ : public NativeSignatureIterator { ++ private: ++ address _from; ++ intptr_t* _to; ++ intptr_t* _reg_args; ++ intptr_t* _fp_identifiers; ++ unsigned int _num_args; ++ ++ virtual void pass_int() ++ { ++ jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); ++ _from -= Interpreter::stackElementSize; ++ ++ if (_num_args < Argument::n_register_parameters) { ++ *_reg_args++ = from_obj; ++ _num_args++; ++ } else { ++ *_to++ = from_obj; ++ } ++ } ++ ++ virtual void pass_long() ++ { ++ intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); ++ _from -= 2 * Interpreter::stackElementSize; ++ ++ if (_num_args < Argument::n_register_parameters) { ++ *_reg_args++ = from_obj; ++ _num_args++; ++ } else { ++ *_to++ = from_obj; ++ } ++ } ++ ++ virtual void pass_object() ++ { ++ intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0)); ++ _from -= Interpreter::stackElementSize; ++ if (_num_args < Argument::n_register_parameters) { ++ *_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr; ++ _num_args++; ++ } else { ++ *_to++ = (*from_addr == 0) ? 
NULL : (intptr_t) from_addr; ++ } ++ } ++ ++ virtual void pass_float() ++ { ++ jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); ++ _from -= Interpreter::stackElementSize; ++ ++ if (_num_args < Argument::n_float_register_parameters) { ++ *_reg_args++ = from_obj; ++ *_fp_identifiers |= (0x01 << (_num_args*2)); // mark as float ++ _num_args++; ++ } else { ++ *_to++ = from_obj; ++ } ++ } ++ ++ virtual void pass_double() ++ { ++ intptr_t from_obj = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1)); ++ _from -= 2*Interpreter::stackElementSize; ++ ++ if (_num_args < Argument::n_float_register_parameters) { ++ *_reg_args++ = from_obj; ++ *_fp_identifiers |= (0x3 << (_num_args*2)); // mark as double ++ _num_args++; ++ } else { ++ *_to++ = from_obj; ++ } ++ } ++ ++ public: ++ SlowSignatureHandler(methodHandle method, address from, intptr_t* to) ++ : NativeSignatureIterator(method) ++ { ++ _from = from; ++ _to = to; ++ ++ _reg_args = to - Argument::n_register_parameters + jni_offset() - 1; ++ _fp_identifiers = to - 1; ++ *(int*) _fp_identifiers = 0; ++ _num_args = jni_offset(); ++ } ++}; ++ ++ ++IRT_ENTRY(address, ++ InterpreterRuntime::slow_signature_handler(JavaThread* thread, ++ Method* method, ++ intptr_t* from, ++ intptr_t* to)) ++ methodHandle m(thread, (Method*)method); ++ assert(m->is_native(), "sanity check"); ++ ++ // handle arguments ++ SlowSignatureHandler(m, (address)from, to).iterate(UCONST64(-1)); ++ ++ // return result handler ++ return Interpreter::result_handler(m->result_type()); ++IRT_END +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interpreterRT_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,60 @@ ++/* ++ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_INTERPRETERRT_SW64_HPP ++#define CPU_SW64_VM_INTERPRETERRT_SW64_HPP ++ ++#include "memory/allocation.hpp" ++ ++// native method calls ++ ++class SignatureHandlerGenerator: public NativeSignatureIterator { ++ private: ++ MacroAssembler* _masm; ++ ++ void move(int from_offset, int to_offset); ++ ++ void box(int from_offset, int to_offset); ++ void pass_int(); ++ void pass_long(); ++ void pass_object(); ++ void pass_float(); ++ void pass_double(); ++ ++ public: ++ // Creation ++ SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) { ++ _masm = new MacroAssembler(buffer); ++ } ++ ++ // Code generation ++ void generate(uint64_t fingerprint); ++ ++ // Code generation support ++ static Register from(); ++ static Register to(); ++ static Register temp(); ++}; ++ ++#endif // CPU_SW64_VM_INTERPRETERRT_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interpreter_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/interpreter_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/interpreter_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interpreter_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,265 @@ ++/* ++ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "interpreter/bytecodeHistogram.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterGenerator.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "interpreter/templateTable.hpp" ++#include "oops/arrayOop.hpp" ++#include "oops/methodData.hpp" ++#include "oops/method.hpp" ++#include "oops/oop.inline.hpp" ++#include "prims/jvmtiExport.hpp" ++#include "prims/jvmtiThreadState.hpp" ++#include "prims/methodHandles.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/deoptimization.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/synchronizer.hpp" ++#include "runtime/timer.hpp" ++#include "runtime/vframeArray.hpp" ++#include "utilities/debug.hpp" ++#ifdef COMPILER1 ++#include "c1/c1_Runtime1.hpp" ++#endif ++ ++#define __ _masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++address AbstractInterpreterGenerator::generate_slow_signature_handler() { ++ address entry = __ pc(); ++ ++ // Rmethod: method ++ // LVP: pointer to locals ++ // T3: first stack arg - wordSize ++ __ move(T3, SP); ++ __ pushad(); ++ __ call_VM(noreg, ++ CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::slow_signature_handler), ++ Rmethod, LVP, T3); ++ __ move(S0, V0); 
++ __ popad(); ++ __ move(V0, S0); ++ ++ // V0: result handler ++ ++ // Stack layout: ++ // ... ++ ++ // Do FP first so we can use c_rarg3 as temp ++ __ ldl(T3, Address(SP, -1 * wordSize)); // float/double identifiers ++ ++ // A0 is for env. ++ // If the mothed is not static, A1 will be corrected in generate_native_entry. ++ for ( int i= 1; i < Argument::n_register_parameters; i++ ) { ++ Register reg = as_Register(i + A0->encoding()); ++ FloatRegister floatreg = as_FloatRegister(i + F16->encoding()); ++ Label isfloatordouble, isdouble, next; ++ ++ __ set64(AT, 1 << (i*2)); // Float or Double? ++ __ and_reg(AT, T3, AT); ++ __ bne(AT, isfloatordouble); ++ ++ // Do Int register here ++ __ ldl(reg, Address(SP, -(Argument::n_register_parameters + 1 -i) * wordSize)); ++ __ beq(R0, next); ++ ++ __ BIND(isfloatordouble); ++ __ set64(AT, 1 << ((i*2)+1)); // Double? ++ __ and_reg(AT, T3, AT); ++ __ bne(AT, isdouble); ++ ++ // Do Float Here ++ __ flds(floatreg, Address(SP, -(Argument::n_float_register_parameters + 1 -i) * wordSize)); ++ __ beq(R0, next); ++ ++ // Do Double here ++ __ BIND(isdouble); ++ __ fldd(floatreg, Address(SP, -(Argument::n_float_register_parameters + 1 -i) * wordSize)); ++ ++ __ BIND(next); ++ } ++ ++ __ ret(); ++ return entry; ++} ++ ++ ++// ++// Various method entries ++// ++ ++address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { ++ ++ // Rmethod: methodOop ++ // V0: scratrch ++ // Rsender: send 's sp ++ ++ if (!InlineIntrinsics) return NULL; // Generate a vanilla entry ++ ++ address entry_point = __ pc(); ++ ++ // These don't need a safepoint check because they aren't virtually ++ // callable. We won't enter these intrinsics from compiled code. ++ // If in the future we added an intrinsic which was virtually callable ++ // we'd have to worry about how to safepoint so that this code is used. 
++ ++ // mathematical functions inlined by compiler ++ // (interpreter must provide identical implementation ++ // in order to avoid monotonicity bugs when switching ++ // from interpreter to compiler in the middle of some ++ // computation) ++ // ++ // stack: [ lo(arg) ] <-- sp ++ // [ hi(arg) ] ++ { ++ // Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are ++ // java methods. Interpreter::method_kind(...) will select ++ // this entry point for the corresponding methods in JDK 1.3. ++ __ fldd(F16, SP, 0 * wordSize); ++ __ fldd(F17, SP, 1 * wordSize); ++ __ stl(RA, SP, (-1) * wordSize); ++ __ stl(FP, SP, (-2) * wordSize); ++ __ move(FP, SP); ++ __ add_simm16(SP, SP, (-2) * wordSize); ++ ++ // [ fp ] <-- sp ++ // [ ra ] ++ // [ lo ] <-- fp ++ // [ hi ] ++ switch (kind) { ++ case Interpreter::java_lang_math_sin : ++ __ trigfunc('s'); ++ break; ++ case Interpreter::java_lang_math_cos : ++ __ trigfunc('c'); ++ break; ++ case Interpreter::java_lang_math_tan : ++ __ trigfunc('t'); ++ break; ++ case Interpreter::java_lang_math_sqrt: ++ __ sqrt_d(F0, F16); ++ break; ++ case Interpreter::java_lang_math_abs: ++ __ fabs(F0, F16); ++ break; ++ case Interpreter::java_lang_math_log: ++ // Store to stack to convert 80bit precision back to 64bits ++ break; ++ case Interpreter::java_lang_math_log10: ++ // Store to stack to convert 80bit precision back to 64bits ++ break; ++ case Interpreter::java_lang_math_pow: ++ break; ++ case Interpreter::java_lang_math_exp: ++ break; ++ ++ default : ++ ShouldNotReachHere(); ++ } ++ ++ __ ldl(RA, FP, (-1) * wordSize); ++ __ move(SP, Rsender); ++ __ ldl(FP, FP, (-2) * wordSize); ++ __ ret(); ++ } ++ return entry_point; ++} ++ ++ ++// Abstract method entry ++// Attempt to execute abstract method. 
Throw exception ++address InterpreterGenerator::generate_abstract_entry(void) { ++ ++ // Rmethod: methodOop ++ // V0: receiver (unused) ++ // Rsender : sender 's sp ++ address entry_point = __ pc(); ++ ++ // abstract method entry ++ // throw exception ++ // adjust stack to what a normal return would do ++ __ empty_expression_stack(); ++ __ restore_bcp(); ++ __ restore_locals(); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); ++ // the call_VM checks for exception, so we should never return here. ++ __ should_not_reach_here(); ++ ++ return entry_point; ++} ++ ++ ++// Empty method, generate a very fast return. ++ ++address InterpreterGenerator::generate_empty_entry(void) { ++ ++ // Rmethod: methodOop ++ // V0: receiver (unused) ++ if (!UseFastEmptyMethods) return NULL; ++ ++ address entry_point = __ pc(); ++ ++ Label slow_path; ++ __ li(RT0, SafepointSynchronize::address_of_state()); ++ __ ldw(AT, RT0, 0); ++ __ move(RT0, (SafepointSynchronize::_not_synchronized)); ++ __ bne(AT, RT0, slow_path); ++ __ move(SP, Rsender); ++ __ ret(); ++ __ BIND(slow_path); ++ (void) generate_normal_entry(false); ++ ++ return entry_point; ++ ++} ++ ++void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { ++ ++ // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in ++ // the days we had adapter frames. When we deoptimize a situation where a ++ // compiled caller calls a compiled caller will have registers it expects ++ // to survive the call to the callee. If we deoptimize the callee the only ++ // way we can restore these registers is to have the oldest interpreter ++ // frame that we create restore these values. That is what this routine ++ // will accomplish. ++ ++ // At the moment we have modified c2 to not have any callee save registers ++ // so this problem does not exist and this routine is just a place holder. 
++ ++ assert(f->is_interpreted_frame(), "must be interpreted"); ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/interpreter_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/interpreter_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/interpreter_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/interpreter_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,50 @@ ++/* ++ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_INTERPRETER_SW64_HPP ++#define CPU_SW64_VM_INTERPRETER_SW64_HPP ++ ++ public: ++ ++ // Sentinel placed in the code for interpreter returns so ++ // that i2c adapters and osr code can recognize an interpreter ++ // return address and convert the return to a specialized ++ // block of code to handle compiedl return values and cleaning ++ // the fpu stack. 
++ static const int return_sentinel; ++ ++ static Address::ScaleFactor stackElementScale() { ++ return Address::times_8; ++ } ++ ++ // Offset from sp (which points to the last stack element) ++ static int expr_offset_in_bytes(int i) { return stackElementSize * i; } ++ // Size of interpreter code. Increase if too small. Interpreter will ++ // fail with a guarantee ("not enough space for interpreter generation"); ++ // if too small. ++ // Run with +PrintInterpreterSize to get the VM to print out the size. ++ // Max size with JVMTI and TaggedStackInterpreter ++ const static int InterpreterCodeSize = 168 * 1024; ++ ++#endif // CPU_SW64_VM_INTERPRETER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/javaFrameAnchor_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/javaFrameAnchor_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/javaFrameAnchor_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/javaFrameAnchor_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,86 @@ ++/* ++ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_JAVAFRAMEANCHOR_SW64_HPP ++#define CPU_SW64_VM_JAVAFRAMEANCHOR_SW64_HPP ++ ++private: ++ ++ // FP value associated with _last_Java_sp: ++ intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to ++ ++public: ++ // Each arch must define reset, save, restore ++ // These are used by objects that only care about: ++ // 1 - initializing a new state (thread creation, javaCalls) ++ // 2 - saving a current state (javaCalls) ++ // 3 - restoring an old state (javaCalls) ++ ++ void clear(void) { ++ // clearing _last_Java_sp must be first ++ _last_Java_sp = NULL; ++ // fence? ++ _last_Java_fp = NULL; ++ _last_Java_pc = NULL; ++ } ++ ++ void copy(JavaFrameAnchor* src) { ++ // In order to make sure the transition state is valid for "this" ++ // We must clear _last_Java_sp before copying the rest of the new data ++ // ++ // Hack Alert: Temporary bugfix for 4717480/4721647 ++ // To act like previous version (pd_cache_state) don't NULL _last_Java_sp ++ // unless the value is changing ++ // ++ if (_last_Java_sp != src->_last_Java_sp) ++ _last_Java_sp = NULL; ++ ++ _last_Java_fp = src->_last_Java_fp; ++ _last_Java_pc = src->_last_Java_pc; ++ // Must be last so profiler will always see valid frame if has_last_frame() is true ++ _last_Java_sp = src->_last_Java_sp; ++ } ++ ++ // Always walkable ++ bool walkable(void) { return true; } ++ // Never any thing to do since we are always walkable and can find address of return addresses ++ void make_walkable(JavaThread* thread) { } ++ ++ intptr_t* last_Java_sp(void) const { return _last_Java_sp; } ++ ++ address last_Java_pc(void) { return _last_Java_pc; } ++ ++private: ++ ++ static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); } ++ ++public: ++ ++ void 
set_last_Java_sp(intptr_t* sp) { _last_Java_sp = sp; } ++ ++ intptr_t* last_Java_fp(void) { return _last_Java_fp; } ++ // Assert (last_Java_sp == NULL || fp == NULL) ++ void set_last_Java_fp(intptr_t* fp) { _last_Java_fp = fp; } ++ ++#endif // CPU_SW64_VM_JAVAFRAMEANCHOR_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/jniFastGetField_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/jniFastGetField_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/jniFastGetField_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/jniFastGetField_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,220 @@ ++/* ++ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "memory/resourceArea.hpp" ++#include "prims/jniFastGetField.hpp" ++#include "prims/jvm_misc.hpp" ++#include "runtime/safepoint.hpp" ++ ++#define __ masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++#define BUFFER_SIZE 30*wordSize ++ ++// Instead of issuing lfence for LoadLoad barrier, we create data dependency ++// between loads, which is more efficient than lfence. ++ ++address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { ++ const char *name = NULL; ++ switch (type) { ++ case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break; ++ case T_BYTE: name = "jni_fast_GetByteField"; break; ++ case T_CHAR: name = "jni_fast_GetCharField"; break; ++ case T_SHORT: name = "jni_fast_GetShortField"; break; ++ case T_INT: name = "jni_fast_GetIntField"; break; ++ case T_LONG: name = "jni_fast_GetLongField"; break; ++ default: ShouldNotReachHere(); ++ } ++ ResourceMark rm; ++ BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); ++ CodeBuffer cbuf(blob); ++ MacroAssembler* masm = new MacroAssembler(&cbuf); ++ address fast_entry = __ pc(); ++ ++ Label slow; ++ ++ // return pc RA ++ // jni env A0 ++ // obj A1 ++ // jfieldID A2 ++ ++ address counter_addr = SafepointSynchronize::safepoint_counter_addr(); ++ // Parameters(A0~A3) should not be modified, since they will be used in slow path ++ ++ __ set64(AT, (long)counter_addr); ++ __ ldw(T1, AT, 0); ++ ++ __ and_imm8(AT, T1, 1); ++ __ bne(AT, slow); ++ ++ __ move(T0, A1); ++ __ clear_jweak_tag(T0); ++ ++ __ ldl(T0, T0, 0); // unbox, *obj ++ __ move(T2, A2); ++ __ srll(T2, T2, 2); // offset ++ __ addl(T0, T0, T2); ++ ++ assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); ++ 
speculative_load_pclist[count] = __ pc(); ++ switch (type) { ++ case T_BOOLEAN: __ ldbu(V0, T0, 0); break; ++ case T_BYTE: __ ldb_signed(V0, T0, 0); break; ++ case T_CHAR: __ ldhu(V0, T0, 0); break; ++ case T_SHORT: __ ldh_signed(V0, T0, 0); break; ++ case T_INT: __ ldw(V0, T0, 0); break; ++ case T_LONG: __ ldl(V0, T0, 0); break; ++ default: ShouldNotReachHere(); ++ } ++ ++ __ set64(AT, (long)counter_addr); ++ __ ldw(AT, AT, 0); ++ __ bne(T1, AT, slow); ++ ++ __ ret(); ++ ++ slowcase_entry_pclist[count++] = __ pc(); ++ __ BIND (slow); ++ address slow_case_addr = NULL; ++ switch (type) { ++ case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break; ++ case T_BYTE: slow_case_addr = jni_GetByteField_addr(); break; ++ case T_CHAR: slow_case_addr = jni_GetCharField_addr(); break; ++ case T_SHORT: slow_case_addr = jni_GetShortField_addr(); break; ++ case T_INT: slow_case_addr = jni_GetIntField_addr(); break; ++ case T_LONG: slow_case_addr = jni_GetLongField_addr(); ++ } ++ __ jmp(slow_case_addr); ++ ++ __ flush (); ++ ++ return fast_entry; ++} ++ ++address JNI_FastGetField::generate_fast_get_boolean_field() { ++ return generate_fast_get_int_field0(T_BOOLEAN); ++} ++ ++address JNI_FastGetField::generate_fast_get_byte_field() { ++ return generate_fast_get_int_field0(T_BYTE); ++} ++ ++address JNI_FastGetField::generate_fast_get_char_field() { ++ return generate_fast_get_int_field0(T_CHAR); ++} ++ ++address JNI_FastGetField::generate_fast_get_short_field() { ++ return generate_fast_get_int_field0(T_SHORT); ++} ++ ++address JNI_FastGetField::generate_fast_get_int_field() { ++ return generate_fast_get_int_field0(T_INT); ++} ++ ++address JNI_FastGetField::generate_fast_get_long_field() { ++ return generate_fast_get_int_field0(T_LONG); ++} ++ ++address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) { ++ const char *name =NULL; ++ switch (type) { ++ case T_FLOAT: name = "jni_fast_GetFloatField"; break; ++ case T_DOUBLE: name = "jni_fast_GetDoubleField"; 
break; ++ default: ShouldNotReachHere(); ++ } ++ ResourceMark rm; ++ BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); ++ CodeBuffer cbuf(blob); ++ MacroAssembler* masm = new MacroAssembler(&cbuf); ++ address fast_entry = __ pc(); ++ ++ Label slow; ++ ++ // return pc RA ++ // jni env A0 ++ // obj A1 ++ // jfieldID A2 ++ ++ address counter_addr = SafepointSynchronize::safepoint_counter_addr(); ++ __ set64(AT, (intptr_t)counter_addr); ++ __ ldw(T1, AT, 0); ++ __ and_imm8(AT, T1, 1); ++ __ bne(AT, slow); ++ ++ __ clear_jweak_tag(A1); ++ ++ __ ldl(A1, A1, 0); // unbox, *obj ++ __ srll(A2, A2, 2); // offset ++ __ addl(A1, A1, A2); ++ ++ assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); ++ speculative_load_pclist[count] = __ pc(); ++ switch (type) { ++ case T_FLOAT: ++ __ flds(F0, A1, 0); ++ break; ++ case T_DOUBLE: ++ __ fldd(F0, A1, 0); ++ break; ++ default: ShouldNotReachHere(); ++ } ++ ++ __ set64(AT, (intptr_t)counter_addr); ++ __ ldw(AT, AT, 0); ++ __ bne(T1, AT, slow); ++ ++ __ ret(); ++ ++ ++ slowcase_entry_pclist[count++] = __ pc(); ++ __ BIND (slow); ++ address slow_case_addr = NULL; ++ switch (type) { ++ case T_FLOAT: slow_case_addr = jni_GetFloatField_addr(); break; ++ case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break; ++ default: ShouldNotReachHere(); ++ } ++ __ jmp(slow_case_addr); ++ ++ __ flush (); ++ ++ return fast_entry; ++} ++ ++address JNI_FastGetField::generate_fast_get_float_field() { ++ return generate_fast_get_float_field0(T_FLOAT); ++} ++ ++address JNI_FastGetField::generate_fast_get_double_field() { ++ return generate_fast_get_float_field0(T_DOUBLE); ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/jni_sw64.h afu8u/hotspot/src/cpu/sw64/vm/jni_sw64.h +--- openjdk/hotspot/src/cpu/sw64/vm/jni_sw64.h 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/jni_sw64.h 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,51 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. Oracle designates this ++ * particular file as subject to the "Classpath" exception as provided ++ * by Oracle in the LICENSE file that accompanied this code. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++#ifndef _JAVASOFT_JNI_MD_H_ ++#define _JAVASOFT_JNI_MD_H_ ++ ++// Note: please do not change these without also changing jni_md.h in the JDK ++// repository ++#ifndef __has_attribute ++ #define __has_attribute(x) 0 ++#endif ++ ++#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility) ++ #define JNIEXPORT __attribute__((visibility("default"))) ++ #define JNIIMPORT __attribute__((visibility("default"))) ++#else ++ #define JNIEXPORT ++ #define JNIIMPORT ++#endif ++ ++#define JNICALL ++ ++typedef int jint; ++ ++ typedef long jlong; ++ ++typedef signed char jbyte; ++ ++#endif +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/jniTypes_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/jniTypes_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/jniTypes_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/jniTypes_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,143 @@ ++/* ++ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_JNITYPES_SW64_HPP ++#define CPU_SW64_VM_JNITYPES_SW64_HPP ++ ++#include "memory/allocation.hpp" ++#include "oops/oop.hpp" ++#include "prims/jni.h" ++ ++// This file holds platform-dependent routines used to write primitive jni ++// types to the array of arguments passed into JavaCalls::call ++ ++class JNITypes : AllStatic { ++ // These functions write a java primitive type (in native format) ++ // to a java stack slot array to be passed as an argument to JavaCalls:calls. ++ // I.e., they are functionally 'push' operations if they have a 'pos' ++ // formal parameter. Note that jlong's and jdouble's are written ++ // _in reverse_ of the order in which they appear in the interpreter ++ // stack. This is because call stubs (see stubGenerator_sparc.cpp) ++ // reverse the argument list constructed by JavaCallArguments (see ++ // javaCalls.hpp). ++ ++private: ++ ++ // 32bit Helper routines. ++ static inline void put_int2r(jint *from, intptr_t *to) { *(jint *)(to++) = from[1]; ++ *(jint *)(to ) = from[0]; } ++ static inline void put_int2r(jint *from, intptr_t *to, int& pos) { put_int2r(from, to + pos); pos += 2; } ++ ++public: ++ // In SW64, the sizeof intptr_t is 8 bytes, and each unit in JavaCallArguments::_value_buffer[] is 8 bytes. ++ // If we only write the low 4 bytes with (jint *), the high 4-bits will be left with uncertain values. ++ // Then, in JavaCallArguments::parameters(), the whole 8 bytes of a T_INT parameter is loaded. ++ // This error occurs in ReflectInvoke.java ++ // The parameter of DD(int) should be 4 instead of 0x550000004. 
++ // ++ // See: [runtime/javaCalls.hpp] ++ ++ static inline void put_int(jint from, intptr_t *to) { *(intptr_t *)(to + 0 ) = from; } ++ static inline void put_int(jint from, intptr_t *to, int& pos) { *(intptr_t *)(to + pos++) = from; } ++ static inline void put_int(jint *from, intptr_t *to, int& pos) { *(intptr_t *)(to + pos++) = *from; } ++ ++ // Longs are stored in native format in one JavaCallArgument slot at ++ // *(to). ++ // In theory, *(to + 1) is an empty slot. But, for several Java2D testing programs (TestBorderLayout, SwingTest), ++ // *(to + 1) must contains a copy of the long value. Otherwise it will corrupts. ++ static inline void put_long(jlong from, intptr_t *to) { ++ *(jlong*) (to + 1) = from; ++ *(jlong*) (to) = from; ++ } ++ ++ // A long parameter occupies two slot. ++ // It must fit the layout rule in methodHandle. ++ // ++ // See: [runtime/reflection.cpp] Reflection::invoke() ++ // assert(java_args.size_of_parameters() == method->size_of_parameters(), "just checking"); ++ ++ static inline void put_long(jlong from, intptr_t *to, int& pos) { ++ *(jlong*) (to + 1 + pos) = from; ++ *(jlong*) (to + pos) = from; ++ pos += 2; ++ } ++ ++ static inline void put_long(jlong *from, intptr_t *to, int& pos) { ++ *(jlong*) (to + 1 + pos) = *from; ++ *(jlong*) (to + pos) = *from; ++ pos += 2; ++ } ++ ++ // Oops are stored in native format in one JavaCallArgument slot at *to. ++ static inline void put_obj(oop from, intptr_t *to) { *(oop *)(to + 0 ) = from; } ++ static inline void put_obj(oop from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = from; } ++ static inline void put_obj(oop *from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = *from; } ++ ++ // Floats are stored in native format in one JavaCallArgument slot at *to. 
++ static inline void put_float(jfloat from, intptr_t *to) { *(jfloat *)(to + 0 ) = from; } ++ static inline void put_float(jfloat from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = from; } ++ static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; } ++ ++#undef _JNI_SLOT_OFFSET ++#define _JNI_SLOT_OFFSET 0 ++ ++ // Longs are stored in native format in one JavaCallArgument slot at ++ // *(to). ++ // In theory, *(to + 1) is an empty slot. But, for several Java2D testing programs (TestBorderLayout, SwingTest), ++ // *(to + 1) must contains a copy of the long value. Otherwise it will corrupts. ++ static inline void put_double(jdouble from, intptr_t *to) { ++ *(jdouble*) (to + 1) = from; ++ *(jdouble*) (to) = from; ++ } ++ ++ // A long parameter occupies two slot. ++ // It must fit the layout rule in methodHandle. ++ // ++ // See: [runtime/reflection.cpp] Reflection::invoke() ++ // assert(java_args.size_of_parameters() == method->size_of_parameters(), "just checking"); ++ ++ static inline void put_double(jdouble from, intptr_t *to, int& pos) { ++ *(jdouble*) (to + 1 + pos) = from; ++ *(jdouble*) (to + pos) = from; ++ pos += 2; ++ } ++ ++ static inline void put_double(jdouble *from, intptr_t *to, int& pos) { ++ *(jdouble*) (to + 1 + pos) = *from; ++ *(jdouble*) (to + pos) = *from; ++ pos += 2; ++ } ++ ++ ++ // The get_xxx routines, on the other hand, actually _do_ fetch ++ // java primitive types from the interpreter stack. 
++ static inline jint get_int (intptr_t *from) { return *(jint *) from; } ++ static inline jlong get_long (intptr_t *from) { return *(jlong *) (from + _JNI_SLOT_OFFSET); } ++ static inline oop get_obj (intptr_t *from) { return *(oop *) from; } ++ static inline jfloat get_float (intptr_t *from) { return *(jfloat *) from; } ++ static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + _JNI_SLOT_OFFSET); } ++#undef _JNI_SLOT_OFFSET ++}; ++ ++#endif // CPU_SW64_VM_JNITYPES_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,3861 @@ ++/* ++ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/assembler.hpp" ++#include "asm/assembler.inline.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "compiler/disassembler.hpp" ++#include "gc_interface/collectedHeap.inline.hpp" ++#include "interpreter/interpreter.hpp" ++#include "memory/cardTableModRefBS.hpp" ++#include "memory/resourceArea.hpp" ++#include "memory/universe.hpp" ++#include "prims/methodHandles.hpp" ++#include "runtime/biasedLocking.hpp" ++#include "runtime/interfaceSupport.hpp" ++#include "runtime/objectMonitor.hpp" ++#include "runtime/os.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "utilities/macros.hpp" ++#if INCLUDE_ALL_GCS ++#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" ++#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" ++#include "gc_implementation/g1/heapRegion.hpp" ++#endif // INCLUDE_ALL_GCS ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) ++#else ++#define BLOCK_COMMENT(str) { char line[1024]; sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); block_comment(line); } ++#define STOP(error) block_comment(error); stop(error) ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++// Implementation of MacroAssembler ++ ++intptr_t MacroAssembler::i[32] = {0}; ++float MacroAssembler::f[32] = {0.0}; ++ ++void MacroAssembler::print(outputStream *s) { ++ unsigned int k; ++ for(k=0; kprint_cr("i%d = 0x%.16lx", k, i[k]); ++ } ++ s->cr(); ++ ++ for(k=0; kprint_cr("f%d = %f", k, f[k]); ++ } ++ s->cr(); ++} ++ ++int MacroAssembler::i_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->i[k]; } ++int MacroAssembler::f_offset(unsigned int k) { return (intptr_t)&((MacroAssembler*)0)->f[k]; } ++ ++void MacroAssembler::save_registers(MacroAssembler *masm) { ++#define __ masm-> ++ for(int k=0; k<32; k++) { ++ __ stw (as_Register(k), A0, i_offset(k)); ++ } ++ ++ for(int k=0; k<32; k++) { ++ __ 
fsts(as_FloatRegister(k), A0, f_offset(k)); ++ } ++#undef __ ++} ++ ++void MacroAssembler::restore_registers(MacroAssembler *masm) { ++#define __ masm-> ++ for(int k=0; k<32; k++) { ++ __ ldw (as_Register(k), A0, i_offset(k)); ++ } ++ ++ for(int k=0; k<32; k++) { ++ __ flds (as_FloatRegister(k), A0, f_offset(k)); ++ } ++#undef __ ++} ++ ++ ++void MacroAssembler::pd_patch_instruction(address branch, address target) { ++ jint& stub_inst = *(jint*) branch; ++ stub_inst = patched_branch(target - branch, stub_inst, 0); ++} ++ ++static inline address first_cache_address() { ++ return CodeCache::low_bound() + sizeof(HeapBlock::Header); ++} ++ ++static inline address last_cache_address() { ++ return CodeCache::high_bound() - Assembler::InstructionSize; ++} ++ ++// in the code cache (because code can be relocated)? ++bool MacroAssembler::reachable_from_cache(address target) { ++ return false; ++} ++ ++void MacroAssembler::general_jump(address target) { ++ if (reachable_from_cache(target)) { ++ beq(R0, offset(target)); ++ } else { ++ set64(T12, (long)target); ++ jmp(T12); ++ } ++} ++ ++int MacroAssembler::insts_for_general_jump(address target) { ++ if (reachable_from_cache(target)) { ++ return 1; //ZHJ return 2; ++ } else { ++ return insts_for_set64((jlong)target) + 1; //ZHJ return insts_for_set64((jlong)target) + 2; ++ } ++} ++ ++void MacroAssembler::patchable_jump(address target) { ++ if (reachable_from_cache(target)) { ++ nop(); ++ nop(); ++ nop(); ++ nop(); ++ beq(R0, offset(target)); ++ } else { ++ if (SafePatch) { ++ if(offset() % 8 == 0) { ++ nop(); ++ br(T12, 2); ++ emit_int64((long)target); ++ ldl(T12, T12, 0); ++ }else { ++ br(T12, 2); ++ emit_int64((long)target); ++ ldl(T12, T12, 0); ++ nop(); ++ } ++ } else { ++ patchable_set48(T12, (long)target); ++ } ++ jmp(T12); ++ } ++} ++ ++int MacroAssembler::insts_for_patchable_jump(address target) { ++ //ZHJ return 6; ++ return 5; ++} ++ ++void MacroAssembler::general_call(address target) { ++ if 
(reachable_from_cache(target)) { ++ bsr(RA, (int)(long)target); ++ } else { ++ set64(T12, (long)target); ++ call(T12); ++ } ++} ++ ++int MacroAssembler::insts_for_general_call(address target) { ++ Unimplemented(); ++ if (reachable_from_cache(target)) { ++ return 1; //ZHJ return 2; ++ } else { ++ return insts_for_set64((jlong)target) + 1; ++ } ++} ++ ++void MacroAssembler::patchable_call(address target) { ++ if (reachable_from_cache(target)) { ++ nop(); ++ nop(); ++ nop(); ++ nop(); ++ bsr(RA, (int)(long)target); ++ } else { ++ if (SafePatch) { ++ if(offset() % 8 == 0) { ++ nop(); ++ br(T12, 2); ++ emit_int64((long)target); ++ ldl(T12, T12, 0); ++ }else { ++ br(T12, 2); ++ emit_int64((long)target); ++ ldl(T12, T12, 0); ++ nop(); ++ } ++ } else { ++ patchable_set48(T12, (long)target); ++ } ++ call(T12); ++ } ++} ++ ++void MacroAssembler::patchable_call_setfpec1(address target) { ++ if (reachable_from_cache(target)) { ++ nop(); ++ nop(); ++ nop(); ++ nop(); ++ bsr(RA, (int)(long)target); ++ } else { ++ if (SafePatch) { ++ if(offset() % 8 == 0) { ++ nop(); ++ br(T12, 2); ++ emit_int64((long)target); ++ ldl(T12, T12, 0); ++ }else { ++ br(T12, 2); ++ emit_int64((long)target); ++ ldl(T12, T12, 0); ++ nop(); ++ } ++ } else { ++ li48(T12, (long)target); ++ } ++ Assembler::call(RA, T12, 0); ++ if (Usesetfpec1) { ++ setfpec1(); ++ } else { ++ nop(); ++ } ++ } ++} ++ ++int MacroAssembler::insts_for_patchable_call(address target) { ++ if (SafePatch) { ++ return 6; ++ } else { ++ return 5; ++ } ++} ++ ++void MacroAssembler::beq_long(Register rs, Register rt, Label& L) { ++ Label not_taken; ++ ++ bne(rs, rt, not_taken); ++ ++ jmp_far(L); ++ ++ BIND(not_taken); ++} ++ ++void MacroAssembler::bne_long(Register rs, Register rt, Label& L) { ++ Label not_taken; ++ ++ beq(rs, rt, not_taken); ++ ++ jmp_far(L); ++ ++ BIND(not_taken); ++} ++ ++void MacroAssembler::bc1t_long(Label& L) { ++ Label not_taken; ++ ++ fbeq(FcmpRES, not_taken); ++ ++ jmp_far(L); ++ ++ BIND(not_taken); ++} ++ 
++void MacroAssembler::bc1f_long(Label& L) { ++ Label not_taken; ++ ++ fbne(FcmpRES, not_taken); ++ ++ jmp_far(L); ++ ++ BIND(not_taken); ++} ++ ++void MacroAssembler::b_far(Label& L) { ++ Assembler::beq(R0, L); ++} ++ ++void MacroAssembler::b_far(address entry) { ++ Assembler::beq(R0, offset(entry)); ++} ++ ++void MacroAssembler::ld_ptr(Register rt, Register offset, Register base) { ++ addu_long(AT, base, offset); ++ ld_ptr(rt, 0, AT); ++} ++ ++void MacroAssembler::st_ptr(Register rt, Register offset, Register base) { ++ addu_long(AT, base, offset); ++ st_ptr(rt, 0, AT); ++} ++ ++void MacroAssembler::ld_long(Register rt, Register offset, Register base) { ++ addu_long(AT, base, offset); ++ ld_long(rt, 0, AT); ++} ++ ++void MacroAssembler::st_long(Register rt, Register offset, Register base) { ++ addu_long(AT, base, offset); ++ st_long(rt, 0, AT); ++} ++ ++Address MacroAssembler::as_Address(AddressLiteral adr) { ++ return Address(adr.target(), adr.rspec()); ++} ++ ++Address MacroAssembler::as_Address(ArrayAddress adr) { ++ return Address::make_array(adr); ++} ++ ++// tmp_reg1 and tmp_reg2 should be saved outside of atomic_inc32 (caller saved). 
++void MacroAssembler::atomic_inc32(address counter_addr, int inc, Register tmp_reg1, Register tmp_reg2) { ++ Label again; ++ if (UseSW8A) { ++ li(tmp_reg1, counter_addr); ++ BIND(again); ++ lldw(tmp_reg2, tmp_reg1, 0); ++ add_simm16(tmp_reg2, tmp_reg2, inc); ++ move(AT, tmp_reg2); ++ lstw(AT, tmp_reg1, 0); ++ beq(AT, again); ++ } else { ++ SizedScope sc(this, 60); ++ li(tmp_reg1, counter_addr); ++ BIND(again); ++ lldw(tmp_reg2, tmp_reg1, 0); ++ ldi(GP, R0, 1); ++ wr_f(GP); ++ add_simm16(tmp_reg2, tmp_reg2, inc); ++ move(AT, tmp_reg2); ++ align(8); // must align ++ lstw(AT, tmp_reg1, 0); ++ rd_f(AT); ++ beq(AT, again); ++ } ++} ++ ++int MacroAssembler::biased_locking_enter(Register lock_reg, ++ Register obj_reg, ++ Register swap_reg, ++ Register tmp_reg, ++ bool swap_reg_contains_mark, ++ Label& done, ++ Label* slow_case, ++ BiasedLockingCounters* counters) { ++ assert(UseBiasedLocking, "why call this otherwise?"); ++ bool need_tmp_reg = false; ++ if (tmp_reg == noreg) { ++ need_tmp_reg = true; ++ tmp_reg = T12; ++ } ++ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg, AT); ++ assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); ++ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); ++ Address saved_mark_addr(lock_reg, 0); ++ ++ // Biased locking ++ // See whether the lock is currently biased toward our thread and ++ // whether the epoch is still valid ++ // Note that the runtime guarantees sufficient alignment of JavaThread ++ // pointers to allow age to be placed into low bits ++ // First check to see whether biasing is even enabled for this object ++ Label cas_label; ++ int null_check_offset = -1; ++ if (!swap_reg_contains_mark) { ++ null_check_offset = offset(); ++ ld_ptr(swap_reg, mark_addr); ++ } ++ ++ if (need_tmp_reg) { ++ push(tmp_reg); ++ } ++ move(tmp_reg, swap_reg); ++ and_imm8(tmp_reg, tmp_reg, markOopDesc::biased_lock_mask_in_place); ++ 
add_simm16(AT, R0, markOopDesc::biased_lock_pattern); ++ subl(AT, AT, tmp_reg); ++ if (need_tmp_reg) { ++ pop(tmp_reg); ++ } ++ ++ bne(AT, cas_label); ++ ++ st_ptr(swap_reg, saved_mark_addr); ++ if (need_tmp_reg) { ++ push(tmp_reg); ++ } ++ if (swap_reg_contains_mark) { ++ null_check_offset = offset(); ++ } ++ load_prototype_header(tmp_reg, obj_reg); ++ xor_ins(tmp_reg, tmp_reg, swap_reg); ++ get_thread(swap_reg); ++ xor_ins(swap_reg, swap_reg, tmp_reg); ++ ++ move(AT, ~((int) markOopDesc::age_mask_in_place)); ++ and_reg(swap_reg, swap_reg, AT); ++ ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ bne(swap_reg, L); ++ push(tmp_reg); ++ push(A0); ++ atomic_inc32((address)BiasedLocking::biased_lock_entry_count_addr(), 1, A0, tmp_reg); ++ pop(A0); ++ pop(tmp_reg); ++ BIND(L); ++ } ++ if (need_tmp_reg) { ++ pop(tmp_reg); ++ } ++ beq(swap_reg, done); ++ Label try_revoke_bias; ++ Label try_rebias; ++ ++ // At this point we know that the header has the bias pattern and ++ // that we are not the bias owner in the current epoch. We need to ++ // figure out more details about the state of the header in order to ++ // know what operations can be legally performed on the object's ++ // header. ++ ++ // If the low three bits in the xor result aren't clear, that means ++ // the prototype header is no longer biased and we have to revoke ++ // the bias on this object. ++ ++ move(AT, markOopDesc::biased_lock_mask_in_place); ++ and_reg(AT, swap_reg, AT); ++ bne(AT, try_revoke_bias); ++ // Biasing is still enabled for this data type. See whether the ++ // epoch of the current bias is still valid, meaning that the epoch ++ // bits of the mark word are equal to the epoch bits of the ++ // prototype header. (Note that the prototype header's epoch bits ++ // only change at a safepoint.) If not, attempt to rebias the object ++ // toward the current thread. 
Note that we must be absolutely sure ++ // that the current epoch is invalid in order to do this because ++ // otherwise the manipulations it performs on the mark word are ++ // illegal. ++ ++ move(AT, markOopDesc::epoch_mask_in_place); ++ and_reg(AT,swap_reg, AT); ++ bne(AT, try_rebias); ++ // The epoch of the current bias is still valid but we know nothing ++ // about the owner; it might be set or it might be clear. Try to ++ // acquire the bias of the object using an atomic operation. If this ++ // fails we will go in to the runtime to revoke the object's bias. ++ // Note that we first construct the presumed unbiased header so we ++ // don't accidentally blow away another thread's valid bias. ++ ++ ld_ptr(swap_reg, saved_mark_addr); ++ ++ move(AT, markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place); ++ and_reg(swap_reg, swap_reg, AT); ++ ++ if (need_tmp_reg) { ++ push(tmp_reg); ++ } ++ get_thread(tmp_reg); ++ or_ins(tmp_reg, tmp_reg, swap_reg); ++ //if (os::is_MP()) { ++ // memb(); ++ //} ++ cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg); ++ if (need_tmp_reg) { ++ pop(tmp_reg); ++ } ++ // If the biasing toward our thread failed, this means that ++ // another thread succeeded in biasing it toward itself and we ++ // need to revoke that bias. The revocation will occur in the ++ // interpreter runtime in the slow case. ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ bne(AT, L); ++ push(tmp_reg); ++ push(A0); ++ atomic_inc32((address)BiasedLocking::anonymously_biased_lock_entry_count_addr(), 1, A0, tmp_reg); ++ pop(A0); ++ pop(tmp_reg); ++ BIND(L); ++ } ++ if (slow_case != NULL) { ++ beq(AT, *slow_case); ++ } ++ beq(R0, done); ++ ++ BIND(try_rebias); ++ // At this point we know the epoch has expired, meaning that the ++ // current "bias owner", if any, is actually invalid. 
Under these ++ // circumstances _only_, we are allowed to use the current header's ++ // value as the comparison value when doing the cas to acquire the ++ // bias in the current epoch. In other words, we allow transfer of ++ // the bias from one thread to another directly in this situation. ++ // ++ if (need_tmp_reg) { ++ push(tmp_reg); ++ } ++ load_prototype_header(tmp_reg, obj_reg); ++ get_thread(swap_reg); ++ or_ins(tmp_reg, tmp_reg, swap_reg); ++ ld_ptr(swap_reg, saved_mark_addr); ++ ++ cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg); ++ if (need_tmp_reg) { ++ pop(tmp_reg); ++ } ++ // If the biasing toward our thread failed, then another thread ++ // succeeded in biasing it toward itself and we need to revoke that ++ // bias. The revocation will occur in the runtime in the slow case. ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ bne(AT, L); ++ push(AT); ++ push(tmp_reg); ++ atomic_inc32((address)BiasedLocking::rebiased_lock_entry_count_addr(), 1, AT, tmp_reg); ++ pop(tmp_reg); ++ pop(AT); ++ BIND(L); ++ } ++ if (slow_case != NULL) { ++ beq(AT, *slow_case); ++ } ++ ++ beq(R0, done); ++ BIND(try_revoke_bias); ++ // The prototype mark in the klass doesn't have the bias bit set any ++ // more, indicating that objects of this data type are not supposed ++ // to be biased any more. We are going to try to reset the mark of ++ // this object to the prototype value and fall through to the ++ // CAS-based locking scheme. Note that if our CAS fails, it means ++ // that another thread raced us for the privilege of revoking the ++ // bias of this particular object, so it's okay to continue in the ++ // normal locking code. 
++ // ++ ld_ptr(swap_reg, saved_mark_addr); ++ ++ if (need_tmp_reg) { ++ push(tmp_reg); ++ } ++ load_prototype_header(tmp_reg, obj_reg); ++ //if (os::is_MP()) { ++ // lock(); ++ //} ++ cmpxchg(tmp_reg, Address(obj_reg, 0), swap_reg); ++ if (need_tmp_reg) { ++ pop(tmp_reg); ++ } ++ // Fall through to the normal CAS-based lock, because no matter what ++ // the result of the above CAS, some thread must have succeeded in ++ // removing the bias bit from the object's header. ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ bne(AT, L); ++ push(AT); ++ push(tmp_reg); ++ atomic_inc32((address)BiasedLocking::revoked_lock_entry_count_addr(), 1, AT, tmp_reg); ++ pop(tmp_reg); ++ pop(AT); ++ BIND(L); ++ } ++ ++ BIND(cas_label); ++ return null_check_offset; ++} ++ ++void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) { ++ assert(UseBiasedLocking, "why call this otherwise?"); ++ ++ // Check for biased locking unlock case, which is a no-op ++ // Note: we do not have to check the thread ID for two reasons. ++ // First, the interpreter checks for IllegalMonitorStateException at ++ // a higher level. Second, if the bias was revoked while we held the ++ // lock, the object could not be rebiased toward another thread, so ++ // the bias bit would be clear. ++ ldl(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); ++ and_imm8(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); ++ add_simm16(AT, R0, markOopDesc::biased_lock_pattern); ++ ++ beq(AT, temp_reg, done); ++} ++ ++// the stack pointer adjustment is needed. 
see InterpreterMacroAssembler::super_call_VM_leaf ++// this method will handle the stack problem, you need not to preserve the stack space for the argument now ++void MacroAssembler::call_VM_leaf_base(address entry_point, int number_of_arguments) { ++ Label L, E; ++ ++ assert(number_of_arguments <= 4, "just check"); ++ ++ and_imm8(AT, SP, 0xf); ++ beq(AT, L); ++ subl(SP, SP, 8); ++ call(entry_point, relocInfo::runtime_call_type); ++ addl(SP, SP, 8); ++ beq(R0, E); ++ ++ BIND(L); ++ call(entry_point, relocInfo::runtime_call_type); ++ BIND(E); ++} ++ ++ ++void MacroAssembler::jmp(Register reg) { ++ Assembler::jmp(AT, reg, 0); ++} ++ ++void MacroAssembler::jmp(address entry) { ++ patchable_jump(entry); ++} ++ ++void MacroAssembler::jmp(address entry, relocInfo::relocType rtype) { ++ switch (rtype) { ++ case relocInfo::runtime_call_type: ++ case relocInfo::none: ++ jmp(entry); ++ break; ++ default: ++ { ++ InstructionMark im(this); ++ relocate(rtype); ++ patchable_jump(entry); ++ } ++ break; ++ } ++} ++ ++void MacroAssembler::jmp_far(Label& L) { ++ if (L.is_bound()) { ++ address entry = target(L); ++ assert(entry != NULL, "jmp most probably wrong"); ++ InstructionMark im(this); ++ ++ relocate(relocInfo::internal_word_type); ++ if (SafePatch) { ++ if(offset() % 8 == 0) { ++ nop(); ++ br(T12, 2); ++ emit_int64((long)entry); ++ ldl(T12, T12, 0); ++ }else { ++ br(T12, 2); ++ emit_int64((long)entry); ++ ldl(T12, T12, 0); ++ nop(); ++ } ++ } else { ++ patchable_set48(T12, (long)entry); ++ } ++ jmp(T12); ++ } else { ++ InstructionMark im(this); ++ L.add_patch_at(code(), locator()); ++ ++ relocate(relocInfo::internal_word_type); ++ if (SafePatch) { ++ if(offset() % 8 == 0) { ++ nop(); ++ br(T12, 2); ++ emit_int64((long)pc()); ++ ldl(T12, T12, 0); ++ }else { ++ br(T12, 2); ++ emit_int64((long)pc()); ++ ldl(T12, T12, 0); ++ nop(); ++ } ++ } else { ++ patchable_set48(T12, (long)pc()); ++ } ++ jmp(T12); ++ } ++} ++ ++void MacroAssembler::mov_metadata(Address dst, Metadata* obj) { 
++ int oop_index; ++ if (obj) { ++ oop_index = oop_recorder()->find_index(obj); ++ } else { ++ oop_index = oop_recorder()->allocate_metadata_index(obj); ++ } ++ relocate(metadata_Relocation::spec(oop_index)); ++ patchable_set48(AT, (long)obj); ++ stl(AT, dst); ++} ++ ++void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { ++ int oop_index; ++ if (obj) { ++ oop_index = oop_recorder()->find_index(obj); ++ } else { ++ oop_index = oop_recorder()->allocate_metadata_index(obj); ++ } ++ relocate(metadata_Relocation::spec(oop_index)); ++ patchable_set48(dst, (long)obj); ++} ++ ++void MacroAssembler::call(Register reg) { ++ Assembler::call(RA, reg, 0); ++ nop(); ++} ++ ++void MacroAssembler::call(address entry) { ++// c/c++ code assume T12 is entry point, so we just always move entry to t12 ++// maybe there is some more graceful method to handle this. ++// For more info, see class NativeCall. ++ patchable_call(entry); ++} ++ ++void MacroAssembler::call(address entry, relocInfo::relocType rtype) { ++ switch (rtype) { ++ case relocInfo::runtime_call_type: ++ patchable_call_setfpec1(entry); ++ break; ++ case relocInfo::none: ++ call(entry); ++ break; ++ default: ++ { ++ InstructionMark im(this); ++ relocate(rtype); ++ call(entry); ++ } ++ break; ++ } ++} ++ ++void MacroAssembler::call(address entry, RelocationHolder& rh) ++{ ++ switch (rh.type()) { ++ case relocInfo::runtime_call_type: ++ patchable_call_setfpec1(entry); ++ break; ++ case relocInfo::none: ++ call(entry); ++ break; ++ default: ++ { ++ InstructionMark im(this); ++ relocate(rh); ++ call(entry); ++ } ++ break; ++ } ++} ++ ++void MacroAssembler::ic_call(address entry) { ++ RelocationHolder rh = virtual_call_Relocation::spec(pc()); ++ patchable_set48(IC_Klass, (long)Universe::non_oop_word()); ++ assert(entry != NULL, "call most probably wrong"); ++ InstructionMark im(this); ++ relocate(rh); ++ patchable_call(entry); ++} ++ ++void MacroAssembler::c2bool(Register r) { ++ Assembler::selne(r, 1, R0, r); ++} ++ 
++#ifndef PRODUCT ++extern "C" void findpc(intptr_t x); ++#endif ++ ++void MacroAssembler::debug(char* msg/*, RegistersForDebugging* regs*/) { ++ if ( ShowMessageBoxOnError ) { ++ JavaThreadState saved_state = JavaThread::current()->thread_state(); ++ JavaThread::current()->set_thread_state(_thread_in_vm); ++ { ++ // In order to get locks work, we need to fake a in_VM state ++ ttyLocker ttyl; ++ ::tty->print_cr("EXECUTION STOPPED: %s\n", msg); ++ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) { ++ BytecodeCounter::print(); ++ } ++ ++ } ++ ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state); ++ } ++ else ++ ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); ++} ++ ++ ++void MacroAssembler::stop(const char* msg) { ++ li(A0, (long)msg); ++ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); ++ brk(17); ++} ++ ++void MacroAssembler::warn(const char* msg) { ++ pushad(); ++ li(A0, (long)msg); ++ push(S2); ++ move(AT, -(StackAlignmentInBytes)); ++ move(S2, SP); // use S2 as a sender SP holder ++ and_reg(SP, SP, AT); // align stack as required by ABI ++ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); ++ move(SP, S2); // use S2 as a sender SP holder ++ pop(S2); ++ popad(); ++} ++ ++//void MacroAssembler::print_reg(Register reg) { ++// void * cur_pc = pc(); ++// pushad(); ++// NOT_LP64(push(FP);) ++// ++// li(A0, (long)reg->name()); ++// if (reg == SP) ++// add_simm16(A1, SP, wordSize * 23); //23 registers saved in pushad() ++// else if (reg == A0) ++// ldl(A1, SP, wordSize * 19); //A0 has been modified by li(A0, (long)reg->name()). Ugly Code! 
++// else ++// move(A1, reg); ++// li(A2, (long)cur_pc); ++// push(S2); ++// move(AT, -(StackAlignmentInBytes)); ++// move(S2, SP); // use S2 as a sender SP holder ++// and_reg(SP, SP, AT); // align stack as required by ABI ++// call(CAST_FROM_FN_PTR(address, SharedRuntime::print_reg_with_pc),relocInfo::runtime_call_type); ++// move(SP, S2); // use S2 as a sender SP holder ++// pop(S2); ++// NOT_LP64(pop(FP);) ++// popad(); ++// ++//} ++ ++//void MacroAssembler::print_reg(FloatRegister reg) { ++// void * cur_pc = pc(); ++// pushad(); ++// NOT_LP64(push(FP);) ++// li(A0, (long)reg->name()); ++// push(S2); ++// move(AT, -(StackAlignmentInBytes)); ++// move(S2, SP); // use S2 as a sender SP holder ++// and_reg(SP, SP, AT); // align stack as required by ABI ++// call(CAST_FROM_FN_PTR(address, SharedRuntime::print_str),relocInfo::runtime_call_type); ++// move(SP, S2); // use S2 as a sender SP holder ++// pop(S2); ++// NOT_LP64(pop(FP);) ++// popad(); ++// ++// pushad(); ++// NOT_LP64(push(FP);) ++// move(FP, SP); ++// move(AT, -(StackAlignmentInBytes)); ++// and_reg(SP , SP , AT); ++// fmovd(F16, reg); ++// call(CAST_FROM_FN_PTR(address, SharedRuntime::print_double),relocInfo::runtime_call_type); ++// move(SP, FP); ++// NOT_LP64(pop(FP);) ++// popad(); ++// ++//} ++ ++void MacroAssembler::increment(Register reg, int imm) { ++ if (!imm) return; ++ if (is_simm16(imm)) { ++ add_simm16(reg, reg, imm); ++ } else { ++ move(AT, imm); ++ addl(reg, reg, AT); ++ } ++} ++ ++void MacroAssembler::decrement(Register reg, int imm) { ++ increment(reg, -imm); ++} ++ ++ ++void MacroAssembler::call_VM(Register oop_result, ++ address entry_point, ++ bool check_exceptions) { ++ call_VM_helper(oop_result, entry_point, 0, check_exceptions); ++} ++ ++void MacroAssembler::call_VM(Register oop_result, ++ address entry_point, ++ Register arg_1, ++ bool check_exceptions) { ++ if (arg_1!=A1) move(A1, arg_1); ++ call_VM_helper(oop_result, entry_point, 1, check_exceptions); ++} ++ ++void 
MacroAssembler::call_VM(Register oop_result, ++ address entry_point, ++ Register arg_1, ++ Register arg_2, ++ bool check_exceptions) { ++ if (arg_1!=A1) move(A1, arg_1); ++ if (arg_2!=A2) move(A2, arg_2); ++ assert(arg_2 != A1, "smashed argument"); ++ call_VM_helper(oop_result, entry_point, 2, check_exceptions); ++} ++ ++void MacroAssembler::call_VM(Register oop_result, ++ address entry_point, ++ Register arg_1, ++ Register arg_2, ++ Register arg_3, ++ bool check_exceptions) { ++ if (arg_1!=A1) move(A1, arg_1); ++ if (arg_2!=A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument"); ++ if (arg_3!=A3) move(A3, arg_3); assert(arg_3 != A1 && arg_3 != A2, "smashed argument"); ++ call_VM_helper(oop_result, entry_point, 3, check_exceptions); ++} ++ ++void MacroAssembler::call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ int number_of_arguments, ++ bool check_exceptions) { ++ call_VM_base(oop_result, NOREG, last_java_sp, entry_point, number_of_arguments, check_exceptions); ++} ++ ++void MacroAssembler::call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ Register arg_1, ++ bool check_exceptions) { ++ if (arg_1 != A1) move(A1, arg_1); ++ call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); ++} ++ ++void MacroAssembler::call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ Register arg_1, ++ Register arg_2, ++ bool check_exceptions) { ++ if (arg_1 != A1) move(A1, arg_1); ++ if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument"); ++ call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); ++} ++ ++void MacroAssembler::call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ Register arg_1, ++ Register arg_2, ++ Register arg_3, ++ bool check_exceptions) { ++ if (arg_1 != A1) move(A1, arg_1); ++ if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A1, "smashed argument"); ++ if (arg_3 != A3) move(A3, arg_3); 
assert(arg_3 != A1 && arg_3 != A2, "smashed argument"); ++ call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); ++} ++ ++void MacroAssembler::call_VM_base(Register oop_result, ++ Register java_thread, ++ Register last_java_sp, ++ address entry_point, ++ int number_of_arguments, ++ bool check_exceptions) { ++ ++ address before_call_pc; ++ // determine java_thread register ++ if (!java_thread->is_valid()) { ++ java_thread = S2thread; ++ } ++ // determine last_java_sp register ++ if (!last_java_sp->is_valid()) { ++ last_java_sp = SP; ++ } ++ // debugging support ++ assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); ++ assert(number_of_arguments <= 4 , "cannot have negative number of arguments"); ++ assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); ++ assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); ++ ++ assert(last_java_sp != FP, "this code doesn't work for last_java_sp == fp, which currently can't portably work anyway since C2 doesn't save fp"); ++ ++ // set last Java frame before call ++ before_call_pc = (address)pc(); ++ set_last_Java_frame(java_thread, last_java_sp, FP, before_call_pc); ++ ++ // do the call ++ move(A0, java_thread); ++ call(entry_point, relocInfo::runtime_call_type); ++ ++ // restore the thread (cannot use the pushed argument since arguments ++ // may be overwritten by C code generated by an optimizing compiler); ++ // however can use the register value directly if it is callee saved. 
++#ifdef ASSERT ++ { ++ Label L; ++ get_thread(AT); ++ beq(java_thread, AT, L); ++ stop("MacroAssembler::call_VM_base: TREG not callee saved?"); ++ BIND(L); ++ } ++#endif ++ ++ // discard thread and arguments ++ ld_ptr(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ // reset last Java frame ++ reset_last_Java_frame(java_thread, false); ++ ++ check_and_handle_popframe(java_thread); ++ check_and_handle_earlyret(java_thread); ++ if (check_exceptions) { ++ // check for pending exceptions (java_thread is set upon return) ++ Label L; ++ ldl(AT, java_thread, in_bytes(Thread::pending_exception_offset())); ++ beq(AT, L); ++ li(AT, before_call_pc); ++ push(AT); ++ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); ++ BIND(L); ++ } ++ ++ // get oop result if there is one and reset the value in the thread ++ if (oop_result->is_valid()) { ++ ldl(oop_result, java_thread, in_bytes(JavaThread::vm_result_offset())); ++ stl(R0, java_thread, in_bytes(JavaThread::vm_result_offset())); ++ // verify_oop(oop_result); ++ } ++} ++ ++void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { ++ ++ move(V0, SP); ++ //we also reserve space for java_thread here ++ move(AT, -(StackAlignmentInBytes)); ++ and_reg(SP, SP, AT); ++ call_VM_base(oop_result, NOREG, V0, entry_point, number_of_arguments, check_exceptions); ++ ++} ++ ++void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { ++ call_VM_leaf_base(entry_point, number_of_arguments); ++} ++ ++void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { ++ if (arg_0 != A0) move(A0, arg_0); ++ call_VM_leaf(entry_point, 1); ++} ++ ++void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { ++ if (arg_0 != A0) move(A0, arg_0); ++ if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument"); ++ call_VM_leaf(entry_point, 2); ++} ++ ++void 
MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { ++ if (arg_0 != A0) move(A0, arg_0); ++ if (arg_1 != A1) move(A1, arg_1); assert(arg_1 != A0, "smashed argument"); ++ if (arg_2 != A2) move(A2, arg_2); assert(arg_2 != A0 && arg_2 != A1, "smashed argument"); ++ call_VM_leaf(entry_point, 3); ++} ++void MacroAssembler::super_call_VM_leaf(address entry_point) { ++ MacroAssembler::call_VM_leaf_base(entry_point, 0); ++} ++ ++ ++void MacroAssembler::super_call_VM_leaf(address entry_point, ++ Register arg_1) { ++ if (arg_1 != A0) move(A0, arg_1); ++ MacroAssembler::call_VM_leaf_base(entry_point, 1); ++} ++ ++ ++void MacroAssembler::super_call_VM_leaf(address entry_point, ++ Register arg_1, ++ Register arg_2) { ++ if (arg_1 != A0) move(A0, arg_1); ++ if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument"); ++ MacroAssembler::call_VM_leaf_base(entry_point, 2); ++} ++ ++ ++void MacroAssembler::super_call_VM_leaf(address entry_point, ++ Register arg_1, ++ Register arg_2, ++ Register arg_3) { ++ if (arg_1 != A0) move(A0, arg_1); ++ if (arg_2 != A1) move(A1, arg_2); assert(arg_2 != A0, "smashed argument"); ++ if (arg_3 != A2) move(A2, arg_3); assert(arg_3 != A0 && arg_3 != A1, "smashed argument"); ++ MacroAssembler::call_VM_leaf_base(entry_point, 3); ++} ++ ++void MacroAssembler::check_and_handle_earlyret(Register java_thread) { ++} ++ ++void MacroAssembler::check_and_handle_popframe(Register java_thread) { ++} ++ ++void MacroAssembler::null_check(Register reg, int offset) { ++ if (needs_explicit_null_check(offset)) { ++ // provoke OS NULL exception if reg = NULL by ++ // accessing M[reg] w/o changing any (non-CC) registers ++ // NOTE: cmpl is plenty here to provoke a segv ++ ldw(AT, reg, 0); ++ } else { ++ // nothing to do, (later) access of M[reg + offset] ++ // will provoke OS NULL exception if reg = NULL ++ } ++} ++ ++void MacroAssembler::enter() { ++ push2(RA, FP); ++ move(FP, SP); ++} ++ ++void 
MacroAssembler::leave() { ++ add_simm16(SP, FP, 2 * wordSize); ++ ldl(RA, SP, - 1 * wordSize); ++ ldl(FP, SP, - 2 * wordSize); ++} ++ ++void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { ++ // determine java_thread register ++ if (!java_thread->is_valid()) { ++ java_thread = S2thread; ++ } ++ // we must set sp to zero to clear frame ++ st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ // must clear fp, so that compiled frames are not confused; it is possible ++ // that we need it only for debugging ++ if(clear_fp) { ++ st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_fp_offset())); ++ } ++ ++ // Always clear the pc because it could have been set by make_walkable() ++ st_ptr(R0, java_thread, in_bytes(JavaThread::last_Java_pc_offset())); ++} ++ ++void MacroAssembler::reset_last_Java_frame(bool clear_fp) { ++ Register thread = S2thread; ++ // we must set sp to zero to clear frame ++ stl(R0, Address(thread, JavaThread::last_Java_sp_offset())); ++ // must clear fp, so that compiled frames are not confused; it is ++ // possible that we need it only for debugging ++ if (clear_fp) { ++ stl(R0, Address(thread, JavaThread::last_Java_fp_offset())); ++ } ++ ++ // Always clear the pc because it could have been set by make_walkable() ++ stl(R0, Address(thread, JavaThread::last_Java_pc_offset())); ++} ++ ++// Write serialization page so VM thread can do a pseudo remote membar. ++// We use the current thread pointer to calculate a thread specific ++// offset to write to within the page. This minimizes bus traffic ++// due to cache line collision. 
++void MacroAssembler::serialize_memory(Register thread, Register tmp) { ++ move(tmp, thread); ++ zapnot(tmp, tmp, 0xf); ++ srll(tmp, tmp, os::get_serialize_page_shift_count()); ++ move(AT, (os::vm_page_size() - sizeof(int))); ++ and_reg(tmp, tmp, AT); ++ stw(tmp, Address(tmp, (intptr_t)os::get_memory_serialize_page())); ++} ++ ++// Calls to C land ++// ++// When entering C land, the fp, & sp of the last Java frame have to be recorded ++// in the (thread-local) JavaThread object. When leaving C land, the last Java fp ++// has to be reset to 0. This is required to allow proper stack traversal. ++void MacroAssembler::set_last_Java_frame(Register java_thread, ++ Register last_java_sp, ++ Register last_java_fp, ++ address last_java_pc) { ++ // determine java_thread register ++ if (!java_thread->is_valid()) { ++ java_thread = S2thread; ++ } ++ // determine last_java_sp register ++ if (!last_java_sp->is_valid()) { ++ last_java_sp = SP; ++ } ++ ++ // last_java_fp is optional ++ if (last_java_fp->is_valid()) { ++ st_ptr(last_java_fp, java_thread, in_bytes(JavaThread::last_Java_fp_offset())); ++ } ++ ++ // last_java_pc is optional ++ if (last_java_pc != NULL) { ++ relocate(relocInfo::internal_word_type); ++ patchable_set48(AT, (long)last_java_pc); ++ st_ptr(AT, java_thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ } ++ st_ptr(last_java_sp, java_thread, in_bytes(JavaThread::last_Java_sp_offset())); ++} ++ ++void MacroAssembler::set_last_Java_frame(Register last_java_sp, ++ Register last_java_fp, ++ address last_java_pc) { ++ // determine last_java_sp register ++ if (!last_java_sp->is_valid()) { ++ last_java_sp = SP; ++ } ++ ++ Register thread = S2thread; ++ // last_java_fp is optional ++ if (last_java_fp->is_valid()) { ++ stl(last_java_fp, Address(thread, JavaThread::last_Java_fp_offset())); ++ } ++ ++ // last_java_pc is optional ++ if (last_java_pc != NULL) { ++ relocate(relocInfo::internal_word_type); ++ patchable_set48(AT, 
(long)last_java_pc); ++ st_ptr(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ } ++ ++ stl(last_java_sp, Address(thread, JavaThread::last_Java_sp_offset())); ++} ++ ++void MacroAssembler::resolve_jobject(Register value, ++ Register thread, ++ Register tmp) { ++ assert_different_registers(value, thread, tmp); ++ Label done, not_weak; ++ beq(value, done); // Use NULL as-is. ++ ldi(AT, R0, JNIHandles::weak_tag_mask); ++ and_reg(AT, value, AT); ++ beq(AT, not_weak); ++ // Resolve jweak. ++ ldl(value, value, -JNIHandles::weak_tag_value); ++ verify_oop(value); ++#if INCLUDE_ALL_GCS ++ if (UseG1GC) { ++ g1_write_barrier_pre(noreg /* obj */, ++ value /* pre_val */, ++ thread /* thread */, ++ tmp /* tmp */, ++ true /* tosca_live */, ++ true /* expand_call */); ++ } ++#endif // INCLUDE_ALL_GCS ++ beq(R0, done); ++ BIND(not_weak); ++ // Resolve (untagged) jobject. ++ ldl(value, value, 0); ++ verify_oop(value); ++ BIND(done); ++} ++ ++void MacroAssembler::clear_jweak_tag(Register possibly_jweak) { ++ const int32_t inverted_jweak_mask = ~static_cast(JNIHandles::weak_tag_mask); ++ STATIC_ASSERT(inverted_jweak_mask == -2); // otherwise check this code ++ // The inverted mask is sign-extended ++ move(AT, inverted_jweak_mask); ++ and_reg(possibly_jweak, possibly_jweak, AT); ++} ++ ++////////////////////////////////////////////////////////////////////////////////// ++#if INCLUDE_ALL_GCS ++ ++void MacroAssembler::g1_write_barrier_pre(Register obj, ++ Register pre_val, ++ Register thread, ++ Register tmp, ++ bool tosca_live, ++ bool expand_call) { ++ ++ // If expand_call is true then we expand the call_VM_leaf macro ++ // directly to skip generating the check by ++ // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 
++ ++ assert(thread == S2thread, "must be"); ++ ++ Label done; ++ Label runtime; ++ ++ assert(pre_val != noreg, "check this code"); ++ ++ if (obj != noreg) { ++ assert_different_registers(obj, pre_val, tmp); ++ assert(pre_val != V0, "check this code"); ++ } ++ ++ Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + ++ PtrQueue::byte_offset_of_active())); ++ Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + ++ PtrQueue::byte_offset_of_index())); ++ Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + ++ PtrQueue::byte_offset_of_buf())); ++ ++ ++ // Is marking active? ++ if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { ++ ldw_signed(AT, in_progress); ++ } else { ++ assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); ++ ldb_signed(AT, in_progress); ++ } ++ beq(AT, done); ++ ++ // Do we need to load the previous value? ++ if (obj != noreg) { ++ load_heap_oop(pre_val, Address(obj, 0)); ++ } ++ ++ // Is the previous value null? ++ beq(pre_val, done); ++ ++ // Can we store original value in the thread's buffer? ++ // Is index == 0? ++ // (The index field is typed as size_t.) ++ ++ ldl(tmp, index); ++ beq(tmp, runtime); ++ ++ add_simm16(tmp, tmp, -1 * wordSize); ++ stl(tmp, index); ++ ldl(AT, buffer); ++ addl(tmp, tmp, AT); ++ ++ // Record the previous value ++ stl(pre_val, tmp, 0); ++ beq(R0, done); ++ ++ BIND(runtime); ++ // save the live input values ++ if (tosca_live) push(V0); ++ ++ if (obj != noreg && obj != V0) push(obj); ++ ++ if (pre_val != V0) push(pre_val); ++ ++ // Calling the runtime using the regular call_VM_leaf mechanism generates ++ // code (generated by InterpreterMacroAssember::call_VM_leaf_base) ++ // that checks that the *(fp+frame::interpreter_frame_last_sp) == NULL. ++ // ++ // If we care generating the pre-barrier without a frame (e.g. 
in the ++ // intrinsified Reference.get() routine) then fp might be pointing to ++ // the caller frame and so this check will most likely fail at runtime. ++ // ++ // Expanding the call directly bypasses the generation of the check. ++ // So when we do not have have a full interpreter frame on the stack ++ // expand_call should be passed true. ++ ++ NOT_LP64( push(thread); ) ++ ++ if (expand_call) { ++ LP64_ONLY( assert(pre_val != A1, "smashed arg"); ) ++ if (thread != A1) move(A1, thread); ++ if (pre_val != A0) move(A0, pre_val); ++ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2); ++ } else { ++ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread); ++ } ++ ++ NOT_LP64( pop(thread); ) ++ ++ // save the live input values ++ if (pre_val != V0) ++ pop(pre_val); ++ ++ if (obj != noreg && obj != V0) ++ pop(obj); ++ ++ if(tosca_live) pop(V0); ++ ++ BIND(done); ++} ++ ++void MacroAssembler::g1_write_barrier_post(Register store_addr, ++ Register new_val, ++ Register thread, ++ Register tmp, ++ Register tmp2) { ++ assert(tmp != AT, "must be"); ++ assert(tmp2 != AT, "must be"); ++ assert(thread == S2thread, "must be"); ++ ++ Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + ++ PtrQueue::byte_offset_of_index())); ++ Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + ++ PtrQueue::byte_offset_of_buf())); ++ ++ BarrierSet* bs = Universe::heap()->barrier_set(); ++ CardTableModRefBS* ct = (CardTableModRefBS*)bs; ++ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); ++ ++ Label done; ++ Label runtime; ++ ++ // Does store cross heap regions? ++ xor_ins(AT, store_addr, new_val); ++ srll(AT, AT, HeapRegion::LogOfHRGrainBytes); ++ beq(AT, done); ++ ++ ++ // crosses regions, storing NULL? ++ beq(new_val, done); ++ ++ // storing region crossing non-NULL, is card already dirty? 
++ const Register card_addr = tmp; ++ const Register cardtable = tmp2; ++ ++ move(card_addr, store_addr); ++ srll(card_addr, card_addr, CardTableModRefBS::card_shift); ++ // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT ++ // a valid address and therefore is not properly handled by the relocation code. ++ set64(cardtable, (intptr_t)ct->byte_map_base); ++ addl(card_addr, card_addr, cardtable); ++ ++ ldbu(AT, card_addr, 0); ++ add_simm16(AT, AT, -1 * (int)G1SATBCardTableModRefBS::g1_young_card_val()); ++ beq(AT, done); ++ ++ memb(); ++ ldbu(AT, card_addr, 0); ++ add_simm16(AT, AT, -1 * (int)(int)CardTableModRefBS::dirty_card_val()); ++ beq(AT, done); ++ ++ ++ // storing a region crossing, non-NULL oop, card is clean. ++ // dirty card and log. ++ move(AT, (int)CardTableModRefBS::dirty_card_val()); ++ stb(AT, card_addr, 0); ++ ++ ldw_signed(AT, queue_index); ++ beq(AT, runtime); ++ add_simm16(AT, AT, -1 * wordSize); ++ stw(AT, queue_index); ++ ldl(tmp2, buffer); ++ ldl(AT, queue_index); ++ addl(tmp2, tmp2, AT); ++ stl(card_addr, tmp2, 0); ++ beq(R0, done); ++ ++ BIND(runtime); ++ // save the live input values ++ push(store_addr); ++ push(new_val); ++ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, S2thread); ++ pop(new_val); ++ pop(store_addr); ++ ++ BIND(done); ++} ++ ++#endif // INCLUDE_ALL_GCS ++////////////////////////////////////////////////////////////////////////////////// ++ ++ ++void MacroAssembler::store_check(Register obj) { ++ // Does a store check for the oop in register obj. The content of ++ // register obj is destroyed afterwards. 
++ store_check_part_1(obj); ++ store_check_part_2(obj); ++} ++ ++void MacroAssembler::store_check(Register obj, Address dst) { ++ store_check(obj); ++} ++ ++ ++// split the store check operation so that other instructions can be scheduled inbetween ++void MacroAssembler::store_check_part_1(Register obj) { ++ BarrierSet* bs = Universe::heap()->barrier_set(); ++ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); ++ srll(obj, obj, CardTableModRefBS::card_shift); ++} ++ ++void MacroAssembler::store_check_part_2(Register obj) { ++ BarrierSet* bs = Universe::heap()->barrier_set(); ++ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); ++ CardTableModRefBS* ct = (CardTableModRefBS*)bs; ++ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); ++ ++ set64(AT, (long)ct->byte_map_base); ++ addl(AT, AT, obj); ++ if (UseConcMarkSweepGC) ++ if(UseWmemb) ++ wmemb(); ++ else ++ memb(); ++ stb(R0, AT, 0); ++} ++ ++// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
++void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, ++ Register t1, Register t2, Label& slow_case) { ++ assert_different_registers(obj, var_size_in_bytes, t1, t2, AT); ++ ++ Register end = t2; ++ Register thread = S2thread; ++ verify_tlab(t1, t2);//blows t1&t2 ++ ++ ld_ptr(obj, thread, in_bytes(JavaThread::tlab_top_offset())); ++ ++ if (var_size_in_bytes == NOREG) { ++ set64(AT, con_size_in_bytes); ++ addl(end, obj, AT); ++ } else { ++ addl(end, obj, var_size_in_bytes); ++ } ++ ++ ld_ptr(AT, thread, in_bytes(JavaThread::tlab_end_offset())); ++ cmpult(AT, AT, end); ++ bne(AT, slow_case); ++ ++ ++ // update the tlab top pointer ++ st_ptr(end, thread, in_bytes(JavaThread::tlab_top_offset())); ++ ++ verify_tlab(t1, t2); ++} ++ ++// Defines obj, preserves var_size_in_bytes ++void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes, ++ Register t1, Register t2, Label& slow_case) { ++ assert_different_registers(obj, var_size_in_bytes, t1, AT); ++ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { ++ // No allocation in the shared eden. ++ b_far(slow_case); ++ } else { ++ ++ Address heap_top(t1); ++ li(t1, (long)Universe::heap()->top_addr()); ++ ld_ptr(obj, heap_top); ++ ++ Register end = t2; ++ Label retry; ++ ++ BIND(retry); ++ if (var_size_in_bytes == NOREG) { ++ set64(AT, con_size_in_bytes); ++ addl(end, obj, AT); ++ } else { ++ addl(end, obj, var_size_in_bytes); ++ } ++ // if end < obj then we wrapped around => object too long => slow case ++ cmpult(AT, end, obj); ++ bne(AT, slow_case); ++ ++ li(AT, (long)Universe::heap()->end_addr()); ++ ld_ptr(AT, AT, 0); ++ cmpult(AT, AT, end); ++ bne(AT, slow_case); ++ // Compare obj with the top addr, and if still equal, store the new top addr in ++ // end at the address of the top addr pointer. Sets ZF if was equal, and clears ++ // it otherwise. Use lock prefix for atomicity on MPs. 
++ //if (os::is_MP()) { ++ // memb(); ++ //} ++ ++ // if someone beat us on the allocation, try again, otherwise continue ++ cmpxchg(end, heap_top, obj); ++ beq(AT, retry); ++ } ++} ++ ++// C2 doesn't invoke this one. ++void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) { ++ Register top = T0; ++ Register t1 = T1; ++ Register t2 = T12; ++ Register t3 = T3; ++ Register thread_reg = T11; ++ assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ T2, A4); ++ Label do_refill, discard_tlab; ++ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { ++ // No allocation in the shared eden. ++ beq(R0, slow_case); ++ } ++ ++ get_thread(thread_reg); ++ ++ ldl(top, thread_reg, in_bytes(JavaThread::tlab_top_offset())); ++ ldl(t1, thread_reg, in_bytes(JavaThread::tlab_end_offset())); ++ ++ // calculate amount of free space ++ subl(t1, t1, top); ++ srll(t1, t1, LogHeapWordSize); ++ ++ // Retain tlab and allocate object in shared space if ++ // the amount free in the tlab is too large to discard. 
++ ldl(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); ++ cmplt(AT, t2, t1); ++ beq(AT, discard_tlab); ++ ++ // Retain ++ add_simm16(t2, t2, ThreadLocalAllocBuffer::refill_waste_limit_increment()); ++ stl(t2, thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())); ++ ++ if (TLABStats) { ++ // increment number of slow_allocations ++ ldw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())); ++ addl(AT, AT, 1); ++ stw(AT, thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())); ++ } ++ beq(R0, try_eden); ++ ++ BIND(discard_tlab); ++ if (TLABStats) { ++ // increment number of refills ++ ldw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())); ++ addl(AT, AT, 1); ++ stw(AT, thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())); ++ // accumulate wastage -- t1 is amount free in tlab ++ ldw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); ++ addl(AT, AT, t1); ++ stw(AT, thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())); ++ } ++ ++ // if tlab is currently allocated (top or end != null) then ++ // fill [top, end + alignment_reserve) with array object ++ beq(top, do_refill); ++ ++ // set up the mark word ++ li(AT, (long)markOopDesc::prototype()->copy_set_hash(0x2)); ++ stl(AT, top, oopDesc::mark_offset_in_bytes()); ++ ++ // set the length to the remaining space ++ add_simm16(t1, t1, - typeArrayOopDesc::header_size(T_INT)); ++ add_simm16(t1, t1, ThreadLocalAllocBuffer::alignment_reserve()); ++ slll(t1, t1, log2_intptr(HeapWordSize/sizeof(jint))); ++ stw(t1, top, arrayOopDesc::length_offset_in_bytes()); ++ ++ // set klass to intArrayKlass ++ li(AT, (intptr_t)Universe::intArrayKlassObj_addr()); ++ ldl(t1,AT,0); ++ //st_ptr(t1, top, oopDesc::klass_offset_in_bytes()); ++ store_klass(top, t1); ++ ++ ldl(t1, thread_reg, in_bytes(JavaThread::tlab_start_offset())); ++ subl(t1, top, t1); ++ incr_allocated_bytes(thread_reg, t1, 0); ++ ++ // refill the 
tlab with an eden allocation ++ BIND(do_refill); ++ ldl(t1, thread_reg, in_bytes(JavaThread::tlab_size_offset())); ++ slll(t1, t1, LogHeapWordSize); ++ // add object_size ?? ++ eden_allocate(top, t1, 0, t2, t3, slow_case); ++ ++ // Check that t1 was preserved in eden_allocate. ++#ifdef ASSERT ++ if (UseTLAB) { ++ Label ok; ++ assert_different_registers(thread_reg, t1); ++ ldl(AT, thread_reg, in_bytes(JavaThread::tlab_size_offset())); ++ slll(AT, AT, LogHeapWordSize); ++ beq(AT, t1, ok); ++ stop("assert(t1 != tlab size)"); ++ should_not_reach_here(); ++ ++ BIND(ok); ++ } ++#endif ++ stl(top, thread_reg, in_bytes(JavaThread::tlab_start_offset())); ++ stl(top, thread_reg, in_bytes(JavaThread::tlab_top_offset())); ++ addl(top, top, t1); ++ add_simm16(top, top, - ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); ++ stl(top, thread_reg, in_bytes(JavaThread::tlab_end_offset())); ++ verify_tlab(t1, t2); ++ beq(R0, retry); ++} ++ ++/** ++ * Emits code to update CRC-32 with a byte value according to constants in table ++ * ++ * @param [in,out]crc Register containing the crc. ++ * @param [in]val Register containing the byte to fold into the CRC. ++ * @param [in]table Register containing the table of crc constants. 
++ * ++ * uint32_t crc; ++ * val = crc_table[(val ^ crc) & 0xFF]; ++ * crc = val ^ (crc >> 8); ++ * ++ */ ++void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { ++ xor_ins(val, crc, val); ++ and_imm8(val, val, 0xFF); ++ srll(crc, crc, 8); // unsigned shift ++// zapnot(crc, crc, 0xF); ++ ++ slll(AT, val, Address::times_4); ++ addl(AT, table, AT); ++ ldw(AT, AT, 0); ++ zapnot(AT, AT, 0xF); ++ xor_ins(crc, AT, crc); ++} ++ ++/** ++ * @param crc register containing existing CRC (32-bit) ++ * @param buf register pointing to input byte buffer (byte*) ++ * @param len register containing number of bytes ++ * @param tmp scratch register ++ */ ++void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register tmp, Register tmp3) { ++ assert_different_registers(crc, buf, len, tmp, tmp3, V0); ++ Label L_begin_loop, L_aligned, CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, L_exit; ++ ++ ornot(crc, R0, crc); ++ if(SolveAlignment) { ++ beq(len, L_exit); ++ BIND(L_begin_loop); ++ and_imm8(AT, buf, 7); ++ beq(AT, L_aligned); ++ ldbu_a(tmp, 1, buf); ++ subl(len, len, 1); ++ crc32b(crc, crc, tmp); ++ bgt(len, L_begin_loop); ++ beq(R0, L_exit); ++ } ++ ++ BIND (L_aligned); ++ subl(len, len, 64); ++ bge(len, CRC_by64_loop); ++ addl(len, len, 64-4); ++ bge(len, CRC_by4_loop); ++ addl(len, len, 4); ++ bgt(len, CRC_by1_loop); ++ beq(R0, L_exit); ++ ++ BIND(CRC_by4_loop); ++ ldw_a(tmp, 4, buf); ++ subl(len, len, 4); ++ crc32w(crc, crc, tmp); ++ bge(len, CRC_by4_loop); ++ addl(len, len, 4); ++ ble(len, L_exit); ++ BIND(CRC_by1_loop); ++ ldbu_a(tmp, 1, buf); ++ subl(len, len, 1); ++ crc32b(crc, crc, tmp); ++ bgt(len, CRC_by1_loop); ++ beq(R0, L_exit); ++ ++ align(CodeEntryAlignment); ++ BIND(CRC_by64_loop); ++ subl(len, len, 64); ++ ldl_a(tmp, 8, buf); ++ ldl_a(tmp3, 8, buf); ++ crc32l(crc, crc, tmp); ++ crc32l(crc, crc, tmp3); ++ ldl_a(tmp, 8, buf); ++ ldl_a(tmp3, 8, buf); ++ crc32l(crc, crc, tmp); ++ crc32l(crc, crc, tmp3); ++ ldl_a(tmp, 8, 
buf); ++ ldl_a(tmp3, 8, buf); ++ crc32l(crc, crc, tmp); ++ crc32l(crc, crc, tmp3); ++ ldl_a(tmp, 8, buf); ++ ldl_a(tmp3, 8, buf); ++ crc32l(crc, crc, tmp); ++ crc32l(crc, crc, tmp3); ++ bge(len, CRC_by64_loop); ++ addl(len, len, 64-4); ++ bge(len, CRC_by4_loop); ++ addl(len, len, 4); ++ bgt(len, CRC_by1_loop); ++ BIND(L_exit); ++ ornot(crc, R0, crc); ++ ++ move(V0, crc); ++} ++ ++void MacroAssembler::incr_allocated_bytes(Register thread, ++ Register var_size_in_bytes, ++ int con_size_in_bytes, ++ Register t1) { ++ if (!thread->is_valid()) { ++ thread = S2thread; ++ } ++ ++ ld_ptr(AT, thread, in_bytes(JavaThread::allocated_bytes_offset())); ++ if (var_size_in_bytes->is_valid()) { ++ addl(AT, AT, var_size_in_bytes); ++ } else { ++ add_simm16(AT, AT, con_size_in_bytes); ++ } ++ st_ptr(AT, thread, in_bytes(JavaThread::allocated_bytes_offset())); ++} ++ ++static const double pi_4 = 0.7853981633974483; ++ ++// must get argument(a double) in F16/F17 ++//void MacroAssembler::trigfunc(char trig, bool preserve_cpu_regs, int num_fpu_regs_in_use) { ++//We need to preseve the register which maybe modified during the Call ++void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) { ++//save all modified register here ++ pushad(); ++//we should preserve the stack space before we call ++ add_simm16(SP, SP, -wordSize * 2); ++ switch (trig){ ++ case 's' : ++ call( CAST_FROM_FN_PTR(address, SharedRuntime::dsin), relocInfo::runtime_call_type ); ++ break; ++ case 'c': ++ call( CAST_FROM_FN_PTR(address, SharedRuntime::dcos), relocInfo::runtime_call_type ); ++ break; ++ case 't': ++ call( CAST_FROM_FN_PTR(address, SharedRuntime::dtan), relocInfo::runtime_call_type ); ++ break; ++ default:assert (false, "bad intrinsic"); ++ break; ++ } ++ ++ add_simm16(SP, SP, wordSize * 2); ++ popad(); ++} ++ ++void MacroAssembler::li(Register rd, long imm) { ++ int32_t lsb32 = (int32_t) (imm); ++ int32_t msb32 = (int32_t) ((imm - lsb32) >> 32); ++ int16_t msb_h = (msb32-(int16_t)msb32) >> 16; 
++ int16_t msb_l = (int16_t)msb32; ++ int16_t lsb_h = (lsb32-(int16_t)lsb32) >> 16; ++ int16_t lsb_l = (int16_t)lsb32; ++ ++ if(msb_h == 0) { ++ ldi(rd, R0, msb_l); ++ } else { ++ ldih(rd, R0, msb_h); ++ if(msb_l != 0) ++ ldi(rd, rd, msb_l); ++ } ++ slll(rd, rd, 32); ++ if( ((int)lsb_h == -32768) && (lsb_l < 0) ) { ++ ldih(rd, rd, 0x4000); ++ ldih(rd, rd, 0x4000); ++ ldi(rd, rd, lsb_l); ++ } else { ++ ldih(rd, rd, lsb_h); ++ ldi(rd, rd, lsb_l); ++ } ++} ++ ++ ++void MacroAssembler::boundary_test(FloatRegister ft, Register res){ ++ Register tmp1 = AT; ++ Register tmp2 = GP; ++ fimovd(tmp1, ft); ++ slll(tmp2, tmp1, 0x1); ++ srll(tmp2, tmp2, 53); ++ ldi(tmp1, R0, 2047); ++ subl(res, tmp2, tmp1); ++} ++ ++void MacroAssembler::set64(Register d, jlong value) { ++ ++ int32_t lo = (int32_t) (value); ++ int32_t hi = (int32_t) ((value - lo) >> 32); ++ ++ int16_t lo_h16 = (lo - (int16_t)(lo))>>16; ++ int16_t lo_l16 = (int16_t)(lo); ++ int16_t hi_h16 = (hi - (int16_t)(hi))>>16; ++ int16_t hi_l16 = (int16_t)(hi); ++ ++ if ( is_simm16(value) ) { ++ ldi(d, R0, value); ++ } else if ( hi != 0 ) { ++ if ( is_simm16(hi) ) { ++ ldi(d, R0, hi); ++ } else { ++ ldih(d, R0, hi_h16); ++ if (hi_l16 != 0) ++ ldi(d, d, hi_l16); ++ } ++ slll(d, d, 32); ++ if ( lo != 0 ) { ++ if ( ((int)lo_h16 == -32768) && ((int)lo_l16 < 0)) { ++ // original val was in range 0x7FFF8000..0x7FFFFFFF ++ ldih(d, d, 0x4000); ++ ldih(d, d, 0x4000); ++ if (lo_l16 != 0) ++ ldi(d, d, lo_l16); ++ } else { ++ ldih(d, d, lo_h16); ++ if (lo_l16 != 0) ++ ldi(d, d, lo_l16); ++ } ++ } ++ } else if ( (hi == 0) && (lo != 0) ) { ++ if ( ((int)lo_h16 == -32768) && ((int)lo_l16 < 0)) { ++ // original val was in range 0x7FFF8000..0x7FFFFFFF ++ /* ldih(d, R0, lo_h16); ++ * ldi(d, d, lo_l16); ++ * addw(d, 0, d); */ ++ ldih(d, R0, 0x4000); ++ ldih(d, d, 0x4000); ++ if (lo_l16 != 0) ++ ldi(d, d, lo_l16); ++ } else { ++ ldih(d, R0, lo_h16); ++ if (lo_l16 != 0) ++ ldi(d, d, lo_l16); ++ } ++ } else { ++ tty->print_cr("value = 0x%x", 
value); ++ guarantee(false, "Not supported yet in set64!"); ++ } ++} ++ ++ ++int MacroAssembler::insts_for_set64(jlong value) { ++ ++ int count = 0; ++ ++ int32_t lo = (int32_t) (value); ++ int32_t hi = (int32_t) ((value - lo) >> 32); ++ ++ int16_t lo_h16 = (lo - (int16_t)(lo))>>16; ++ int16_t lo_l16 = (int16_t)(lo); ++ int16_t hi_h16 = (hi - (int16_t)(hi))>>16; ++ int16_t hi_l16 = (int16_t)(hi); ++ ++ if ( is_simm16(value) ) { ++ count += 1; ++ } else if ( hi != 0 ) { ++ if ( is_simm16(hi) ) { ++ count += 1; ++ } else { ++ count += 1; ++ if (hi_l16 != 0) ++ count += 1; ++ } ++ count += 1; ++ if ( lo != 0 ) { ++ if ( ((int)lo_h16 == -32768) && ((int)lo_l16 < 0)) { ++ count += 2; ++ if (lo_l16 != 0) ++ count += 1; ++ } else { ++ count += 1; ++ if (lo_l16 != 0) ++ count += 1; ++ } ++ } ++ } else if ( (hi == 0) && (lo != 0) ) { ++ if ( ((int)lo_h16 == -32768) && ((int)lo_l16 < 0)) { ++ count += 2; ++ if (lo_l16 != 0) ++ count += 1; ++ } else { ++ count += 1; ++ if (lo_l16 != 0) ++ count += 1; ++ } ++ } else { ++ guarantee(false, "Not supported yet in insts_for_set64!"); ++ } ++ ++ return count; ++} ++ ++ ++void MacroAssembler::patchable_set48(Register d, jlong value) { ++//TODO: optimize it ++ li48(d, value); ++} ++ ++void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { ++ assert(UseCompressedClassPointers, "should only be used for compressed header"); ++ assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); ++ ++ int klass_index = oop_recorder()->find_index(k); ++ RelocationHolder rspec = metadata_Relocation::spec(klass_index); ++ long narrowKlass = (long)Klass::encode_klass(k); ++ ++ relocate(rspec, Assembler::narrow_oop_operand); ++ patchable_set48(dst, narrowKlass); ++} ++ ++ ++void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { ++ assert(UseCompressedOops, "should only be used for compressed header"); ++ assert(oop_recorder() != NULL, "this assembler needs an OopRecorder"); ++ ++ int oop_index = 
oop_recorder()->find_index(obj); ++ RelocationHolder rspec = oop_Relocation::spec(oop_index); ++ ++ relocate(rspec, Assembler::narrow_oop_operand); ++ patchable_set48(dst, oop_index); ++} ++ ++void MacroAssembler::li64(Register rd, long imm) { ++ //Unimplemented(); ++ ++ int32_t lsb32 = (int32_t) (imm); ++ int32_t msb32 = (int32_t) ((imm - lsb32) >> 32); ++ int16_t msb_h = (msb32-(int16_t)msb32) >> 16; ++ int16_t msb_l = (int16_t)msb32; ++ int16_t lsb_h = (lsb32-(int16_t)lsb32) >> 16; ++ int16_t lsb_l = (int16_t)lsb32; ++ assert((int)lsb_h != (-32768), "wrong number in li64"); ++ ++ ldih(rd, R0, msb_h); ++ ldi(rd, rd, msb_l); ++ slll(rd, rd, 32); ++ ldih(rd, rd, lsb_h); ++ ldi(rd, rd, lsb_l); ++} ++ ++void MacroAssembler::li48(Register rd, long imm) { ++ assert(is_simm16(imm >> 32), "Not a 48-bit address"); ++ ++ int16_t msb_l, lsb_h, lsb_l; ++ NativeInstruction::imm48_split(imm, msb_l, lsb_h, lsb_l); ++ ++ ldi(rd, R0, msb_l); ++ slll(rd, rd, 32); ++ ldih(rd, rd, lsb_h); ++ ldi(rd, rd, lsb_l); ++} ++ ++void MacroAssembler::verify_oop(Register reg, const char* s) { ++ if (!VerifyOops) return; ++ const char * b = NULL; ++ stringStream ss; ++ ss.print("verify_oop: %s: %s", reg->name(), s); ++ b = code_string(ss.as_string()); ++ pushad(); ++ move(A1, reg); ++ li(A0, (long)b); ++ li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address()); ++ ldl(T12, AT, 0); ++ call(T12); ++ popad(); ++} ++ ++ ++void MacroAssembler::verify_oop_addr(Address addr, const char* s) { ++ if (!VerifyOops) { ++ return; ++ } ++ // Pass register number to verify_oop_subroutine ++ const char * b = NULL; ++ stringStream ss; ++ ss.print("verify_oop_addr: %s", s); ++ b = code_string(ss.as_string()); ++ ++ st_ptr(T0, SP, - wordSize); ++ st_ptr(T1, SP, - 2*wordSize); ++ st_ptr(RA, SP, - 3*wordSize); ++ st_ptr(A0, SP, - 4*wordSize); ++ st_ptr(A1, SP, - 5*wordSize); ++ st_ptr(AT, SP, - 6*wordSize); ++ st_ptr(T12, SP, - 7*wordSize); ++ ld_ptr(A1, addr); // addr may use SP, so load from it before 
change SP ++ add_simm16(SP, SP, - 7 * wordSize); ++ ++ li(A0, (long)b); ++ // call indirectly to solve generation ordering problem ++ li(AT, (long)StubRoutines::verify_oop_subroutine_entry_address()); ++ ld_ptr(T12, AT, 0); ++ call(T12); ++ ld_ptr(T0, SP, 6* wordSize); ++ ld_ptr(T1, SP, 5* wordSize); ++ ld_ptr(RA, SP, 4* wordSize); ++ ld_ptr(A0, SP, 3* wordSize); ++ ld_ptr(A1, SP, 2* wordSize); ++ ld_ptr(AT, SP, 1* wordSize); ++ ld_ptr(T12, SP, 0* wordSize); ++ add_simm16(SP, SP, 7 * wordSize); ++} ++ ++// used registers : T0, T1 ++void MacroAssembler::verify_oop_subroutine() { ++ // RA: ra ++ // A0: char* error message ++ // A1: oop object to verify ++ ++ Label exit, error; ++ // increment counter ++ li(T0, (long)StubRoutines::verify_oop_count_addr()); ++ ldw(AT, T0, 0); ++ addl(AT, AT, 1); ++ stw(AT, T0, 0); ++ ++ // make sure object is 'reasonable' ++ beq(A1, exit); // if obj is NULL it is ok ++ ++ // Check if the oop is in the right area of memory ++ //const int oop_mask = Universe::verify_oop_mask(); ++ //const int oop_bits = Universe::verify_oop_bits(); ++ const uintptr_t oop_mask = Universe::verify_oop_mask(); ++ const uintptr_t oop_bits = Universe::verify_oop_bits(); ++ if (Assembler::is_simm8(oop_mask)) { ++ and_imm8(T0, A1, oop_mask); ++ } else { ++ li(AT, oop_mask); ++ and_reg(T0, A1, AT); ++ } ++ if (Assembler::is_simm8(oop_bits)) { ++ cmpeq(AT, T0, oop_bits); ++ beq(AT, offset(target(error))); ++ } else { ++ li(AT, oop_bits); ++ bne(T0, AT, error); ++ } ++ ++ // make sure klass is 'reasonable' ++ //add for compressedoops ++ reinit_heapbase(); ++ //add for compressedoops ++ load_klass(T0, A1); ++ beq(T0, error); // if klass is NULL it is broken ++ // return if everything seems ok ++ BIND(exit); ++ ++ ret(); ++ ++ // handle errors ++ BIND(error); ++ pushad(); ++ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type); ++ popad(); ++ ret(); ++} ++ ++void MacroAssembler::verify_tlab(Register t1, Register t2) { ++#ifdef ASSERT 
++ assert_different_registers(t1, t2, AT); ++ if (UseTLAB && VerifyOops) { ++ Label next, ok; ++ ++ get_thread(t1); ++ ++ ld_ptr(t2, t1, in_bytes(JavaThread::tlab_top_offset())); ++ ld_ptr(AT, t1, in_bytes(JavaThread::tlab_start_offset())); ++ cmpult(AT, t2, AT); ++ beq(AT, next); ++ ++ stop("assert(top >= start)"); ++ ++ BIND(next); ++ ld_ptr(AT, t1, in_bytes(JavaThread::tlab_end_offset())); ++ cmpult(AT, AT, t2); ++ beq(AT, ok); ++ ++ stop("assert(top <= end)"); ++ ++ BIND(ok); ++ ++ } ++#endif ++} ++ ++RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, ++ Register tmp, ++ int offset) { ++ intptr_t value = *delayed_value_addr; ++ if (value != 0) ++ return RegisterOrConstant(value + offset); ++ AddressLiteral a(delayed_value_addr); ++ // load indirectly to solve generation ordering problem ++ // movptr(tmp, ExternalAddress((address) delayed_value_addr)); ++ // ld(tmp, a); ++ if (offset != 0) ++ add_simm16(tmp,tmp, offset); ++ ++ return RegisterOrConstant(tmp); ++} ++ ++void MacroAssembler::hswap(Register reg) { ++ if (UseSW8A) { ++ revbh(reg, reg); ++ sexth(reg, reg); ++ } else { ++ /* The following two version's are all OK! */ ++ srll(AT, reg, 8); ++ slll(reg, reg, 24); ++ addw(reg, reg, 0); ++ sral(reg, reg, 16); ++ or_ins(reg, reg, AT); ++ } ++} ++ ++void MacroAssembler::huswap(Register reg) { ++ if (UseSW8A) { ++ revbh(reg, reg); ++ } else { ++ /* The following two version's are all OK! 
*/ ++ srll(AT, reg, 8); ++ slll(reg, reg, 8); ++ zapnot(reg, reg, 0x2); ++ or_ins(reg, reg, AT); ++ } ++} ++ ++// something funny to do this will only one more register AT ++// 32 bits ++void MacroAssembler::swap(Register reg) { ++ if (UseSW8A) { ++ revbw(reg, reg); ++ } else { ++ assert_different_registers(reg, AT); ++ zapnot (reg, reg, 0xf); ++ srll(AT, reg, 8); ++ slll(reg, reg, 24); ++ or_ins(reg, reg, AT); ++ srll(AT, AT, 16); ++ xor_ins(AT, AT, reg); ++ and_imm8(AT, AT, 0xff); ++ xor_ins(reg, reg, AT); ++ slll(AT, AT, 16); ++ xor_ins(reg, reg, AT); ++ addw(reg, reg, 0x0); ++ } ++} ++ ++void MacroAssembler::saveTRegisters(){ ++ Register regs[] = {T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, A0, A1, A2, A3, A4, A5, GP, V0, AT}; ++ int len = sizeof(regs) / sizeof(regs[0]); ++ ++ subl(SP, SP, 176); ++ for (int i = 0; i < len; i++) { ++ stl(regs[i], SP, 8*i); ++ } ++} ++ ++void MacroAssembler::restoreTRegisters(){ ++ Register regs[] = {T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, A0, A1, A2, A3, A4, A5, GP, V0, AT}; ++ int len = sizeof(regs) / sizeof(regs[0]); ++ ++ for (int i = (len - 1); i >= 0; i--) { ++ ldl(regs[i], SP, 8*i); ++ } ++ addl(SP, SP, 176); ++} ++ ++void MacroAssembler::cmpxchg32(Register x_reg, Address dest, Register c_reg) { ++ assert_different_registers(AT, GP, T10, x_reg, dest.base()); ++ assert_different_registers(AT, GP, T10, c_reg, dest.base()); ++ SizedScope sc(this, 60); ++ Label again, nequal, done; ++ if (UseSW8A) { ++ if (UseCAS) { ++ move(GP, x_reg); ++ if (dest.disp() != 0) { ++ ldi(AT, dest.base(), dest.disp()); ++ casw(c_reg, AT, GP); ++ } else { ++ casw(c_reg, dest.base(), GP); ++ } ++ cmpeq(AT, c_reg, GP); ++ move(c_reg, GP); ++ } else { ++ BIND(again); ++ lldw(AT, dest); ++ bne(AT, c_reg, nequal); ++ move(AT, x_reg); ++ lstw(AT, dest); ++ beq(AT, again); ++ beq(R0, done); ++ // not xchged ++ BIND(nequal); ++ move(c_reg, AT); ++ move(AT, R0); ++ BIND(done); ++ } ++ } else { ++ subl(SP, SP, 16); ++ stl(T10, SP, 0); ++ ++ 
BIND(again); ++ lldw(T10, dest.base(), dest.disp()); ++ cmpeq(GP, T10, c_reg); ++ wr_f(GP); ++ move(AT, x_reg); ++ align(8); ++ lstw(AT, dest.base(), dest.disp()); ++ rd_f(AT); ++ beq(GP, nequal); ++ beq(AT, again); ++ // not xchged ++ BIND(nequal); ++ move(c_reg, T10); ++ ++ ldl(T10, SP, 0); ++ addl(SP, SP, 16); ++ } ++} ++ ++void MacroAssembler::cmpxchg(Register x_reg, Address dest, Register c_reg) { ++ assert_different_registers(AT, GP, T10, x_reg, dest.base()); ++ assert_different_registers(AT, GP, T10, c_reg, dest.base()); ++ SizedScope sc(this, 60); ++ Label again, nequal, done; ++ if (UseSW8A) { ++ if (UseCAS) { ++ move(GP, x_reg); ++ if (dest.disp() != 0) { ++ ldi(AT, dest.base(), dest.disp()); ++ casl(c_reg, AT, GP); ++ } else { ++ casl(c_reg, dest.base(), GP); ++ } ++ cmpeq(AT, c_reg, GP); ++ move(c_reg, GP); ++ } else { ++ BIND(again); ++ lldl(AT, dest); ++ bne(AT, c_reg, nequal); ++ move(AT, x_reg); ++ lstl(AT, dest); ++ beq(AT, again); ++ beq(R0, done); ++ // not xchged ++ BIND(nequal); ++ move(c_reg, AT); ++ move(AT, R0); ++ BIND(done); ++ } ++ } else { ++ subl(SP, SP, 16); ++ stl(T10, SP, 0); ++ ++ BIND(again); ++ lldl(T10, dest.base(), dest.disp()); ++ cmpeq(GP, T10, c_reg); ++ wr_f(GP); ++ move(AT, x_reg); ++ align(8); ++ lstl(AT, dest.base(), dest.disp()); ++ rd_f(AT); ++ beq(GP, nequal); ++ beq(AT, again); ++ // not xchged ++ BIND(nequal); ++ move(c_reg, T10); ++ ++ ldl(T10, SP, 0); ++ addl(SP, SP, 16); ++ } ++} ++ ++// be sure the three register is different ++void MacroAssembler::rem_s(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) { ++ assert_different_registers(tmp, fs, ft); ++ div_s(tmp, fs, ft); ++ trunc_l_s(tmp, tmp); ++ fcvtS2L(tmp, tmp); ++ mul_s(tmp, tmp, ft); ++ sub_s(fd, fs, tmp); ++} ++ ++// be sure the three register is different ++void MacroAssembler::rem_d(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp) { ++ assert_different_registers(tmp, fs, ft); ++ div_d(tmp, fs, ft); ++ 
trunc_l_d(tmp, tmp); ++ fcvtD2L(tmp, tmp); ++ mul_d(tmp, tmp, ft); ++ sub_d(fd, fs, tmp); ++} ++ ++// Fast_Lock and Fast_Unlock used by C2 ++ ++// Because the transitions from emitted code to the runtime ++// monitorenter/exit helper stubs are so slow it's critical that ++// we inline both the stack-locking fast-path and the inflated fast path. ++// ++// See also: cmpFastLock and cmpFastUnlock. ++// ++// What follows is a specialized inline transliteration of the code ++// in slow_enter() and slow_exit(). If we're concerned about I$ bloat ++// another option would be to emit TrySlowEnter and TrySlowExit methods ++// at startup-time. These methods would accept arguments as ++// (Obj, Self, box, Scratch) and return success-failure ++// indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply ++// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit. ++// In practice, however, the # of lock sites is bounded and is usually small. ++// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer ++// if the processor uses simple bimodal branch predictors keyed by EIP ++// Since the helper routines would be called from multiple synchronization ++// sites. ++// ++// An even better approach would be write "MonitorEnter()" and "MonitorExit()" ++// in java - using j.u.c and unsafe - and just bind the lock and unlock sites ++// to those specialized methods. That'd give us a mostly platform-independent ++// implementation that the JITs could optimize and inline at their pleasure. ++// Done correctly, the only time we'd need to cross to native could would be ++// to park() or unpark() threads. We'd also need a few more unsafe operators ++// to (a) prevent compiler-JIT reordering of non-volatile accesses, and ++// (b) explicit barriers or fence operations. ++// ++// TODO: ++// ++// * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr). 
++// This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals. ++// Given TLAB allocation, Self is usually manifested in a register, so passing it into ++// the lock operators would typically be faster than reifying Self. ++// ++// * Ideally I'd define the primitives as: ++// fast_lock (nax Obj, nax box, tmp, nax scr) where box, tmp and scr are KILLED. ++// fast_unlock (nax Obj, box, nax tmp) where box and tmp are KILLED ++// Unfortunately ADLC bugs prevent us from expressing the ideal form. ++// Instead, we're stuck with a rather awkward and brittle register assignments below. ++// Furthermore the register assignments are overconstrained, possibly resulting in ++// sub-optimal code near the synchronization site. ++// ++// * Eliminate the sp-proximity tests and just use "== Self" tests instead. ++// Alternately, use a better sp-proximity test. ++// ++// * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value. ++// Either one is sufficient to uniquely identify a thread. ++// TODO: eliminate use of sp in _owner and use get_thread(tr) instead. ++// ++// * Intrinsify notify() and notifyAll() for the common cases where the ++// object is locked by the calling thread but the waitlist is empty. ++// avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll(). ++// ++// * use jccb and jmpb instead of jcc and jmp to improve code density. ++// But beware of excessive branch density on AMD Opterons. ++// ++// * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success ++// or failure of the fast-path. If the fast-path fails then we pass ++// control to the slow-path, typically in C. In Fast_Lock and ++// Fast_Unlock we often branch to DONE_LABEL, just to find that C2 ++// will emit a conditional branch immediately after the node. ++// So we have branches to branches and lots of ICC.ZF games. ++// Instead, it might be better to have C2 pass a "FailureLabel" ++// into Fast_Lock and Fast_Unlock. 
In the case of success, control ++// will drop through the node. ICC.ZF is undefined at exit. ++// In the case of failure, the node will branch directly to the ++// FailureLabel ++ ++ ++// obj: object to lock ++// box: on-stack box address (displaced header location) - KILLED ++// tmp: tmp -- KILLED ++// scr: tmp -- KILLED ++void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg) { ++ ++ // Ensure the register assignents are disjoint ++ guarantee (objReg != boxReg, "") ; ++ guarantee (objReg != tmpReg, "") ; ++ guarantee (objReg != scrReg, "") ; ++ guarantee (boxReg != tmpReg, "") ; ++ guarantee (boxReg != scrReg, "") ; ++ ++ ++ block_comment("FastLock"); ++ if (PrintBiasedLockingStatistics) { ++ push(tmpReg); ++ atomic_inc32((address)BiasedLocking::total_entry_count_addr(), 1, AT, tmpReg); ++ pop(tmpReg); ++ } ++ ++ if (EmitSync & 1) { ++ move(AT, 0x0); ++ return; ++ } else ++ if (EmitSync & 2) { ++ Label DONE_LABEL ; ++ if (UseBiasedLocking) { ++ // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument. 
++ biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL); ++ } ++ ++ ldl(tmpReg, Address(objReg, 0)) ; // fetch markword ++ or_ins(tmpReg, tmpReg, 0x1); ++ stl(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS ++ ++ cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg ++ bne(AT, DONE_LABEL); ++ ++ // Recursive locking ++ subl(tmpReg, tmpReg, SP); ++ ++ if (Assembler::is_simm16(7 - os::vm_page_size())) { ++ ldi(AT, R0, (7 - os::vm_page_size() )); ++ } else { ++ li(AT, (7 - os::vm_page_size() )); ++ } ++ and_reg(tmpReg, tmpReg, AT); ++ stl(tmpReg, Address(boxReg, 0)); ++ BIND(DONE_LABEL) ; ++ } else { ++ // Possible cases that we'll encounter in fast_lock ++ // ------------------------------------------------ ++ // * Inflated ++ // -- unlocked ++ // -- Locked ++ // = by self ++ // = by other ++ // * biased ++ // -- by Self ++ // -- by other ++ // * neutral ++ // * stack-locked ++ // -- by self ++ // = sp-proximity test hits ++ // = sp-proximity test generates false-negative ++ // -- by other ++ // ++ ++ Label IsInflated, DONE_LABEL, PopDone ; ++ ++ // TODO: optimize away redundant LDs of obj->mark and improve the markword triage ++ // order to reduce the number of conditional branches in the most common cases. ++ // Beware -- there's a subtle invariant that fetch of the markword ++ // at [FETCH], below, will never observe a biased encoding (*101b). ++ // If this invariant is not held we risk exclusion (safety) failure. ++ if (UseBiasedLocking && !UseOptoBiasInlining) { ++ biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL); ++ } ++ ++ ldl(tmpReg, Address(objReg, 0)) ; //Fetch the markword of the object. ++ and_imm8(AT, tmpReg, markOopDesc::monitor_value); ++ bne(AT, IsInflated); // inflated vs stack-locked|neutral|bias ++ ++ // Attempt stack-locking ... 
++ or_ins(tmpReg, tmpReg, markOopDesc::unlocked_value); ++ stl(tmpReg, Address(boxReg, 0)); // Anticipate successful CAS ++ //if (os::is_MP()) { ++ // memb(); ++ //} ++ ++ cmpxchg(boxReg, Address(objReg, 0), tmpReg); // Updates tmpReg ++ //AT == 1: unlocked ++ ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ beq(AT, L); ++ push(T0); ++ push(T1); ++ atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1); ++ pop(T1); ++ pop(T0); ++ BIND(L); ++ } ++ bne(AT, DONE_LABEL); ++ ++ // Recursive locking ++ // The object is stack-locked: markword contains stack pointer to BasicLock. ++ // Locked by current thread if difference with current SP is less than one page. ++ subl(tmpReg, tmpReg, SP); ++ if (Assembler::is_simm16(7 - os::vm_page_size())) { ++ ldi(AT, R0, (7 - os::vm_page_size())); ++ } else { ++ li(AT, 7 - os::vm_page_size() ); ++ } ++ and_reg(tmpReg, tmpReg, AT); ++ stl(tmpReg, Address(boxReg, 0)); ++ if (PrintBiasedLockingStatistics) { ++ Label L; ++ // tmpReg == 0 => BiasedLocking::_fast_path_entry_count++ ++ bne(tmpReg, L); ++ push(T0); ++ push(T1); ++ atomic_inc32((address)BiasedLocking::fast_path_entry_count_addr(), 1, T0, T1); ++ pop(T1); ++ pop(T0); ++ BIND(L); ++ } ++ cmpult(AT, tmpReg, 1); ++ beq(R0, DONE_LABEL) ; ++ ++ BIND(IsInflated) ; ++ // The object's monitor m is unlocked iff m->owner == NULL, ++ // otherwise m->owner may contain a thread or a stack address. ++ ++ // TODO: someday avoid the ST-before-CAS penalty by ++ // relocating (deferring) the following ST. ++ // We should also think about trying a CAS without having ++ // fetched _owner. If the CAS is successful we may ++ // avoid an RTO->RTS upgrade on the $line. 
++ // Without cast to int32_t a movptr will destroy r10 which is typically obj ++ if (Assembler::is_simm16((int32_t)intptr_t(markOopDesc::unused_mark()))) { ++ ldi(AT, R0, (int32_t)intptr_t(markOopDesc::unused_mark())); ++ } else { ++ li(AT, (int32_t)intptr_t(markOopDesc::unused_mark())); ++ } ++ stl(AT, Address(boxReg, 0)); ++ ++ move(boxReg, tmpReg) ; ++ ldl(tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; ++ // if (m->owner != 0) => AT = 0, goto slow path. ++ move(AT, R0); ++ bne(tmpReg, DONE_LABEL); ++ ++ // It's inflated and appears unlocke ++ //if (os::is_MP()) { ++ // memb(); ++ //} ++ cmpxchg(S2thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), tmpReg) ; ++ // Intentional fall-through into DONE_LABEL ... ++ ++ ++ // DONE_LABEL is a hot target - we'd really like to place it at the ++ // start of cache line by padding with NOPs. ++ // See the AMD and Intel software optimization manuals for the ++ // most efficient "long" NOP encodings. ++ // Unfortunately none of our alignment mechanisms suffice. ++ BIND(DONE_LABEL); ++ ++ // At DONE_LABEL the AT is set as follows ... ++ // Fast_Unlock uses the same protocol. ++ // AT == 1 -> Success ++ // AT == 0 -> Failure - force control through the slow-path ++ ++ // Avoid branch-to-branch on AMD processors ++ // This appears to be superstition. ++ if (EmitSync & 32) nop(); ++ ++ } ++} ++ ++// obj: object to unlock ++// box: box address (displaced header location), killed. ++// tmp: killed tmp; cannot be obj nor box. ++// ++// Some commentary on balanced locking: ++// ++// Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites. ++// Methods that don't have provably balanced locking are forced to run in the ++// interpreter - such methods won't be compiled to use fast_lock and fast_unlock. ++// The interpreter provides two properties: ++// I1: At return-time the interpreter automatically and quietly unlocks any ++// objects acquired the current activation (frame). 
Recall that the ++// interpreter maintains an on-stack list of locks currently held by ++// a frame. ++// I2: If a method attempts to unlock an object that is not held by the ++// the frame the interpreter throws IMSX. ++// ++// Lets say A(), which has provably balanced locking, acquires O and then calls B(). ++// B() doesn't have provably balanced locking so it runs in the interpreter. ++// Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O ++// is still locked by A(). ++// ++// The only other source of unbalanced locking would be JNI. The "Java Native Interface: ++// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter ++// should not be unlocked by "normal" java-level locking and vice-versa. The specification ++// doesn't specify what will occur if a program engages in such mixed-mode locking, however. ++ ++void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) { ++ ++ guarantee (objReg != boxReg, "") ; ++ guarantee (objReg != tmpReg, "") ; ++ guarantee (boxReg != tmpReg, "") ; ++ ++ block_comment("FastUnlock"); ++ ++ ++ if (EmitSync & 4) { ++ // Disable - inhibit all inlining. Force control through the slow-path ++ move(AT, 0x0); ++ return; ++ } else ++ if (EmitSync & 8) { ++ Label DONE_LABEL ; ++ if (UseBiasedLocking) { ++ biased_locking_exit(objReg, tmpReg, DONE_LABEL); ++ } ++ // classic stack-locking code ... ++ ldl(tmpReg, Address(boxReg, 0)) ; ++ move(AT, 0x1); // should set 0x1 before branch ++ beq(tmpReg, DONE_LABEL) ; ++ ++ cmpxchg(tmpReg, Address(objReg, 0), boxReg); // Uses EAX which is box ++ BIND(DONE_LABEL); ++ } else { ++ Label DONE_LABEL, Stacked, CheckSucc, Inflated ; ++ ++ // Critically, the biased locking test must have precedence over ++ // and appear before the (box->dhw == 0) recursive stack-lock test. 
++ if (UseBiasedLocking && !UseOptoBiasInlining) { ++ biased_locking_exit(objReg, tmpReg, DONE_LABEL); ++ } ++ ++ ldl(GP, Address(boxReg, 0)) ; // Examine the displaced header ++ addl(AT, R0, 0x1); ++ beq(GP, DONE_LABEL) ; // 0 indicates recursive stack-lock ++ ++ ldl(tmpReg, Address(objReg, 0)) ; // Examine the object's markword ++ and_imm8(AT, tmpReg, markOopDesc::monitor_value) ; // Inflated? ++ beq(AT, Stacked) ; // Inflated? ++ ++ BIND(Inflated) ; ++ // It's inflated. ++ // Despite our balanced locking property we still check that m->_owner == Self ++ // as java routines or native JNI code called by this thread might ++ // have released the lock. ++ // Refer to the comments in synchronizer.cpp for how we might encode extra ++ // state in _succ so we can avoid fetching EntryList|cxq. ++ // ++ // I'd like to add more cases in fast_lock() and fast_unlock() -- ++ // such as recursive enter and exit -- but we have to be wary of ++ // I$ bloat, T$ effects and BP$ effects. ++ // ++ // If there's no contention try a 1-0 exit. That is, exit without ++ // a costly MEMBAR or CAS. See synchronizer.cpp for details on how ++ // we detect and recover from the race that the 1-0 exit admits. ++ // ++ // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier ++ // before it STs null into _owner, releasing the lock. Updates ++ // to data protected by the critical section must be visible before ++ // we drop the lock (and thus before any other thread could acquire ++ // the lock and observe the fields protected by the lock). 
++ // It's inflated ++ ldl(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; ++ xor_ins(boxReg, boxReg, S2thread); ++ ++ ldl(AT, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; ++ or_ins(boxReg, boxReg, AT); ++ ++ move(AT, R0); ++ bne(boxReg, DONE_LABEL); ++ ++ ldl(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; ++ ldl(AT, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; ++ or_ins(boxReg, boxReg, AT); ++ ++ move(AT, R0); ++ bne(boxReg, DONE_LABEL); ++ ++ memb(); ++ stl(R0, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; ++ move(AT, 0x1); ++ beq(R0, DONE_LABEL); ++ ++ BIND (Stacked); ++ ldl(tmpReg, Address(boxReg, 0)) ; ++ //if (os::is_MP()) { memb(); } ++ cmpxchg(tmpReg, Address(objReg, 0), boxReg); ++ ++ if (EmitSync & 65536) { ++ BIND (CheckSucc); ++ } ++ ++ BIND(DONE_LABEL); ++ ++ // Avoid branch to branch on AMD processors ++ if (EmitSync & 32768) { nop(); } ++ } ++} ++ ++void MacroAssembler::align(int modulus) { ++ while (offset() % modulus != 0) nop(); ++} ++ ++void MacroAssembler::verify_FPU(int stack_depth, const char* s) { ++ //Unimplemented(); ++} ++ ++Register caller_saved_registers[] = {V0, T0, T1, T2, T3, T4, T5, T6, T7, FP, A0, A1, A2, A3, A4, A5, T8, T9, T10, T11, RA, T12, AT, GP}; ++Register caller_saved_registers_except_RA[] = {V0, T0, T1, T2, T3, T4, T5, T6, T7, FP, A0, A1, A2, A3, A4, A5, T8, T9, T10, T11, T12, AT, GP}; ++ ++// In SW64, F0~23 are all caller-saved registers ++FloatRegister caller_saved_fpu_registers[] = {F0, F16, F17}; ++ ++//We preserve all caller-saved register ++void MacroAssembler::pushad(){ ++ int i; ++ ++ // Fixed-point registers ++ int len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); ++ add_simm16(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) ++ { ++ stl(caller_saved_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ ++ /* Floating-point registers */ ++ len = sizeof(caller_saved_fpu_registers) / 
sizeof(caller_saved_fpu_registers[0]); ++ add_simm16(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) ++ { ++ fstd(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++}; ++ ++void MacroAssembler::popad(){ ++ int i; ++ ++ /* Floating-point registers */ ++ int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ for (i = 0; i < len; i++) ++ { ++ fldd(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ add_simm16(SP, SP, len * wordSize); ++ ++ /* Fixed-point registers */ ++ len = sizeof(caller_saved_registers) / sizeof(caller_saved_registers[0]); ++ for (i = 0; i < len; i++) ++ { ++ ldl(caller_saved_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ add_simm16(SP, SP, len * wordSize); ++}; ++ ++// We preserve all caller-saved register except V0 ++void MacroAssembler::pushad_except_RA() { ++ int i; ++ ++ // Fixed-point registers ++ int len = sizeof(caller_saved_registers_except_RA) / sizeof(caller_saved_registers_except_RA[0]); ++ add_simm16(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) { ++ stl(caller_saved_registers_except_RA[i], SP, (len - i - 1) * wordSize); ++ } ++ ++ // Floating-point registers ++ len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ add_simm16(SP, SP, -1 * len * wordSize); ++ for (i = 0; i < len; i++) { ++ fstd(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++} ++ ++void MacroAssembler::popad_except_RA() { ++ int i; ++ ++ // Floating-point registers ++ int len = sizeof(caller_saved_fpu_registers) / sizeof(caller_saved_fpu_registers[0]); ++ for (i = 0; i < len; i++) { ++ fldd(caller_saved_fpu_registers[i], SP, (len - i - 1) * wordSize); ++ } ++ add_simm16(SP, SP, len * wordSize); ++ ++ // Fixed-point registers ++ len = sizeof(caller_saved_registers_except_RA) / sizeof(caller_saved_registers_except_RA[0]); ++ for (i = 0; i < len; i++) { ++ ldl(caller_saved_registers_except_RA[i], SP, (len - i - 1) * 
wordSize); ++ } ++ add_simm16(SP, SP, len * wordSize); ++} ++ ++void MacroAssembler::push2(Register reg1, Register reg2) { ++ subl(SP, SP, 16); ++ stl(reg2, SP, 0); ++ stl(reg1, SP, 8); ++} ++ ++void MacroAssembler::pop(Register reg) { ++ if (UseSW8A){ ++ ldl_a(reg, 8, SP); ++ }else { ++ ldl(reg, SP, 0); addl(SP, SP, 8); ++ } ++} ++ ++void MacroAssembler::pop(FloatRegister reg) { ++ if (UseSW8A){ ++ fldd_a(reg, 8, SP); ++ }else { ++ fldd(reg, SP, 0); addl(SP, SP, 8); ++ } ++} ++ ++void MacroAssembler::pop2(Register reg1, Register reg2) { ++ if (UseSW8A){ ++ ldl_a(reg1, 8, SP); ++ ldl_a(reg2, 8, SP); ++ } else { ++ ldl(reg1, SP, 0); ++ ldl(reg2, SP, 8); ++ addl(SP, SP, 16); ++ } ++} ++ ++//for UseCompressedOops Option ++void MacroAssembler::load_klass(Register dst, Register src) { ++ if(UseCompressedClassPointers){ ++ ldw_unsigned(dst, Address(src, oopDesc::klass_offset_in_bytes())); ++ decode_klass_not_null(dst); ++ } else ++ ldl(dst, src, oopDesc::klass_offset_in_bytes()); ++} ++ ++void MacroAssembler::store_klass(Register dst, Register src) { ++ if(UseCompressedClassPointers){ ++ encode_klass_not_null(src); ++ stw(src, dst, oopDesc::klass_offset_in_bytes()); ++ } else { ++ stl(src, dst, oopDesc::klass_offset_in_bytes()); ++ } ++} ++ ++void MacroAssembler::load_prototype_header(Register dst, Register src) { ++ load_klass(dst, src); ++ ldl(dst, Address(dst, Klass::prototype_header_offset())); ++} ++ ++void MacroAssembler::store_klass_gap(Register dst, Register src) { ++ if (UseCompressedClassPointers) { ++ stw(src, dst, oopDesc::klass_gap_offset_in_bytes()); ++ } ++} ++ ++void MacroAssembler::load_heap_oop(Register dst, Address src) { ++ if(UseCompressedOops){ ++ ldw_unsigned(dst, src); ++ decode_heap_oop(dst); ++ } else { ++ ldl(dst, src); ++ } ++} ++ ++void MacroAssembler::store_heap_oop(Address dst, Register src){ ++ if(UseCompressedOops){ ++ assert(!dst.uses(src), "not enough registers"); ++ encode_heap_oop(src); ++ stw(src, dst); ++ } else { ++ stl(src, dst); 
++ } ++} ++ ++void MacroAssembler::store_heap_oop_null(Address dst){ ++ if(UseCompressedOops){ ++ stw(R0, dst); ++ } else { ++ stl(R0, dst); ++ } ++} ++ ++#ifdef ASSERT ++void MacroAssembler::verify_heapbase(const char* msg) { ++ assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); ++ assert (Universe::heap() != NULL, "java heap should be initialized"); ++} ++#endif ++ ++ ++// Algorithm must match oop.inline.hpp encode_heap_oop. ++void MacroAssembler::encode_heap_oop(Register r) { ++#ifdef ASSERT ++ verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?"); ++#endif ++ verify_oop(r, "broken oop in encode_heap_oop"); ++ if (Universe::narrow_oop_base() == NULL) { ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shr(r, LogMinObjAlignmentInBytes); ++ } ++ return; ++ } ++ ++ seleq(r, S5_heapbase, r, r); ++ subl(r, r, S5_heapbase); ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shr(r, LogMinObjAlignmentInBytes); ++ } ++} ++ ++void MacroAssembler::encode_heap_oop(Register dst, Register src) { ++#ifdef ASSERT ++ verify_heapbase("MacroAssembler::encode_heap_oop:heap base corrupted?"); ++#endif ++ verify_oop(src, "broken oop in encode_heap_oop"); ++ if (Universe::narrow_oop_base() == NULL) { ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ srll(dst, src, LogMinObjAlignmentInBytes); ++ } else { ++ if (dst != src) move(dst, src); ++ } ++ } else { ++ if (dst == src) { ++ seleq(dst, S5_heapbase, dst, dst); ++ subl(dst, dst, S5_heapbase); ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shr(dst, LogMinObjAlignmentInBytes); ++ } ++ } else { ++ subl(dst, src, S5_heapbase); ++ if 
(Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shr(dst, LogMinObjAlignmentInBytes); ++ } ++ seleq(src, R0, dst, dst); ++ } ++ } ++} ++ ++void MacroAssembler::encode_heap_oop_not_null(Register r) { ++ assert (UseCompressedOops, "should be compressed"); ++#ifdef ASSERT ++ if (CheckCompressedOops) { ++ Label ok; ++ bne(r, ok); ++ stop("null oop passed to encode_heap_oop_not_null"); ++ BIND(ok); ++ } ++#endif ++ verify_oop(r, "broken oop in encode_heap_oop_not_null"); ++ if (Universe::narrow_oop_base() != NULL) { ++ subl(r, r, S5_heapbase); ++ } ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shr(r, LogMinObjAlignmentInBytes); ++ } ++ ++} ++ ++void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { ++ assert (UseCompressedOops, "should be compressed"); ++#ifdef ASSERT ++ if (CheckCompressedOops) { ++ Label ok; ++ bne(src, ok); ++ stop("null oop passed to encode_heap_oop_not_null2"); ++ BIND(ok); ++ } ++#endif ++ verify_oop(src, "broken oop in encode_heap_oop_not_null2"); ++ ++ if (Universe::narrow_oop_base() != NULL) { ++ subl(dst, src, S5_heapbase); ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shr(dst, LogMinObjAlignmentInBytes); ++ } ++ } else { ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ srll(dst, src, LogMinObjAlignmentInBytes); ++ } else { ++ if (dst != src) move(dst, src); ++ } ++ } ++} ++ ++void MacroAssembler::decode_heap_oop(Register r) { ++#ifdef ASSERT ++ verify_heapbase("MacroAssembler::decode_heap_oop corrupted?"); ++#endif ++ if (Universe::narrow_oop_base() == NULL) { ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), 
"decode alg wrong"); ++ shl(r, LogMinObjAlignmentInBytes); ++ } ++ } else { ++ move(AT, r); ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shl(r, LogMinObjAlignmentInBytes); ++ } ++ addl(r, r, S5_heapbase); ++ seleq(AT, R0, r, r); ++ } ++ verify_oop(r, "broken oop in decode_heap_oop"); ++} ++ ++void MacroAssembler::decode_heap_oop(Register dst, Register src) { ++#ifdef ASSERT ++ verify_heapbase("MacroAssembler::decode_heap_oop corrupted?"); ++#endif ++ if (Universe::narrow_oop_base() == NULL) { ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ if (dst != src) nop(); // DON'T DELETE THIS GUY. ++ slll(dst, src, LogMinObjAlignmentInBytes); ++ } else { ++ if (dst != src) move(dst, src); ++ } ++ } else { ++ if (dst == src) { ++ move(AT, dst); ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shl(dst, LogMinObjAlignmentInBytes); ++ } ++ addl(dst, dst, S5_heapbase); ++ seleq(AT, R0, dst, dst); ++ } else { ++ if (Universe::narrow_oop_shift() != 0) { ++ assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ slll(dst, src, LogMinObjAlignmentInBytes); ++ addl(dst, dst, S5_heapbase); ++ } else { ++ addl(dst, src, S5_heapbase); ++ } ++ seleq(src, R0, dst, dst); ++ } ++ } ++ verify_oop(dst, "broken oop in decode_heap_oop"); ++} ++ ++void MacroAssembler::decode_heap_oop_not_null(Register r) { ++ // Note: it will change flags ++ assert (UseCompressedOops, "should only be used for compressed headers"); ++ assert (Universe::heap() != NULL, "java heap should be initialized"); ++ // Cannot assert, unverified entry point counts instructions (see .ad file) ++ // vtableStubs also counts instructions in pd_code_size_limit. ++ // Also do not verify_oop as this is called by verify_oop. 
++ if (Universe::narrow_oop_shift() != 0) { ++ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ shl(r, LogMinObjAlignmentInBytes); ++ if (Universe::narrow_oop_base() != NULL) { ++ addl(r, r, S5_heapbase); ++ } ++ } else { ++ assert (Universe::narrow_oop_base() == NULL, "sanity"); ++ } ++} ++ ++void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { ++ assert (UseCompressedOops, "should only be used for compressed headers"); ++ assert (Universe::heap() != NULL, "java heap should be initialized"); ++ ++ // Cannot assert, unverified entry point counts instructions (see .ad file) ++ // vtableStubs also counts instructions in pd_code_size_limit. ++ // Also do not verify_oop as this is called by verify_oop. ++ //lea(dst, Address(S5_heapbase, src, Address::times_8, 0)); ++ if (Universe::narrow_oop_shift() != 0) { ++ assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); ++ if (LogMinObjAlignmentInBytes == Address::times_8) { ++ slll(dst, src, LogMinObjAlignmentInBytes); ++ addl(dst, dst, S5_heapbase); ++ } else { ++ slll(dst, src, LogMinObjAlignmentInBytes); ++ if (Universe::narrow_oop_base() != NULL) { ++ addl(dst, dst, S5_heapbase); ++ } ++ } ++ } else { ++ assert (Universe::narrow_oop_base() == NULL, "sanity"); ++ if (dst != src) { ++ move(dst, src); ++ } ++ } ++} ++ ++// Compare char[] arrays aligned to 4 bytes. ++void MacroAssembler::char_arrays_equals(Register ary1, Register ary2, ++ Register limit, Register result, ++ Register chr1, Register chr2, Label& Ldone) { ++ ++ assert_different_registers(GP, ary1, ary2, limit, result); ++ ++ Label Lvector, Lloop, Ldone_before1, Ldone_before2, Lexit; ++ assert(chr1 == result, "should be the same"); ++ ++ // Note: limit contains number of bytes (2*char_elements) != 0. ++ and_imm8(chr1, limit, 0x2); // trailing character ? 
++ beq(chr1, Lvector); ++ ++ // compare the trailing char ++ subl(limit, limit, sizeof(jchar)); ++ addl(chr1, ary1, limit); ++ ldhu(chr1, chr1, 0); ++ addl(chr2, ary2, limit); ++ ldhu(chr2, chr2, 0); ++ cmpeq(GP, chr1, chr2); ++ beq(GP, Ldone_before1); ++ ++ // only one char ? ++ beq(limit, Ldone_before2); ++ ++ // word by word compare, dont't need alignment check ++ BIND(Lvector); ++ // Shift ary1 and ary2 to the end of the arrays, negate limit ++ addl(ary1, ary1, limit); ++ addl(ary2, ary2, limit); ++ subl(limit, R0, limit); ++ ++ BIND(Lloop);// YJ20111018 ++ addl(chr1, ary1, limit); ++ ldw(chr1, chr1, 0); ++ zapnot(chr1, chr1, 0xf); ++ addl(chr2, ary2, limit); ++ ldw(chr2, chr2, 0); ++ zapnot(chr2, chr2, 0xf); ++ cmpeq(GP, chr1, chr2); ++ beq(GP, Ldone_before1); ++ addl(limit, limit, 2*sizeof(jchar)); ++ // annul LDUW if branch is not taken to prevent access past end of array ++ bne(limit, Lloop); ++ ++ beq(R0, Lexit); ++ ++ BIND(Ldone_before1); ++ or_ins(result, R0, 0); // not equal ++ beq(R0, Ldone); ++ ++ BIND(Ldone_before2); ++ or_ins(result, R0, 1); // zero-length arrays are equal ++ beq(R0, Ldone); ++ ++ BIND(Lexit); ++} ++ ++void MacroAssembler::encode_klass_not_null(Register r) { ++ if (Universe::narrow_klass_base() != NULL) { ++ assert(r != AT, "Encoding a klass in AT"); ++ set64(AT, (int64_t)Universe::narrow_klass_base()); ++ subl(r, r, AT); ++ } ++ if (Universe::narrow_klass_shift() != 0) { ++ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); ++ shr(r, LogKlassAlignmentInBytes); ++ } ++} ++ ++void MacroAssembler::encode_klass_not_null(Register dst, Register src) { ++ if (dst == src) { ++ encode_klass_not_null(src); ++ } else { ++ if (Universe::narrow_klass_base() != NULL) { ++ set64(dst, (int64_t)Universe::narrow_klass_base()); ++ subl(dst, src, dst); ++ if (Universe::narrow_klass_shift() != 0) { ++ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); ++ shr(dst, 
LogKlassAlignmentInBytes); ++ } ++ } else { ++ if (Universe::narrow_klass_shift() != 0) { ++ assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); ++ srll(dst, src, LogKlassAlignmentInBytes); ++ } else { ++ move(dst, src); ++ } ++ } ++ } ++} ++ ++// Function instr_size_for_decode_klass_not_null() counts the instructions ++// generated by decode_klass_not_null(register r) and reinit_heapbase(), ++// when (Universe::heap() != NULL). Hence, if the instructions they ++// generate change, then this method needs to be updated. ++int MacroAssembler::instr_size_for_decode_klass_not_null() { ++ assert (UseCompressedClassPointers, "only for compressed klass ptrs"); ++ if (Universe::narrow_klass_base() != NULL) { ++ // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). ++ return (Universe::narrow_klass_shift() == 0 ? 4 * 9 : 4 * 10); ++ } else { ++ // longest load decode klass function, mov64, leaq ++ return (Universe::narrow_klass_shift() == 0 ? 4 * 0 : 4 * 1); ++ } ++} ++ ++void MacroAssembler::decode_klass_not_null(Register r) { ++ assert (UseCompressedClassPointers, "should only be used for compressed headers"); ++ assert(r != AT, "Decoding a klass in AT"); ++ // Cannot assert, unverified entry point counts instructions (see .ad file) ++ // vtableStubs also counts instructions in pd_code_size_limit. ++ // Also do not verify_oop as this is called by verify_oop. ++ if (Universe::narrow_klass_shift() != 0) { ++ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); ++ shl(r, LogKlassAlignmentInBytes); ++ } ++ if (Universe::narrow_klass_base() != NULL) { ++ set64(AT, (int64_t)Universe::narrow_klass_base()); ++ addl(r, r, AT); ++ //Not neccessary for SW64 at all. 
++ //reinit_heapbase(); ++ } ++} ++ ++void MacroAssembler::decode_klass_not_null(Register dst, Register src) { ++ assert (UseCompressedClassPointers, "should only be used for compressed headers"); ++ ++ if (dst == src) { ++ decode_klass_not_null(dst); ++ } else { ++ // Cannot assert, unverified entry point counts instructions (see .ad file) ++ // vtableStubs also counts instructions in pd_code_size_limit. ++ // Also do not verify_oop as this is called by verify_oop. ++ set64(dst, (int64_t)Universe::narrow_klass_base()); ++ if (Universe::narrow_klass_shift() != 0) { ++ assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); ++ assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); ++ slll(AT, src, Address::times_8); ++ addl(dst, dst, AT); ++ } else { ++ addl(dst, src, dst); ++ } ++ } ++} ++ ++void MacroAssembler::incrementl(Register reg, int value) { ++ if (value == min_jint) { ++ move(AT, value); ++ addw(reg, reg, AT); ++ return; ++ } ++ if (value < 0) { decrementl(reg, -value); return; } ++ if (value == 0) { ; return; } ++ ++ if(Assembler::is_simm16(value)) { ++ move(AT, value); addw(reg, reg, AT); ++ } else { ++ move(AT, value); ++ addw(reg, reg, AT); ++ } ++} ++ ++void MacroAssembler::decrementl(Register reg, int value) { ++ if (value == min_jint) { ++ move(AT, value); ++ subw(reg, reg, AT); ++ return; ++ } ++ if (value < 0) { incrementl(reg, -value); return; } ++ if (value == 0) { ; return; } ++ ++ if (Assembler::is_simm16(value)) { ++ move(AT, value); subw(reg, reg, AT); ++ } else { ++ move(AT, value); ++ subw(reg, reg, AT); ++ } ++} ++ ++void MacroAssembler::reinit_heapbase() { ++ if (UseCompressedOops || UseCompressedClassPointers) { ++ if (Universe::heap() != NULL) { ++ if (Universe::narrow_oop_base() == NULL) { ++ move(S5_heapbase, R0); ++ } else { ++ set64(S5_heapbase, (int64_t)Universe::narrow_ptrs_base()); ++ } ++ } else { ++ set64(S5_heapbase, (intptr_t)Universe::narrow_ptrs_base_addr()); 
++ ldl(S5_heapbase, S5_heapbase, 0); ++ } ++ } ++} ++ ++void MacroAssembler::check_klass_subtype(Register sub_klass, ++ Register super_klass, ++ Register temp_reg, ++ Label& L_success) { ++//implement ind gen_subtype_check ++ Label L_failure; ++ check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL); ++ check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL); ++ BIND(L_failure); ++} ++ ++SkipIfEqual::SkipIfEqual( ++ MacroAssembler* masm, const bool* flag_addr, bool value) { ++ _masm = masm; ++ _masm->li(AT, (address)flag_addr); ++ _masm->ldbu(AT, AT, 0); ++ _masm->add_simm16(AT,AT,-value); ++ _masm->beq(AT,_label); ++} ++void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, ++ Register super_klass, ++ Register temp_reg, ++ Label* L_success, ++ Label* L_failure, ++ Label* L_slow_path, ++ RegisterOrConstant super_check_offset) { ++ assert_different_registers(sub_klass, super_klass, temp_reg); ++ bool must_load_sco = (super_check_offset.constant_or_zero() == -1); ++ if (super_check_offset.is_register()) { ++ assert_different_registers(sub_klass, super_klass, ++ super_check_offset.as_register()); ++ } else if (must_load_sco) { ++ assert(temp_reg != noreg, "supply either a temp or a register offset"); ++ } ++ ++ Label L_fallthrough; ++ int label_nulls = 0; ++ if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } ++ if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } ++ if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } ++ assert(label_nulls <= 1, "at most one NULL in the batch"); ++ ++ int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); ++ int sco_offset = in_bytes(Klass::super_check_offset_offset()); ++ // If the pointers are equal, we are done (e.g., String[] elements). ++ // This self-check enables sharing of secondary supertype arrays among ++ // non-primary types such as array-of-interface. 
Otherwise, each such ++ // type would need its own customized SSA. ++ // We move this check to the front of the fast path because many ++ // type checks are in fact trivially successful in this manner, ++ // so we get a nicely predicted branch right at the start of the check. ++ beq(sub_klass, super_klass, *L_success); ++ // Check the supertype display: ++ if (must_load_sco) { ++ // Positive movl does right thing on LP64. ++ ldw_unsigned(temp_reg, super_klass, sco_offset); ++ super_check_offset = RegisterOrConstant(temp_reg); ++ } ++ slll(AT, super_check_offset.register_or_noreg(), Address::times_1); ++ addl(AT, sub_klass, AT); ++ ldl(AT, AT, super_check_offset.constant_or_zero()*Address::times_1); ++ ++ // This check has worked decisively for primary supers. ++ // Secondary supers are sought in the super_cache ('super_cache_addr'). ++ // (Secondary supers are interfaces and very deeply nested subtypes.) ++ // This works in the same check above because of a tricky aliasing ++ // between the super_cache and the primary super display elements. ++ // (The 'super_check_addr' can address either, as the case requires.) ++ // Note that the cache is updated below if it does not help us find ++ // what we need immediately. ++ // So if it was a primary super, we can just fail immediately. ++ // Otherwise, it's the slow path for us (no success at this point). ++ ++ if (super_check_offset.is_register()) { ++ beq(super_klass, AT, *L_success); ++ add_simm16(AT, super_check_offset.as_register(), -sc_offset); ++ if (L_failure == &L_fallthrough) { ++ beq(AT, *L_slow_path); ++ } else { ++ bne(AT, *L_failure); ++ beq(R0, *L_slow_path); ++ } ++ } else if (super_check_offset.as_constant() == sc_offset) { ++ // Need a slow path; fast failure is impossible. ++ if (L_slow_path == &L_fallthrough) { ++ beq(super_klass, AT, *L_success); ++ } else { ++ bne(super_klass, AT, *L_slow_path); ++ beq(R0, *L_success); ++ } ++ } else { ++ // No slow path; it's a fast decision. 
++    if (L_failure == &L_fallthrough) {
++      beq(super_klass, AT, *L_success);
++    } else {
++      bne(super_klass, AT, *L_failure);
++      beq(R0, *L_success);
++    }
++  }
++
++  BIND(L_fallthrough);
++
++}
++
++
++void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
++                                                   Register super_klass,
++                                                   Register temp_reg,
++                                                   Register temp2_reg,
++                                                   Label* L_success,
++                                                   Label* L_failure,
++                                                   bool set_cond_codes) {
++  assert_different_registers(sub_klass, super_klass, temp_reg);
++  if (temp2_reg != noreg)
++    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
++  else
++    temp2_reg = T12;
++#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
++
++  Label L_fallthrough;
++  int label_nulls = 0;
++  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
++  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
++  assert(label_nulls <= 1, "at most one NULL in the batch");
++
++  // a couple of useful fields in sub_klass:
++  int ss_offset = in_bytes(Klass::secondary_supers_offset());
++  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
++  Address secondary_supers_addr(sub_klass, ss_offset);
++  Address super_cache_addr( sub_klass, sc_offset);
++
++  // Do a linear scan of the secondary super-klass chain.
++  // This code is rarely used, so simplicity is a virtue here.
++  // The repne_scan instruction uses fixed registers, which we must spill.
++  // Don't worry too much about pre-existing connections with the input regs.
++
++#ifndef PRODUCT
++  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
++  ExternalAddress pst_counter_addr((address) pst_counter);
++  NOT_LP64( incrementl(pst_counter_addr) );
++#endif //PRODUCT
++
++  // We will consult the secondary-super array.
++  ldl(temp_reg, secondary_supers_addr);
++  // Load the array length. (Positive movl does right thing on LP64.)
++  ldw_signed(temp2_reg, Address(temp_reg, Array<Klass*>::length_offset_in_bytes()));
++  // Skip to start of data.
++  add_simm16(temp_reg, temp_reg, Array<Klass*>::base_offset_in_bytes());
++
++  // OpenJDK8 never compresses klass pointers in secondary-super array.
++  Label Loop, subtype;
++  BIND(Loop);
++  beq(temp2_reg, *L_failure);
++  ldl(AT, temp_reg, 0);
++  beq(AT, super_klass, subtype);
++  add_simm16(temp_reg, temp_reg, 1 * wordSize);
++  subl(temp2_reg, temp2_reg, 1);
++  beq(R0, Loop);
++
++  BIND(subtype);
++  stl(super_klass, super_cache_addr);
++  if (L_success != &L_fallthrough) {
++    beq(R0, *L_success);
++  }
++
++  // Success. Cache the super we found and proceed in triumph.
++#undef IS_A_TEMP
++
++  BIND(L_fallthrough);
++}
++
++void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
++  ldl(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
++  stl(R0, Address(java_thread, JavaThread::vm_result_offset()));
++  verify_oop(oop_result, "broken oop in call_VM_base");
++}
++
++void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
++  ldl(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
++  stl(R0, Address(java_thread, JavaThread::vm_result_2_offset()));
++}
++
++Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
++                                         int extra_slot_offset) {
++  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
++  int stackElementSize = Interpreter::stackElementSize;
++  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
++#ifdef ASSERT
++  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
++  assert(offset1 - offset == stackElementSize, "correct arithmetic");
++#endif
++  Register scale_reg = NOREG;
++  Address::ScaleFactor scale_factor = Address::no_scale;
++  if (arg_slot.is_constant()) {
++    offset += arg_slot.as_constant() * stackElementSize;
++  } else {
++    scale_reg = arg_slot.as_register();
++    scale_factor = Address::times_8;
++  }
++  // We don't push RA on stack in prepare_invoke.
++  // offset += wordSize; // return PC is on stack
++  if(scale_reg==NOREG) return Address(SP, offset);
++  else {
++    slll(scale_reg, scale_reg, scale_factor);
++    addl(scale_reg, SP, scale_reg);
++    return Address(scale_reg, offset);
++  }
++}
++
++SkipIfEqual::~SkipIfEqual() {
++  _masm->bind(_label);
++}
++
++void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
++  switch (size_in_bytes) {
++  case 8: ldl(dst, src); break;
++  case 4: ldw_signed(dst, src); break;
++  case 2: is_signed ? ldh_signed(dst, src) : ldh_unsigned(dst, src); break;
++  case 1: is_signed ? ldb_signed( dst, src) : ldbu( dst, src); break;
++  default: ShouldNotReachHere();
++  }
++}
++
++void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
++  switch (size_in_bytes) {
++  case 8: stl(src, dst); break;
++  case 4: stw(src, dst); break;
++  case 2: sth(src, dst); break;
++  case 1: stb(src, dst); break;
++  default: ShouldNotReachHere();
++  }
++}
++
++// Look up the method for a megamorphic invokeinterface call.
++// The target method is determined by <intf_klass, itable_index>.
++// The receiver klass is in recv_klass.
++// On success, the result will be in method_result, and execution falls through.
++// On failure, execution transfers to the given label.
++void MacroAssembler::lookup_interface_method(Register recv_klass, ++ Register intf_klass, ++ RegisterOrConstant itable_index, ++ Register method_result, ++ Register scan_temp, ++ Label& L_no_such_interface, ++ bool return_method) { ++ assert_different_registers(recv_klass, intf_klass, scan_temp, AT); ++ assert_different_registers(method_result, intf_klass, scan_temp, AT); ++ assert(recv_klass != method_result || !return_method, ++ "recv_klass can be destroyed when method isn't needed"); ++ ++ assert(itable_index.is_constant() || itable_index.as_register() == method_result, ++ "caller must use same register for non-constant itable index as for method"); ++ ++ // Compute start of first itableOffsetEntry (which is at the end of the vtable) ++ int vtable_base = InstanceKlass::vtable_start_offset() * wordSize; ++ int itentry_off = itableMethodEntry::method_offset_in_bytes(); ++ int scan_step = itableOffsetEntry::size() * wordSize; ++ int vte_size = vtableEntry::size() * wordSize; ++ Address::ScaleFactor times_vte_scale = Address::times_ptr; ++ assert(vte_size == wordSize, "else adjust times_vte_scale"); ++ ++ ldw_signed(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize)); ++ ++ // %%% Could store the aligned, prescaled offset in the klassoop. ++ slll(scan_temp, scan_temp, times_vte_scale); ++ addl(scan_temp, recv_klass, scan_temp); ++ add_simm16(scan_temp, scan_temp, vtable_base); ++ if (HeapWordsPerLong > 1) { ++ // Round up to align_object_offset boundary ++ // see code for InstanceKlass::start_of_itable! ++ round_to(scan_temp, BytesPerLong); ++ } ++ ++ if (return_method) { ++ // Adjust recv_klass by scaled itable_index, so we can free itable_index. 
++ assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); ++ if (itable_index.is_constant()) { ++ set64(AT, (int)itable_index.is_constant()); ++ slll(AT, AT, (int)Address::times_ptr); ++ } else { ++ slll(AT, itable_index.as_register(), (int)Address::times_ptr); ++ } ++ addl(AT, AT, recv_klass); ++ add_simm16(recv_klass, AT, itentry_off); ++ } ++ ++ Label search, found_method; ++ ++ for (int peel = 1; peel >= 0; peel--) { ++ ldl(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); ++ ++ if (peel) { ++ beq(intf_klass, method_result, found_method); ++ } else { ++ bne(intf_klass, method_result, search); ++ // (invert the test to fall through to found_method...) ++ } ++ ++ if (!peel) break; ++ ++ BIND(search); ++ ++ // Check that the previous entry is non-null. A null entry means that ++ // the receiver class doesn't implement the interface, and wasn't the ++ // same as when the caller was compiled. ++ beq(method_result, L_no_such_interface); ++ add_simm16(scan_temp, scan_temp, scan_step); ++ } ++ ++ BIND(found_method); ++ ++ if (return_method) { ++ // Got a hit. 
++ ldw_signed(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); ++ addl(AT, recv_klass, scan_temp); ++ ldl(method_result, AT); ++ } ++} ++ ++// virtual method calling ++void MacroAssembler::lookup_virtual_method(Register recv_klass, ++ RegisterOrConstant vtable_index, ++ Register method_result) { ++ Register tmp = GP; ++ push(tmp); ++ ++ if (vtable_index.is_constant()) { ++ assert_different_registers(recv_klass, method_result, tmp); ++ } else { ++ assert_different_registers(recv_klass, method_result, vtable_index.as_register(), tmp); ++ } ++ const int base = InstanceKlass::vtable_start_offset() * wordSize; ++ assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); ++ if (vtable_index.is_constant()) { ++ set64(AT, vtable_index.as_constant()); ++ slll(AT, AT, (int)Address::times_ptr); ++ } else { ++ slll(AT, vtable_index.as_register(), (int)Address::times_ptr); ++ } ++ set64(tmp, base + vtableEntry::method_offset_in_bytes()); ++ addl(tmp, tmp, AT); ++ addl(tmp, tmp, recv_klass); ++ ldl(method_result, tmp, 0); ++ ++ pop(tmp); ++} ++ ++void MacroAssembler::store_for_type_by_register(Register src_reg, Register tmp_reg, int disp, BasicType type, bool wide) { ++ switch (type) { ++ case T_LONG: ++ st_ptr(src_reg, tmp_reg, disp); ++ break; ++ case T_ARRAY: ++ case T_OBJECT: ++ if (UseCompressedOops && !wide) { ++ stw(src_reg, tmp_reg, disp); ++ } else { ++ st_ptr(src_reg, tmp_reg, disp); ++ } ++ break; ++ case T_ADDRESS: ++ st_ptr(src_reg, tmp_reg, disp); ++ break; ++ case T_INT: ++ stw(src_reg, tmp_reg, disp); ++ break; ++ case T_CHAR: ++ case T_SHORT: ++ sth(src_reg, tmp_reg, disp); ++ break; ++ case T_BYTE: ++ case T_BOOLEAN: ++ stb(src_reg, tmp_reg, disp); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++} ++ ++//C1 ++void MacroAssembler::store_for_type(Register src_reg, Address addr, BasicType type, bool wide) { ++ Register tmp_reg = T9; ++ ++ int disp = addr.disp(); ++ bool disp_is_simm16 = true; 
++ if (!Assembler::is_simm16(disp)) { ++ disp_is_simm16 = false; ++ } ++ ++ Register base_reg = addr.base(); ++ if (!disp_is_simm16) { ++ assert_different_registers(tmp_reg, base_reg); ++ move(tmp_reg, disp); ++ addl(tmp_reg, base_reg, tmp_reg); ++ } ++ store_for_type_by_register(src_reg, disp_is_simm16 ? base_reg : tmp_reg, disp_is_simm16 ? disp : 0, type, wide); ++} ++ ++void MacroAssembler::store_for_type_by_register(FloatRegister src_reg, Register tmp_reg, int disp, BasicType type) { ++ switch (type) { ++ case T_DOUBLE: ++ fstd(src_reg, tmp_reg, disp); ++ break; ++ case T_FLOAT: ++ fsts(src_reg, tmp_reg, disp); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++} ++ ++//C1 ++void MacroAssembler::store_for_type(FloatRegister src_reg, Address addr, BasicType type) { ++ Register tmp_reg = T9; ++ ++ int disp = addr.disp(); ++ bool disp_is_simm16 = true; ++ if (!Assembler::is_simm16(disp)) { ++ disp_is_simm16 = false; ++ } ++ ++ Register base_reg = addr.base(); ++ if (!disp_is_simm16) { ++ assert_different_registers(tmp_reg, base_reg); ++ move(tmp_reg, disp); ++ addl(tmp_reg, base_reg, tmp_reg); ++ } ++ store_for_type_by_register(src_reg, disp_is_simm16 ? base_reg : tmp_reg, disp_is_simm16 ? 
disp : 0, type); ++} ++ ++void MacroAssembler::load_for_type_by_register(Register dst_reg, Register tmp_reg, int disp, BasicType type, bool wide) { ++ switch (type) { ++ case T_LONG: ++ ld_ptr(dst_reg, tmp_reg, disp); ++ break; ++ case T_ARRAY: ++ case T_OBJECT: ++ if (UseCompressedOops && !wide) { ++ ldw_unsigned(dst_reg, tmp_reg, disp); ++ } else { ++ ld_ptr(dst_reg, tmp_reg, disp); ++ } ++ break; ++ case T_ADDRESS: ++ if (UseCompressedClassPointers && disp == oopDesc::klass_offset_in_bytes()) { ++ ldw_unsigned(dst_reg, tmp_reg, disp); ++ } else { ++ ld_ptr(dst_reg, tmp_reg, disp); ++ } ++ break; ++ case T_INT: ++ ldw(dst_reg, tmp_reg, disp); ++ break; ++ case T_CHAR: ++ ldhu(dst_reg, tmp_reg, disp); ++ break; ++ case T_SHORT: ++ ldh_signed(dst_reg, tmp_reg, disp); ++ break; ++ case T_BYTE: ++ case T_BOOLEAN: ++ ldb_signed(dst_reg, tmp_reg, disp); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++} ++ ++//C1 ++int MacroAssembler::load_for_type(Register dst_reg, Address addr, BasicType type, bool wide) { ++ int code_offset = 0; ++ Register tmp_reg = T9; ++ ++ int disp = addr.disp(); ++ bool disp_is_simm16 = true; ++ if (!Assembler::is_simm16(disp)) { ++ disp_is_simm16 = false; ++ } ++ ++ Register base_reg = addr.base(); ++ if (!disp_is_simm16) { ++ assert_different_registers(tmp_reg, base_reg); ++ move(tmp_reg, disp); ++ addl(tmp_reg, base_reg, tmp_reg); ++ } ++ code_offset = offset(); ++ load_for_type_by_register(dst_reg, disp_is_simm16 ? base_reg : tmp_reg, disp_is_simm16 ? 
disp : 0, type, wide); ++ ++ return code_offset; ++} ++ ++void MacroAssembler::load_for_type_by_register(FloatRegister dst_reg, Register tmp_reg, int disp, BasicType type) { ++ switch (type) { ++ case T_DOUBLE: ++ fldd(dst_reg, tmp_reg, disp); ++ break; ++ case T_FLOAT: ++ flds(dst_reg, tmp_reg, disp); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++} ++ ++//C1 ++int MacroAssembler::load_for_type(FloatRegister dst_reg, Address addr, BasicType type) { ++ int code_offset = 0; ++ Register tmp_reg = T9; ++ ++ int disp = addr.disp(); ++ bool disp_is_simm16 = true; ++ if (!Assembler::is_simm16(disp)) { ++ disp_is_simm16 = false; ++ } ++ ++ Register base_reg = addr.base(); ++ if (!disp_is_simm16) { ++ assert_different_registers(tmp_reg, base_reg); ++ move(tmp_reg, disp); ++ addl(tmp_reg, base_reg, tmp_reg); ++ } ++ code_offset = offset(); ++ load_for_type_by_register(dst_reg, disp_is_simm16 ? base_reg : tmp_reg, disp_is_simm16 ? disp : 0, type); ++ ++ return code_offset; ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,712 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_MACROASSEMBLER_SW64_HPP ++#define CPU_SW64_VM_MACROASSEMBLER_SW64_HPP ++ ++#include "asm/assembler.hpp" ++#include "utilities/macros.hpp" ++#include "runtime/rtmLocking.hpp" ++ ++ ++// MacroAssembler extends Assembler by frequently used macros. ++// ++// Instructions for which a 'better' code sequence exists depending ++// on arguments should also go in here. ++ ++class MacroAssembler: public Assembler { ++ friend class LIR_Assembler; ++ friend class Runtime1; // as_Address() ++ ++ protected: ++ ++ Address as_Address(AddressLiteral adr); ++ Address as_Address(ArrayAddress adr); ++ ++ // Support for VM calls ++ // ++ // This is the base routine called by the different versions of call_VM_leaf. The interpreter ++ // may customize this version by overriding it for its purposes (e.g., to save/restore ++ // additional registers when doing a VM call). ++#ifdef CC_INTERP ++ // c++ interpreter never wants to use interp_masm version of call_VM ++ #define VIRTUAL ++#else ++ #define VIRTUAL virtual ++#endif ++ ++ VIRTUAL void call_VM_leaf_base( ++ address entry_point, // the entry point ++ int number_of_arguments // the number of arguments to pop after the call ++ ); ++ ++ // This is the base routine called by the different versions of call_VM. The interpreter ++ // may customize this version by overriding it for its purposes (e.g., to save/restore ++ // additional registers when doing a VM call). 
++ // ++ // If no java_thread register is specified (noreg) then S2Thread will be used instead. call_VM_base ++ // returns the register which contains the thread upon return. If a thread register has been ++ // specified, the return value will correspond to that register. If no last_java_sp is specified ++ // (noreg) then sp will be used instead. ++ VIRTUAL void call_VM_base( // returns the register containing the thread upon return ++ Register oop_result, // where an oop-result ends up if any; use noreg otherwise ++ Register java_thread, // the thread if computed before ; use noreg otherwise ++ Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise ++ address entry_point, // the entry point ++ int number_of_arguments, // the number of arguments (w/o thread) to pop after the call ++ bool check_exceptions // whether to check for pending exceptions after return ++ ); ++ ++ // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. ++ // The implementation is only non-empty for the InterpreterMacroAssembler, ++ // as only the interpreter handles PopFrame and ForceEarlyReturn requests. ++ virtual void check_and_handle_popframe(Register java_thread); ++ virtual void check_and_handle_earlyret(Register java_thread); ++ ++ void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); ++ ++ // helpers for FPU flag access ++ // tmp is a temporary register, if none is available use noreg ++ ++ public: ++ static intptr_t i[32]; ++ static float f[32]; ++ static void print(outputStream *s); ++ ++ static int i_offset(unsigned int k); ++ static int f_offset(unsigned int k); ++ ++ static void save_registers(MacroAssembler *masm); ++ static void restore_registers(MacroAssembler *masm); ++ ++ MacroAssembler(CodeBuffer* code) : Assembler(code) {} ++ ++ // Support for NULL-checks ++ // ++ // Generates code that causes a NULL OS exception if the content of reg is NULL.
++ // If the accessed location is M[reg + offset] and the offset is known, provide the ++ // offset. No explicit code generation is needed if the offset is within a certain ++ // range (0 <= offset <= page_size). ++ ++ void null_check(Register reg, int offset = -1); ++ static bool needs_explicit_null_check(intptr_t offset); ++ ++ // Required platform-specific helpers for Label::patch_instructions. ++ // They _shadow_ the declarations in AbstractAssembler, which are undefined. ++ void pd_patch_instruction(address branch, address target); ++ ++ // Support for inc/dec with optimal instruction selection depending on value ++ void incrementl(Register reg, int value = 1); ++ void decrementl(Register reg, int value = 1); ++ ++ ++ // Alignment ++ void align(int modulus); ++ ++ ++ // Stack frame creation/removal ++ void enter(); ++ void leave(); ++ ++ // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) ++ // The pointer will be loaded into the thread register. ++ void get_thread(Register thread); ++ ++ ++ // Support for VM calls ++ // ++ // It is imperative that all calls into the VM are handled via the call_VM macros. ++ // They make sure that the stack linkage is setup correctly. call_VM's correspond ++ // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 
++ ++ ++ void call_VM(Register oop_result, ++ address entry_point, ++ bool check_exceptions = true); ++ void call_VM(Register oop_result, ++ address entry_point, ++ Register arg_1, ++ bool check_exceptions = true); ++ void call_VM(Register oop_result, ++ address entry_point, ++ Register arg_1, Register arg_2, ++ bool check_exceptions = true); ++ void call_VM(Register oop_result, ++ address entry_point, ++ Register arg_1, Register arg_2, Register arg_3, ++ bool check_exceptions = true); ++ ++ // Overloadings with last_Java_sp ++ void call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ int number_of_arguments = 0, ++ bool check_exceptions = true); ++ void call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ Register arg_1, bool ++ check_exceptions = true); ++ void call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ Register arg_1, Register arg_2, ++ bool check_exceptions = true); ++ void call_VM(Register oop_result, ++ Register last_java_sp, ++ address entry_point, ++ Register arg_1, Register arg_2, Register arg_3, ++ bool check_exceptions = true); ++ ++ void get_vm_result (Register oop_result, Register thread); ++ void get_vm_result_2(Register metadata_result, Register thread); ++ void call_VM_leaf(address entry_point, ++ int number_of_arguments = 0); ++ void call_VM_leaf(address entry_point, ++ Register arg_1); ++ void call_VM_leaf(address entry_point, ++ Register arg_1, Register arg_2); ++ void call_VM_leaf(address entry_point, ++ Register arg_1, Register arg_2, Register arg_3); ++ ++ // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls ++ void super_call_VM_leaf(address entry_point); ++ void super_call_VM_leaf(address entry_point, Register arg_1); ++ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); ++ void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); ++ ++ // last Java Frame (fills 
frame anchor) ++ void set_last_Java_frame(Register thread, ++ Register last_java_sp, ++ Register last_java_fp, ++ address last_java_pc); ++ ++ // thread in the default location (S2) ++ void set_last_Java_frame(Register last_java_sp, ++ Register last_java_fp, ++ address last_java_pc); ++ ++ void reset_last_Java_frame(Register thread, bool clear_fp); ++ ++ // thread in the default location (S2) ++ void reset_last_Java_frame(bool clear_fp); ++ ++ // Stores ++ void store_check(Register obj); // store check for obj - register is destroyed afterwards ++ void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) ++ ++ void resolve_jobject(Register value, Register thread, Register tmp); ++ void clear_jweak_tag(Register possibly_jweak); ++ ++#if INCLUDE_ALL_GCS ++ ++ void g1_write_barrier_pre(Register obj, ++ Register pre_val, ++ Register thread, ++ Register tmp, ++ bool tosca_live, ++ bool expand_call); ++ ++ void g1_write_barrier_post(Register store_addr, ++ Register new_val, ++ Register thread, ++ Register tmp, ++ Register tmp2); ++ ++#endif // INCLUDE_ALL_GCS ++ ++ // split store_check(Register obj) to enhance instruction interleaving ++ void store_check_part_1(Register obj); ++ void store_check_part_2(Register obj); ++ ++ // C 'boolean' to Java boolean: x == 0 ? 
0 : 1 ++ void c2bool(Register x); ++ //add for compressedoops ++ void load_klass(Register dst, Register src); ++ void store_klass(Register dst, Register src); ++ void load_prototype_header(Register dst, Register src); ++ ++ void store_klass_gap(Register dst, Register src); ++ ++ void load_heap_oop(Register dst, Address src); ++ void store_heap_oop(Address dst, Register src); ++ void store_heap_oop_null(Address dst); ++ void encode_heap_oop(Register r); ++ void encode_heap_oop(Register dst, Register src); ++ void decode_heap_oop(Register r); ++ void decode_heap_oop(Register dst, Register src); ++ void encode_heap_oop_not_null(Register r); ++ void decode_heap_oop_not_null(Register r); ++ void encode_heap_oop_not_null(Register dst, Register src); ++ void decode_heap_oop_not_null(Register dst, Register src); ++ ++ void encode_klass_not_null(Register r); ++ void decode_klass_not_null(Register r); ++ void encode_klass_not_null(Register dst, Register src); ++ void decode_klass_not_null(Register dst, Register src); ++ ++ // Returns the byte size of the instructions generated by decode_klass_not_null() ++ // when compressed klass pointers are being used. 
++ static int instr_size_for_decode_klass_not_null(); ++ ++ // if heap base register is used - reinit it with the correct value ++ void reinit_heapbase(); ++ ++ DEBUG_ONLY(void verify_heapbase(const char* msg);) ++ ++ void set_narrow_klass(Register dst, Klass* k); ++ void set_narrow_oop(Register dst, jobject obj); ++ ++ void int3(); ++ // Sign extension ++ void sign_extend_short(Register reg) { sexth(reg, reg); } ++ void sign_extend_byte(Register reg) { sextb(reg, reg); } ++ void rem_s(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp); ++ void rem_d(FloatRegister fd, FloatRegister fs, FloatRegister ft, FloatRegister tmp); ++ ++ void trigfunc(char trig, int num_fpu_regs_in_use = 1); ++ // allocation ++ void eden_allocate( ++ Register obj, // result: pointer to object after successful allocation ++ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise ++ int con_size_in_bytes, // object size in bytes if known at compile time ++ Register t1, // temp register ++ Register t2, ++ Label& slow_case // continuation point if fast allocation fails ++ ); ++ void tlab_allocate( ++ Register obj, // result: pointer to object after successful allocation ++ Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise ++ int con_size_in_bytes, // object size in bytes if known at compile time ++ Register t1, // temp register ++ Register t2, // temp register ++ Label& slow_case // continuation point if fast allocation fails ++ ); ++ void update_byte_crc32(Register crc, Register val, Register table); ++ void kernel_crc32(Register crc, Register buf, Register len, Register tmp, Register tmp3); ++ void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); ++ void incr_allocated_bytes(Register thread, ++ Register var_size_in_bytes, int con_size_in_bytes, ++ Register t1 = noreg); ++ // interface method calling ++ void lookup_interface_method(Register recv_klass, ++ Register 
intf_klass, ++ RegisterOrConstant itable_index, ++ Register method_result, ++ Register scan_temp, ++ Label& no_such_interface, ++ bool return_method = true); ++ ++ // virtual method calling ++ void lookup_virtual_method(Register recv_klass, ++ RegisterOrConstant vtable_index, ++ Register method_result); ++ ++ // Test sub_klass against super_klass, with fast and slow paths. ++ ++ // The fast path produces a tri-state answer: yes / no / maybe-slow. ++ // One of the three labels can be NULL, meaning take the fall-through. ++ // If super_check_offset is -1, the value is loaded up from super_klass. ++ // No registers are killed, except temp_reg. ++ void check_klass_subtype_fast_path(Register sub_klass, ++ Register super_klass, ++ Register temp_reg, ++ Label* L_success, ++ Label* L_failure, ++ Label* L_slow_path, ++ RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); ++ ++ // The rest of the type check; must be wired to a corresponding fast path. ++ // It does not repeat the fast path logic, so don't use it standalone. ++ // The temp_reg and temp2_reg can be noreg, if no temps are available. ++ // Updates the sub's secondary super cache as necessary. ++ // If set_cond_codes, condition codes will be Z on success, NZ on failure. ++ void check_klass_subtype_slow_path(Register sub_klass, ++ Register super_klass, ++ Register temp_reg, ++ Register temp2_reg, ++ Label* L_success, ++ Label* L_failure, ++ bool set_cond_codes = false); ++ ++ // Simplified, combined version, good for typical uses. ++ // Falls through on failure. ++ void check_klass_subtype(Register sub_klass, ++ Register super_klass, ++ Register temp_reg, ++ Label& L_success); ++ ++ ++ // Debugging ++ ++ // only if +VerifyOops ++ void verify_oop(Register reg, const char* s = "broken oop"); ++ void verify_oop_addr(Address addr, const char * s = "broken oop addr"); ++ void verify_oop_subroutine(); ++ // TODO: verify method and klass metadata (compare against vptr?) 
++ void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} ++ void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} ++ ++ #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) ++ #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) ++ ++ // only if +VerifyFPU ++ void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); ++ ++ // prints msg, dumps registers and stops execution ++ void stop(const char* msg); ++ ++ // prints msg and continues ++ void warn(const char* msg); ++ ++ static void debug(char* msg/*, RegistersForDebugging* regs*/); ++ static void debug64(char* msg, int64_t pc, int64_t regs[]); ++ ++// void print_reg(Register reg); ++// void print_reg(FloatRegister reg); ++ //void os_breakpoint(); ++ ++ void untested() { stop("untested"); } ++ ++ void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); } ++ ++ void should_not_reach_here() { stop("should not reach here"); } ++ ++ void print_CPU_state(); ++ ++ // Stack overflow checking ++ void bang_stack_with_offset(int offset) { ++ // stack grows down, caller passes positive offset ++ assert(offset > 0, "must bang with negative offset"); ++ if (offset <= 32768) { ++ stw(R0, SP, -offset); // sw(A0, SP, -offset); ++ } else { ++ li(AT, offset); ++ subl(AT, SP, AT); ++ stw(R0, AT, 0); // sw(A0, AT, 0); ++ } ++ } ++ ++ // Writes to stack successive pages until offset reached to check for ++ // stack overflow + shadow pages.
Also, clobbers tmp ++ void bang_stack_size(Register size, Register tmp); ++ ++ virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, ++ Register tmp, ++ int offset); ++ ++ // Support for serializing memory accesses between threads ++ void serialize_memory(Register thread, Register tmp); ++ ++ //void verify_tlab(); ++ void verify_tlab(Register t1, Register t2); ++ ++ // Compare char[] arrays aligned to 4 bytes. ++ void char_arrays_equals(Register ary1, Register ary2, ++ Register limit, Register result, ++ Register chr1, Register chr2, Label& Ldone); ++ ++ // Biased locking support ++ // lock_reg and obj_reg must be loaded up with the appropriate values. ++ // tmp_reg is optional. If it is supplied (i.e., != noreg) it will ++ // be killed; if not supplied, push/pop will be used internally to ++ // allocate a temporary (inefficient, avoid if possible). ++ // Optional slow case is for implementations (interpreter and C1) which branch to ++ // slow case directly. Leaves condition codes set for C2's Fast_Lock node. ++ // Returns offset of first potentially-faulting instruction for null ++ // check info (currently consumed only by C1). If ++ // swap_reg_contains_mark is true then returns -1 as it is assumed ++ // the calling code has already passed any potential faults. ++ int biased_locking_enter(Register lock_reg, Register obj_reg, ++ Register swap_reg, Register tmp_reg, ++ bool swap_reg_contains_mark, ++ Label& done, Label* slow_case = NULL, ++ BiasedLockingCounters* counters = NULL); ++ void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); ++#ifdef COMPILER2 ++ void fast_lock(Register obj, Register box, Register tmp, Register scr); ++ void fast_unlock(Register obj, Register box, Register tmp); ++#endif ++ ++ ++ // Arithmetics ++ // Regular vs. 
d* versions ++ inline void addu_long(Register rd, Register rs, Register rt) { ++ addl(rd, rs, rt); ++ } ++ ++ inline void addu_long(Register rd, Register rs, long imm32_64) { ++ add_simm16(rd, rs, imm32_64); ++ } ++ ++ void round_to(Register reg, int modulus) { ++ assert_different_registers(reg, AT); ++ increment(reg, modulus - 1); ++ move(AT, - modulus); ++ and_reg(reg, reg, AT); ++ } ++ ++ // the following two might use the AT register; be sure you have no meaningful data in AT before you call them ++ void increment(Register reg, int imm); ++ void decrement(Register reg, int imm); ++ ++ void shl(Register reg, int sa) { slll(reg, reg, sa); } ++ void shr(Register reg, int sa) { srll(reg, reg, sa); } ++ void sar(Register reg, int sa) { sral(reg, reg, sa); } ++ // Helper functions for statistics gathering. ++ void atomic_inc32(address counter_addr, int inc, Register tmp_reg1, Register tmp_reg2); ++ ++ // Calls ++ void call(address entry); ++ void call(Register reg); ++ void call(address entry, relocInfo::relocType rtype); ++ void call(address entry, RelocationHolder& rh); ++ // Emit the CompiledIC call idiom ++ void ic_call(address entry); ++ ++ // Jumps ++ void jmp(address entry); ++ void jmp(Register reg); ++ void jmp(address entry, relocInfo::relocType rtype); ++ void jmp_far(Label& L); // always long jumps ++ ++ /* branches may exceed 16-bit offset */ ++ void b_far(address entry); ++ void b_far(Label& L); ++ ++ // For C2 to support long branches ++ void beq_long (Register rs, Register rt, Label& L); ++ void bne_long (Register rs, Register rt, Label& L); ++ void bc1t_long (Label& L); ++ void bc1f_long (Label& L); ++ ++ void patchable_call(address target); ++ void patchable_call_setfpec1(address target); ++ void general_call(address target); ++ ++ void patchable_jump(address target); ++ void general_jump(address target); ++ ++ static int insts_for_patchable_call(address target); ++ static int insts_for_general_call(address target); ++ ++ static int
insts_for_patchable_jump(address target); ++ static int insts_for_general_jump(address target); ++ ++ // Floating ++ // Data ++ ++ // Argument ops ++ inline void store_int_argument(Register s, Argument &a) { ++ if(a.is_Register()) { ++ move(a.as_Register(), s); ++ } else { ++ stw(s, a.as_caller_address()); ++ } ++ } ++ ++ inline void store_long_argument(Register s, Argument &a) { ++ Argument a1 = a.successor(); ++ if(a.is_Register() && a1.is_Register()) { ++ move(a.as_Register(), s); ++ move(a.as_Register(), s); ++ } else { ++ stl(s, a.as_caller_address()); ++ } ++ } ++ ++ inline void store_float_argument(FloatRegister s, Argument &a) { ++ if(a.is_Register()) { ++ fmovs(a.as_FloatRegister(), s); ++ } else { ++ fsts(s, a.as_caller_address()); ++ } ++ } ++ ++ inline void store_double_argument(FloatRegister s, Argument &a) { ++ if(a.is_Register()) { ++ fmovd(a.as_FloatRegister(), s); ++ } else { ++ fstd(s, a.as_caller_address()); ++ } ++ } ++ ++ inline void store_ptr_argument(Register s, Argument &a) { ++ if(a.is_Register()) { ++ move(a.as_Register(), s); ++ } else { ++ st_ptr(s, a.as_caller_address()); ++ } ++ } ++ ++ // Load and store values by size and signed-ness ++ void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); ++ void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); ++ ++ inline void ld_ptr(Register rt, Address a){ ++ ldl(rt, a.base(), a.disp()); ++ } ++ inline void ld_ptr(Register rt, Register base, int offset16){ ++ ldl(rt, base, offset16); ++ } ++ inline void st_ptr(Register rt, Address a){ ++ stl(rt, a.base(), a.disp()); ++ } ++ inline void st_ptr(Register rt, Register base, int offset16) { ++ stl(rt, base, offset16); ++ } ++ ++ void ld_ptr(Register rt, Register offset, Register base); ++ void st_ptr(Register rt, Register offset, Register base); ++ ++ inline void ld_long(Register rt, Register base, int offset16); ++ inline void st_long(Register rt, 
Register base, int offset16); ++ inline void ld_long(Register rt, Address a); ++ inline void st_long(Register rt, Address a); ++ void ld_long(Register rt, Register offset, Register base); ++ void st_long(Register rt, Register offset, Register base); ++ ++ // swap the two bytes of the low 16-bit halfword ++ // this directive will use AT, be sure the high 16-bit of reg is zero ++ void hswap(Register reg); ++ void huswap(Register reg); ++ ++ // convert big endian integer to little endian integer ++ void swap(Register reg); ++ void saveTRegisters(); ++ void restoreTRegisters(); ++ ++ // implement the x86 instruction semantic ++ // if c_reg == *dest then *dest <= x_reg ++ // else c_reg <= *dest ++ // the AT register indicates if the xchg occurred: 1 if exchanged, else 0 ++ void cmpxchg(Register x_reg, Address dest, Register c_reg); ++ void cmpxchg32(Register x_reg, Address dest, Register c_reg); ++ ++ void extend_sign(Register rh, Register rl) { stop("extend_sign"); } ++ void push (Register reg) { subl(SP, SP, 8); stl (reg, SP, 0); } ++ void push (FloatRegister reg) { subl(SP, SP, 8); fstd(reg, SP, 0); } ++ void pop (Register reg); ++ void pop (FloatRegister reg); ++ void pop () { addl(SP, SP, 8); } ++ void pop2 () { addl(SP, SP, 16); } ++ void push2(Register reg1, Register reg2); ++ void pop2 (Register reg1, Register reg2); ++ void dpush (Register reg) { subl(SP, SP, 8); stl (reg, SP, 0); } ++ void dpop (Register reg) { ldl (reg, SP, 0); addl(SP, SP, 8); } ++ //we need two functions to save and restore general registers ++ void pushad(); ++ void popad(); ++ void pushad_except_RA(); ++ void popad_except_RA(); ++ ++ //move a 32-bit immediate to Register ++ void move(Register reg, int imm32) { li32(reg, imm32); } ++ void li (Register rd, long imm); ++ void li (Register rd, address addr) { li(rd, (long)addr); } ++ //replace move(Register reg, int imm) ++ void boundary_test(FloatRegister ft, Register res); ++ void set64(Register d, jlong value); ++ static int insts_for_set64(jlong value); ++ ++ 
void patchable_set48(Register d, jlong value); ++ ++ static bool reachable_from_cache(address target); ++ ++ ++ void dli(Register rd, long imm) { li(rd, imm); } ++ void li64(Register rd, long imm); ++ void li48(Register rd, long imm); ++ ++ void move(Register rd, Register rs) { if (rs != rd) or_ins(rd, R0, rs); } ++ void move_u32(Register rd, Register rs) { if(rs != rd) or_ins(rd, R0, rs); } ++ void dmove(Register rd, Register rs) { if (rs != rd) or_ins(rd, R0, rs); } ++ void mov_metadata(Register dst, Metadata* obj); ++ void mov_metadata(Address dst, Metadata* obj); ++ ++ void store_for_type_by_register(Register src_reg, Register tmp_reg, int disp, BasicType type, bool wide); ++ void store_for_type_by_register(FloatRegister src_reg, Register tmp_reg, int disp, BasicType type); ++ void store_for_type(Register src_reg, Address addr, BasicType type = T_INT, bool wide = false); ++ void store_for_type(FloatRegister src_reg, Address addr, BasicType type = T_INT); ++ void load_for_type_by_register(Register dst_reg, Register tmp_reg, int disp, BasicType type, bool wide); ++ void load_for_type_by_register(FloatRegister dst_reg, Register tmp_reg, int disp, BasicType type); ++ int load_for_type(Register dst_reg, Address addr, BasicType type = T_INT, bool wide = false); ++ int load_for_type(FloatRegister dst_reg, Address addr, BasicType type = T_INT); ++ ++#ifndef PRODUCT ++ static void pd_print_patched_instruction(address branch) { ++ jint stub_inst = *(jint*) branch; ++ print_instruction(stub_inst); ++ ::tty->print("%s", " (unresolved)"); ++ } ++#endif ++ ++ void empty_FPU_stack(){/*need implemented*/}; ++ ++ ++ Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); ++ ++#undef VIRTUAL ++ ++}; ++ ++/** ++ * class SkipIfEqual: ++ * ++ * Instantiating this class will result in assembly code being output that will ++ * jump around any code emitted between the creation of the instance and it's ++ * automatic destruction at the end of a scope block, 
depending on the value of ++ * the flag passed to the constructor, which will be checked at run-time. ++ */ ++class SkipIfEqual { ++ private: ++ MacroAssembler* _masm; ++ Label _label; ++ ++ public: ++ SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); ++ ~SkipIfEqual(); ++}; ++ ++class SizedScope { ++private: ++ int _size; ++ MacroAssembler* _masm; ++ address _start; ++public: ++ SizedScope(MacroAssembler* masm, int size) { ++ _masm = masm; ++ _size = size; ++ _start = _masm->pc(); ++ } ++ ~SizedScope() { ++ if (_masm->pc() - _start > _size) Unimplemented(); ++ while (_masm->pc() - _start < _size) _masm->nop(); ++ } ++}; ++#ifdef ASSERT ++inline bool AbstractAssembler::pd_check_instruction_mark() { return true; } ++#endif ++ ++#endif // CPU_SW64_VM_MACROASSEMBLER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.inline.hpp afu8u/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.inline.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/macroAssembler_sw64.inline.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,33 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_MACROASSEMBLER_SW64_INLINE_HPP ++#define CPU_SW64_VM_MACROASSEMBLER_SW64_INLINE_HPP ++ ++#include "asm/assembler.inline.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/codeBuffer.hpp" ++#include "code/codeCache.hpp" ++ ++#endif // CPU_SW64_VM_MACROASSEMBLER_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/metaspaceShared_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/metaspaceShared_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/metaspaceShared_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/metaspaceShared_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,114 @@ ++/* ++ * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/codeBuffer.hpp" ++#include "memory/metaspaceShared.hpp" ++ ++// Generate the self-patching vtable method: ++// ++// This method will be called (as any other Klass virtual method) with ++// the Klass itself as the first argument. Example: ++// ++// oop obj; ++// int size = obj->klass()->klass_part()->oop_size(this); ++// ++// for which the virtual method call is Klass::oop_size(); ++// ++// The dummy method is called with the Klass object as the first ++// operand, and an object as the second argument. ++// ++ ++//===================================================================== ++ ++// All of the dummy methods in the vtable are essentially identical, ++// differing only by an ordinal constant, and they bear no releationship ++// to the original method which the caller intended. Also, there needs ++// to be 'vtbl_list_size' instances of the vtable in order to ++// differentiate between the 'vtable_list_size' original Klass objects. ++ ++#define __ masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++void MetaspaceShared::generate_vtable_methods(void** vtbl_list, ++ void** vtable, ++ char** md_top, ++ char* md_end, ++ char** mc_top, ++ char* mc_end) { ++ ++ intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*); ++ *(intptr_t *)(*md_top) = vtable_bytes; ++ *md_top += sizeof(intptr_t); ++ void** dummy_vtable = (void**)*md_top; ++ *vtable = dummy_vtable; ++ *md_top += vtable_bytes; ++ ++ // Get ready to generate dummy methods. 
++ ++ CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top); ++ MacroAssembler* masm = new MacroAssembler(&cb); ++ ++ Label common_code; ++ for (int i = 0; i < vtbl_list_size; ++i) { ++ for (int j = 0; j < num_virtuals; ++j) { ++ dummy_vtable[num_virtuals * i + j] = (void*)masm->pc(); ++ ++ // Load V0 with a value indicating vtable/offset pair. ++ // -- bits[ 7..0] (8 bits) which virtual method in table? ++ // -- bits[12..8] (5 bits) which virtual method table? ++ // -- must fit in 13-bit instruction immediate field. ++ __ move(V0, (i << 8) + j); ++ __ beq(R0, common_code); ++ } ++ } ++ ++ __ BIND(common_code); ++ ++ __ zapnot(T12, V0, 0xf); ++ __ srll(T12, T12, 8); // isolate vtable identifier. ++ __ slll(T12, T12, LogBytesPerWord); ++ __ li(AT, (long)vtbl_list); ++ __ addl(T12, AT, T12); ++ __ ldl(T12, T12, 0); // get correct vtable address. ++ __ stl(T12, A0, 0); // update vtable pointer. ++ ++ __ and_imm8(V0, V0, 0x00ff); // isolate vtable method index ++ __ slll(V0, V0, LogBytesPerWord); ++ __ addl(T12, T12, V0); ++ __ ldl(T12, T12, 0); // address of real method pointer. ++ __ jmp(T12); // get real method pointer. ++ ++ __ flush(); ++ ++ *mc_top = (char*)__ pc(); ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/methodHandles_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/methodHandles_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/methodHandles_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/methodHandles_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,549 @@ ++/* ++ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "memory/allocation.inline.hpp" ++#include "prims/methodHandles.hpp" ++ ++#define __ _masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#define STOP(error) stop(error) ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line); } ++#define STOP(error) block_comment(error); __ stop(error) ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) { ++ if (VerifyMethodHandles) ++ verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), ++ "MH argument is a Class"); ++ __ ldl(klass_reg, Address(klass_reg, java_lang_Class::klass_offset_in_bytes())); ++} ++ ++#ifdef ASSERT ++static int check_nonzero(const char* xname, int x) { ++ assert(x != 0, err_msg("%s should be nonzero", xname)); ++ return x; ++} ++#define NONZERO(x) check_nonzero(#x, x) ++#else //ASSERT ++#define NONZERO(x) (x) ++#endif //ASSERT ++ ++#ifdef ASSERT ++void 
MethodHandles::verify_klass(MacroAssembler* _masm, ++ Register obj, SystemDictionary::WKID klass_id, ++ const char* error_message) { ++} ++ ++void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) { ++ Label L; ++ BLOCK_COMMENT("verify_ref_kind {"); ++ __ ldw_signed(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()))); ++ __ sral(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT&0x1f); ++ __ move(AT, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK); ++ __ and_reg(temp, temp, AT); ++ __ move(AT, ref_kind); ++ __ beq(temp, AT, L); ++ { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal); ++ jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind); ++ if (ref_kind == JVM_REF_invokeVirtual || ++ ref_kind == JVM_REF_invokeSpecial) ++ // could do this for all ref_kinds, but would explode assembly code size ++ trace_method_handle(_masm, buf); ++ __ STOP(buf); ++ } ++ BLOCK_COMMENT("} verify_ref_kind"); ++ __ BIND(L); ++} ++ ++#endif //ASSERT ++ ++void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, ++ bool for_compiler_entry) { ++ assert(method == Rmethod, "interpreter calling convention"); ++ ++ Label L_no_such_method; ++ __ beq(method, L_no_such_method); ++ ++ __ verify_method_ptr(method); ++ ++ if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) { ++ Label run_compiled_code; ++ // JVMTI events, such as single-stepping, are implemented partly by avoiding running ++ // compiled code in threads for which the event is enabled. Check here for ++ // interp_only_mode if these events CAN be enabled. ++ Register rthread = S2thread; ++ // interp_only is an int, on little endian it is sufficient to test the byte only ++ // Is a cmpl faster? 
++ __ ldbu(AT, rthread, in_bytes(JavaThread::interp_only_mode_offset())); ++ __ beq(AT, run_compiled_code); ++ __ ldl(T12, method, in_bytes(Method::interpreter_entry_offset())); ++ __ jmp(T12); ++ __ BIND(run_compiled_code); ++ } ++ ++ const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() : ++ Method::from_interpreted_offset(); ++ __ ldl(T12, method, in_bytes(entry_offset)); ++ __ jmp(T12); ++ ++ __ BIND(L_no_such_method); ++ address wrong_method = StubRoutines::throw_AbstractMethodError_entry(); ++ __ jmp(wrong_method, relocInfo::runtime_call_type); ++} ++ ++void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm, ++ Register recv, Register method_temp, ++ Register temp2, ++ bool for_compiler_entry) { ++ BLOCK_COMMENT("jump_to_lambda_form {"); ++ // This is the initial entry point of a lazy method handle. ++ // After type checking, it picks up the invoker from the LambdaForm. ++ assert_different_registers(recv, method_temp, temp2); ++ assert(recv != noreg, "required register"); ++ assert(method_temp == Rmethod, "required register for loading method"); ++ ++ //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); }); ++ ++ // Load the invoker, as MH -> MH.form -> LF.vmentry ++ __ verify_oop(recv); ++ __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()))); ++ __ verify_oop(method_temp); ++ __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()))); ++ __ verify_oop(method_temp); ++ // the following assumes that a Method* is normally compressed in the vmtarget field: ++ __ ldl(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()))); ++ ++ if (VerifyMethodHandles && !for_compiler_entry) { ++ // make sure recv is already on stack ++ __ ldl(temp2, Address(method_temp, Method::const_offset())); ++ __ load_sized_value(temp2, ++ Address(temp2, 
ConstMethod::size_of_parameters_offset()), ++ sizeof(u2), false); ++ // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), ""); ++ Label L; ++ Address recv_addr = __ argument_address(temp2, -1); ++ __ ldl(AT, recv_addr); ++ __ beq(recv, AT, L); ++ ++ recv_addr = __ argument_address(temp2, -1); ++ __ ldl(V0, recv_addr); ++ __ STOP("receiver not on stack"); ++ __ BIND(L); ++ } ++ ++ jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry); ++ BLOCK_COMMENT("} jump_to_lambda_form"); ++} ++ ++ ++// Code generation ++address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm, ++ vmIntrinsics::ID iid) { ++ const bool not_for_compiler_entry = false; // this is the interpreter entry ++ assert(is_signature_polymorphic(iid), "expected invoke iid"); ++ if (iid == vmIntrinsics::_invokeGeneric || ++ iid == vmIntrinsics::_compiledLambdaForm) { ++ // Perhaps surprisingly, the symbolic references visible to Java are not directly used. ++ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod. ++ // They all allow an appendix argument. ++ __ stop("empty stubs make SG sick"); ++ return NULL; ++ } ++ ++ // Rmethod: Method* ++ // T9: argument locator (parameter slot count, added to sp) ++ // S7: used as temp to hold mh or receiver ++ Register t12_argp = T12; // argument list ptr, live on error paths ++ Register s1_mh = S1; // MH receiver; dies quickly and is recycled ++ Register rm_method = Rmethod; // eventual target of this invocation ++ ++ // here's where control starts out: ++ __ align(CodeEntryAlignment); ++ address entry_point = __ pc(); ++ ++ if (VerifyMethodHandles) { ++ Label L; ++ BLOCK_COMMENT("verify_intrinsic_id {"); ++ __ ldbu(AT, rm_method, Method::intrinsic_id_offset_in_bytes()); ++ guarantee(Assembler::is_simm16(iid), "Oops, iid is not simm16! 
Change the instructions."); ++ __ add_simm16(AT, AT, -1 * (int) iid); ++ __ beq(AT, L); ++ if (iid == vmIntrinsics::_linkToVirtual || ++ iid == vmIntrinsics::_linkToSpecial) { ++ // could do this for all kinds, but would explode assembly code size ++ trace_method_handle(_masm, "bad Method*::intrinsic_id"); ++ } ++ __ STOP("bad Method*::intrinsic_id"); ++ __ BIND(L); ++ BLOCK_COMMENT("} verify_intrinsic_id"); ++ } ++ ++ // First task: Find out how big the argument list is. ++ Address t12_first_arg_addr; ++ int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid); ++ assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic"); ++ if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) { ++ __ ldl(t12_argp, Address(rm_method, Method::const_offset())); ++ __ load_sized_value(t12_argp, ++ Address(t12_argp, ConstMethod::size_of_parameters_offset()), ++ sizeof(u2), false); ++ // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), ""); ++ t12_first_arg_addr = __ argument_address(t12_argp, -1); ++ } else { ++ DEBUG_ONLY(t12_argp = noreg); ++ } ++ ++ if (!is_signature_polymorphic_static(iid)) { ++ __ ldl(s1_mh, t12_first_arg_addr); ++ DEBUG_ONLY(t12_argp = noreg); ++ } ++ ++ // t12_first_arg_addr is live! ++ ++ trace_method_handle_interpreter_entry(_masm, iid); ++ ++ if (iid == vmIntrinsics::_invokeBasic) { ++ generate_method_handle_dispatch(_masm, iid, s1_mh, noreg, not_for_compiler_entry); ++ ++ } else { ++ // Adjust argument list by popping the trailing MemberName argument. ++ Register r_recv = noreg; ++ if (MethodHandles::ref_kind_has_receiver(ref_kind)) { ++ // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack. 
++ __ ldl(r_recv = T2, t12_first_arg_addr); ++ } ++ DEBUG_ONLY(t12_argp = noreg); ++ Register rm_member = rm_method; // MemberName ptr; incoming method ptr is dead now ++ __ pop(rm_member); // extract last argument ++ generate_method_handle_dispatch(_masm, iid, r_recv, rm_member, not_for_compiler_entry); ++ } ++ ++ return entry_point; ++} ++ ++void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm, ++ vmIntrinsics::ID iid, ++ Register receiver_reg, ++ Register member_reg, ++ bool for_compiler_entry) { ++ assert(is_signature_polymorphic(iid), "expected invoke iid"); ++ Register rm_method = Rmethod; // eventual target of this invocation ++ // temps used in this code are not used in *either* compiled or interpreted calling sequences ++ Register j_rarg0 = A1; ++ Register j_rarg1 = A2; ++ Register j_rarg2 = A3; ++ Register j_rarg3 = A4; ++ Register j_rarg4 = A5; ++ Register j_rarg5 = A0; ++ ++ Register temp1 = T11; ++ Register temp2 = T12; ++ Register temp3 = V0; ++ if (for_compiler_entry) { ++ assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment"); ++ assert_different_registers(temp1, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); ++ assert_different_registers(temp2, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); ++ assert_different_registers(temp3, j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5); ++ } ++ else { ++ assert_different_registers(temp1, temp2, temp3, saved_last_sp_register()); // don't trash lastSP ++ } ++ assert_different_registers(temp1, temp2, temp3, receiver_reg); ++ assert_different_registers(temp1, temp2, temp3, member_reg); ++ ++ if (iid == vmIntrinsics::_invokeBasic) { ++ // indirect through MH.form.vmentry.vmtarget ++ jump_to_lambda_form(_masm, receiver_reg, rm_method, temp1, for_compiler_entry); ++ ++ } else { ++ // The method is a member invoker used by direct method handles. 
++ if (VerifyMethodHandles) { ++ // make sure the trailing argument really is a MemberName (caller responsibility) ++ verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName), ++ "MemberName required for invokeVirtual etc."); ++ } ++ ++ Address member_clazz( member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes())); ++ Address member_vmindex( member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes())); ++ Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())); ++ ++ Register temp1_recv_klass = temp1; ++ if (iid != vmIntrinsics::_linkToStatic) { ++ __ verify_oop(receiver_reg); ++ if (iid == vmIntrinsics::_linkToSpecial) { ++ // Don't actually load the klass; just null-check the receiver. ++ __ null_check(receiver_reg); ++ } else { ++ // load receiver klass itself ++ __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes()); ++ __ load_klass(temp1_recv_klass, receiver_reg); ++ __ verify_klass_ptr(temp1_recv_klass); ++ } ++ BLOCK_COMMENT("check_receiver {"); ++ // The receiver for the MemberName must be in receiver_reg. ++ // Check the receiver against the MemberName.clazz ++ if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) { ++ // Did not load it above... ++ __ load_klass(temp1_recv_klass, receiver_reg); ++ __ verify_klass_ptr(temp1_recv_klass); ++ } ++ if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) { ++ Label L_ok; ++ Register temp2_defc = temp2; ++ __ load_heap_oop(temp2_defc, member_clazz); ++ load_klass_from_Class(_masm, temp2_defc); ++ __ verify_klass_ptr(temp2_defc); ++ __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok); ++ // If we get here, the type check failed! 
++ __ STOP("receiver class disagrees with MemberName.clazz"); ++ __ BIND(L_ok); ++ } ++ BLOCK_COMMENT("} check_receiver"); ++ } ++ if (iid == vmIntrinsics::_linkToSpecial || ++ iid == vmIntrinsics::_linkToStatic) { ++ DEBUG_ONLY(temp1_recv_klass = noreg); // these guys didn't load the recv_klass ++ } ++ ++ // Live registers at this point: ++ // member_reg - MemberName that was the trailing argument ++ // temp1_recv_klass - klass of stacked receiver, if needed ++ ++ Label L_incompatible_class_change_error; ++ switch (iid) { ++ case vmIntrinsics::_linkToSpecial: ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3); ++ } ++ __ ldl(rm_method, member_vmtarget); ++ break; ++ ++ case vmIntrinsics::_linkToStatic: ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3); ++ } ++ __ ldl(rm_method, member_vmtarget); ++ break; ++ ++ case vmIntrinsics::_linkToVirtual: ++ { ++ // same as TemplateTable::invokevirtual, ++ // minus the CP setup and profiling: ++ ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3); ++ } ++ ++ // pick out the vtable index from the MemberName, and then we can discard it: ++ Register temp2_index = temp2; ++ __ ldl(temp2_index, member_vmindex); ++ ++ if (VerifyMethodHandles) { ++ Label L_index_ok; ++ __ cmplt(AT, R0, temp2_index); ++ __ bne(AT, L_index_ok); ++ __ STOP("no virtual index"); ++ __ BIND(L_index_ok); ++ } ++ ++ // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget ++ // at this point. And VerifyMethodHandles has already checked clazz, if needed. 
++ ++ // get target Method* & entry point ++ __ lookup_virtual_method(temp1_recv_klass, temp2_index, rm_method); ++ break; ++ } ++ ++ case vmIntrinsics::_linkToInterface: ++ { ++ // same as TemplateTable::invokeinterface ++ // (minus the CP setup and profiling, with different argument motion) ++ if (VerifyMethodHandles) { ++ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); ++ } ++ ++ Register temp3_intf = temp3; ++ __ load_heap_oop(temp3_intf, member_clazz); ++ load_klass_from_Class(_masm, temp3_intf); ++ __ verify_klass_ptr(temp3_intf); ++ ++ Register rm_index = rm_method; ++ __ ldl(rm_index, member_vmindex); ++ if (VerifyMethodHandles) { ++ Label L; ++ __ cmplt(AT, rm_index, R0); ++ __ beq(AT, L); ++ __ STOP("invalid vtable index for MH.invokeInterface"); ++ __ BIND(L); ++ } ++ ++ // given intf, index, and recv klass, dispatch to the implementation method ++ __ lookup_interface_method(temp1_recv_klass, temp3_intf, ++ // note: next two args must be the same: ++ rm_index, rm_method, ++ temp2, ++ L_incompatible_class_change_error); ++ break; ++ } ++ ++ default: ++ fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); ++ break; ++ } ++ ++ // Live at this point: ++ // rm_method ++ ++ // After figuring out which concrete method to call, jump into it. ++ // Note that this works in the interpreter with no data motion. ++ // But the compiled version will require that r_recv be shifted out. 
++ __ verify_method_ptr(rm_method); ++ jump_from_method_handle(_masm, rm_method, temp1, for_compiler_entry); ++ ++ if (iid == vmIntrinsics::_linkToInterface) { ++ __ BIND(L_incompatible_class_change_error); ++ address icce_entry= StubRoutines::throw_IncompatibleClassChangeError_entry(); ++ __ jmp(icce_entry, relocInfo::runtime_call_type); ++ } ++ } ++} ++ ++#ifndef PRODUCT ++void trace_method_handle_stub(const char* adaptername, ++ oop mh, ++ intptr_t* saved_regs, ++ intptr_t* entry_sp) { ++ // called as a leaf from native code: do not block the JVM! ++ bool has_mh = (strstr(adaptername, "/static") == NULL && ++ strstr(adaptername, "linkTo") == NULL); // static linkers don't have MH ++ const char* mh_reg_name = has_mh ? "s1_mh" : "s1"; ++ tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, ++ adaptername, mh_reg_name, ++ (void *)mh, entry_sp); ++ ++ if (Verbose) { ++ tty->print_cr("Registers:"); ++ const int saved_regs_count = RegisterImpl::number_of_registers; ++ for (int i = 0; i < saved_regs_count; i++) { ++ Register r = as_Register(i); ++ // The registers are stored in reverse order on the stack (by pusha). ++ tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]); ++ if ((i + 1) % 4 == 0) { ++ tty->cr(); ++ } else { ++ tty->print(", "); ++ } ++ } ++ tty->cr(); ++ ++ { ++ // dumping last frame with frame::describe ++ ++ JavaThread* p = JavaThread::active(); ++ ++ ResourceMark rm; ++ PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here ++ FrameValues values; ++ ++ // Note: We want to allow trace_method_handle from any call site. ++ // While trace_method_handle creates a frame, it may be entered ++ // without a PC on the stack top (e.g. not just after a call). ++ // Walking that frame could lead to failures due to that invalid PC. 
++ // => carefully detect that frame when doing the stack walking ++ ++ // Current C frame ++ frame cur_frame = os::current_frame(); ++ ++ // Robust search of trace_calling_frame (independant of inlining). ++ // Assumes saved_regs comes from a pusha in the trace_calling_frame. ++ assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?"); ++ frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame); ++ while (trace_calling_frame.fp() < saved_regs) { ++ trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame); ++ } ++ ++ // safely create a frame and call frame::describe ++ intptr_t *dump_sp = trace_calling_frame.sender_sp(); ++ intptr_t *dump_fp = trace_calling_frame.link(); ++ ++ bool walkable = has_mh; // whether the traced frame shoud be walkable ++ ++ if (walkable) { ++ // The previous definition of walkable may have to be refined ++ // if new call sites cause the next frame constructor to start ++ // failing. Alternatively, frame constructors could be ++ // modified to support the current or future non walkable ++ // frames (but this is more intrusive and is not considered as ++ // part of this RFE, which will instead use a simpler output). 
++ frame dump_frame = frame(dump_sp, dump_fp); ++ dump_frame.describe(values, 1); ++ } else { ++ // Stack may not be walkable (invalid PC above FP): ++ // Add descriptions without building a Java frame to avoid issues ++ values.describe(-1, dump_fp, "fp for #1 "); ++ values.describe(-1, dump_sp, "sp for #1"); ++ } ++ values.describe(-1, entry_sp, "raw top of stack"); ++ ++ tty->print_cr("Stack layout:"); ++ values.print(p); ++ } ++ if (has_mh && mh->is_oop()) { ++ mh->print(); ++ if (java_lang_invoke_MethodHandle::is_instance(mh)) { ++ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) ++ java_lang_invoke_MethodHandle::form(mh)->print(); ++ } ++ } ++ } ++} ++ ++// The stub wraps the arguments in a struct on the stack to avoid ++// dealing with the different calling conventions for passing 6 ++// arguments. ++struct MethodHandleStubArguments { ++ const char* adaptername; ++ oopDesc* mh; ++ intptr_t* saved_regs; ++ intptr_t* entry_sp; ++}; ++void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) { ++ trace_method_handle_stub(args->adaptername, ++ args->mh, ++ args->saved_regs, ++ args->entry_sp); ++} ++ ++void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) { ++} ++#endif //PRODUCT +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/methodHandles_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/methodHandles_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/methodHandles_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/methodHandles_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,61 @@ ++/* ++ * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++// Platform-specific definitions for method handles. ++// These definitions are inlined into class MethodHandles. ++ ++// Adapters ++enum /* platform_dependent_constants */ { ++ adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000)) ++}; ++ ++// Additional helper methods for MethodHandles code generation: ++public: ++ static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg); ++ ++ static void verify_klass(MacroAssembler* _masm, ++ Register obj, SystemDictionary::WKID klass_id, ++ const char* error_message = "wrong klass") NOT_DEBUG_RETURN; ++ ++ static void verify_method_handle(MacroAssembler* _masm, Register mh_reg) { ++ verify_klass(_masm, mh_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MethodHandle), ++ "reference is a MH"); ++ } ++ ++ static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN; ++ ++ // Similar to InterpreterMacroAssembler::jump_from_interpreted. ++ // Takes care of special dispatch from single stepping too. 
++ static void jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp, ++ bool for_compiler_entry); ++ ++ static void jump_to_lambda_form(MacroAssembler* _masm, ++ Register recv, Register method_temp, ++ Register temp2, ++ bool for_compiler_entry); ++ ++ static Register saved_last_sp_register() { ++ // Should be in sharedRuntime, not here. ++ return I29; ++ } +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/nativeInst_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/nativeInst_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/nativeInst_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/nativeInst_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,561 @@ ++/* ++ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "memory/resourceArea.hpp" ++#include "nativeInst_sw64.hpp" ++#include "oops/oop.inline.hpp" ++#include "runtime/handles.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "utilities/ostream.hpp" ++#ifdef COMPILER1 ++#include "c1/c1_Runtime1.hpp" ++#endif ++ ++#include ++ ++int NativeCall::instruction_size = 5 * BytesPerInstWord; ++int NativeCall::return_address_offset = 5 * BytesPerInstWord; ++int NativeCall::return_address_offset_long = 5 * BytesPerInstWord; ++int NativeGeneralJump::instruction_size = 5 * BytesPerInstWord; ++void NativeInstruction::wrote(int offset) { ++ ICache::invalidate_word(addr_at(offset)); ++} ++ ++void NativeInstruction::imm48_split(long imm48, int16_t &msb_l, int16_t &lsb_h, int16_t &lsb_l) { ++ int32_t lsb32 = (int32_t) ((intptr_t) imm48); ++ int32_t msb32 = (int32_t) (((intptr_t) imm48 - lsb32) >> 32); ++ ++ msb_l = (int16_t) msb32; ++ lsb_h = (lsb32 - (int16_t) lsb32) >> 16; ++ lsb_l = (int16_t) lsb32; ++ guarantee((msb_l >= 0x0 && msb_l < 0x7fff) || (msb_l == 0x7fff && lsb32 >= 0x0 && lsb32 < 0x7fff8000), "wrong number in li48 "); ++ if (lsb32 >= 0x7fff8000) ++ msb_l = msb_l + 1; ++} ++ ++void NativeInstruction::set_address(address dest) { ++ int16_t msb_l, lsb_h, lsb_l; ++ NativeInstruction::imm48_split((long) dest, msb_l, lsb_h, lsb_l); ++ if (SafePatch) { ++ if (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl)) { ++ set_long_at(8, (long) dest); ++ } else if (is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ set_long_at(4, (long) dest); ++ } else { ++ tty->print_cr("\nError!\nset_address: 0x%lx", addr_at(0)); ++ Disassembler::decode(addr_at(0) - 10 * 4, addr_at(0) + 10 * 4, tty); ++ fatal("not a call "); ++ } ++ } else { ++ OrderAccess::fence(); ++ /* li48 or 
li64 */ ++ if (is_op(int_at(0), Assembler::op_ldi) && is_op(int_at(4), Assembler::op_slll_l)) { ++ int first_word = int_at(0); ++ set_int_at(0, 0x13FFFFFF); /* .1: br .1 */ ++ set_int_at(8, (int_at(8) & 0xffff0000) | (lsb_h & 0xffff)); ++ set_int_at(12, (int_at(12) & 0xffff0000) | (lsb_l & 0xffff)); ++ set_int_at(0, (first_word & 0xffff0000) | (msb_l & 0xffff)); ++ ++ ICache::invalidate_range(addr_at(0), 16); ++ } else if (is_op(int_at(0), Assembler::op_ldih) && is_op(int_at(8), Assembler::op_slll_l)) { ++ Unimplemented(); ++ } else { ++ fatal("not a call "); ++ } ++ } ++} ++void NativeInstruction::set_long_at(int offset, long i) { ++ address addr = addr_at(offset); ++ *(long*) addr = i; ++ ICache::invalidate_range(addr, 8); ++} ++ ++static int illegal_instruction_bits = 0; ++ ++int NativeInstruction::illegal_instruction() { ++ if (illegal_instruction_bits == 0) { ++ ResourceMark rm; ++ char buf[40]; ++ CodeBuffer cbuf((address)&buf[0], 20); ++ MacroAssembler* a = new MacroAssembler(&cbuf); ++ address ia = a->pc(); ++#ifdef SW64 //ZHJ20100508 ++ a->sys_call(0x80); ++ int bits = *(int*) ia; ++ assert((Assembler::sw2_op(bits) == Assembler::op_sys_call), "bad instruction"); ++#else ++ a->brk(11); ++ int bits = *(int*) ia; ++#endif ++ illegal_instruction_bits = bits; ++ } ++ return illegal_instruction_bits; ++} ++ ++bool NativeInstruction::is_int_branch() { ++ // is it the output of MacroAssembler::fb? ++ int x = long_at(0); ++ int op = Assembler::sw2_op(x); ++ ++ if (op >= Assembler::op_beq && op <= Assembler::op_blbs) ++ return true; ++ ++ if ((op == Assembler::op_br) || (op == Assembler::op_bsr)) ++ return true; ++ ++ return false; ++} ++ ++bool NativeInstruction::is_float_branch() { ++ // is it the output of MacroAssembler::fb? 
++ int x = long_at(0); ++ int op = Assembler::sw2_op(x); ++ ++ if (op >= Assembler::op_fbeq && op <= Assembler::op_fbge) ++ return true; ++ ++ return false; ++} ++ ++ ++//------------------------------------------------------------------- ++ ++void NativeCall::verify() { ++ // make sure code pattern is actually a call instruction ++ /* li64 or li48 */ ++ int li_64 = 0; ++ int li_48 = 0; ++ if (SafePatch) { ++ if (is_op(Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi) && ++ (is_op(int_at(20), Assembler::op_call)||is_op(int_at(20), Assembler::op_jmp))) { ++ li_48 = 1; ++ } ++ if (is_op(Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl) && ++ (is_op(int_at(20), Assembler::op_call)||is_op(int_at(20), Assembler::op_jmp))) { ++ li_48 = 1; ++ } ++ ++ if (!li_64 && !li_48) { ++ tty->print_cr("NativeCall::verify addr=%lx", addr_at(0)); ++ fatal("not a call"); ++ } ++ } else { ++ // wait until the first inst is not spin any more. 
spin is 13ffffff(>0), ldi and ldih is fxxxxxxx < 0 ++ // ldw (t1, 0, a0); ++ // bgt (t1, -2); ++ __asm__ __volatile__( ++ "1: ldw $2, 0($16) \n\t" ++ " bgt $2, 1b \n\t" ++ ); ++ ++ if (is_op(Assembler::op_ldih) && ++ is_op(int_at(4), Assembler::op_ldi) && ++ is_op(int_at(8), Assembler::op_slll_l) && ++ is_op(int_at(12), Assembler::op_ldih) && ++ is_op(int_at(16), Assembler::op_ldi) && ++ is_op(int_at(24), Assembler::op_call)) { ++ li_64 = 1; ++ } ++ ++ if (is_op(Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_slll_l) && ++ is_op(int_at(8), Assembler::op_ldih) && ++ is_op(int_at(12), Assembler::op_ldi) && ++ is_op(int_at(16), Assembler::op_call)) { ++ li_48 = 1; ++ } ++ ++ if (!li_64 && !li_48) { ++ tty->print_cr("NativeCall::verify addr=%lx", addr_at(0)); ++ fatal("not a call"); ++ } ++ } ++} ++ ++address NativeCall::destination() const { ++ if (SafePatch) { ++ if (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl)) { ++ return (address) long_at(8); ++ } else if (is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ return (address) long_at(4); ++ } else { ++ tty->print_cr("\nError!\ndestination: 0x%lx", addr_at(0)); ++ Disassembler::decode(addr_at(0) - 10 * 4, addr_at(0) + 10 * 4, tty); ++ fatal("not a call "); ++ } ++ } else { ++ // wait until the first inst is not spin any more. 
spin is 13ffffff(>0), ldi and ldih is fxxxxxxx < 0 ++ // ldw (t1, 0, a0); ++ // bgt (t1, -2); ++ __asm__ __volatile__( ++ "1: ldw $2, 0($16) \n\t" ++ " bgt $2, 1b \n\t" ++ ); ++ ++ if (is_op(int_at(0), Assembler::op_ldih) && is_op(int_at(8), Assembler::op_slll_l)) { ++ /* li64 */ ++ int16_t msb_h = int_at(0)&0xffff; ++ int16_t msb_l = int_at(4)&0xffff; ++ int16_t lsb_h = int_at(12)&0xffff; ++ int16_t lsb_l = int_at(16)&0xffff; ++ ++ return (address)( ( ((intptr_t)(msb_h << 16) + (intptr_t)(msb_l)) << 32 ) + ++ ( ((intptr_t)(lsb_h << 16) + (intptr_t)(lsb_l)) ) ); ++ } ++ ++ if ( is_op(int_at(0), Assembler::op_ldi) && is_op(int_at(16), Assembler::op_call)) { ++ /* li48 */ ++ int16_t msb_l = int_at(0)&0xffff; ++ int16_t lsb_h = int_at(8)&0xffff; ++ int16_t lsb_l = int_at(12)&0xffff; ++ ++ // -1 should be 0xffff ffff ffff ffff, so we can not use low 48 bits ++ return (address) (((intptr_t) (msb_l) << 32) + ((intptr_t) (lsb_h) << 16) + (intptr_t) (lsb_l)); ++ } ++ } ++ ++ Unimplemented(); ++} ++ ++typedef void (* atomic_store64_ptr)(long *addr, int offset, long data64); ++ ++static int *buf; ++ ++static atomic_store64_ptr get_atomic_store64_func() { ++ static atomic_store64_ptr p = NULL; ++ if (p != NULL) ++ return p; ++ ++ buf = (int *)mmap(NULL, 64, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, ++ -1, 0); ++ buf[0] = 0x42110110; ++ buf[1] = 0xae500000; ++ buf[2] = 0x0bfa0001; ++ buf[3] = 0x43ff075f; /* nop */ ++ ++ p = (atomic_store64_ptr) buf; ++ return p; ++} ++ ++void NativeCall::set_destination(address dest) { ++ set_address(dest); ++} ++ ++void NativeCall::print() { ++ tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT, ++ instruction_address(), destination()); ++} ++ ++// Inserts a native call instruction at a given pc ++void NativeCall::insert(address code_pos, address entry) { ++ NativeCall *call = nativeCall_at(code_pos); ++ CodeBuffer cb(call->addr_at(0), instruction_size); ++ MacroAssembler masm(&cb); ++#define __ masm. 
++ if (SafePatch) { ++ if (__ offset() % 8 == 0) { ++ __ nop(); ++ __ br(T12, 2); ++ __ emit_int64((long) entry); ++ __ ldl(T12, T12, 0); ++ } else { ++ __ br(T12, 2); ++ __ emit_int64((long) entry); ++ __ ldl(T12, T12, 0); ++ __ nop(); ++ } ++ } else { ++ __ li48(T12, (long) entry); ++ } ++ __ call(T12); ++#undef __ ++ ++ ICache::invalidate_range(call->addr_at(0), instruction_size); ++} ++ ++//------------------------------------------------------------------- ++ ++void NativeMovConstReg::verify() { ++ /* li64 or li48 */ ++ int li_64 = 0; ++ int li_48 = 0; ++ ++ if (is_op(Assembler::op_ldih) && ++ is_op(int_at(4), Assembler::op_ldi) && ++ is_op(int_at(8), Assembler::op_slll_l) && ++ is_op(int_at(12), Assembler::op_ldih) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ li_64 = 1; ++ } ++ ++ if (is_op(Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_slll_l) && ++ is_op(int_at(8), Assembler::op_ldih) && ++ is_op(int_at(12), Assembler::op_ldi)) { ++ li_48 = 1; ++ } ++ ++ if (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl)) { ++ li_48 = 1; ++ } ++ if (is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ li_48 = 1; ++ } ++ ++ if (!li_64 && !li_48) { ++ fatal("not a mov reg, imm64/imm48"); ++ } ++} ++ ++void NativeMovConstReg::print() { ++ tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT, ++ instruction_address(), data()); ++} ++ ++intptr_t NativeMovConstReg::data() const { ++ if (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl)) { ++ return (intptr_t) long_at(8); ++ } ++ if (is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ return (intptr_t) long_at(4); ++ } ++ if (is_op(int_at(0), Assembler::op_ldih) && is_op(long_at(8), Assembler::op_slll_l)) { ++ /* li64 */ ++ int16_t 
msb_h = int_at(0)&0xffff; ++ int16_t msb_l = int_at(4)&0xffff; ++ int16_t lsb_h = int_at(12)&0xffff; ++ int16_t lsb_l = int_at(16)&0xffff; ++ ++ return ( (((intptr_t) (msb_h << 16) + (intptr_t) (msb_l)) << 32) + ((intptr_t) (lsb_h << 16) + (intptr_t) (lsb_l))); ++ ++ } else if (is_op(int_at(0), Assembler::op_ldi) && is_op(long_at(4), Assembler::op_slll_l)) { ++ /* li48 */ ++ int16_t msb_l = int_at(0)&0xffff; ++ int16_t lsb_h = int_at(8)&0xffff; ++ int16_t lsb_l = int_at(12)&0xffff; ++ ++ // -1 should be 0xffff ffff ffff ffff, so we can not use low 48 bits ++ return ((intptr_t) (msb_l) << 32) + ((intptr_t) (lsb_h) << 16) + (intptr_t) (lsb_l); ++ ++ } else { ++ fatal(" fatal in NativeMovConstReg::data() "); ++ } ++} ++ ++void NativeMovConstReg::set_data(intptr_t x) { ++ // for fix_oop_relocation,sw64 set oop use li48 ++ OrderAccess::fence(); ++ int16_t msb_l, lsb_h, lsb_l; ++ NativeInstruction::imm48_split((long)x, msb_l, lsb_h, lsb_l); ++ /* li48 or li64 */ ++ if (is_op(int_at(0), Assembler::op_ldi) && is_op(int_at(4), Assembler::op_slll_l)) { ++ set_int_at(0, (int_at(0) & 0xffff0000) | (msb_l & 0xffff)); ++ set_int_at(8, (int_at(8) & 0xffff0000) | (lsb_h & 0xffff)); ++ set_int_at(12, (int_at(12) & 0xffff0000) | (lsb_l & 0xffff)); ++ ICache::invalidate_range(addr_at(0), 16); ++ } else if (is_op(int_at(0), Assembler::op_ldih) && is_op(int_at(8), Assembler::op_slll_l)) { ++ Unimplemented(); ++ } else { ++ fatal("not a call "); ++ } ++} ++ ++//------------------------------------------------------------------- ++ ++int NativeMovRegMem::offset() const { ++ Unimplemented(); ++} ++ ++void NativeMovRegMem::set_offset(int x) { ++ Unimplemented(); ++} ++ ++void NativeMovRegMem::verify() { ++ Unimplemented(); ++} ++ ++void NativeMovRegMem::print() { ++ tty->print_cr("0x%x: mov reg, [reg + %x]", instruction_address(), offset()); ++} ++ ++bool NativeInstruction::is_sigill_zombie_not_entrant() { ++ return uint_at(0) == NativeIllegalInstruction::instruction_code; ++} ++ ++void 
NativeIllegalInstruction::insert(address code_pos) { ++ *(juint*)code_pos = instruction_code; ++ ICache::invalidate_range(code_pos, instruction_size); ++} ++ ++void NativeGeneralJump::verify() { ++ assert(((NativeInstruction *)this)->is_jump() || ++ ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction"); ++} ++ ++void NativeGeneralJump::set_jump_destination(address dest) { ++ set_address(dest); ++} ++ ++address NativeGeneralJump::jump_destination() { ++ if (is_short()) { ++ Unimplemented(); ++ } ++ ++ if (SafePatch) { ++ if (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl)) { ++ return (address) long_at(8); ++ } else if (is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ return (address) long_at(4); ++ } else { ++ tty->print_cr("\nError!\nNativeGeneralJump destination: 0x%lx", addr_at(0)); ++ Disassembler::decode(addr_at(0) - 10 * 4, addr_at(0) + 10 * 4, tty); ++ fatal(" fatal in NativeGeneralJump::jump_destination() "); ++ ShouldNotReachHere(); ++ } ++ } else { ++ /* li64 */ ++ if (is_op(int_at(0), Assembler::op_ldih) && is_op(long_at(8), Assembler::op_slll_l)) { ++ int16_t msb_h = int_at(0)&0xffff; ++ int16_t msb_l = int_at(4)&0xffff; ++ int16_t lsb_h = int_at(12)&0xffff; ++ int16_t lsb_l = int_at(16)&0xffff; ++ return (address) ((((intptr_t) (msb_h << 16)+(intptr_t) msb_l) << 32)+((intptr_t) (lsb_h << 16)+(intptr_t) lsb_l)); ++ } ++ ++ /* li48 */ ++ if (is_op(int_at(0), Assembler::op_ldi) && is_op(long_at(4), Assembler::op_slll_l)) { ++ int16_t msb_l = int_at(0)&0xffff; ++ int16_t lsb_h = int_at(8)&0xffff; ++ int16_t lsb_l = int_at(12)&0xffff; ++ return (address) (((intptr_t) (msb_l) << 32)+((intptr_t) (lsb_h << 16)+(intptr_t) lsb_l)); ++ } ++ } ++ ++ Unimplemented(); ++} ++ ++/* Must ensure atomicity */ ++void NativeGeneralJump::patch_verified_entry(address entry, address verified_entry, 
address dest) { ++ // ensure 100% atomicity. ++ // The destination is fixed and can be cached in JavaThread. ++ // ++// guarantee(!os::is_MP() || (((long)verified_entry % BytesPerWord) == 0), "destination must be aligned"); ++// bool is_aligned = !os::is_MP() || (((long)verified_entry % BytesPerWord) == 0); ++// ++// if (is_aligned) { ++// int code_buffer[4]; ++// ++// CodeBuffer cb((address)code_buffer, instruction_size); ++// MacroAssembler masm(&cb); ++//#define __ masm. ++// __ ldl(T12, S2thread, in_bytes(JavaThread::handle_wrong_method_stub_offset())); ++// __ jmp(T12); ++// __ nop(); ++// __ nop(); ++// ++// atomic_store64_ptr func = get_atomic_store64_func(); ++// (*func)((long *)verified_entry, 0, *(long *)&code_buffer[0]); ++// } else { ++ // We use an illegal instruction for marking a method as ++ // not_entrant or zombie ++ NativeIllegalInstruction::insert(verified_entry); ++// } ++ ICache::invalidate_range(verified_entry, instruction_size); ++} ++ ++bool NativeInstruction::is_jump() { ++ if (SafePatch) { ++ if (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl)) { ++ return true; ++ } else if (is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi)) { ++ return true; ++ } else { ++ tty->print_cr("\nError!\nNativeGeneralJump destination"); ++ ShouldNotReachHere(); ++ } ++ } else { ++ if (is_op(Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_slll_l) && ++ is_op(int_at(8), Assembler::op_ldih) && ++ is_op(int_at(12), Assembler::op_ldi)) ++ return true; ++ } ++ ++ if (is_op(Assembler::op_ldih) && ++ is_op(int_at(4), Assembler::op_ldi) && ++ is_op(int_at(8), Assembler::op_slll_l) && ++ is_op(int_at(12), Assembler::op_ldih) && ++ is_op(int_at(16), Assembler::op_ldi)) ++ return true; ++ ++ // Unimplemented(); ++ return false; ++} ++ ++bool NativeInstruction::is_safepoint_poll() { ++ ++ //refer to 
relocInfo::poll_return_type in sw64.ad ++ int x = long_at(0); ++ int op = Assembler::sw2_op(x); ++ if (op != Assembler::op_ldw) return false; ++ ++ Register ra = Assembler::sw2_ra(x); ++ if (ra != AT) return false; ++ ++ int mdisp = Assembler::sw2_mdisp(x); ++ if (mdisp != 0) return false; ++ ++ return true; ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/nativeInst_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/nativeInst_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/nativeInst_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/nativeInst_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,501 @@ ++/* ++ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_NATIVEINST_SW64_HPP ++#define CPU_SW64_VM_NATIVEINST_SW64_HPP ++ ++#include "asm/assembler.hpp" ++#include "memory/allocation.hpp" ++#include "runtime/icache.hpp" ++#include "runtime/os.hpp" ++#include "utilities/top.hpp" ++ ++// We have interfaces for the following instructions: ++// - NativeInstruction ++// - - NativeCall ++// - - NativeMovConstReg ++// - - NativeMovConstRegPatching ++// - - NativeMovRegMem ++// - - NativeMovRegMemPatching ++// - - NativeJump ++// - - NativeIllegalOpCode ++// - - NativeGeneralJump ++// - - NativeReturn ++// - - NativeReturnX (return with argument) ++// - - NativePushConst ++// - - NativeTstRegMem ++ ++// The base class for different kinds of native instruction abstractions. ++// Provides the primitive operations to manipulate code relative to this. ++ ++class NativeInstruction VALUE_OBJ_CLASS_SPEC { ++ friend class Relocation; ++ ++ public: ++ enum sw64_specific_constants { ++ nop_instruction_code = 0, ++ nop_instruction_size = 4 ++ }; ++ static void imm48_split(long imm48, int16_t &msb_l, int16_t &lsb_h, int16_t &lsb_l); ++ ++ bool is_nop() { return is_op(Assembler::op_ldi) && Assembler::is_ra(R0); } ++ bool is_memb() { return is_op(Assembler::op_memb); } ++ inline bool is_call(); ++ inline bool is_illegal(); ++ inline bool is_return(); ++ bool is_jump(); ++ inline bool is_cond_jump(); ++ bool is_safepoint_poll(); ++ ++ //sw64 has no instruction to generate a illegal instrucion exception ++ //we define ours: break 11 ++ static int illegal_instruction(); ++ ++ bool is_int_branch(); ++ bool is_float_branch(); ++ ++ //We use an illegal instruction for marking a method as not_entrant or zombie. 
++ bool is_sigill_zombie_not_entrant(); ++ ++ protected: ++ address addr_at(int offset) const { return address(this) + offset; } ++ address instruction_address() const { return addr_at(0); } ++ address next_instruction_address() const { return addr_at(BytesPerInstWord); } ++ address prev_instruction_address() const { return addr_at(-BytesPerInstWord); } ++ ++ s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); } ++ u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); } ++ ++ jint int_at(int offset) const { return *(jint*) addr_at(offset); } ++ juint uint_at(int offset) const { return *(juint*) addr_at(offset); } ++ ++ intptr_t ptr_at(int offset) const { return *(intptr_t*) addr_at(offset); } ++ ++ oop oop_at (int offset) const { return *(oop*) addr_at(offset); } ++ jlong long_at(int offset) const { return *(jlong*)addr_at(offset); } ++ ++ void set_address(address dest); ++ void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; wrote(offset); } ++ void set_int_at(int offset, jint i) { OrderAccess::fence(); *(jint*)addr_at(offset) = i; wrote(offset); } ++ void set_ptr_at (int offset, intptr_t ptr) { *(intptr_t*) addr_at(offset) = ptr; wrote(offset); } ++ void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; wrote(offset); } ++ void set_long_at(int offset, long i); ++ ++ int insn_word() const { return long_at(0); } ++ static bool is_op (int insn, Assembler::ops_mem op) { return Assembler::sw2_op(insn) == (int)op; } ++ bool is_op (Assembler::ops_mem op) const { return is_op(insn_word(), op); } ++ static bool is_op (int insn, Assembler::ops_opr op) { return Assembler::sw2_arith_op(insn) == (int)op; } ++ bool is_op (Assembler::ops_opr op) const { return is_op(insn_word(), op); } ++ static bool is_op (int insn, Assembler::ops_oprl op) { return Assembler::sw2_arith_op(insn) == (int)op; } ++ bool is_op (Assembler::ops_oprl op) const { return is_op(insn_word(), op); } ++ static bool is_op (int insn, 
Assembler::ops_extra op) { return Assembler::sw2_mfc_op(insn) == (int)op; } ++ bool is_op (Assembler::ops_extra op) const { return is_op(insn_word(), op); } ++ static bool is_op (int insn, Assembler::ops_bra op) { return Assembler::sw2_op(insn) == (int)op; } ++ bool is_op (Assembler::ops_bra op) const { return is_op(insn_word(), op); } ++ static bool is_op (int insn, Assembler::ops_fp op) { return Assembler::sw2_op(insn) == (int)op; } ++ ++ bool is_rs (int insn, Register rs) const { return Assembler::rs(insn) == (int)rs->encoding(); } ++ bool is_rs (Register rs) const { return is_rs(insn_word(), rs); } ++ bool is_rt (int insn, Register rt) const { return Assembler::rt(insn) == (int)rt->encoding(); } ++ bool is_rt (Register rt) const { return is_rt(insn_word(), rt); } ++ ++ void wrote(int offset); ++ ++ public: ++ ++ // unit test stuff ++ static void test() {} // override for testing ++ ++ inline friend NativeInstruction* nativeInstruction_at(address address); ++}; ++ ++inline NativeInstruction* nativeInstruction_at(address address) { ++ NativeInstruction* inst = (NativeInstruction*)address; ++#ifdef ASSERT ++ //inst->verify(); ++#endif ++ return inst; ++} ++ ++inline NativeCall* nativeCall_at(address address); ++// The NativeCall is an abstraction for accessing/manipulating native call sw64 ++// instructions (used to manipulate inline caches, primitive & dll calls, etc.). 
++// a call was done like this: ++// SW64 li64 bits: ++// ldih rd, msb_h(R0); ++// ldi rd, msb_l(rd); ++// slll rd, 32, rd; ++// ldih rd, lsb_h(rd); ++// ldi rd, lsb_l(rd); ++// call ra, rd, (hint) ++// setfpec1 ++// ++ ++// we just consider the above for instruction as one call instruction ++class NativeCall: public NativeInstruction { ++ public: ++ enum sw64_specific_constants { ++ instruction_offset = 0, ++// instruction_size = 5 * BytesPerInstWord, ++// return_address_offset = 5 * BytesPerInstWord, ++// return_address_offset_long = 5 * BytesPerInstWord, //equal to return_address_offset ++ displacement_offset = 0 ++ }; ++ static int instruction_size; //The three member variables can be reassigned in the bytecodes_sw64.cpp when SafePatch is true. ++ static int return_address_offset; ++ static int return_address_offset_long; ++ ++ address instruction_address() const { return addr_at(instruction_offset); } ++ address next_instruction_address() const { return addr_at(return_address_offset); } ++ address return_address() const { return addr_at(return_address_offset); } ++ ++ address destination() const; ++ void set_destination(address dest); ++ void set_destination_mt_safe(address dest) { set_destination(dest);} ++ void set_long_at(int offset, long i); ++ ++ void verify_alignment() { } ++ void verify(); ++ void print(); ++ ++ // Creation ++ inline friend NativeCall* nativeCall_at(address address); ++ inline friend NativeCall* nativeCall_before(address return_address); ++ ++ static bool is_call_at(address instr) { ++ return nativeInstruction_at(instr)->is_call(); ++ } ++ ++ static bool is_call_before(address return_address) { ++ return is_call_at(return_address - NativeCall::return_address_offset); ++ } ++ ++ static bool is_call_to(address instr, address target) { ++ return nativeInstruction_at(instr)->is_call() && ++ nativeCall_at(instr)->destination() == target; ++ } ++ ++ // MT-safe patching of a call instruction. 
++ static void insert(address code_pos, address entry); ++}; ++ ++inline NativeCall* nativeCall_at(address address) { ++ NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset); ++#ifdef ASSERT ++ call->verify(); ++#endif ++ return call; ++} ++ ++inline NativeCall* nativeCall_before(address return_address) { ++ NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset); ++#ifdef ASSERT ++ call->verify(); ++#endif ++ return call; ++} ++ ++// SW64 li48 bits: ++// ldi rd, msb_l(rd); ++// slll rd, 32, rd; ++// ldih rd, lsb_h(rd); ++// ldi rd, lsb_l(rd); ++// call ra, rd, (hint) ++// setfpec1 ++// ++class NativeMovConstReg: public NativeInstruction { ++ public: ++ enum sw64_specific_constants { ++ instruction_offset = 0, ++ instruction_size = 4 * BytesPerInstWord, ++ next_instruction_offset = 4 * BytesPerInstWord, ++ }; ++ ++ int insn_word() const { return long_at(instruction_offset); } ++ address instruction_address() const { return addr_at(0); } ++ address next_instruction_address() const { return addr_at(next_instruction_offset); } ++ intptr_t data() const; ++ void set_data(intptr_t x); ++ ++ void verify(); ++ void print(); ++ ++ // unit test stuff ++ static void test() {} ++ ++ // Creation ++ inline friend NativeMovConstReg* nativeMovConstReg_at(address address); ++ inline friend NativeMovConstReg* nativeMovConstReg_before(address address); ++}; ++ ++inline NativeMovConstReg* nativeMovConstReg_at(address address) { ++ NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset); ++#ifdef ASSERT ++ test->verify(); ++#endif ++ return test; ++} ++ ++inline NativeMovConstReg* nativeMovConstReg_before(address address) { ++ NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset); ++#ifdef ASSERT ++ test->verify(); ++#endif ++ return test; ++} ++ ++class NativeMovConstRegPatching: public NativeMovConstReg { ++ 
private: ++ friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) { ++ NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset); ++ #ifdef ASSERT ++ test->verify(); ++ #endif ++ return test; ++ } ++}; ++ ++ ++class NativeMovRegMem: public NativeInstruction { ++ public: ++ enum sw64_specific_constants { ++ instruction_offset = 0, ++ hiword_offset = 4, ++ ldst_offset = 12, ++ immediate_size = 4, ++ ldst_size = 16 ++ }; ++ ++ //offset is less than 16 bits. ++ bool is_immediate() const { return !is_op(long_at(instruction_offset), Assembler::op_ldih); } ++ bool is_64ldst() const { ++ if (is_immediate()) { ++ return (Assembler::opcode(long_at(hiword_offset)) == Assembler::opcode(long_at(instruction_offset))) && ++ (Assembler::imm_off(long_at(hiword_offset)) == Assembler::imm_off(long_at(instruction_offset)) + wordSize); ++ } else { ++ return (Assembler::opcode(long_at(ldst_offset+hiword_offset)) == Assembler::opcode(long_at(ldst_offset))) && ++ (Assembler::imm_off(long_at(ldst_offset+hiword_offset)) == Assembler::imm_off(long_at(ldst_offset)) + wordSize); ++ } ++ } ++ ++ address instruction_address() const { return addr_at(instruction_offset); } ++ address next_instruction_address() const { ++ return addr_at( (is_immediate()? immediate_size : ldst_size) + (is_64ldst()? 
4 : 0)); ++ } ++ ++ int offset() const; ++ ++ void set_offset(int x); ++ ++ void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); } ++ ++ void verify(); ++ void print (); ++ ++ // unit test stuff ++ static void test() {} ++ ++ private: ++ inline friend NativeMovRegMem* nativeMovRegMem_at (address address); ++}; ++ ++inline NativeMovRegMem* nativeMovRegMem_at (address address) { ++ NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset); ++#ifdef ASSERT ++ test->verify(); ++#endif ++ return test; ++} ++ ++class NativeMovRegMemPatching: public NativeMovRegMem { ++ private: ++ friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) { ++ NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset); ++ #ifdef ASSERT ++ test->verify(); ++ #endif ++ return test; ++ } ++}; ++ ++ ++// SW64 li64 bits: ++// ldih rd, msb_h(R0); ++// ldi rd, msb_l(rd); ++// slll rd, 32, rd; ++// ldih rd, lsb_h(rd); ++// ldi rd, lsb_l(rd); ++// call ra, rd, (hint) ++// setfpec1 ++// ++ ++class NativeGeneralJump: public NativeInstruction { ++public: ++ enum sw64_specific_constants { ++ instruction_offset = 0, ++// instruction_size = 5 * BytesPerInstWord ++ }; ++ static int instruction_size; //This member variable can be reassigned in the bytecodes_sw64.cpp when SafePatch is true. 
++ ++ bool is_short() { ++ int x = long_at(0); ++ int op = Assembler::sw2_op(x); ++ if(op >= Assembler::op_beq && op <= Assembler::op_fbge) ++ return true; ++ return false; ++ } ++ address instruction_address() const { return addr_at(instruction_offset); } ++ address jump_destination(); ++ ++ void set_jump_destination(address dest); ++ ++ // Creation ++ inline friend NativeGeneralJump* nativeGeneralJump_at(address address); ++ ++ // Insertion of native general jump instruction ++ static void check_verified_entry_alignment(address entry, address verified_entry){} ++ static void patch_verified_entry(address entry, address verified_entry, address dest); ++ ++ void verify(); ++}; ++ ++inline NativeGeneralJump* nativeGeneralJump_at(address address) { ++ NativeGeneralJump* jump = (NativeGeneralJump*)(address); ++ debug_only(jump->verify();) ++ return jump; ++} ++ ++ ++class NativeIllegalInstruction: public NativeInstruction { ++public: ++ enum sw64_specific_constants { ++ instruction_code = 0x0000DEAD, // Special instruction ++ instruction_size = 4, ++ instruction_offset = 0, ++ next_instruction_offset = 4 ++ }; ++ ++ // Insert illegal opcode as specific address ++ static void insert(address code_pos); ++}; ++ ++// return instruction that does not pop values of the stack for sw64. 
++// call RA ++// setfpec1 ++class NativeReturn: public NativeInstruction { ++ public: ++ enum sw64_specific_constants { ++ instruction_size = 8, ++ instruction_offset = 0, ++ next_instruction_offset = 8 ++ }; ++}; ++ ++ ++ ++ ++class NativeCondJump; ++inline NativeCondJump* nativeCondJump_at(address address); ++class NativeCondJump: public NativeInstruction { ++ public: ++ enum sw64_specific_constants { ++ instruction_size = 16, ++ instruction_offset = 12, ++ next_instruction_offset = 20 ++ }; ++ ++ ++ int insn_word() const { return long_at(instruction_offset); } ++ address instruction_address() const { return addr_at(0); } ++ address next_instruction_address() const { return addr_at(next_instruction_offset); } ++ ++ // Creation ++ inline friend NativeCondJump* nativeCondJump_at(address address); ++ ++ address jump_destination() const { ++ return ::nativeCondJump_at(addr_at(12))->jump_destination(); ++ } ++ ++ void set_jump_destination(address dest) { ++ ::nativeCondJump_at(addr_at(12))->set_jump_destination(dest); ++ } ++ ++}; ++ ++inline NativeCondJump* nativeCondJump_at(address address) { ++ NativeCondJump* jump = (NativeCondJump*)(address); ++ return jump; ++} ++ ++ ++ ++inline bool NativeInstruction::is_illegal() { return insn_word() == illegal_instruction(); } ++ ++inline bool NativeInstruction::is_call() { ++ ++ if (is_op(long_at(0), Assembler::op_call)&&is_op(long_at(4), Assembler::op_setfpec1)){ ++ return true; ++ } ++ ++ /* li64 or li48 */ ++ if (is_op(long_at(0), Assembler::op_ldih)) { ++ /* li64 */ ++ return is_op(long_at(0),Assembler::op_ldih) && ++ is_op(int_at(4), Assembler::op_ldi) && ++ is_op(int_at(8), Assembler::op_slll_l) && ++ is_op(int_at(12), Assembler::op_ldih) && ++ is_op(int_at(16), Assembler::op_ldi) && ++ is_op(long_at(24), Assembler::op_call); ++ } else { ++ /* li48 */ ++ if (SafePatch) { ++ return (is_op(int_at(0), Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_br) && ++ is_op(int_at(16), Assembler::op_ldl) )|| 
(is_op(int_at(0), Assembler::op_br) && ++ is_op(int_at(12), Assembler::op_ldl) && ++ is_op(int_at(16), Assembler::op_ldi) ); ++ } else { ++ return is_op(long_at(0),Assembler::op_ldi) && ++ is_op(int_at(4), Assembler::op_slll_l) && ++ is_op(int_at(8), Assembler::op_ldih) && ++ is_op(int_at(12), Assembler::op_ldi) && ++ is_op(long_at(16), Assembler::op_call); ++ } ++ } ++} ++ ++inline bool NativeInstruction::is_return() { ++ return is_op(Assembler::op_jmp) && is_rs(RA); ++} ++ ++inline bool NativeInstruction::is_cond_jump(){ ++ int x = long_at(0); ++ int op = Assembler::sw2_op(x); ++ if(op>=Assembler::op_beq && op<=Assembler::op_fbge) ++ return true; ++ return false; ++} ++ ++#endif // CPU_SW64_VM_NATIVEINST_SW64_HPP ++ +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/register_definitions_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/register_definitions_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/register_definitions_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/register_definitions_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,98 @@ ++/* ++ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/assembler.hpp" ++#include "asm/register.hpp" ++#include "register_sw64.hpp" ++#include "interp_masm_sw64.hpp" ++ ++ ++REGISTER_DEFINITION(Register, noreg); ++REGISTER_DEFINITION(Register, i0); ++REGISTER_DEFINITION(Register, i1); ++REGISTER_DEFINITION(Register, i2); ++REGISTER_DEFINITION(Register, i3); ++REGISTER_DEFINITION(Register, i4); ++REGISTER_DEFINITION(Register, i5); ++REGISTER_DEFINITION(Register, i6); ++REGISTER_DEFINITION(Register, i7); ++REGISTER_DEFINITION(Register, i8); ++REGISTER_DEFINITION(Register, i9); ++REGISTER_DEFINITION(Register, i10); ++REGISTER_DEFINITION(Register, i11); ++REGISTER_DEFINITION(Register, i12); ++REGISTER_DEFINITION(Register, i13); ++REGISTER_DEFINITION(Register, i14); ++REGISTER_DEFINITION(Register, i15); ++REGISTER_DEFINITION(Register, i16); ++REGISTER_DEFINITION(Register, i17); ++REGISTER_DEFINITION(Register, i18); ++REGISTER_DEFINITION(Register, i19); ++REGISTER_DEFINITION(Register, i20); ++REGISTER_DEFINITION(Register, i21); ++REGISTER_DEFINITION(Register, i22); ++REGISTER_DEFINITION(Register, i23); ++REGISTER_DEFINITION(Register, i24); ++REGISTER_DEFINITION(Register, i25); ++REGISTER_DEFINITION(Register, i26); ++REGISTER_DEFINITION(Register, i27); ++REGISTER_DEFINITION(Register, i28); ++REGISTER_DEFINITION(Register, i29); ++REGISTER_DEFINITION(Register, i30); ++REGISTER_DEFINITION(Register, i31); ++ ++REGISTER_DEFINITION(FloatRegister, fnoreg); ++REGISTER_DEFINITION(FloatRegister, f0); ++REGISTER_DEFINITION(FloatRegister, f1); ++REGISTER_DEFINITION(FloatRegister, f2); 
++REGISTER_DEFINITION(FloatRegister, f3); ++REGISTER_DEFINITION(FloatRegister, f4); ++REGISTER_DEFINITION(FloatRegister, f5); ++REGISTER_DEFINITION(FloatRegister, f6); ++REGISTER_DEFINITION(FloatRegister, f7); ++REGISTER_DEFINITION(FloatRegister, f8); ++REGISTER_DEFINITION(FloatRegister, f9); ++REGISTER_DEFINITION(FloatRegister, f10); ++REGISTER_DEFINITION(FloatRegister, f11); ++REGISTER_DEFINITION(FloatRegister, f12); ++REGISTER_DEFINITION(FloatRegister, f13); ++REGISTER_DEFINITION(FloatRegister, f14); ++REGISTER_DEFINITION(FloatRegister, f15); ++REGISTER_DEFINITION(FloatRegister, f16); ++REGISTER_DEFINITION(FloatRegister, f17); ++REGISTER_DEFINITION(FloatRegister, f18); ++REGISTER_DEFINITION(FloatRegister, f19); ++REGISTER_DEFINITION(FloatRegister, f20); ++REGISTER_DEFINITION(FloatRegister, f21); ++REGISTER_DEFINITION(FloatRegister, f22); ++REGISTER_DEFINITION(FloatRegister, f23); ++REGISTER_DEFINITION(FloatRegister, f24); ++REGISTER_DEFINITION(FloatRegister, f25); ++REGISTER_DEFINITION(FloatRegister, f26); ++REGISTER_DEFINITION(FloatRegister, f27); ++REGISTER_DEFINITION(FloatRegister, f28); ++REGISTER_DEFINITION(FloatRegister, f29); ++REGISTER_DEFINITION(FloatRegister, f30); ++REGISTER_DEFINITION(FloatRegister, f31); +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/registerMap_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/registerMap_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/registerMap_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/registerMap_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,47 @@ ++/* ++ * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_REGISTERMAP_SW64_HPP ++#define CPU_SW64_VM_REGISTERMAP_SW64_HPP ++ ++// machine-dependent implemention for register maps ++ friend class frame; ++ ++ private: ++#ifndef CORE ++ // This is the hook for finding a register in an "well-known" location, ++ // such as a register block of a predetermined format. ++ // Since there is none, we just return NULL. ++ // See registerMap_sparc.hpp for an example of grabbing registers ++ // from register save areas of a standard layout. ++ address pd_location(VMReg reg) const {return NULL;} ++#endif ++ ++ // no PD state to clear or copy: ++ void pd_clear() {} ++ void pd_initialize() {} ++ void pd_initialize_from(const RegisterMap* map) {} ++ ++#endif // CPU_SW64_VM_REGISTERMAP_SW64_HPP ++ +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/register_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/register_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/register_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/register_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,52 @@ ++/* ++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "register_sw64.hpp" ++ ++const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers << 1; ++const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr + ++ 2 * FloatRegisterImpl::number_of_registers; ++ ++ ++const char* RegisterImpl::name() const { ++ const char* names[number_of_registers] = { ++ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", ++ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", ++ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", ++ "t10", "t11", "ra", "t12", "at", "gp", "sp", "zero" ++ }; ++ return is_valid() ? names[encoding()] : "noreg"; ++} ++ ++const char* FloatRegisterImpl::name() const { ++ const char* names[number_of_registers] = { ++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", ++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", ++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", ++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" ++ }; ++ return is_valid() ? 
names[encoding()] : "fnoreg"; ++} ++ +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/register_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/register_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/register_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/register_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,332 @@ ++/* ++ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_REGISTER_SW64_HPP ++#define CPU_SW64_VM_REGISTER_SW64_HPP ++ ++#include "asm/register.hpp" ++#include "vm_version_sw64.hpp" ++ ++class VMRegImpl; ++typedef VMRegImpl* VMReg; ++ ++// Use Register as shortcut ++class RegisterImpl; ++typedef RegisterImpl* Register; ++ ++ ++// The implementation of integer registers for the SW64 architecture ++inline Register as_Register(int encoding) { ++ return (Register)(intptr_t) encoding; ++} ++ ++class RegisterImpl: public AbstractRegisterImpl { ++ public: ++ enum { ++ number_of_registers = 32 ++ }; ++ ++ // derived registers, offsets, and addresses ++ Register successor() const { return as_Register(encoding() + 1); } ++ ++ // construction ++ inline friend Register as_Register(int encoding); ++ ++ VMReg as_VMReg(); ++ ++ // accessors ++ int encoding() const { assert(is_valid(),err_msg( "invalid register (%d)", (int)(intptr_t)this)); return (intptr_t)this; } ++ bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; } ++ const char* name() const; ++}; ++ ++ ++// The integer registers of the SW64 architecture ++CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1)); ++ ++ ++CONSTANT_REGISTER_DECLARATION(Register, i0, (0)); ++CONSTANT_REGISTER_DECLARATION(Register, i1, (1)); ++CONSTANT_REGISTER_DECLARATION(Register, i2, (2)); ++CONSTANT_REGISTER_DECLARATION(Register, i3, (3)); ++CONSTANT_REGISTER_DECLARATION(Register, i4, (4)); ++CONSTANT_REGISTER_DECLARATION(Register, i5, (5)); ++CONSTANT_REGISTER_DECLARATION(Register, i6, (6)); ++CONSTANT_REGISTER_DECLARATION(Register, i7, (7)); ++CONSTANT_REGISTER_DECLARATION(Register, i8, (8)); ++CONSTANT_REGISTER_DECLARATION(Register, i9, (9)); ++CONSTANT_REGISTER_DECLARATION(Register, i10, (10)); ++CONSTANT_REGISTER_DECLARATION(Register, i11, (11)); ++CONSTANT_REGISTER_DECLARATION(Register, i12, (12)); ++CONSTANT_REGISTER_DECLARATION(Register, i13, (13)); ++CONSTANT_REGISTER_DECLARATION(Register, i14, (14)); 
++CONSTANT_REGISTER_DECLARATION(Register, i15, (15)); ++CONSTANT_REGISTER_DECLARATION(Register, i16, (16)); ++CONSTANT_REGISTER_DECLARATION(Register, i17, (17)); ++CONSTANT_REGISTER_DECLARATION(Register, i18, (18)); ++CONSTANT_REGISTER_DECLARATION(Register, i19, (19)); ++CONSTANT_REGISTER_DECLARATION(Register, i20, (20)); ++CONSTANT_REGISTER_DECLARATION(Register, i21, (21)); ++CONSTANT_REGISTER_DECLARATION(Register, i22, (22)); ++CONSTANT_REGISTER_DECLARATION(Register, i23, (23)); ++CONSTANT_REGISTER_DECLARATION(Register, i24, (24)); ++CONSTANT_REGISTER_DECLARATION(Register, i25, (25)); ++CONSTANT_REGISTER_DECLARATION(Register, i26, (26)); ++CONSTANT_REGISTER_DECLARATION(Register, i27, (27)); ++CONSTANT_REGISTER_DECLARATION(Register, i28, (28)); ++CONSTANT_REGISTER_DECLARATION(Register, i29, (29)); ++CONSTANT_REGISTER_DECLARATION(Register, i30, (30)); ++CONSTANT_REGISTER_DECLARATION(Register, i31, (31)); ++ ++#ifndef DONT_USE_REGISTER_DEFINES ++#define NOREG ((Register)(noreg_RegisterEnumValue)) ++ ++#define I0 ((Register)(i0_RegisterEnumValue)) ++#define I1 ((Register)(i1_RegisterEnumValue)) ++#define I2 ((Register)(i2_RegisterEnumValue)) ++#define I3 ((Register)(i3_RegisterEnumValue)) ++#define I4 ((Register)(i4_RegisterEnumValue)) ++#define I5 ((Register)(i5_RegisterEnumValue)) ++#define I6 ((Register)(i6_RegisterEnumValue)) ++#define I7 ((Register)(i7_RegisterEnumValue)) ++#define I8 ((Register)(i8_RegisterEnumValue)) ++#define I9 ((Register)(i9_RegisterEnumValue)) ++#define I10 ((Register)(i10_RegisterEnumValue)) ++#define I11 ((Register)(i11_RegisterEnumValue)) ++#define I12 ((Register)(i12_RegisterEnumValue)) ++#define I13 ((Register)(i13_RegisterEnumValue)) ++#define I14 ((Register)(i14_RegisterEnumValue)) ++#define I15 ((Register)(i15_RegisterEnumValue)) ++#define I16 ((Register)(i16_RegisterEnumValue)) ++#define I17 ((Register)(i17_RegisterEnumValue)) ++#define I18 ((Register)(i18_RegisterEnumValue)) ++#define I19 ((Register)(i19_RegisterEnumValue)) 
++#define I20 ((Register)(i20_RegisterEnumValue)) ++#define I21 ((Register)(i21_RegisterEnumValue)) ++#define I22 ((Register)(i22_RegisterEnumValue)) ++#define I23 ((Register)(i23_RegisterEnumValue)) ++#define I24 ((Register)(i24_RegisterEnumValue)) ++#define I25 ((Register)(i25_RegisterEnumValue)) ++#define I26 ((Register)(i26_RegisterEnumValue)) ++#define I27 ((Register)(i27_RegisterEnumValue)) ++#define I28 ((Register)(i28_RegisterEnumValue)) ++#define I29 ((Register)(i29_RegisterEnumValue)) ++#define I30 ((Register)(i30_RegisterEnumValue)) ++#define I31 ((Register)(i31_RegisterEnumValue)) ++ ++ ++#define V0 ((Register)(i0_RegisterEnumValue)) ++#define T0 ((Register)(i1_RegisterEnumValue)) ++#define T1 ((Register)(i2_RegisterEnumValue)) ++#define T2 ((Register)(i3_RegisterEnumValue)) ++#define T3 ((Register)(i4_RegisterEnumValue)) ++#define T4 ((Register)(i5_RegisterEnumValue)) ++#define T5 ((Register)(i6_RegisterEnumValue)) ++#define T6 ((Register)(i7_RegisterEnumValue)) ++#define T7 ((Register)(i8_RegisterEnumValue)) ++#define S0 ((Register)(i9_RegisterEnumValue)) ++#define S1 ((Register)(i10_RegisterEnumValue)) ++#define S2 ((Register)(i11_RegisterEnumValue)) ++#define S3 ((Register)(i12_RegisterEnumValue)) ++#define S4 ((Register)(i13_RegisterEnumValue)) ++#define S5 ((Register)(i14_RegisterEnumValue)) ++#define FP ((Register)(i15_RegisterEnumValue)) ++#define A0 ((Register)(i16_RegisterEnumValue)) ++#define A1 ((Register)(i17_RegisterEnumValue)) ++#define A2 ((Register)(i18_RegisterEnumValue)) ++#define A3 ((Register)(i19_RegisterEnumValue)) ++#define A4 ((Register)(i20_RegisterEnumValue)) ++#define A5 ((Register)(i21_RegisterEnumValue)) ++#define T8 ((Register)(i22_RegisterEnumValue)) ++#define T9 ((Register)(i23_RegisterEnumValue)) ++#define T10 ((Register)(i24_RegisterEnumValue)) ++#define T11 ((Register)(i25_RegisterEnumValue)) ++#define RA ((Register)(i26_RegisterEnumValue)) ++#define T12 ((Register)(i27_RegisterEnumValue)) ++#define AT 
((Register)(i28_RegisterEnumValue)) ++#define GP ((Register)(i29_RegisterEnumValue)) ++#define SP ((Register)(i30_RegisterEnumValue)) ++#define R0 ((Register)(i31_RegisterEnumValue)) ++ ++ ++#define c_rarg0 T0 ++#define c_rarg1 T1 ++#define Rmethod S3 ++#define Rsender S4 ++#define Rnext T8 ++ ++#define RT0 T0 ++#define RT1 T1 ++#define RT2 T2 ++#define RT3 T3 ++#define RT4 T11 ++#define RT5 T12 ++ ++//for interpreter frame ++// bytecode pointer register ++#define BCP S0 ++// local variable pointer register ++#define LVP S1 ++ ++#define OPT_SAFEPOINT 1 ++#define S2thread S2 ++#define S5_heapbase S5 ++#define mh_SP_save SP ++ ++#define FSR V0 ++#define SSR T4 ++#define FSF F0 ++#define SSF F1 ++#define FTF F14 ++#define STF F15 ++ ++#define AFT F30 ++#define FcmpRES F29 ++ ++#define IC_Klass T1 ++ ++#endif // DONT_USE_REGISTER_DEFINES ++ ++// Use FloatRegister as shortcut ++class FloatRegisterImpl; ++typedef FloatRegisterImpl* FloatRegister; ++ ++inline FloatRegister as_FloatRegister(int encoding) { ++ return (FloatRegister)(intptr_t) encoding; ++} ++ ++// The implementation of floating point registers for the SW64 architecture ++class FloatRegisterImpl: public AbstractRegisterImpl { ++ public: ++ enum { ++ float_arg_base = 16, ++ number_of_registers = 32 ++ }; ++ ++ // construction ++ inline friend FloatRegister as_FloatRegister(int encoding); ++ ++ VMReg as_VMReg(); ++ ++ // derived registers, offsets, and addresses ++ FloatRegister successor() const { return as_FloatRegister(encoding() + 1); } ++ ++ // accessors ++ int encoding() const { assert(is_valid(), "invalid register"); return (intptr_t)this; } ++ bool is_valid() const { return 0 <= (intptr_t)this && (intptr_t)this < number_of_registers; } ++ const char* name() const; ++ ++}; ++ ++CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg , (-1)); ++ ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f0 , ( 0)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f1 , ( 1)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, 
f2 , ( 2)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f3 , ( 3)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f4 , ( 4)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f5 , ( 5)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f6 , ( 6)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f7 , ( 7)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f8 , ( 8)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f9 , ( 9)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f10 , (10)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f11 , (11)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f12 , (12)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f13 , (13)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f14 , (14)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f15 , (15)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f16 , (16)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f17 , (17)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f18 , (18)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f19 , (19)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f20 , (20)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f21 , (21)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f22 , (22)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f23 , (23)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f24 , (24)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f25 , (25)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f26 , (26)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f27 , (27)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f28 , (28)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f29 , (29)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f30 , (30)); ++CONSTANT_REGISTER_DECLARATION(FloatRegister, f31 , (31)); ++ ++#ifndef DONT_USE_REGISTER_DEFINES ++#define FNOREG ((FloatRegister)(fnoreg_FloatRegisterEnumValue)) ++#define F0 ((FloatRegister)( f0_FloatRegisterEnumValue)) ++#define F1 ((FloatRegister)( f1_FloatRegisterEnumValue)) ++#define F2 ((FloatRegister)( 
f2_FloatRegisterEnumValue)) ++#define F3 ((FloatRegister)( f3_FloatRegisterEnumValue)) ++#define F4 ((FloatRegister)( f4_FloatRegisterEnumValue)) ++#define F5 ((FloatRegister)( f5_FloatRegisterEnumValue)) ++#define F6 ((FloatRegister)( f6_FloatRegisterEnumValue)) ++#define F7 ((FloatRegister)( f7_FloatRegisterEnumValue)) ++#define F8 ((FloatRegister)( f8_FloatRegisterEnumValue)) ++#define F9 ((FloatRegister)( f9_FloatRegisterEnumValue)) ++#define F10 ((FloatRegister)( f10_FloatRegisterEnumValue)) ++#define F11 ((FloatRegister)( f11_FloatRegisterEnumValue)) ++#define F12 ((FloatRegister)( f12_FloatRegisterEnumValue)) ++#define F13 ((FloatRegister)( f13_FloatRegisterEnumValue)) ++#define F14 ((FloatRegister)( f14_FloatRegisterEnumValue)) ++#define F15 ((FloatRegister)( f15_FloatRegisterEnumValue)) ++#define F16 ((FloatRegister)( f16_FloatRegisterEnumValue)) ++#define F17 ((FloatRegister)( f17_FloatRegisterEnumValue)) ++#define F18 ((FloatRegister)( f18_FloatRegisterEnumValue)) ++#define F19 ((FloatRegister)( f19_FloatRegisterEnumValue)) ++#define F20 ((FloatRegister)( f20_FloatRegisterEnumValue)) ++#define F21 ((FloatRegister)( f21_FloatRegisterEnumValue)) ++#define F22 ((FloatRegister)( f22_FloatRegisterEnumValue)) ++#define F23 ((FloatRegister)( f23_FloatRegisterEnumValue)) ++#define F24 ((FloatRegister)( f24_FloatRegisterEnumValue)) ++#define F25 ((FloatRegister)( f25_FloatRegisterEnumValue)) ++#define F26 ((FloatRegister)( f26_FloatRegisterEnumValue)) ++#define F27 ((FloatRegister)( f27_FloatRegisterEnumValue)) ++#define F28 ((FloatRegister)( f28_FloatRegisterEnumValue)) ++#define F29 ((FloatRegister)( f29_FloatRegisterEnumValue)) ++#define F30 ((FloatRegister)( f30_FloatRegisterEnumValue)) ++#define F31 ((FloatRegister)( f31_FloatRegisterEnumValue)) ++#endif // DONT_USE_REGISTER_DEFINES ++ ++ ++// Need to know the total number of registers of all sorts for SharedInfo. ++// Define a class that exports it. 
++class ConcreteRegisterImpl : public AbstractRegisterImpl { ++ public: ++ enum { ++ // A big enough number for C2: all the registers plus flags ++ // This number must be large enough to cover REG_COUNT (defined by c2) registers. ++ // There is no requirement that any ordering here matches any ordering c2 gives ++ // it's optoregs. ++ number_of_registers = (RegisterImpl::number_of_registers + FloatRegisterImpl::number_of_registers) ++ LP64_ONLY( * 2) ++ }; ++ ++ static const int max_gpr; ++ static const int max_fpr; ++ ++ ++}; ++#endif //CPU_SW64_VM_REGISTER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/relocInfo_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/relocInfo_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/relocInfo_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/relocInfo_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,126 @@ ++/* ++ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "code/relocInfo.hpp" ++#include "nativeInst_sw64.hpp" ++#include "oops/oop.inline.hpp" ++#include "runtime/safepoint.hpp" ++ ++ ++void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) { ++ x += o; ++ typedef Assembler::WhichOperand WhichOperand; ++ WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop ++ assert(which == Assembler::disp32_operand || ++ which == Assembler::narrow_oop_operand || ++ which == Assembler::imm_operand, "format unpacks ok"); ++ if (which == Assembler::imm_operand) { ++ if (verify_only) { ++ assert(nativeMovConstReg_at(addr())->data() == (long)x, "instructions must match"); ++ } else { ++ nativeMovConstReg_at(addr())->set_data((intptr_t)(x)); ++ } ++ } else if (which == Assembler::narrow_oop_operand) { ++ // both compressed oops and compressed classes look the same ++ if (Universe::heap()->is_in_reserved((oop)x)) { ++ if (verify_only) { ++ assert(nativeMovConstReg_at(addr())->data() == (long)oopDesc::encode_heap_oop((oop)x), "instructions must match"); ++ } else { ++ nativeMovConstReg_at(addr())->set_data((intptr_t)(oopDesc::encode_heap_oop((oop)x))); ++ } ++ } else { ++ if (verify_only) { ++ assert(nativeMovConstReg_at(addr())->data() == (long)Klass::encode_klass((Klass*)x), "instructions must match"); ++ } else { ++ nativeMovConstReg_at(addr())->set_data((intptr_t)(Klass::encode_klass((Klass*)x))); ++ } ++ } ++ } else { ++ // Note: Use runtime_call_type relocations for call32_operand. 
++ assert(0, "call32_operand not supported in SW64"); ++ } ++} ++ ++ ++//NOTICE HERE, this relocate is not need for SW64, since SW64 USE abosolutly target, ++//Maybe We should FORGET CALL RELOCATION ++address Relocation::pd_call_destination(address orig_addr) { ++ intptr_t adj = 0; ++ NativeInstruction* ni = nativeInstruction_at(addr()); ++ if (ni->is_call()) { ++ return nativeCall_at(addr())->destination() + adj; ++ } else if (ni->is_jump()) { ++ return nativeGeneralJump_at(addr())->jump_destination() + adj; ++ } else if (ni->is_cond_jump()) { ++ return nativeCondJump_at(addr())->jump_destination() +adj; ++ } else { ++ tty->print_cr("\nError!\ncall destination: 0x%lx", addr()); ++ Disassembler::decode(addr() - 10 * 4, addr() + 10 * 4, tty); ++ ShouldNotReachHere(); ++ return NULL; ++ } ++} ++ ++ ++void Relocation::pd_set_call_destination(address x) { ++ NativeInstruction* ni = nativeInstruction_at(addr()); ++ if (ni->is_call()) { ++ nativeCall_at(addr())->set_destination(x); ++ } else if (ni->is_jump()) ++ nativeGeneralJump_at(addr())->set_jump_destination(x); ++ else if (ni->is_cond_jump()) ++ nativeCondJump_at(addr())->set_jump_destination(x); ++ else ++ { ShouldNotReachHere(); } ++ ++ // Unresolved jumps are recognized by a destination of -1 ++ // However 64bit can't actually produce such an address ++ // and encodes a jump to self but jump_destination will ++ // return a -1 as the signal. We must not relocate this ++ // jmp or the ic code will not see it as unresolved. 
++} ++ ++ ++address* Relocation::pd_address_in_code() { ++ return (address*)addr(); ++} ++ ++ ++address Relocation::pd_get_address_from_code() { ++ NativeMovConstReg* ni = nativeMovConstReg_at(addr()); ++ return (address)ni->data(); ++} ++ ++ ++ ++void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { ++} ++ ++void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { ++} ++ ++void metadata_Relocation::pd_fix_value(address x) { ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/relocInfo_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/relocInfo_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/relocInfo_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/relocInfo_sw64.hpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,39 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_RELOCINFO_SW64_HPP ++#define CPU_SW64_VM_RELOCINFO_SW64_HPP ++ ++ // machine-dependent parts of class relocInfo ++ private: ++ enum { ++ // Since SW64 instructions are whole words, ++ // the two low-order offset bits can always be discarded. ++ offset_unit = 4, ++ ++ // imm_oop_operand vs. narrow_oop_operand ++ format_width = 2 ++ }; ++ ++#endif // CPU_SW64_VM_RELOCINFO_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/runtime_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/runtime_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/runtime_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/runtime_sw64.cpp 2025-05-06 10:53:44.907633666 +0800 +@@ -0,0 +1,189 @@ ++/* ++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#ifdef COMPILER2 ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "code/vmreg.hpp" ++#include "interpreter/interpreter.hpp" ++#include "opto/runtime.hpp" ++#include "runtime/interfaceSupport.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/vframeArray.hpp" ++#include "utilities/globalDefinitions.hpp" ++#include "vmreg_sw64.inline.hpp" ++#endif ++ ++#define __ masm-> ++ ++//-------------- generate_exception_blob ----------- ++// creates _exception_blob. ++// The exception blob is jumped to from a compiled method. ++// (see emit_exception_handler in sparc.ad file) ++// ++// Given an exception pc at a call we call into the runtime for the ++// handler in this method. This handler might merely restore state ++// (i.e. callee save registers) unwind the frame and jump to the ++// exception handler for the nmethod if there is no Java level handler ++// for the nmethod. ++// ++// This code is entered with a jump, and left with a jump. ++// ++// Arguments: ++// V0: exception oop ++// T4: exception pc ++// ++// Results: ++// A0: exception oop ++// A1: exception pc in caller or ??? 
++// jumps to: exception handler of caller ++// ++// Note: the exception pc MUST be at a call (precise debug information) ++// ++// [stubGenerator_sw64.cpp] generate_forward_exception() ++// |- V0, T4 are created ++// |- T12 <= SharedRuntime::exception_handler_for_return_address ++// `- jr T12 ++// `- the caller's exception_handler ++// `- jr OptoRuntime::exception_blob ++// `- here ++// ++void OptoRuntime::generate_exception_blob() { ++ // Capture info about frame layout ++ enum layout { ++ fp_off, ++ return_off, // slot for return address ++ framesize ++ }; ++ ++ // allocate space for the code ++ ResourceMark rm; ++ // setup code generation tools ++ CodeBuffer buffer("exception_blob", 5120, 5120); ++ MacroAssembler* masm = new MacroAssembler(&buffer); ++ ++ ++ address start = __ pc(); ++ ++ __ add_simm16(SP, SP, -1 * framesize * wordSize); // Prolog! ++ ++ // this frame will be treated as the original caller method. ++ // So, the return pc should be filled with the original exception pc. ++ // ref: X86's implementation ++ __ stl(T4, SP, return_off *wordSize); // return address ++ __ stl(FP, SP, fp_off *wordSize); ++ ++ // Save callee saved registers. None for UseSSE=0, ++ // floats-only for UseSSE=1, and doubles for UseSSE=2. ++ ++ __ add_simm16(FP, SP, fp_off * wordSize); ++ ++ // Store exception in Thread object. We cannot pass any arguments to the ++ // handle_exception call, since we do not want to make any assumption ++ // about the size of the frame where the exception happened in. ++ Register thread = S2thread; ++ ++ __ stl(V0, Address(thread, JavaThread::exception_oop_offset())); ++ __ stl(T4, Address(thread, JavaThread::exception_pc_offset())); ++ ++ // This call does all the hard work. It checks if an exception handler ++ // exists in the method. ++ // If so, it returns the handler address. ++ // If not, it prepares for stack-unwinding, restoring the callee-save ++ // registers of the frame being removed. 
++ __ set_last_Java_frame(thread, NOREG, NOREG, NULL); ++ ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); // Fix stack alignment as required by ABI ++ ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ long save_pc = (long)__ pc() + 24 + NativeCall::return_address_offset; ++ __ patchable_set48(AT, save_pc); ++ } ++#else ++ { ++ if(UseAddpi){ ++ intptr_t patch_off = 2 + NativeCall::return_address_offset/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 3 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, thread, in_bytes(JavaThread::last_Java_pc_offset())); ++ ++ __ move(A0, thread); ++ __ patchable_call_setfpec1((address)OptoRuntime::handle_exception_C); ++ ++ // Set an oopmap for the call site ++ OopMapSet *oop_maps = new OopMapSet(); ++ OopMap* map = new OopMap( framesize, 0 ); ++ ++ oop_maps->add_gc_map( __ offset() - 4, map); ++ ++ __ reset_last_Java_frame(thread, true); ++ ++ // Pop self-frame. ++ __ leave(); // Epilog! ++ ++ // V0: exception handler ++ ++ // We have a handler in V0, (could be deopt blob) ++ __ move(T12, V0); ++ ++ // Get the exception ++ __ ldl(A0, Address(thread, JavaThread::exception_oop_offset())); ++ // Get the exception pc in case we are deoptimized ++ __ ldl(A1, Address(thread, JavaThread::exception_pc_offset())); ++#ifdef ASSERT ++ __ stl(R0, Address(thread, JavaThread::exception_handler_pc_offset())); ++ __ stl(R0, Address(thread, JavaThread::exception_pc_offset())); ++#endif ++ // Clear the exception oop so GC no longer processes it as a root. 
++ __ stl(R0, Address(thread, JavaThread::exception_oop_offset())); ++ ++ // Fix seg fault when running: ++ // Eclipse + Plugin + Debug As ++ // This is the only condition where C2 calls SharedRuntime::generate_deopt_blob() ++ // ++ __ move(V0, A0); ++ __ move(T4, A1); ++ ++ // V0: exception oop ++ // T12: exception handler ++ // A1: exception pc ++ __ jmp(T12); ++ ++ // make sure all code is generated ++ masm->flush(); ++ ++ _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize); ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/sharedRuntime_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/sharedRuntime_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/sharedRuntime_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/sharedRuntime_sw64.cpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,3848 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "code/debugInfoRec.hpp" ++#include "code/icBuffer.hpp" ++#include "code/vtableStubs.hpp" ++#include "interpreter/interpreter.hpp" ++#include "oops/compiledICHolder.hpp" ++#include "prims/jvmtiRedefineClassesTrace.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/vframeArray.hpp" ++#include "vmreg_sw64.inline.hpp" ++#ifdef COMPILER1 ++#include "c1/c1_Runtime1.hpp" ++#endif ++#ifdef COMPILER2 ++#include "opto/runtime.hpp" ++#endif ++ ++#include ++ ++#define __ masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; ++ ++class RegisterSaver { ++ enum { FPU_regs_live = 32 }; ++ // Capture info about frame layout ++ enum layout { ++#define DEF_LAYOUT_OFFS(regname) regname ## _off, regname ## H_off, ++ DEF_LAYOUT_OFFS(for_16_bytes_aligned) ++ DEF_LAYOUT_OFFS(fpr0) ++ DEF_LAYOUT_OFFS(fpr1) ++ DEF_LAYOUT_OFFS(fpr2) ++ DEF_LAYOUT_OFFS(fpr3) ++ DEF_LAYOUT_OFFS(fpr4) ++ DEF_LAYOUT_OFFS(fpr5) ++ DEF_LAYOUT_OFFS(fpr6) ++ DEF_LAYOUT_OFFS(fpr7) ++ DEF_LAYOUT_OFFS(fpr8) ++ DEF_LAYOUT_OFFS(fpr9) ++ DEF_LAYOUT_OFFS(fpr10) ++ DEF_LAYOUT_OFFS(fpr11) ++ DEF_LAYOUT_OFFS(fpr12) ++ DEF_LAYOUT_OFFS(fpr13) ++ DEF_LAYOUT_OFFS(fpr14) ++ DEF_LAYOUT_OFFS(fpr15) ++ DEF_LAYOUT_OFFS(fpr16) ++ DEF_LAYOUT_OFFS(fpr17) ++ DEF_LAYOUT_OFFS(fpr18) ++ DEF_LAYOUT_OFFS(fpr19) ++ DEF_LAYOUT_OFFS(fpr20) ++ DEF_LAYOUT_OFFS(fpr21) ++ DEF_LAYOUT_OFFS(fpr22) ++ DEF_LAYOUT_OFFS(fpr23) ++ DEF_LAYOUT_OFFS(fpr24) ++ DEF_LAYOUT_OFFS(fpr25) ++ DEF_LAYOUT_OFFS(fpr26) ++ DEF_LAYOUT_OFFS(fpr27) ++ DEF_LAYOUT_OFFS(fpr28) ++ DEF_LAYOUT_OFFS(fpr29) ++ DEF_LAYOUT_OFFS(fpr30) ++ 
DEF_LAYOUT_OFFS(fpr31) ++ ++ DEF_LAYOUT_OFFS(v0) ++ DEF_LAYOUT_OFFS(t0) ++ DEF_LAYOUT_OFFS(t1) ++ DEF_LAYOUT_OFFS(t2) ++ DEF_LAYOUT_OFFS(t3) ++ DEF_LAYOUT_OFFS(t4) ++ DEF_LAYOUT_OFFS(t5) ++ DEF_LAYOUT_OFFS(t6) ++ DEF_LAYOUT_OFFS(t7) ++ DEF_LAYOUT_OFFS(s0) ++ DEF_LAYOUT_OFFS(s1) ++ DEF_LAYOUT_OFFS(s2) ++ DEF_LAYOUT_OFFS(s3) ++ DEF_LAYOUT_OFFS(s4) ++ DEF_LAYOUT_OFFS(s5) ++ // FP move down ++ DEF_LAYOUT_OFFS(a0) ++ DEF_LAYOUT_OFFS(a1) ++ DEF_LAYOUT_OFFS(a2) ++ DEF_LAYOUT_OFFS(a3) ++ DEF_LAYOUT_OFFS(a4) ++ DEF_LAYOUT_OFFS(a5) ++ DEF_LAYOUT_OFFS(t8) ++ DEF_LAYOUT_OFFS(t9) ++ DEF_LAYOUT_OFFS(t10) ++ DEF_LAYOUT_OFFS(t11) ++ // RA move down ++ DEF_LAYOUT_OFFS(t12) ++ // no AT ++ DEF_LAYOUT_OFFS(gp) ++ // no SP ++ // no R0 ++ DEF_LAYOUT_OFFS(fp) ++ DEF_LAYOUT_OFFS(return) ++ reg_save_size ++ }; ++ ++ public: ++ ++ static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors =false ); ++ static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false); ++ static int raOffset(void) { return return_off / 2; } ++ //Rmethod ++ static int methodOffset(void) { return s3_off / 2; } ++ ++ static int v0Offset(void) { return v0_off / 2; } ++ ++ static int fpResultOffset(void) { return fpr0_off / 2; } ++ ++ // During deoptimization only the result register need to be restored ++ // all the other values have already been extracted. 
++ static void restore_result_registers(MacroAssembler* masm); ++}; ++ ++OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors ) { ++ ++ // Always make the frame size 16-byte aligned ++ int frame_size_in_bytes = round_to(additional_frame_words*wordSize + ++ reg_save_size*BytesPerInt, 16); ++ // OopMap frame size is in compiler stack slots (jint's) not bytes or words ++ int frame_size_in_slots = frame_size_in_bytes / BytesPerInt; ++ // The caller will allocate additional_frame_words ++ int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt; ++ // CodeBlob frame size is in words. ++ int frame_size_in_words = frame_size_in_bytes / wordSize; ++ *total_frame_words = frame_size_in_words; ++ ++ // save registers ++ ++ __ add_simm16(SP, SP, - reg_save_size * jintSize); ++ ++ __ fstd(F0, SP, fpr0_off * jintSize); __ fstd(F1, SP, fpr1_off * jintSize); ++ __ fstd(F2, SP, fpr2_off * jintSize); __ fstd(F3, SP, fpr3_off * jintSize); ++ __ fstd(F4, SP, fpr4_off * jintSize); __ fstd(F5, SP, fpr5_off * jintSize); ++ __ fstd(F6, SP, fpr6_off * jintSize); __ fstd(F7, SP, fpr7_off * jintSize); ++ __ fstd(F8, SP, fpr8_off * jintSize); __ fstd(F9, SP, fpr9_off * jintSize); ++ __ fstd(F10, SP, fpr10_off * jintSize); __ fstd(F11, SP, fpr11_off * jintSize); ++ __ fstd(F12, SP, fpr12_off * jintSize); __ fstd(F13, SP, fpr13_off * jintSize); ++ __ fstd(F14, SP, fpr14_off * jintSize); __ fstd(F15, SP, fpr15_off * jintSize); ++ __ fstd(F16, SP, fpr16_off * jintSize); __ fstd(F17, SP, fpr17_off * jintSize); ++ __ fstd(F18, SP, fpr18_off * jintSize); __ fstd(F19, SP, fpr19_off * jintSize); ++ __ fstd(F20, SP, fpr20_off * jintSize); __ fstd(F21, SP, fpr21_off * jintSize); ++ __ fstd(F22, SP, fpr22_off * jintSize); __ fstd(F23, SP, fpr23_off * jintSize); ++ __ fstd(F24, SP, fpr24_off * jintSize); __ fstd(F25, SP, fpr25_off * jintSize); ++ __ fstd(F26, SP, fpr26_off * jintSize); __ fstd(F27, SP, 
fpr27_off * jintSize); ++ __ fstd(F28, SP, fpr28_off * jintSize); __ fstd(F29, SP, fpr29_off * jintSize); ++ __ fstd(F30, SP, fpr30_off * jintSize); ++ __ stl(V0, SP, v0_off * jintSize); ++ __ stl(T0, SP, t0_off * jintSize); ++ __ stl(T1, SP, t1_off * jintSize); ++ __ stl(T2, SP, t2_off * jintSize); ++ __ stl(T3, SP, t3_off * jintSize); ++ __ stl(T4, SP, t4_off * jintSize); ++ __ stl(T5, SP, t5_off * jintSize); ++ __ stl(T6, SP, t6_off * jintSize); ++ __ stl(T7, SP, t7_off * jintSize); ++ __ stl(S0, SP, s0_off * jintSize); ++ __ stl(S1, SP, s1_off * jintSize); ++ __ stl(S2, SP, s2_off * jintSize); ++ __ stl(S3, SP, s3_off * jintSize); ++ __ stl(S4, SP, s4_off * jintSize); ++ __ stl(S5, SP, s5_off * jintSize); ++ __ stl(A0, SP, a0_off * jintSize); __ stl(A1, SP, a1_off * jintSize); ++ __ stl(A2, SP, a2_off * jintSize); __ stl(A3, SP, a3_off * jintSize); ++ __ stl(A4, SP, a4_off * jintSize); __ stl(A5, SP, a5_off * jintSize); ++ __ stl(T8, SP, t8_off * jintSize); ++ __ stl(T9, SP, t9_off * jintSize); ++ __ stl(T10, SP, t10_off * jintSize); ++ __ stl(T11, SP, t11_off * jintSize); ++ __ stl(T12, SP, t12_off * jintSize); ++ ++ __ stl(GP, SP, gp_off * jintSize); ++ __ stl(FP, SP, fp_off * jintSize); ++ __ stl(RA, SP, return_off * jintSize); ++ __ add_simm16(FP, SP, fp_off * jintSize); ++ ++ OopMapSet *oop_maps = new OopMapSet(); ++ OopMap* map = new OopMap( frame_size_in_slots, 0 ); ++ ++ ++#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots) ++ map->set_callee_saved(STACK_OFFSET( v0_off), V0->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t0_off), T0->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t1_off), T1->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t2_off), T2->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t3_off), T3->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t4_off), T4->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t5_off), T5->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t6_off), 
T6->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t7_off), T7->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( s0_off), S0->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( s1_off), S1->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( s2_off), S2->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( s3_off), S3->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( s4_off), S4->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( s5_off), S5->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( a0_off), A0->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( a1_off), A1->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( a2_off), A2->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( a3_off), A3->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( a4_off), A4->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( a5_off), A5->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t8_off), T8->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t9_off), T9->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t10_off), T10->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t11_off), T11->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( t12_off), T12->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( gp_off), GP->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fp_off), FP->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( return_off), RA->as_VMReg()); ++ ++ map->set_callee_saved(STACK_OFFSET( fpr0_off), F0->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr1_off), F1->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr2_off), F2->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr3_off), F3->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr4_off), F4->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr5_off), F5->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr6_off), F6->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr7_off), F7->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr8_off), F8->as_VMReg()); 
++ map->set_callee_saved(STACK_OFFSET( fpr9_off), F9->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr10_off), F10->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr11_off), F11->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr12_off), F12->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr13_off), F13->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr14_off), F14->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr15_off), F15->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr16_off), F16->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr17_off), F17->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr18_off), F18->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr19_off), F19->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr20_off), F20->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr21_off), F21->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr22_off), F22->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr23_off), F23->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr24_off), F24->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr25_off), F25->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr26_off), F26->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr27_off), F27->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr28_off), F28->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr29_off), F29->as_VMReg()); ++ map->set_callee_saved(STACK_OFFSET( fpr30_off), F30->as_VMReg()); ++ ++#undef STACK_OFFSET ++ return map; ++} ++ ++ ++// Pop the current frame and restore all the registers that we ++// saved. 
++void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) { ++ __ fldd(F0, SP, fpr0_off * jintSize); __ fldd(F1, SP, fpr1_off * jintSize); ++ __ fldd(F2, SP, fpr2_off * jintSize); __ fldd(F3, SP, fpr3_off * jintSize); ++ __ fldd(F4, SP, fpr4_off * jintSize); __ fldd(F5, SP, fpr5_off * jintSize); ++ __ fldd(F6, SP, fpr6_off * jintSize); __ fldd(F7, SP, fpr7_off * jintSize); ++ __ fldd(F8, SP, fpr8_off * jintSize); __ fldd(F9, SP, fpr9_off * jintSize); ++ __ fldd(F10, SP, fpr10_off * jintSize); __ fldd(F11, SP, fpr11_off * jintSize); ++ __ fldd(F12, SP, fpr12_off * jintSize); __ fldd(F13, SP, fpr13_off * jintSize); ++ __ fldd(F14, SP, fpr14_off * jintSize); __ fldd(F15, SP, fpr15_off * jintSize); ++ __ fldd(F16, SP, fpr16_off * jintSize); __ fldd(F17, SP, fpr17_off * jintSize); ++ __ fldd(F18, SP, fpr18_off * jintSize); __ fldd(F19, SP, fpr19_off * jintSize); ++ __ fldd(F20, SP, fpr20_off * jintSize); __ fldd(F21, SP, fpr21_off * jintSize); ++ __ fldd(F22, SP, fpr22_off * jintSize); __ fldd(F23, SP, fpr23_off * jintSize); ++ __ fldd(F24, SP, fpr24_off * jintSize); __ fldd(F25, SP, fpr25_off * jintSize); ++ __ fldd(F26, SP, fpr26_off * jintSize); __ fldd(F27, SP, fpr27_off * jintSize); ++ __ fldd(F28, SP, fpr28_off * jintSize); __ fldd(F29, SP, fpr29_off * jintSize); ++ __ fldd(F30, SP, fpr30_off * jintSize); ++ ++ __ ldl(V0, SP, v0_off * jintSize); ++ __ ldl(T0, SP, t0_off * jintSize); ++ __ ldl(T1, SP, t1_off * jintSize); ++ __ ldl(T2, SP, t2_off * jintSize); ++ __ ldl(T3, SP, t3_off * jintSize); ++ __ ldl(T4, SP, t4_off * jintSize); ++ __ ldl(T5, SP, t5_off * jintSize); ++ __ ldl(T6, SP, t6_off * jintSize); ++ __ ldl(T7, SP, t7_off * jintSize); ++ __ ldl(S0, SP, s0_off * jintSize); ++ __ ldl(S1, SP, s1_off * jintSize); ++ __ ldl(S2, SP, s2_off * jintSize); ++ __ ldl(S3, SP, s3_off * jintSize); ++ __ ldl(S4, SP, s4_off * jintSize); ++ __ ldl(S5, SP, s5_off * jintSize); ++ __ ldl(A0, SP, a0_off * jintSize); __ ldl(A1, SP, a1_off * 
jintSize); ++ __ ldl(A2, SP, a2_off * jintSize); __ ldl(A3, SP, a3_off * jintSize); ++ __ ldl(A4, SP, a4_off * jintSize); __ ldl(A5, SP, a5_off * jintSize); ++ __ ldl(T8, SP, t8_off * jintSize); ++ __ ldl(T9, SP, t9_off * jintSize); ++ __ ldl(T10, SP, t10_off * jintSize); ++ __ ldl(T11, SP, t11_off * jintSize); ++ __ ldl(T12, SP, t12_off * jintSize); ++ ++ __ ldl(GP, SP, gp_off * jintSize); ++ __ ldl(FP, SP, fp_off * jintSize); ++ __ ldl(RA, SP, return_off * jintSize); ++ ++ __ add_simm16(SP, SP, reg_save_size * jintSize); ++} ++ ++// Pop the current frame and restore the registers that might be holding ++// a result. ++void RegisterSaver::restore_result_registers(MacroAssembler* masm) { ++ ++ // Just restore result register. Only used by deoptimization. By ++ // now any callee save register that needs to be restore to a c2 ++ // caller of the deoptee has been extracted into the vframeArray ++ // and will be stuffed into the c2i adapter we create for later ++ // restoration so only result registers need to be restored here. ++ ++ __ ldl(V0, SP, v0_off * jintSize); ++ __ add_simm16(SP, SP, return_off * jintSize); ++} ++ ++// Is vector's size (in bytes) bigger than a size saved by default? ++// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions. ++bool SharedRuntime::is_wide_vector(int size) { ++ return size > 16; ++} ++ ++// The java_calling_convention describes stack locations as ideal slots on ++// a frame with no abi restrictions. Since we must observe abi restrictions ++// (like the placement of the register window) the slots must be biased by ++// the following value. 
++ ++static int reg2offset_in(VMReg r) { ++ // Account for saved fp and return address ++ // This should really be in_preserve_stack_slots ++ return (r->reg2stack() + 2 * VMRegImpl::slots_per_word) * VMRegImpl::stack_slot_size; // + 2 * VMRegImpl::stack_slot_size); ++} ++ ++static int reg2offset_out(VMReg r) { ++ return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; ++} ++ ++// --------------------------------------------------------------------------- ++// Read the array of BasicTypes from a signature, and compute where the ++// arguments should go. Values in the VMRegPair regs array refer to 4-byte ++// quantities. Values less than SharedInfo::stack0 are registers, those above ++// refer to 4-byte stack slots. All stack slots are based off of the stack pointer ++// as framesizes are fixed. ++// VMRegImpl::stack0 refers to the first slot 0(sp). ++// and VMRegImpl::stack0+1 refers to the memory word 4-byes higher. Register ++// up to RegisterImpl::number_of_registers) are the 32-bit ++// integer registers. ++ ++// Pass first five oop/int args in registers T0, A0 - A3. ++// Pass float/double/long args in stack. ++// Doubles have precedence, so if you pass a mix of floats and doubles ++// the doubles will grab the registers before the floats will. ++ ++// Note: the INPUTS in sig_bt are in units of Java argument words, which are ++// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit ++// units regardless of build. ++ ++ ++// --------------------------------------------------------------------------- ++// The compiled Java calling convention. ++// Pass first five oop/int args in registers T0, A0 - A3. ++// Pass float/double/long args in stack. ++// Doubles have precedence, so if you pass a mix of floats and doubles ++// the doubles will grab the registers before the floats will. 
++ ++int SharedRuntime::java_calling_convention(const BasicType *sig_bt, ++ VMRegPair *regs, ++ int total_args_passed, ++ int is_outgoing) { ++ ++ // Create the mapping between argument positions and ++ // registers. ++ static const Register INT_ArgReg[Argument::n_register_parameters] = { ++ A1, A2, A3, A4, A5, A0 ++ }; ++ ++ static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters] = { ++ F16, F17, F18, F19, F20, F21 ++ }; ++ ++ ++ uint args = 0; ++ uint stk_args = 0; // inc by 2 each time ++ ++ for (int i = 0; i < total_args_passed; i++) { ++ switch (sig_bt[i]) { ++ case T_VOID: ++ // halves of T_LONG or T_DOUBLE ++ assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); ++ regs[i].set_bad(); ++ break; ++ case T_BOOLEAN: ++ case T_CHAR: ++ case T_BYTE: ++ case T_SHORT: ++ case T_INT: ++ if (args < Argument::n_register_parameters) { ++ regs[i].set1(INT_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ case T_LONG: ++ assert(sig_bt[i + 1] == T_VOID, "expecting half"); ++ // fall through ++ case T_OBJECT: ++ case T_ARRAY: ++ case T_ADDRESS: ++ if (args < Argument::n_register_parameters) { ++ regs[i].set2(INT_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ case T_FLOAT: ++ if (args < Argument::n_float_register_parameters) { ++ regs[i].set1(FP_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ case T_DOUBLE: ++ assert(sig_bt[i + 1] == T_VOID, "expecting half"); ++ if (args < Argument::n_float_register_parameters) { ++ regs[i].set2(FP_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ default: ++ ShouldNotReachHere(); ++ break; ++ } ++ } ++ ++ return round_to(stk_args, 2); ++} ++ ++// Helper class mostly to avoid 
passing masm everywhere, and handle store ++// displacement overflow logic for LP64 ++class AdapterGenerator { ++ MacroAssembler *masm; ++ Register Rdisp; ++ void set_Rdisp(Register r) { Rdisp = r; } ++ ++ void patch_callers_callsite(); ++ ++ // base+st_off points to top of argument ++ int arg_offset(const int st_off) { return st_off; } ++ int next_arg_offset(const int st_off) { ++ return st_off - Interpreter::stackElementSize; ++ } ++ ++ // On _LP64 argument slot values are loaded first into a register ++ // because they might not fit into displacement. ++ Register arg_slot(const int st_off); ++ Register next_arg_slot(const int st_off); ++ ++ // Stores long into offset pointed to by base ++ void store_c2i_long(Register r, Register base, ++ const int st_off, bool is_stack); ++ void store_c2i_object(Register r, Register base, ++ const int st_off); ++ void store_c2i_int(Register r, Register base, ++ const int st_off); ++ void store_c2i_double(VMReg r_2, ++ VMReg r_1, Register base, const int st_off); ++ void store_c2i_float(FloatRegister f, Register base, ++ const int st_off); ++ ++ public: ++ //void tag_stack(const BasicType sig, int st_off); ++ void gen_c2i_adapter(int total_args_passed, ++ // VMReg max_arg, ++ int comp_args_on_stack, // VMRegStackSlots ++ const BasicType *sig_bt, ++ const VMRegPair *regs, ++ Label& skip_fixup); ++ void gen_i2c_adapter(int total_args_passed, ++ // VMReg max_arg, ++ int comp_args_on_stack, // VMRegStackSlots ++ const BasicType *sig_bt, ++ const VMRegPair *regs); ++ ++ AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {} ++}; ++ ++ ++// Patch the callers callsite with entry to compiled code if it exists. ++void AdapterGenerator::patch_callers_callsite() { ++ Label L; ++ // __ verify_oop(Rmethod); ++ __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset())); ++ __ beq(AT, L); ++ // Schedule the branch target address early. 
++ // Call into the VM to patch the caller, then jump to compiled callee ++ // V0 isn't live so capture return address while we easily can ++ __ move(V0, RA); ++ ++ __ pushad(); ++#ifdef COMPILER2 ++ // C2 may leave the stack dirty if not in SSE2+ mode ++ __ empty_FPU_stack(); ++#endif ++ ++ // VM needs caller's callsite ++ // VM needs target method ++ ++ __ move(A0, Rmethod); ++ __ move(A1, V0); ++//we should preserve the return address ++ //__ verify_oop(Rmethod); ++ __ move(S0, SP); ++ __ move(AT, -(StackAlignmentInBytes)); // align the stack ++ __ and_reg(SP, SP, AT); ++ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), ++ relocInfo::runtime_call_type); ++ ++ __ move(SP, S0); ++ __ popad(); ++ __ BIND(L); ++} ++ ++Register AdapterGenerator::arg_slot(const int st_off) { ++ Unimplemented(); ++} ++ ++Register AdapterGenerator::next_arg_slot(const int st_off){ ++ Unimplemented(); ++} ++ ++// Stores long into offset pointed to by base ++void AdapterGenerator::store_c2i_long(Register r, Register base, ++ const int st_off, bool is_stack) { ++ Unimplemented(); ++} ++ ++void AdapterGenerator::store_c2i_object(Register r, Register base, ++ const int st_off) { ++ Unimplemented(); ++} ++ ++void AdapterGenerator::store_c2i_int(Register r, Register base, ++ const int st_off) { ++ Unimplemented(); ++} ++ ++// Stores into offset pointed to by base ++void AdapterGenerator::store_c2i_double(VMReg r_2, ++ VMReg r_1, Register base, const int st_off) { ++ Unimplemented(); ++} ++ ++void AdapterGenerator::store_c2i_float(FloatRegister f, Register base, ++ const int st_off) { ++ Unimplemented(); ++} ++ ++void AdapterGenerator::gen_c2i_adapter( ++ int total_args_passed, ++ // VMReg max_arg, ++ int comp_args_on_stack, // VMRegStackSlots ++ const BasicType *sig_bt, ++ const VMRegPair *regs, ++ Label& skip_fixup) { ++ ++ // Before we get into the guts of the C2I adapter, see if we should be here ++ // at all. 
We've come from compiled code and are attempting to jump to the ++ // interpreter, which means the caller made a static call to get here ++ // (vcalls always get a compiled target if there is one). Check for a ++ // compiled target. If there is one, we need to patch the caller's call. ++ // However we will run interpreted if we come thru here. The next pass ++ // thru the call site will run compiled. If we ran compiled here then ++ // we can (theorectically) do endless i2c->c2i->i2c transitions during ++ // deopt/uncommon trap cycles. If we always go interpreted here then ++ // we can have at most one and don't need to play any tricks to keep ++ // from endlessly growing the stack. ++ // ++ // Actually if we detected that we had an i2c->c2i transition here we ++ // ought to be able to reset the world back to the state of the interpreted ++ // call and not bother building another interpreter arg area. We don't ++ // do that at this point. ++ ++ patch_callers_callsite(); ++ ++ __ BIND(skip_fixup); ++ ++#ifdef COMPILER2 ++ __ empty_FPU_stack(); ++#endif ++ //this is for native ? ++ // Since all args are passed on the stack, total_args_passed * interpreter_ ++ // stack_element_size is the ++ // space we need. ++ int extraspace = total_args_passed * Interpreter::stackElementSize; ++ ++ // stack is aligned, keep it that way ++ extraspace = round_to(extraspace, 2*wordSize); ++ ++ // Get return address ++ __ move(V0, RA); ++ // set senderSP value ++ __ move(Rsender, SP); ++ __ add_simm16(SP, SP, -extraspace); ++ ++ // Now write the args into the outgoing interpreter space ++ for (int i = 0; i < total_args_passed; i++) { ++ if (sig_bt[i] == T_VOID) { ++ assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); ++ continue; ++ } ++ ++ // st_off points to lowest address on stack. 
++ int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize; ++ // Say 4 args: ++ // i st_off ++ // 0 12 T_LONG ++ // 1 8 T_VOID ++ // 2 4 T_OBJECT ++ // 3 0 T_BOOL ++ VMReg r_1 = regs[i].first(); ++ VMReg r_2 = regs[i].second(); ++ if (!r_1->is_valid()) { ++ assert(!r_2->is_valid(), ""); ++ continue; ++ } ++ if (r_1->is_stack()) { ++ // memory to memory use fpu stack top ++ int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace; ++ if (!r_2->is_valid()) { ++ __ ld_ptr(AT, SP, ld_off); ++ __ st_ptr(AT, SP, st_off); ++ ++ } else { ++ ++ ++ int next_off = st_off - Interpreter::stackElementSize; ++ __ ld_ptr(AT, SP, ld_off); ++ __ st_ptr(AT, SP, st_off); ++ ++ // Ref to is_Register condition ++ if(sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ++ __ st_ptr(AT,SP,st_off - 8); ++ } ++ } else if (r_1->is_Register()) { ++ Register r = r_1->as_Register(); ++ if (!r_2->is_valid()) { ++ __ stl(r,SP, st_off); ++ } else { ++ // long/double in gpr ++ __ stl(r,SP, st_off); ++ // In [java/util/zip/ZipFile.java] ++ // ++ // private static native long open(String name, int mode, long lastModified); ++ // private static native int getTotal(long jzfile); ++ // ++ // We need to transfer T_LONG paramenters from a compiled method to a native method. 
++ // It's a complex process: ++ // ++ // Caller -> lir_static_call -> gen_resolve_stub ++ // -> -- resolve_static_call_C ++ // `- gen_c2i_adapter() [*] ++ // | ++ // `- AdapterHandlerLibrary::get_create_apapter_index ++ // -> generate_native_entry ++ // -> InterpreterRuntime::SignatureHandlerGenerator::pass_long [**] ++ // ++ // In [**], T_Long parameter is stored in stack as: ++ // ++ // (high) ++ // | | ++ // ----------- ++ // | 8 bytes | ++ // | (void) | ++ // ----------- ++ // | 8 bytes | ++ // | (long) | ++ // ----------- ++ // | | ++ // (low) ++ // ++ // However, the sequence is reversed here: ++ // ++ // (high) ++ // | | ++ // ----------- ++ // | 8 bytes | ++ // | (long) | ++ // ----------- ++ // | 8 bytes | ++ // | (void) | ++ // ----------- ++ // | | ++ // (low) ++ // ++ // So I stored another 8 bytes in the T_VOID slot. It then can be accessed from generate_native_entry(). ++ // ++ if (sig_bt[i] == T_LONG) ++ __ stl(r,SP, st_off - 8); ++ } ++ } else if (r_1->is_FloatRegister()) { ++ assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register"); ++ ++ FloatRegister fr = r_1->as_FloatRegister(); ++ if (sig_bt[i] == T_FLOAT) ++ __ fsts(fr,SP, st_off); ++ else { ++ __ fstd(fr,SP, st_off); ++ __ fstd(fr,SP, st_off - 8); // T_DOUBLE needs two slots ++ } ++ } ++ } ++ ++ // Schedule the branch target address early. ++ __ ld_ptr(AT, Rmethod,in_bytes(Method::interpreter_entry_offset()) ); ++ // And repush original return address ++ __ move(RA, V0); ++ __ jmp(AT); ++} ++ ++void AdapterGenerator::gen_i2c_adapter(int total_args_passed, ++ // VMReg max_arg, ++ int comp_args_on_stack, // VMRegStackSlots ++ const BasicType *sig_bt, ++ const VMRegPair *regs) { ++ ++ // Generate an I2C adapter: adjust the I-frame to make space for the C-frame ++ // layout. Lesp was saved by the calling I-frame and will be restored on ++ // return. Meanwhile, outgoing arg space is all owned by the callee ++ // C-frame, so we can mangle it at will. 
After adjusting the frame size, ++ // hoist register arguments and repack other args according to the compiled ++ // code convention. Finally, end in a jump to the compiled code. The entry ++ // point address is the start of the buffer. ++ ++ // We will only enter here from an interpreted frame and never from after ++ // passing thru a c2i. Azul allowed this but we do not. If we lose the ++ // race and use a c2i we will remain interpreted for the race loser(s). ++ // the possibility of having c2i -> i2c -> c2i -> ... endless transitions. ++ ++ ++ __ move(T12, SP); ++ ++ // Cut-out for having no stack args. Since up to 2 int/oop args are passed ++ // in registers, we will occasionally have no stack args. ++ int comp_words_on_stack = 0; ++ if (comp_args_on_stack) { ++ // Sig words on the stack are greater-than VMRegImpl::stack0. Those in ++ // registers are below. By subtracting stack0, we either get a negative ++ // number (all values in registers) or the maximum stack slot accessed. ++ // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg); ++ // Convert 4-byte stack slots to words. ++ comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord; ++ // Round up to miminum stack alignment, in wordSize ++ comp_words_on_stack = round_to(comp_words_on_stack, 2); ++ __ add_simm16(SP, SP, -comp_words_on_stack * wordSize); ++ } ++ ++ // Align the outgoing SP ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); ++ // push the return address on the stack (note that pushing, rather ++ // than storing it, yields the correct frame alignment for the callee) ++ // Put saved SP in another register ++ const Register saved_sp = V0; ++ __ move(saved_sp, T12); ++ ++ ++ // Will jump to the compiled code just as if compiled code was doing it. ++ // Pre-load the register-jump target early, to schedule it better. ++ __ ldl(T12, Rmethod, in_bytes(Method::from_compiled_offset())); ++ ++ // Now generate the shuffle code. 
Pick up all register args and move the ++ // rest through the floating point stack top. ++ for (int i = 0; i < total_args_passed; i++) { ++ if (sig_bt[i] == T_VOID) { ++ // Longs and doubles are passed in native word order, but misaligned ++ // in the 32-bit build. ++ assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); ++ continue; ++ } ++ ++ // Pick up 0, 1 or 2 words from SP+offset. ++ ++ //assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?"); ++ // Load in argument order going down. ++ int ld_off = (total_args_passed -1 - i)*Interpreter::stackElementSize; ++ // Point to interpreter value (vs. tag) ++ int next_off = ld_off - Interpreter::stackElementSize; ++ VMReg r_1 = regs[i].first(); ++ VMReg r_2 = regs[i].second(); ++ if (!r_1->is_valid()) { ++ assert(!r_2->is_valid(), ""); ++ continue; ++ } ++ if (r_1->is_stack()) { ++ // Convert stack slot to an SP offset (+ wordSize to account for return address ) ++ int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size; ++ //+ wordSize; ++ ++ if (!r_2->is_valid()) { ++ __ ldl(AT, saved_sp, ld_off); ++ __ stl(AT, SP, st_off); ++ } else { ++ __ ldl(AT, saved_sp, ld_off); ++ if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ++ __ ldl(AT, saved_sp, ld_off - 8); ++ __ stl(AT, SP, st_off); ++ } ++ } else if (r_1->is_Register()) { // Register argument ++ Register r = r_1->as_Register(); ++ if (r_2->is_valid()) { ++ // So r_2 gets loaded from high address regardless of the platform ++ assert(r_2->as_Register() == r_1->as_Register(), ""); ++ __ ldl(r, saved_sp, ld_off); ++ ++ // ++ // For T_LONG type, the real layout is as below: ++ // ++ // (high) ++ // | | ++ // ----------- ++ // | 8 bytes | ++ // | (void) | ++ // ----------- ++ // | 8 bytes | ++ // | (long) | ++ // ----------- ++ // | | ++ // (low) ++ // ++ // We should load the low-8 bytes. 
++ // ++ if (sig_bt[i] == T_LONG) ++ __ ldl(r, saved_sp, ld_off - 8); ++ } else { ++ __ ldw(r, saved_sp, ld_off); ++ } ++ } else if (r_1->is_FloatRegister()) { // Float Register ++ assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register"); ++ ++ FloatRegister fr = r_1->as_FloatRegister(); ++ if (sig_bt[i] == T_FLOAT) ++ __ flds(fr, saved_sp, ld_off); ++ else { ++ __ fldd(fr, saved_sp, ld_off); ++ __ fldd(fr, saved_sp, ld_off - 8); ++ } ++ } ++ } ++ ++ // 6243940 We might end up in handle_wrong_method if ++ // the callee is deoptimized as we race thru here. If that ++ // happens we don't want to take a safepoint because the ++ // caller frame will look interpreted and arguments are now ++ // "compiled" so it is much better to make this transition ++ // invisible to the stack walking code. Unfortunately if ++ // we try and find the callee by normal means a safepoint ++ // is possible. So we stash the desired callee in the thread ++ // and the vm will find there should this case occur. ++ __ get_thread(T11); ++ __ stl(Rmethod, T11, in_bytes(JavaThread::callee_target_offset())); ++ ++ // move methodOop to V0 in case we end up in an c2i adapter. ++ // the c2i adapters expect methodOop in V0 (c2) because c2's ++ // resolve stubs return the result (the method) in V0. ++ // I'd love to fix this. ++ __ move(V0, Rmethod); ++ __ jmp(T12); ++} ++ ++// --------------------------------------------------------------- ++AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, ++ int total_args_passed, ++ // VMReg max_arg, ++ int comp_args_on_stack, // VMRegStackSlots ++ const BasicType *sig_bt, ++ const VMRegPair *regs, ++ AdapterFingerPrint* fingerprint) { ++ address i2c_entry = __ pc(); ++ ++ AdapterGenerator agen(masm); ++ ++ agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs); ++ ++ ++ // ------------------------------------------------------------------------- ++ // Generate a C2I adapter. 
On entry we know S3 holds the methodOop. The ++ // args start out packed in the compiled layout. They need to be unpacked ++ // into the interpreter layout. This will almost always require some stack ++ // space. We grow the current (compiled) stack, then repack the args. We ++ // finally end in a jump to the generic interpreter entry point. On exit ++ // from the interpreter, the interpreter will restore our SP (lest the ++ // compiled code, which relys solely on SP and not FP, get sick). ++ ++ address c2i_unverified_entry = __ pc(); ++ Label skip_fixup; ++ { ++ Register holder = T1; ++ Register receiver = A1; ++ Register temp = T11; ++ address ic_miss = SharedRuntime::get_ic_miss_stub(); ++ ++ Label missed; ++ ++ __ verify_oop(holder); ++ //add for compressedoops ++ __ load_klass(temp, receiver); ++ __ verify_oop(temp); ++ ++ __ ld_ptr(AT, holder, CompiledICHolder::holder_klass_offset()); ++ __ ld_ptr(Rmethod, holder, CompiledICHolder::holder_metadata_offset()); ++ __ bne(AT, temp, missed); ++ // Method might have been compiled since the call site was patched to ++ // interpreted if that is the case treat it as a miss so we can get ++ // the call site corrected. ++ __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset())); ++ __ beq(AT, skip_fixup); ++ __ BIND(missed); ++ ++ __ jmp(ic_miss, relocInfo::runtime_call_type); ++ } ++ ++ address c2i_entry = __ pc(); ++ ++ agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); ++ ++ __ flush(); ++ return AdapterHandlerLibrary::new_entry(fingerprint,i2c_entry, c2i_entry, c2i_unverified_entry); ++} ++ ++int SharedRuntime::c_calling_convention(const BasicType *sig_bt, ++ VMRegPair *regs, ++ VMRegPair *regs2, ++ int total_args_passed) { ++ assert(regs2 == NULL, "not needed on SW64"); ++ // Return the number of VMReg stack_slots needed for the args. ++ // This value does not include an abi space (like register window ++ // save area). 
++ ++ // The native convention is V8 if !LP64 ++ // The LP64 convention is the V9 convention which is slightly more sane. ++ ++ // We return the amount of VMReg stack slots we need to reserve for all ++ // the arguments NOT counting out_preserve_stack_slots. Since we always ++ // have space for storing at least 6 registers to memory we start with that. ++ // See int_stk_helper for a further discussion. ++ // We return the amount of VMRegImpl stack slots we need to reserve for all ++ // the arguments NOT counting out_preserve_stack_slots. ++ static const Register INT_ArgReg[Argument::n_register_parameters] = { ++ A0, A1, A2, A3, A4, A5 ++ }; ++ static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters] = { ++ F16, F17, F18, F19, F20, F21 ++ }; ++ uint args = 0; ++ uint stk_args = 0; // inc by 2 each time ++ ++// Example: ++// n java.lang.UNIXProcess::forkAndExec ++// private native int forkAndExec(byte[] prog, ++// byte[] argBlock, int argc, ++// byte[] envBlock, int envc, ++// byte[] dir, ++// boolean redirectErrorStream, ++// FileDescriptor stdin_fd, ++// FileDescriptor stdout_fd, ++// FileDescriptor stderr_fd) ++// JNIEXPORT jint JNICALL ++// Java_java_lang_UNIXProcess_forkAndExec(JNIEnv *env, ++// jobject process, ++// jbyteArray prog, ++// jbyteArray argBlock, jint argc, ++// jbyteArray envBlock, jint envc, ++// jbyteArray dir, ++// jboolean redirectErrorStream, ++// jobject stdin_fd, ++// jobject stdout_fd, ++// jobject stderr_fd) ++// ++// ::c_calling_convention ++// 0: // env <-- a0 ++// 1: L // klass/obj <-- t0 => a1 ++// 2: [ // prog[] <-- a0 => a2 ++// 3: [ // argBlock[] <-- a1 => a3 ++// 4: I // argc ++// 5: [ // envBlock[] <-- a3 => a5 ++// 6: I // envc ++// 7: [ // dir[] <-- a5 => a7 ++// 8: Z // redirectErrorStream a6 => sp[0] ++// 9: L // stdin a7 => sp[8] ++// 10: L // stdout fp[16] => sp[16] ++// 11: L // stderr fp[24] => sp[24] ++// ++ for (int i = 0; i < total_args_passed; i++) { ++ switch (sig_bt[i]) { ++ case T_VOID: // Halves 
of longs and doubles ++ assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); ++ regs[i].set_bad(); ++ break; ++ case T_BOOLEAN: ++ case T_CHAR: ++ case T_BYTE: ++ case T_SHORT: ++ case T_INT: ++ if (args < Argument::n_register_parameters) { ++ regs[i].set1(INT_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ case T_LONG: ++ assert(sig_bt[i + 1] == T_VOID, "expecting half"); ++ // fall through ++ case T_OBJECT: ++ case T_ARRAY: ++ case T_ADDRESS: ++ case T_METADATA: ++ if (args < Argument::n_register_parameters) { ++ regs[i].set2(INT_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ case T_FLOAT: ++ if (args < Argument::n_float_register_parameters) { ++ regs[i].set1(FP_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set1(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ case T_DOUBLE: ++ assert(sig_bt[i + 1] == T_VOID, "expecting half"); ++ if (args < Argument::n_float_register_parameters) { ++ regs[i].set2(FP_ArgReg[args++]->as_VMReg()); ++ } else { ++ regs[i].set2(VMRegImpl::stack2reg(stk_args)); ++ stk_args += 2; ++ } ++ break; ++ default: ++ ShouldNotReachHere(); ++ break; ++ } ++ } ++ ++ return round_to(stk_args, 2); ++} ++ ++// --------------------------------------------------------------------------- ++void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { ++ // We always ignore the frame_slots arg and just use the space just below frame pointer ++ // which by this time is free to use ++ switch (ret_type) { ++ case T_FLOAT: ++ __ fsts(FSF, FP, -wordSize); ++ break; ++ case T_DOUBLE: ++ __ fstd(FSF, FP, -wordSize ); ++ break; ++ case T_VOID: break; ++ case T_LONG: ++ __ stl(V0, FP, -wordSize); ++ break; ++ case T_OBJECT: ++ case T_ARRAY: ++ __ stl(V0, FP, -wordSize); ++ break; ++ default: { ++ __ stw(V0, 
FP, -wordSize); ++ } ++ } ++} ++ ++void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { ++ // We always ignore the frame_slots arg and just use the space just below frame pointer ++ // which by this time is free to use ++ switch (ret_type) { ++ case T_FLOAT: ++ __ flds(FSF, FP, -wordSize); ++ break; ++ case T_DOUBLE: ++ __ fldd(FSF, FP, -wordSize ); ++ break; ++ case T_LONG: ++ __ ldl(V0, FP, -wordSize); ++ break; ++ case T_VOID: break; ++ case T_OBJECT: ++ case T_ARRAY: ++ __ ldl(V0, FP, -wordSize); ++ break; ++ default: { ++ __ ldw(V0, FP, -wordSize); ++ } ++ } ++} ++ ++static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { ++ for ( int i = first_arg ; i < arg_count ; i++ ) { ++ if (args[i].first()->is_Register()) { ++ __ push(args[i].first()->as_Register()); ++ } else if (args[i].first()->is_FloatRegister()) { ++ __ push(args[i].first()->as_FloatRegister()); ++ } ++ } ++} ++ ++static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { ++ for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) { ++ if (args[i].first()->is_Register()) { ++ __ pop(args[i].first()->as_Register()); ++ } else if (args[i].first()->is_FloatRegister()) { ++ __ pop(args[i].first()->as_FloatRegister()); ++ } ++ } ++} ++ ++// A simple move of integer like type ++static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { ++ if (src.first()->is_stack()) { ++ if (dst.first()->is_stack()) { ++ // stack to stack ++ __ ldw(AT, FP, reg2offset_in(src.first())); ++ __ stl(AT,SP, reg2offset_out(dst.first())); ++ } else { ++ // stack to reg ++ __ ldw(dst.first()->as_Register(), FP, reg2offset_in(src.first())); ++ } ++ } else if (dst.first()->is_stack()) { ++ // reg to stack ++ __ stl(src.first()->as_Register(), SP, reg2offset_out(dst.first())); ++ } else { ++ if (dst.first() != src.first()){ ++ __ move(dst.first()->as_Register(), src.first()->as_Register()); ++ 
} ++ } ++} ++ ++// An oop arg. Must pass a handle not the oop itself ++static void object_move(MacroAssembler* masm, ++ OopMap* map, ++ int oop_handle_offset, ++ int framesize_in_slots, ++ VMRegPair src, ++ VMRegPair dst, ++ bool is_receiver, ++ int* receiver_offset) { ++ ++ // must pass a handle. First figure out the location we use as a handle ++ ++ if (src.first()->is_stack()) { ++ // Oop is already on the stack as an argument ++ Register rHandle = V0; ++ Label nil; ++ __ xor_ins(rHandle, rHandle, rHandle); ++ __ ldl(AT, FP, reg2offset_in(src.first())); ++ __ beq(AT, nil); ++ __ lea(rHandle, Address(FP, reg2offset_in(src.first()))); ++ __ BIND(nil); ++ if(dst.first()->is_stack())__ stl( rHandle, SP, reg2offset_out(dst.first())); ++ else __ move( (dst.first())->as_Register(),rHandle); ++ //if dst is register ++ int offset_in_older_frame = src.first()->reg2stack() ++ + SharedRuntime::out_preserve_stack_slots(); ++ map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); ++ if (is_receiver) { ++ *receiver_offset = (offset_in_older_frame ++ + framesize_in_slots) * VMRegImpl::stack_slot_size; ++ } ++ } else { ++ // Oop is in an a register we must store it to the space we reserve ++ // on the stack for oop_handles ++ const Register rOop = src.first()->as_Register(); ++ assert( (rOop->encoding() >= A0->encoding()) && (rOop->encoding() <= A5->encoding()),"wrong register"); ++ const Register rHandle = V0; ++ //Important: refer to java_calling_convertion ++ int oop_slot = (rOop->encoding() - A0->encoding()) * VMRegImpl::slots_per_word + oop_handle_offset; ++ int offset = oop_slot*VMRegImpl::stack_slot_size; ++ Label skip; ++ __ stl( rOop , SP, offset ); ++ map->set_oop(VMRegImpl::stack2reg(oop_slot)); ++ __ xor_ins( rHandle, rHandle, rHandle); ++ __ beq(rOop, skip); ++ __ lea(rHandle, Address(SP, offset)); ++ __ BIND(skip); ++ // Store the handle parameter ++ if(dst.first()->is_stack())__ stl( rHandle, SP, reg2offset_out(dst.first())); ++ else __ 
move((dst.first())->as_Register(), rHandle); ++ //if dst is register ++ ++ if (is_receiver) { ++ *receiver_offset = offset; ++ } ++ } ++} ++ ++// A float arg may have to do float reg int reg conversion ++static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { ++ assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move"); ++ ++ if (src.first()->is_stack()) { ++ if(dst.first()->is_stack()) { ++ __ flds(F16 , FP, reg2offset_in(src.first())); ++ __ fsts(F16 ,SP, reg2offset_out(dst.first())); ++ } ++ else ++ __ flds( dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first())); ++ } else { ++ // reg to stack ++ if(dst.first()->is_stack()) ++ __ fsts( src.first()->as_FloatRegister(),SP, reg2offset_out(dst.first())); ++ else ++ __ fmovs( dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); ++ } ++} ++ ++// A long move ++static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { ++ ++ // The only legal possibility for a long_move VMRegPair is: ++ // 1: two stack slots (possibly unaligned) ++ // as neither the java or C calling convention will use registers ++ // for longs. 
++ ++ if (src.first()->is_stack()) { ++ assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack"); ++ if( dst.first()->is_stack()){ ++ __ ldl(AT, FP, reg2offset_in(src.first())); ++ __ stl(AT, SP, reg2offset_out(dst.first())); ++ } else { ++ __ ldl( (dst.first())->as_Register() , FP, reg2offset_in(src.first())); ++ } ++ } else { ++ if( dst.first()->is_stack()){ ++ __ stl( (src.first())->as_Register(), SP, reg2offset_out(dst.first())); ++ } else{ ++ __ move( (dst.first())->as_Register() , (src.first())->as_Register()); ++ } ++ } ++} ++ ++// A double move ++static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { ++ ++ // The only legal possibilities for a double_move VMRegPair are: ++ // The painful thing here is that like long_move a VMRegPair might be ++ ++ // Because of the calling convention we know that src is either ++ // 1: a single physical register (xmm registers only) ++ // 2: two stack slots (possibly unaligned) ++ // dst can only be a pair of stack slots. 
++ ++ ++ if (src.first()->is_stack()) { ++ // source is all stack ++ if( dst.first()->is_stack()){ ++ __ fldd(F16, FP, reg2offset_in(src.first())); ++ __ fstd(F16, SP, reg2offset_out(dst.first())); ++ } else{ ++ __ fldd( (dst.first())->as_FloatRegister(), FP, reg2offset_in(src.first())); ++ } ++ ++ } else { ++ // reg to stack ++ // No worries about stack alignment ++ if( dst.first()->is_stack()){ ++ __ fstd( src.first()->as_FloatRegister(),SP, reg2offset_out(dst.first())); ++ } ++ else ++ __ fmovd( dst.first()->as_FloatRegister(), src.first()->as_FloatRegister()); ++ ++ } ++} ++ ++static void verify_oop_args(MacroAssembler* masm, ++ methodHandle method, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ Register temp_reg = T12; // not part of any compiled calling seq ++ if (VerifyOops) { ++ for (int i = 0; i < method->size_of_parameters(); i++) { ++ if (sig_bt[i] == T_OBJECT || ++ sig_bt[i] == T_ARRAY) { ++ VMReg r = regs[i].first(); ++ assert(r->is_valid(), "bad oop arg"); ++ if (r->is_stack()) { ++ __ ldl(temp_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); ++ __ verify_oop(temp_reg); ++ } else { ++ __ verify_oop(r->as_Register()); ++ } ++ } ++ } ++ } ++} ++ ++static void gen_special_dispatch(MacroAssembler* masm, ++ methodHandle method, ++ const BasicType* sig_bt, ++ const VMRegPair* regs) { ++ verify_oop_args(masm, method, sig_bt, regs); ++ vmIntrinsics::ID iid = method->intrinsic_id(); ++ ++ // Now write the args into the outgoing interpreter space ++ bool has_receiver = false; ++ Register receiver_reg = noreg; ++ int member_arg_pos = -1; ++ Register member_reg = noreg; ++ int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); ++ if (ref_kind != 0) { ++ member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument ++ member_reg = S3; // known to be free at this point ++ has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); ++ } else if (iid == vmIntrinsics::_invokeBasic) { 
++ has_receiver = true; ++ } else { ++ fatal(err_msg_res("unexpected intrinsic id %d", iid)); ++ } ++ ++ if (member_reg != noreg) { ++ // Load the member_arg into register, if necessary. ++ SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs); ++ VMReg r = regs[member_arg_pos].first(); ++ if (r->is_stack()) { ++ __ ldl(member_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size)); ++ } else { ++ // no data motion is needed ++ member_reg = r->as_Register(); ++ } ++ } ++ ++ if (has_receiver) { ++ // Make sure the receiver is loaded into a register. ++ assert(method->size_of_parameters() > 0, "oob"); ++ assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object"); ++ VMReg r = regs[0].first(); ++ assert(r->is_valid(), "bad receiver arg"); ++ if (r->is_stack()) { ++ // Porting note: This assumes that compiled calling conventions always ++ // pass the receiver oop in a register. If this is not true on some ++ // platform, pick a temp and load the receiver from stack. ++ fatal("receiver always in a register"); ++ receiver_reg = SSR; // known to be free at this point ++ __ ldl(receiver_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size)); ++ } else { ++ // no data motion is needed ++ receiver_reg = r->as_Register(); ++ } ++ } ++ ++ // Figure out which address we are really jumping to: ++ MethodHandles::generate_method_handle_dispatch(masm, iid, ++ receiver_reg, member_reg, /*for_compiler_entry:*/ true); ++} ++ ++// --------------------------------------------------------------------------- ++// Generate a native wrapper for a given method. The method takes arguments ++// in the Java compiled code convention, marshals them to the native ++// convention (handlizes oops, etc), transitions to native, makes the call, ++// returns to java state (possibly blocking), unhandlizes any result and ++// returns. 
++nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, ++ methodHandle method, ++ int compile_id, ++ BasicType* in_sig_bt, ++ VMRegPair* in_regs, ++ BasicType ret_type) { ++ if (method->is_method_handle_intrinsic()) { ++ vmIntrinsics::ID iid = method->intrinsic_id(); ++ intptr_t start = (intptr_t)__ pc(); ++ int vep_offset = ((intptr_t)__ pc()) - start; ++ gen_special_dispatch(masm, ++ method, ++ in_sig_bt, ++ in_regs); ++ int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period ++ __ flush(); ++ int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually ++ return nmethod::new_native_nmethod(method, ++ compile_id, ++ masm->code(), ++ vep_offset, ++ frame_complete, ++ stack_slots / VMRegImpl::slots_per_word, ++ in_ByteSize(-1), ++ in_ByteSize(-1), ++ (OopMapSet*)NULL); ++ } ++ bool is_critical_native = true; ++ address native_func = method->critical_native_function(); ++ if (native_func == NULL) { ++ native_func = method->native_function(); ++ is_critical_native = false; ++ } ++ assert(native_func != NULL, "must have function"); ++ ++ // Native nmethod wrappers never take possesion of the oop arguments. ++ // So the caller will gc the arguments. The only thing we need an ++ // oopMap for is if the call is static ++ // ++ // An OopMap for lock (and class if static), and one for the VM call itself ++ OopMapSet *oop_maps = new OopMapSet(); ++ ++ // We have received a description of where all the java arg are located ++ // on entry to the wrapper. We need to convert these args to where ++ // the jni function will expect them. 
To figure out where they go ++ // we convert the java signature to a C signature by inserting ++ // the hidden arguments as arg[0] and possibly arg[1] (static method) ++ ++ const int total_in_args = method->size_of_parameters(); ++ int total_c_args = total_in_args; ++ if (!is_critical_native) { ++ total_c_args += 1; ++ if (method->is_static()) { ++ total_c_args++; ++ } ++ } else { ++ for (int i = 0; i < total_in_args; i++) { ++ if (in_sig_bt[i] == T_ARRAY) { ++ total_c_args++; ++ } ++ } ++ } ++ ++ BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); ++ VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); ++ BasicType* in_elem_bt = NULL; ++ ++ int argc = 0; ++ if (!is_critical_native) { ++ out_sig_bt[argc++] = T_ADDRESS; ++ if (method->is_static()) { ++ out_sig_bt[argc++] = T_OBJECT; ++ } ++ ++ for (int i = 0; i < total_in_args ; i++ ) { ++ out_sig_bt[argc++] = in_sig_bt[i]; ++ } ++ } else { ++ Thread* THREAD = Thread::current(); ++ in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args); ++ SignatureStream ss(method->signature()); ++ for (int i = 0; i < total_in_args ; i++ ) { ++ if (in_sig_bt[i] == T_ARRAY) { ++ // Arrays are passed as int, elem* pair ++ out_sig_bt[argc++] = T_INT; ++ out_sig_bt[argc++] = T_ADDRESS; ++ Symbol* atype = ss.as_symbol(CHECK_NULL); ++ const char* at = atype->as_C_string(); ++ if (strlen(at) == 2) { ++ assert(at[0] == '[', "must be"); ++ switch (at[1]) { ++ case 'B': in_elem_bt[i] = T_BYTE; break; ++ case 'C': in_elem_bt[i] = T_CHAR; break; ++ case 'D': in_elem_bt[i] = T_DOUBLE; break; ++ case 'F': in_elem_bt[i] = T_FLOAT; break; ++ case 'I': in_elem_bt[i] = T_INT; break; ++ case 'J': in_elem_bt[i] = T_LONG; break; ++ case 'S': in_elem_bt[i] = T_SHORT; break; ++ case 'Z': in_elem_bt[i] = T_BOOLEAN; break; ++ default: ShouldNotReachHere(); ++ } ++ } ++ } else { ++ out_sig_bt[argc++] = in_sig_bt[i]; ++ in_elem_bt[i] = T_VOID; ++ } ++ if (in_sig_bt[i] != T_VOID) { ++ assert(in_sig_bt[i] == ss.type(), "must 
match"); ++ ss.next(); ++ } ++ } ++ } ++ ++ // Now figure out where the args must be stored and how much stack space ++ // they require (neglecting out_preserve_stack_slots but space for storing ++ // the 1st six register arguments). It's weird see int_stk_helper. ++ int out_arg_slots; ++ out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); ++ ++ // Compute framesize for the wrapper. We need to handlize all oops in ++ // registers. We must create space for them here that is disjoint from ++ // the windowed save area because we have no control over when we might ++ // flush the window again and overwrite values that gc has since modified. ++ // (The live window race) ++ // ++ // We always just allocate 6 word for storing down these object. This allow ++ // us to simply record the base and use the Ireg number to decide which ++ // slot to use. (Note that the reg number is the inbound number not the ++ // outbound number). ++ // We must shuffle args to match the native convention, and include var-args space. ++ ++ // Calculate the total number of stack slots we will need. ++ ++ // First count the abi requirement plus all of the outgoing args ++ int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; ++ ++ // Now the space for the inbound oop handle area ++ int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers ++ if (is_critical_native) { ++ // Critical natives may have to call out so they need a save area ++ // for register arguments. 
++ int double_slots = 0; ++ int single_slots = 0; ++ for ( int i = 0; i < total_in_args; i++) { ++ if (in_regs[i].first()->is_Register()) { ++ const Register reg = in_regs[i].first()->as_Register(); ++ switch (in_sig_bt[i]) { ++ case T_BOOLEAN: ++ case T_BYTE: ++ case T_SHORT: ++ case T_CHAR: ++ case T_INT: single_slots++; break; ++ case T_ARRAY: // specific to LP64 (7145024) ++ case T_LONG: double_slots++; break; ++ default: ShouldNotReachHere(); ++ } ++ } else if (in_regs[i].first()->is_FloatRegister()) { ++ switch (in_sig_bt[i]) { ++ case T_FLOAT: single_slots++; break; ++ case T_DOUBLE: double_slots++; break; ++ default: ShouldNotReachHere(); ++ } ++ } ++ } ++ total_save_slots = double_slots * 2 + single_slots; ++ // align the save area ++ if (double_slots != 0) { ++ stack_slots = round_to(stack_slots, 2); ++ } ++ } ++ ++ int oop_handle_offset = stack_slots; ++ stack_slots += total_save_slots; ++ ++ // Now any space we need for handlizing a klass if static method ++ ++ int klass_slot_offset = 0; ++ int klass_offset = -1; ++ int lock_slot_offset = 0; ++ bool is_static = false; ++ ++ if (method->is_static()) { ++ klass_slot_offset = stack_slots; ++ stack_slots += VMRegImpl::slots_per_word; ++ klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; ++ is_static = true; ++ } ++ ++ // Plus a lock if needed ++ ++ if (method->is_synchronized()) { ++ lock_slot_offset = stack_slots; ++ stack_slots += VMRegImpl::slots_per_word; ++ } ++ ++ // Now a place to save return value or as a temporary for any gpr -> fpr moves ++ // + 2 for return address (which we own) and saved fp ++ stack_slots += 2 + 6 * VMRegImpl::slots_per_word; // (A1, A2, A3, A4, A5, A0) ++ ++ // Ok The space we have allocated will look like: ++ // ++ // ++ // FP-> | | ++ // |---------------------| ++ // | 2 slots for moves | ++ // |---------------------| ++ // | lock box (if sync) | ++ // |---------------------| <- lock_slot_offset ++ // | klass (if static) | ++ // |---------------------| <- 
klass_slot_offset ++ // | oopHandle area | ++ // |---------------------| <- oop_handle_offset ++ // | outbound memory | ++ // | based arguments | ++ // | | ++ // |---------------------| ++ // | vararg area | ++ // |---------------------| ++ // | | ++ // SP-> | out_preserved_slots | ++ // ++ // ++ ++ ++ // Now compute actual number of stack words we need rounding to make ++ // stack properly aligned. ++ stack_slots = round_to(stack_slots, StackAlignmentInSlots); ++ ++ int stack_size = stack_slots * VMRegImpl::stack_slot_size; ++ ++ intptr_t start = (intptr_t)__ pc(); ++ ++ ++ ++ // First thing make an ic check to see if we should even be here ++ address ic_miss = SharedRuntime::get_ic_miss_stub(); ++ ++ // We are free to use all registers as temps without saving them and ++ // restoring them except fp. fp is the only callee save register ++ // as far as the interpreter and the compiler(s) are concerned. ++ ++ const Register ic_reg = T1; ++ const Register receiver = A1; ++ ++ Label hit; ++ Label exception_pending; ++ ++ __ verify_oop(receiver); ++ //add for compressedoops ++ __ load_klass(T12, receiver); ++ __ beq(T12, ic_reg, hit); ++ __ jmp(ic_miss, relocInfo::runtime_call_type); ++ // verified entry must be aligned for code patching. ++ // and the first 5 bytes must be in the same cache line ++ // if we align at 8 then we will be sure 5 bytes are in the same line ++ __ align(8); ++ ++ __ BIND(hit); ++ ++ ++ int vep_offset = ((intptr_t)__ pc()) - start; ++ ++ // The instruction at the verified entry point must be 5 bytes or longer ++ // because it can be patched on the fly by make_non_entrant. The stack bang ++ // instruction fits that requirement. 
++ ++ // Generate stack overflow check ++ ++ if (UseStackBanging) { ++ __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); ++ } else { ++ // need a 5 byte instruction to allow MT safe patching to non-entrant ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ } ++ // Generate a new frame for the wrapper. ++ __ st_ptr(SP, S2thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); ++ ++ __ enter(); ++ // -2 because return address is already present and so is saved fp ++ __ add_simm16(SP, SP, -1 * (stack_size - 2*wordSize)); ++ ++ // Frame is now completed as far a size and linkage. ++ ++ int frame_complete = ((intptr_t)__ pc()) - start; ++ ++ // Calculate the difference between sp and fp. We need to know it ++ // after the native call because on windows Java Natives will pop ++ // the arguments and it is painful to do sp relative addressing ++ // in a platform independent way. So after the call we switch to ++ // fp relative addressing. ++ int fp_adjustment = stack_size - 2*wordSize; ++ ++#ifdef COMPILER2 ++ // C2 may leave the stack dirty if not in SSE2+ mode ++ __ empty_FPU_stack(); ++#endif ++ ++ // Compute the fp offset for any slots used after the jni call ++ ++ int lock_slot_fp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment; ++ // We use S2thread as a thread pointer because it is callee save and ++ // if we load it once it is usable thru the entire wrapper ++ const Register thread = S2thread; ++ ++ // We use S4 as the oop handle for the receiver/klass ++ // It is callee save so it survives the call to native ++ ++ const Register oop_handle_reg = S4; ++ if (is_critical_native) { ++ __ stop("generate_native_wrapper in sharedRuntime <2>"); ++ } ++ ++ // ++ // We immediately shuffle the arguments so that any vm call we have to ++ // make from here on out (sync slow path, jvmpi, etc.) 
we will have ++ // captured the oops from our caller and have a valid oopMap for ++ // them. ++ ++ // ----------------- ++ // The Grand Shuffle ++ // ++ // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* ++ // and, if static, the class mirror instead of a receiver. This pretty much ++ // parms though amd does). Since the native abi doesn't use register args ++ // and the java conventions does we don't have to worry about collisions. ++ // All of our moved are reg->stack or stack->stack. ++ // We ignore the extra arguments during the shuffle and handle them at the ++ // last moment. The shuffle is described by the two calling convention ++ // vectors we have in our possession. We simply walk the java vector to ++ // get the source locations and the c vector to get the destinations. ++ ++ int c_arg = method->is_static() ? 2 : 1 ; ++ ++ // Record sp-based slot for receiver on stack for non-static methods ++ int receiver_offset = -1; ++ ++ // This is a trick. We double the stack slots so we can claim ++ // the oops in the caller's frame. Since we are sure to have ++ // more args than the caller doubling is enough to make ++ // sure we can capture all the incoming oop args from the ++ // caller. ++ // ++ OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); ++ ++ // Mark location of fp (someday) ++ // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(fp)); ++ ++#ifdef ASSERT ++ bool reg_destroyed[RegisterImpl::number_of_registers]; ++ bool freg_destroyed[FloatRegisterImpl::number_of_registers]; ++ for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { ++ reg_destroyed[r] = false; ++ } ++ for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { ++ freg_destroyed[f] = false; ++ } ++#endif /* ASSERT */ ++ ++ // This may iterate in two different directions depending on the ++ // kind of native it is. 
The reason is that for regular JNI natives ++ // the incoming and outgoing registers are offset upwards and for ++ // critical natives they are offset down. ++ GrowableArray arg_order(2 * total_in_args); ++ VMRegPair tmp_vmreg; ++ tmp_vmreg.set1(T11->as_VMReg()); ++ ++ if (!is_critical_native) { ++ for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { ++ arg_order.push(i); ++ arg_order.push(c_arg); ++ } ++ } else { ++ // Compute a valid move order, using tmp_vmreg to break any cycles ++ __ stop("generate_native_wrapper in sharedRuntime <2>"); ++// ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg); ++ } ++ ++ int temploc = -1; ++ for (int ai = 0; ai < arg_order.length(); ai += 2) { ++ int i = arg_order.at(ai); ++ int c_arg = arg_order.at(ai + 1); ++ __ block_comment(err_msg("move %d -> %d", i, c_arg)); ++ if (c_arg == -1) { ++ assert(is_critical_native, "should only be required for critical natives"); ++ // This arg needs to be moved to a temporary ++ __ move(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register()); ++ in_regs[i] = tmp_vmreg; ++ temploc = i; ++ continue; ++ } else if (i == -1) { ++ assert(is_critical_native, "should only be required for critical natives"); ++ // Read from the temporary location ++ assert(temploc != -1, "must be valid"); ++ i = temploc; ++ temploc = -1; ++ } ++#ifdef ASSERT ++ if (in_regs[i].first()->is_Register()) { ++ assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!"); ++ } else if (in_regs[i].first()->is_FloatRegister()) { ++ assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!"); ++ } ++ if (out_regs[c_arg].first()->is_Register()) { ++ reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; ++ } else if (out_regs[c_arg].first()->is_FloatRegister()) { ++ freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true; ++ } ++#endif 
/* ASSERT */ ++ switch (in_sig_bt[i]) { ++ case T_ARRAY: ++ if (is_critical_native) { ++ __ stop("generate_native_wrapper in sharedRuntime <2>"); ++ // unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); ++ c_arg++; ++#ifdef ASSERT ++ if (out_regs[c_arg].first()->is_Register()) { ++ reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; ++ } else if (out_regs[c_arg].first()->is_FloatRegister()) { ++ freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true; ++ } ++#endif ++ break; ++ } ++ case T_OBJECT: ++ assert(!is_critical_native, "no oop arguments"); ++ object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], ++ ((i == 0) && (!is_static)), ++ &receiver_offset); ++ break; ++ case T_VOID: ++ break; ++ ++ case T_FLOAT: ++ float_move(masm, in_regs[i], out_regs[c_arg]); ++ break; ++ ++ case T_DOUBLE: ++ assert( i + 1 < total_in_args && ++ in_sig_bt[i + 1] == T_VOID && ++ out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); ++ double_move(masm, in_regs[i], out_regs[c_arg]); ++ break; ++ ++ case T_LONG : ++ long_move(masm, in_regs[i], out_regs[c_arg]); ++ break; ++ ++ case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); ++ ++ default: ++ simple_move32(masm, in_regs[i], out_regs[c_arg]); ++ } ++ } ++ ++ // point c_arg at the first arg that is already loaded in case we ++ // need to spill before we call out ++ c_arg = total_c_args - total_in_args; ++ // Pre-load a static method's oop. Used both by locking code and ++ // the normal JNI call code. 
++ ++ __ move(oop_handle_reg, A1); ++ ++ if (method->is_static() && !is_critical_native) { ++ ++ // load opp into a register ++ int oop_index = __ oop_recorder()->find_index(JNIHandles::make_local( ++ (method->method_holder())->java_mirror())); ++ ++ ++ RelocationHolder rspec = oop_Relocation::spec(oop_index); ++ __ relocate(rspec); ++ __ patchable_set48(oop_handle_reg, (long)JNIHandles::make_local((method->method_holder())->java_mirror())); ++ // Now handlize the static class mirror it's known not-null. ++ __ stl( oop_handle_reg, SP, klass_offset); ++ map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); ++ ++ // Now get the handle ++ __ lea(oop_handle_reg, Address(SP, klass_offset)); ++ // store the klass handle as second argument ++ __ move(A1, oop_handle_reg); ++ // and protect the arg if we must spill ++ c_arg--; ++ } ++ ++ // Change state to native (we save the return address in the thread, since it might not ++ // be pushed on the stack when we do a a stack traversal). It is enough that the pc() ++ // points into the right code segment. It does not have to be the correct return pc. ++ // We use the same pc/oopMap repeatedly when we call out ++ ++ intptr_t the_pc = (intptr_t) __ pc(); ++ oop_maps->add_gc_map(the_pc - start, map); ++ ++#ifdef ZHJ20180909 ++ __ set_last_Java_frame(SP, noreg, NULL); ++ ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ intptr_t save_pc = (intptr_t)the_pc ; ++ __ patchable_set48(AT, save_pc); ++ } ++ __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++#else ++ if(UseAddpi) ++ __ addpi(-1, AT); ++ else { ++ __ br(AT, 0); ++ __ subl(AT, AT, 4); // the_pc = last_Java_pc = $(AT) - br (1) ++ } ++ __ stl(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ ++ // move from upside, want to avoid patching ++ __ set_last_Java_frame(SP, noreg, NULL); ++#endif ++ ++ // We have all of the arguments setup at this point. 
We must not touch any register ++ // argument registers at this point (what if we save/restore them there are no oop? ++ { ++ SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); ++ int metadata_index = __ oop_recorder()->find_index(method()); ++ RelocationHolder rspec = metadata_Relocation::spec(metadata_index); ++ __ relocate(rspec); ++ __ patchable_set48(AT, (long)(method())); ++ ++ __ call_VM_leaf( ++ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), ++ thread, AT); ++ ++ } ++ ++ // These are register definitions we need for locking/unlocking ++ const Register swap_reg = T11; // Must use T11 for cmpxchg instruction ++ const Register obj_reg = T12; // Will contain the oop ++ //const Register lock_reg = T6; // Address of compiler lock object (BasicLock) ++ const Register lock_reg = c_rarg0; // Address of compiler lock object (BasicLock) ++ ++ ++ ++ Label slow_path_lock; ++ Label lock_done; ++ ++ // Lock a synchronized method ++ if (method->is_synchronized()) { ++ assert(!is_critical_native, "unhandled"); ++ ++ const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); ++ ++ // Get the handle (the 2nd argument) ++ __ move(oop_handle_reg, A1); ++ ++ // Get address of the box ++ __ lea(lock_reg, Address(FP, lock_slot_fp_offset)); ++ ++ // Load the oop from the handle ++ __ ldl(obj_reg, oop_handle_reg, 0); ++ ++ if (UseBiasedLocking) { ++ // Note that oop_handle_reg is trashed during this call ++ __ biased_locking_enter(lock_reg, obj_reg, swap_reg, A1, false, lock_done, &slow_path_lock); ++ } ++ ++ // Load immediate 1 into swap_reg %T11 ++ __ move(swap_reg, 1); ++ ++ __ ldl(AT, obj_reg, 0); ++ __ or_ins(swap_reg, swap_reg, AT); ++ ++ __ stl(swap_reg, lock_reg, mark_word_offset); ++ __ cmpxchg(lock_reg, Address(obj_reg, 0), swap_reg); ++ __ bne(AT, lock_done); ++ ++ // Test if the oopMark is an obvious stack pointer, i.e., ++ // 1) (mark & 3) == 0, and ++ // 2) sp <= mark < mark + os::pagesize() ++ // These 3 tests can be done by evaluating 
the following ++ // expression: ((mark - sp) & (3 - os::vm_page_size())), ++ // assuming both stack pointer and pagesize have their ++ // least significant 2 bits clear. ++ // NOTE: the oopMark is in swap_reg %T11 as the result of cmpxchg ++ ++ __ subl(swap_reg, swap_reg, SP); ++ __ move(AT, 3 - os::vm_page_size()); ++ __ and_reg(swap_reg , swap_reg, AT); ++ // Save the test result, for recursive case, the result is zero ++ __ stl(swap_reg, lock_reg, mark_word_offset); ++ __ bne(swap_reg, slow_path_lock); ++ // Slow path will re-enter here ++ __ BIND(lock_done); ++ ++ if (UseBiasedLocking) { ++ // Re-fetch oop_handle_reg as we trashed it above ++ __ move(A1, oop_handle_reg); ++ } ++ } ++ ++ ++ // Finally just about ready to make the JNI call ++ ++ ++ // get JNIEnv* which is first argument to native ++ if (!is_critical_native) { ++ __ add_simm16(A0, thread, in_bytes(JavaThread::jni_environment_offset())); ++ } ++ ++ // Example: Java_java_lang_ref_Finalizer_invokeFinalizeMethod(JNIEnv *env, jclass clazz, jobject ob) ++ // Load the second arguments into A1 ++ //__ ldl(A1, SP , wordSize ); // klass ++ ++ // Now set thread in native ++ __ add_simm16(AT, R0, _thread_in_native); ++ __ stw(AT, thread, in_bytes(JavaThread::thread_state_offset())); ++ // do the call ++ __ call(method->native_function(), relocInfo::runtime_call_type); ++ // WARNING - on Windows Java Natives use pascal calling convention and pop the ++ // arguments off of the stack. We could just re-adjust the stack pointer here ++ // and continue to do SP relative addressing but we instead switch to FP ++ // relative addressing. ++ ++ // Unpack native results. 
++ switch (ret_type) { ++ case T_BOOLEAN: __ c2bool(V0); break; ++ case T_CHAR : __ zapnot(V0, V0, 0x3); break; ++ case T_BYTE : __ sign_extend_byte (V0); break; ++ case T_SHORT : __ sign_extend_short(V0); break; ++ case T_INT : // nothing to do break; ++ case T_DOUBLE : ++ case T_FLOAT : ++ // Result is in st0 we'll save as needed ++ break; ++ case T_ARRAY: // Really a handle ++ case T_OBJECT: // Really a handle ++ break; // can't de-handlize until after safepoint check ++ case T_VOID: break; ++ case T_LONG: break; ++ default : ShouldNotReachHere(); ++ } ++ ++ // Switch thread to "native transition" state before reading the synchronization state. ++ // This additional state is necessary because reading and testing the synchronization ++ // state is not atomic w.r.t. GC, as this scenario demonstrates: ++ // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. ++ // VM thread changes sync state to synchronizing and suspends threads for GC. ++ // Thread A is resumed to finish this native method, but doesn't block here since it ++ // didn't see any synchronization is progress, and escapes. ++ __ add_simm16(AT, R0, _thread_in_native_trans); ++ __ stw(AT, thread, in_bytes(JavaThread::thread_state_offset())); ++ ++ Label after_transition; ++ ++ // check for safepoint operation in progress and/or pending suspend requests ++ { ++ Label Continue; ++ __ li(AT, SafepointSynchronize::address_of_state()); ++ __ ldw(A0, AT, 0); ++ __ add_simm16(AT, A0, -SafepointSynchronize::_not_synchronized); ++ Label L; ++ __ bne(AT, L); ++ __ ldw(AT, thread, in_bytes(JavaThread::suspend_flags_offset())); ++ __ beq(AT, Continue); ++ __ BIND(L); ++ ++ // Don't use call_VM as it will see a possible pending exception and forward it ++ // and never return here preventing us from clearing _last_native_pc down below. 
++ // ++ save_native_result(masm, ret_type, stack_slots); ++ __ move(A0, thread); ++ __ add_simm16(SP, SP, -wordSize); ++ __ push(S2); ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ move(S2, SP); // use S2 as a sender SP holder ++ __ and_reg(SP, SP, AT); // align stack as required by ABI ++ if (!is_critical_native) { ++ __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::runtime_call_type); ++ } else { ++ __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition), relocInfo::runtime_call_type); ++ } ++ __ move(SP, S2); // use S2 as a sender SP holder ++ __ pop(S2); ++ __ add_simm16(SP,SP, wordSize); ++ //add for compressedoops ++ __ reinit_heapbase(); ++ // Restore any method result value ++ restore_native_result(masm, ret_type, stack_slots); ++ ++ if (is_critical_native) { ++ // The call above performed the transition to thread_in_Java so ++ // skip the transition logic below. ++ __ beq(R0, after_transition); ++ } ++ ++ __ BIND(Continue); ++ } ++ ++ // change thread state ++ __ add_simm16(AT, R0, _thread_in_Java); ++ __ stw(AT, thread, in_bytes(JavaThread::thread_state_offset())); ++ __ BIND(after_transition); ++ Label reguard; ++ Label reguard_done; ++ __ ldw(AT, thread, in_bytes(JavaThread::stack_guard_state_offset())); ++ __ add_simm16(AT, AT, -JavaThread::stack_guard_yellow_disabled); ++ __ beq(AT, reguard); ++ // slow path reguard re-enters here ++ __ BIND(reguard_done); ++ ++ // Handle possible exception (will unlock if necessary) ++ ++ // native result if any is live ++ ++ // Unlock ++ Label slow_path_unlock; ++ Label unlock_done; ++ if (method->is_synchronized()) { ++ ++ Label done; ++ ++ // Get locked oop from the handle we passed to jni ++ __ ldl( obj_reg, oop_handle_reg, 0); ++ if (UseBiasedLocking) { ++ __ biased_locking_exit(obj_reg, T11, done); ++ } ++ ++ // Simple recursive lock? 
++ ++ __ ldl(AT, FP, lock_slot_fp_offset); ++ __ beq(AT, done); ++ // Must save FSF if if it is live now because cmpxchg must use it ++ if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { ++ save_native_result(masm, ret_type, stack_slots); ++ } ++ ++ // get old displaced header ++ __ ldl (T11, FP, lock_slot_fp_offset); ++ // get address of the stack lock ++ __ add_simm16 (c_rarg0, FP, lock_slot_fp_offset); ++ // Atomic swap old header if oop still contains the stack lock ++ __ cmpxchg(T11, Address(obj_reg, 0), c_rarg0); ++ ++ __ beq(AT, slow_path_unlock); ++ // slow path re-enters here ++ __ BIND(unlock_done); ++ if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { ++ restore_native_result(masm, ret_type, stack_slots); ++ } ++ ++ __ BIND(done); ++ ++ } ++ { ++ SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); ++ // Tell dtrace about this method exit ++ save_native_result(masm, ret_type, stack_slots); ++ int metadata_index = __ oop_recorder()->find_index( (method())); ++ RelocationHolder rspec = metadata_Relocation::spec(metadata_index); ++ __ relocate(rspec); ++ __ patchable_set48(AT, (long)(method())); ++ ++ __ call_VM_leaf( ++ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), ++ thread, AT); ++ restore_native_result(masm, ret_type, stack_slots); ++ } ++ ++ // We can finally stop using that last_Java_frame we setup ages ago ++ ++ __ reset_last_Java_frame(false); ++ ++ // Unbox oop result, e.g. JNIHandles::resolve value. ++ if (ret_type == T_OBJECT || ret_type == T_ARRAY) { ++ __ resolve_jobject(V0 /* value */, ++ thread /* thread */, ++ AT/* tmp */); ++ } ++ ++ if (!is_critical_native) { ++ // reset handle block ++ __ ldl(AT, thread, in_bytes(JavaThread::active_handles_offset())); ++ __ stw(R0, AT, JNIHandleBlock::top_offset_in_bytes()); ++ } ++ ++ if (!is_critical_native) { ++ // Any exception pending? 
++ __ ldl(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ bne(AT, exception_pending); ++ } ++ // no exception, we're almost done ++ ++ // check that only result value is on FPU stack ++ __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit"); ++ ++ // Return ++ //__ ld_ptr(SP, S2thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ __ leave(); ++ ++ __ ret(); ++ ++ // Unexpected paths are out of line and go here ++ // Slow path locking & unlocking ++ if (method->is_synchronized()) { ++ ++ // BEGIN Slow path lock ++ ++ __ BIND(slow_path_lock); ++ ++ // protect the args we've loaded ++ save_args(masm, total_c_args, c_arg, out_regs); ++ ++ // has last_Java_frame setup. No exceptions so do vanilla call not call_VM ++ // args are (oop obj, BasicLock* lock, JavaThread* thread) ++ ++ __ move(A0, obj_reg); ++ __ move(A1, lock_reg); ++ __ move(A2, thread); ++ __ add_simm16(SP, SP, - 3*wordSize); ++ __ push(S2); ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ move(S2, SP); // use S2 as a sender SP holder ++ __ and_reg(SP, SP, AT); // align stack as required by ABI ++ ++ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type); ++ __ move(SP, S2); ++ __ pop(S2); ++ __ add_simm16(SP, SP, 3*wordSize); ++ ++ restore_args(masm, total_c_args, c_arg, out_regs); ++ ++#ifdef ASSERT ++ { Label L; ++ __ ldl(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(AT, L); ++ __ stop("no pending exception allowed on exit from monitorenter"); ++ __ BIND(L); ++ } ++#endif ++ __ beq(R0, lock_done); ++ // END Slow path lock ++ ++ // BEGIN Slow path unlock ++ __ BIND(slow_path_unlock); ++ ++ // Slow path unlock ++ ++ if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { ++ save_native_result(masm, ret_type, stack_slots); ++ } ++ // Save pending exception around call to VM (which contains an EXCEPTION_MARK) ++ ++ __ ldl(AT, thread, 
in_bytes(Thread::pending_exception_offset())); ++ __ push(AT); ++ __ stl(R0, thread, in_bytes(Thread::pending_exception_offset())); ++ ++ __ push(S2); ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ move(S2, SP); // use S2 as a sender SP holder ++ __ and_reg(SP, SP, AT); // align stack as required by ABI ++ ++ // should be a peal ++ // +wordSize because of the push above ++ __ add_simm16(A1, FP, lock_slot_fp_offset); ++ ++ __ move(A0, obj_reg); ++ __ add_simm16(SP,SP, -2*wordSize); ++ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), ++ relocInfo::runtime_call_type); ++ __ add_simm16(SP,SP, 2*wordSize); ++ __ move(SP, S2); ++ __ pop(S2); ++ ++ //add for compressedoops ++ __ reinit_heapbase(); ++#ifdef ASSERT ++ { ++ Label L; ++ __ ldw( AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(AT, L); ++ __ stop("no pending exception allowed on exit complete_monitor_unlocking_C"); ++ __ BIND(L); ++ } ++#endif /* ASSERT */ ++ ++ __ pop(AT); ++ __ stl(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { ++ restore_native_result(masm, ret_type, stack_slots); ++ } ++ __ beq(R0, unlock_done); ++ // END Slow path unlock ++ ++ } ++ ++ // SLOW PATH Reguard the stack if needed ++ ++ __ BIND(reguard); ++ save_native_result(masm, ret_type, stack_slots); ++ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), ++ relocInfo::runtime_call_type); ++ //add for compressedoops ++ __ reinit_heapbase(); ++ restore_native_result(masm, ret_type, stack_slots); ++ __ beq(R0, reguard_done); ++ ++ // BEGIN EXCEPTION PROCESSING ++ if (!is_critical_native) { ++ // Forward the exception ++ __ BIND(exception_pending); ++ ++ // remove possible return value from FPU register stack ++ __ empty_FPU_stack(); ++ ++ // pop our frame ++ // forward_exception_entry need return address on stack ++ __ add_simm16(SP, FP, wordSize); ++ __ ldl(FP, SP, (-1) * wordSize); ++ ++ // and forward 
the exception ++ __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); ++ } ++ __ flush(); ++ ++ nmethod *nm = nmethod::new_native_nmethod(method, ++ compile_id, ++ masm->code(), ++ vep_offset, ++ frame_complete, ++ stack_slots / VMRegImpl::slots_per_word, ++ (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), ++ in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), ++ oop_maps); ++ ++ if (is_critical_native) { ++ nm->set_lazy_critical_native(true); ++ } ++ ++ return nm; ++ ++} ++ ++// this function returns the adjust size (in number of words) to a c2i adapter ++// activation for use during deoptimization ++int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { ++ return (callee_locals - callee_parameters) * Interpreter::stackElementWords; ++} ++ ++// "Top of Stack" slots that may be unused by the calling convention but must ++// otherwise be preserved. ++// On Intel these are not necessary and the value can be zero. ++// On Sparc this describes the words reserved for storing a register window ++// when an interrupt occurs. ++uint SharedRuntime::out_preserve_stack_slots() { ++ return 0; ++} ++ ++//------------------------------generate_deopt_blob---------------------------- ++// Ought to generate an ideal graph & compile, but here's some SPARC ASM ++// instead. 
++void SharedRuntime::generate_deopt_blob() { ++ // allocate space for the code ++ ResourceMark rm; ++ // setup code generation tools ++ //CodeBuffer buffer ("deopt_blob", 4000, 2048); ++ CodeBuffer buffer ("deopt_blob", 8000, 2048); ++ MacroAssembler* masm = new MacroAssembler( & buffer); ++ int frame_size_in_words; ++ OopMap* map = NULL; ++ // Account for the extra args we place on the stack ++ // by the time we call fetch_unroll_info ++ const int additional_words = 2; // deopt kind, thread ++ ++ OopMapSet *oop_maps = new OopMapSet(); ++ ++ address start = __ pc(); ++ Label cont; ++ // we use S3 for DeOpt reason register ++ Register reason = S3; ++ // use S2 for thread register ++ Register thread = S2thread; ++ // use S1 for fetch_unroll_info returned UnrollBlock ++ Register unroll = S1; ++ // Prolog for non exception case! ++ // Correct the return address we were given. ++ __ add_simm16(RA, RA, - (NativeCall::return_address_offset)); ++ // Save everything in sight. ++ map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); ++ // Normal deoptimization ++ __ move(reason, Deoptimization::Unpack_deopt); ++ __ beq(R0, cont); ++ ++ int reexecute_offset = __ pc() - start; ++ ++ // Reexecute case ++ // return address is the pc describes what bci to do re-execute at ++ ++ // No need to update map as each call to save_live_registers will produce identical oopmap ++ (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); ++ __ move(reason, Deoptimization::Unpack_reexecute); ++ __ beq(R0, cont); ++ ++ int exception_offset = __ pc() - start; ++ // Prolog for exception case ++ ++ // all registers are dead at this entry point, except for V0 and ++ // T4 which contain the exception oop and exception pc ++ // respectively. Set them in TLS and fall thru to the ++ // unpack_with_exception_in_tls entry point. 
++ ++ __ get_thread(thread); ++ __ st_ptr(T4, thread, in_bytes(JavaThread::exception_pc_offset())); ++ __ st_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset())); ++ int exception_in_tls_offset = __ pc() - start; ++ // new implementation because exception oop is now passed in JavaThread ++ ++ // Prolog for exception case ++ // All registers must be preserved because they might be used by LinearScan ++ // Exceptiop oop and throwing PC are passed in JavaThread ++ // tos: stack at point of call to method that threw the exception (i.e. only ++ // args are on the stack, no return address) ++ ++ // Return address will be patched later with the throwing pc. The correct value is not ++ // available now because loading it from memory would destroy registers. ++ // Save everything in sight. ++ // No need to update map as each call to save_live_registers will produce identical oopmap ++ __ add_simm16(RA, RA, - (NativeCall::return_address_offset)); ++ (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); ++ ++ // Now it is safe to overwrite any register ++ // store the correct deoptimization type ++ __ move(reason, Deoptimization::Unpack_exception); ++ // load throwing pc from JavaThread and patch it as the return address ++ // of the current frame. 
Then clear the field in JavaThread ++ __ get_thread(thread); ++ __ ld_ptr(T4, thread, in_bytes(JavaThread::exception_pc_offset())); ++ __ st_ptr(T4, SP, RegisterSaver::raOffset() * wordSize); //save ra ++ __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset())); ++ ++ ++#ifdef ASSERT ++ // verify that there is really an exception oop in JavaThread ++ __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset())); ++ __ verify_oop(AT); ++ // verify that there is no pending exception ++ Label no_pending_exception; ++ __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(AT, no_pending_exception); ++ __ stop("must not have pending exception here"); ++ __ BIND(no_pending_exception); ++#endif ++ __ BIND(cont); ++ // Compiled code leaves the floating point stack dirty, empty it. ++ __ empty_FPU_stack(); ++ ++ ++ // Call C code. Need thread and this frame, but NOT official VM entry ++ // crud. We cannot block on this call, no GC can happen. ++ ++ __ move(A0, thread); ++ __ add_simm16(SP, SP, -additional_words * wordSize); ++ ++ __ set_last_Java_frame(NOREG, NOREG, NULL); ++ ++ // Call fetch_unroll_info(). Need thread and this frame, but NOT official VM entry - cannot block on ++ // this call, no GC can happen. Call should capture return values. 
++ ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + NativeCall::return_address_offset + 4; ++ __ patchable_set48(AT, save_pc); ++ } ++#else ++ { ++ if(UseAddpi){ ++ intptr_t patch_off = 1 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 2 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ ++ __ call((address)Deoptimization::fetch_unroll_info, relocInfo::runtime_call_type); ++ ++ // use jalr + setfpec1, so we should -4. ++ oop_maps->add_gc_map(__ pc() - start - 4, map); ++ ++ __ add_simm16(SP, SP, additional_words * wordSize); ++ __ get_thread(thread); ++ __ reset_last_Java_frame(false); ++ ++ // Load UnrollBlock into S1 ++ __ move(unroll, V0); ++ ++ ++ // Move the unpack kind to a safe place in the UnrollBlock because ++ // we are very short of registers ++ ++ Address unpack_kind(unroll, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()); ++ __ stw(reason, unpack_kind); ++ // save the unpack_kind value ++ // Retrieve the possible live values (return values) ++ // All callee save registers representing jvm state ++ // are now in the vframeArray. ++ ++ Label noException; ++ __ move(AT, Deoptimization::Unpack_exception); ++ __ bne(AT, reason, noException);// Was exception pending? ++ __ ld_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset())); ++ __ ld_ptr(T4, thread, in_bytes(JavaThread::exception_pc_offset())); ++ __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset())); ++ __ st_ptr(R0, thread, in_bytes(JavaThread::exception_oop_offset())); ++ ++ __ verify_oop(V0); ++ ++ // Overwrite the result registers with the exception results. 
++ __ st_ptr(V0, SP, RegisterSaver::v0Offset()*wordSize); ++ ++ __ BIND(noException); ++ ++ ++ // Stack is back to only having register save data on the stack. ++ // Now restore the result registers. Everything else is either dead or captured ++ // in the vframeArray. ++ ++ RegisterSaver::restore_result_registers(masm); ++ // All of the register save area has been popped of the stack. Only the ++ // return address remains. ++ // Pop all the frames we must move/replace. ++ // Frame picture (youngest to oldest) ++ // 1: self-frame (no frame link) ++ // 2: deopting frame (no frame link) ++ // 3: caller of deopting frame (could be compiled/interpreted). ++ // ++ // Note: by leaving the return address of self-frame on the stack ++ // and using the size of frame 2 to adjust the stack ++ // when we are done the return to frame 3 will still be on the stack. ++ ++ // register for the sender's sp ++ Register sender_sp = Rsender; ++ // register for frame pcs ++ Register pcs = T0; ++ // register for frame sizes ++ Register sizes = T1; ++ // register for frame count ++ Register count = T3; ++ ++ // Pop deoptimized frame ++ __ ldw(AT, unroll, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()); ++ __ addl(SP, SP, AT); ++ // sp should be pointing at the return address to the caller (3) ++ ++ // Load array of frame pcs into pcs ++ __ ld_ptr(pcs, unroll, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()); ++ __ add_simm16(SP, SP, wordSize); // trash the old pc ++ // Load array of frame sizes into T6 ++ __ ld_ptr(sizes, unroll, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()); ++ ++ ++ ++ // Load count of frams into T3 ++ __ ldw(count, unroll, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()); ++ // Pick up the initial fp we should save ++ __ ldl(FP, unroll, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()); ++ // Now adjust the caller's stack to make up for the extra locals ++ // but record the original sp so that we 
can save it in the skeletal interpreter ++ // frame and the stack walking of interpreter_sender will get the unextended sp ++ // value and not the "real" sp value. ++ __ move(sender_sp, SP); ++ __ ldw(AT, unroll, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()); ++ __ subl(SP, SP, AT); ++ ++// pcs[0] = frame_pcs[0] = deopt_sender.raw_pc(); regex.split ++ Label loop; ++ __ BIND(loop); ++ __ ldl(T2, sizes, 0); // Load frame size ++ __ ld_ptr(AT, pcs, 0); // save return address ++ __ add_simm16(T2, T2, -2*wordSize); // we'll push pc and fp, by hand ++ __ push2(AT, FP); ++ __ move(FP, SP); ++ __ subl(SP, SP, T2); // Prolog! ++ // This value is corrected by layout_activation_impl ++ __ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ __ stl(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);// Make it walkable ++ __ move(sender_sp, SP); // pass to next frame ++ __ subl(count, count, 1); // decrement counter ++ __ add_simm16(sizes, sizes, wordSize); // Bump array pointer (sizes) ++ __ add_simm16(pcs, pcs, wordSize); ++ __ bne(count, loop); ++ __ ldl(AT, pcs, 0); // frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0); ++ // Re-push self-frame ++ __ push2(AT, FP); ++ __ move(FP, SP); ++ __ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ __ stl(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize); ++ __ add_simm16(SP, SP, -(frame_size_in_words - 2 - additional_words) * wordSize); ++ ++ // Restore frame locals after moving the frame ++ __ stl(V0, SP, RegisterSaver::v0Offset() * wordSize); ++ __ fstd(F0, SP, RegisterSaver::fpResultOffset()* wordSize);// Pop float stack and store in local ++ __ fstd(F1, SP, (RegisterSaver::fpResultOffset() + 1) * wordSize); ++ ++ ++ // Call unpack_frames(). Need thread and this frame, but NOT official VM entry - cannot block on ++ // this call, no GC can happen. 
++ __ move(A1, reason); // exec_mode ++ __ get_thread(thread); ++ __ move(A0, thread); // thread ++ __ add_simm16(SP, SP, (-additional_words) *wordSize); ++ ++ // set last_Java_sp, last_Java_fp ++ __ set_last_Java_frame(NOREG, FP, NULL); ++ ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); // Fix stack alignment as required by ABI ++ ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + NativeCall::return_address_offset + 4; ++ __ patchable_set48(AT, save_pc); ++ } ++#else ++ { ++ if(UseAddpi){ ++ intptr_t patch_off = 1 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 2 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ ++ __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), relocInfo::runtime_call_type); ++ // Revert SP alignment after call since we're going to do some SP relative addressing below ++ __ ldl(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ ++ // Set an oopmap for the call site ++ oop_maps->add_gc_map(__ offset() - 4, new OopMap( frame_size_in_words , 0)); ++ ++ __ push(V0); ++ ++ __ get_thread(thread); ++ __ reset_last_Java_frame(true); ++ ++ // Collect return values ++ __ ldl(V0, SP, (RegisterSaver::v0Offset() + additional_words +1) * wordSize); ++ __ fldd(F0, SP, RegisterSaver::fpResultOffset()* wordSize);// Pop float stack and store in local ++ __ fldd(F1, SP, (RegisterSaver::fpResultOffset() + 1) * wordSize); ++ // Clear floating point stack before returning to interpreter ++ __ empty_FPU_stack(); ++ // Push a float or double return value if necessary. 
++ __ leave(); ++ ++ // Jump to interpreter ++ __ ret(); ++ ++ masm->flush(); ++ _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words); ++ _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset); ++} ++ ++#ifdef COMPILER2 ++ ++//------------------------------generate_uncommon_trap_blob-------------------- ++// Ought to generate an ideal graph & compile, but here's some SPARC ASM ++// instead. ++void SharedRuntime::generate_uncommon_trap_blob() { ++ // allocate space for the code ++ ResourceMark rm; ++ // setup code generation tools ++ CodeBuffer buffer ("uncommon_trap_blob", 512*80 , 512*40 ); ++ MacroAssembler* masm = new MacroAssembler(&buffer); ++ ++ enum frame_layout { ++ s0_off, s0_off2, ++ s1_off, s1_off2, ++ s2_off, s2_off2, ++ s3_off, s3_off2, ++ s4_off, s4_off2, ++ s5_off, s5_off2, ++ fp_off, fp_off2, ++ return_off, return_off2, // slot for return address sp + 9 ++ framesize ++ }; ++ assert(framesize % 4 == 0, "sp not 16-byte aligned"); ++ ++ address start = __ pc(); ++ ++ // Push self-frame. ++ __ add_simm16(SP, SP, -framesize * BytesPerInt); ++ ++ __ stl(RA, SP, return_off * BytesPerInt); ++ __ stl(FP, SP, fp_off * BytesPerInt); ++ ++ // Save callee saved registers. None for UseSSE=0, ++ // floats-only for UseSSE=1, and doubles for UseSSE=2. 
++ __ stl(S0, SP, s0_off * BytesPerInt); ++ __ stl(S1, SP, s1_off * BytesPerInt); ++ __ stl(S2, SP, s2_off * BytesPerInt); ++ __ stl(S3, SP, s3_off * BytesPerInt); ++ __ stl(S4, SP, s4_off * BytesPerInt); ++ __ stl(S5, SP, s5_off * BytesPerInt); ++ ++ __ add_simm16(FP, SP, fp_off * BytesPerInt); ++ ++ // Clear the floating point exception stack ++ __ empty_FPU_stack(); ++ ++ Register thread = S2thread; ++ ++ // set last_Java_sp ++ __ set_last_Java_frame(NOREG, FP, NULL); ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ if (SafePatch) { ++ assert(NativeCall::return_address_offset == 24, "in sharedRuntime return_address_offset"); ++ } else { ++ assert(NativeCall::return_address_offset == 20, "in sharedRuntime return_address_offset"); ++ } ++ { ++ long save_pc = (long)__ pc() + (6 * BytesPerInstWord) + NativeCall::return_address_offset; ++ __ patchable_set48(AT, (long)save_pc); ++ } ++#else ++ if (SafePatch) { ++ assert(NativeCall::return_address_offset == 24, "in sharedRuntime return_address_offset"); ++ } else { ++ assert(NativeCall::return_address_offset == 20, "in sharedRuntime return_address_offset"); ++ } ++ { ++ if(UseAddpi){ ++ // stl (1) + move (1) + patchable_call_setfpec1 ++ intptr_t patch_off = 2 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ // addl (1) + stl (1) + move (1) + patchable_call_setfpec1 ++ intptr_t patch_off = 3 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ ++ // Call C code. Need thread but NOT official VM entry ++ // crud. We cannot block on this call, no GC can happen. Call should ++ // capture callee-saved registers as well as return values. 
++ __ move(A0, thread); ++ // argument already in A1 ++ __ patchable_call_setfpec1((address)Deoptimization::uncommon_trap); ++ ++ // Set an oopmap for the call site ++ OopMapSet *oop_maps = new OopMapSet(); ++ OopMap* map = new OopMap( framesize, 0 ); ++ ++ map->set_callee_saved( VMRegImpl::stack2reg(s0_off ), S0->as_VMReg() ); ++ map->set_callee_saved( VMRegImpl::stack2reg(s1_off ), S1->as_VMReg() ); ++ map->set_callee_saved( VMRegImpl::stack2reg(s2_off ), S2->as_VMReg() ); ++ map->set_callee_saved( VMRegImpl::stack2reg(s3_off ), S3->as_VMReg() ); ++ map->set_callee_saved( VMRegImpl::stack2reg(s4_off ), S4->as_VMReg() ); ++ map->set_callee_saved( VMRegImpl::stack2reg(s5_off ), S5->as_VMReg() ); ++ ++ // use jalr + setfpec1, so we should -4. ++ oop_maps->add_gc_map( __ offset() - 4, map); ++ ++ __ reset_last_Java_frame(false); ++ ++ // Load UnrollBlock into S1 ++ Register unroll = S1; ++ __ move(unroll, V0); ++ ++ // Pop all the frames we must move/replace. ++ // ++ // Frame picture (youngest to oldest) ++ // 1: self-frame (no frame link) ++ // 2: deopting frame (no frame link) ++ // 3: possible-i2c-adapter-frame ++ // 4: caller of deopting frame (could be compiled/interpreted. 
If interpreted we will create an ++ // and c2i here) ++ ++ __ add_simm16(SP, SP, framesize * BytesPerInt); ++ ++ // Pop deoptimized frame ++ __ ldw(AT, unroll, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()); ++ __ addl(SP, SP, AT); ++ ++ // register for frame pcs ++ Register pcs = T11; ++ // register for frame sizes ++ Register sizes = T12; ++ // register for frame count ++ Register count = T3; ++ // register for the sender's sp ++ Register sender_sp = T1; ++ ++ // sp should be pointing at the return address to the caller (4) ++ // Load array of frame pcs ++ __ ldl(pcs, unroll, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()); ++ ++ // Load array of frame sizes ++ __ ldl(sizes, unroll, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()); ++ __ ldw_unsigned(count, unroll, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()); ++ ++ // Pick up the initial fp we should save ++ __ ldl(FP, unroll, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()); ++ // Now adjust the caller's stack to make up for the extra locals ++ // but record the original sp so that we can save it in the skeletal interpreter ++ // frame and the stack walking of interpreter_sender will get the unextended sp ++ // value and not the "real" sp value. ++ ++ __ move(sender_sp, SP); ++ __ ldw(AT, unroll, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()); ++ __ subl(SP, SP, AT); ++ // Push interpreter frames in a loop ++ Label loop; ++ __ BIND(loop); ++ __ ldl(T2, sizes, 0); // Load frame size ++ __ ldl(AT, pcs, 0); // save return address ++ __ add_simm16(T2, T2, -2*wordSize); // we'll push pc and fp, by hand ++ __ push2(AT, FP); ++ __ move(FP, SP); ++ __ subl(SP, SP, T2); // Prolog! 
++ // This value is corrected by layout_activation_impl ++ __ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ __ stl(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);// Make it walkable ++ __ move(sender_sp, SP); // pass to next frame ++ __ subl(count, count, 1); // decrement counter ++ __ add_simm16(sizes, sizes, wordSize); // Bump array pointer (sizes) ++ __ add_simm16(pcs, pcs, wordSize); // Bump array pointer (pcs) ++ __ bne(count, loop); ++ ++ __ ldl(RA, pcs, 0); ++ ++ // Re-push self-frame ++ __ add_simm16(SP, SP, - 2 * wordSize); // save old & set new FP ++ __ stl(FP, SP, 0 * wordSize); // save final return address ++ __ stl(RA, SP, 1 * wordSize); ++ __ move(FP, SP); ++ __ add_simm16(SP, SP, -(framesize / 2 - 2) * wordSize); ++ ++ // set last_Java_sp, last_Java_fp ++ __ set_last_Java_frame(NOREG, FP, NULL); ++ ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); // Fix stack alignment as required by ABI ++ ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ long save_pc = (long)__ pc() + (7 * BytesPerInstWord) + NativeCall::return_address_offset; ++ __ patchable_set48(AT, (long)save_pc); ++ } ++#else ++ { ++ if(UseAddpi){ ++ intptr_t patch_off = 3 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 4 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); ++ ++ // Call C code. Need thread but NOT official VM entry ++ // crud. We cannot block on this call, no GC can happen. Call should ++ // restore return values to their stack-slots with the new SP. 
++ __ move(A0, thread); ++ __ move(A1, Deoptimization::Unpack_uncommon_trap); ++ __ patchable_call_setfpec1((address)Deoptimization::unpack_frames); ++ ++ // Set an oopmap for the call site ++ // use jalr + setfpec1, so we should -4. ++ oop_maps->add_gc_map( __ offset() - 4, new OopMap( framesize, 0 ) ); ++ ++ __ reset_last_Java_frame(true); ++ ++ // Pop self-frame. ++ __ leave(); // Epilog! ++ ++ // Jump to interpreter ++ __ ret(); ++ ++ // ------------- ++ // make sure all code is generated ++ masm->flush(); ++ ++ _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize / 2); ++} ++ ++#endif // COMPILER2 ++ ++//------------------------------generate_handler_blob------------------- ++// ++// Generate a special Compile2Runtime blob that saves all registers, and sets ++// up an OopMap and calls safepoint code to stop the compiled code for ++// a safepoint. ++// ++// This blob is jumped to (via a breakpoint and the signal handler) from a ++// safepoint in compiled code. ++ ++SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int pool_type) { ++ ++ // Account for thread arg in our frame ++ const int additional_words = 0; ++ int frame_size_in_words; ++ ++ assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); ++ ++ ResourceMark rm; ++ OopMapSet *oop_maps = new OopMapSet(); ++ OopMap* map; ++ ++ // allocate space for the code ++ // setup code generation tools ++ CodeBuffer buffer ("handler_blob", 2048, 512); ++ MacroAssembler* masm = new MacroAssembler( &buffer); ++ ++ const Register thread = S2thread; ++ address start = __ pc(); ++ address call_pc = NULL; ++ bool cause_return = (pool_type == POLL_AT_RETURN); ++ bool save_vectors = (pool_type == POLL_AT_VECTOR_LOOP); ++ ++ // If cause_return is true we are at a poll_return and there is ++ // the return address in RA to the caller on the nmethod ++ // that is safepoint. 
We can leave this return in RA and ++ // effectively complete the return and safepoint in the caller. ++ // Otherwise we load exception pc to RA. ++ __ push(thread); ++ ++ if(!cause_return) { ++ __ ld_ptr(RA, Address(thread, JavaThread::saved_exception_pc_offset())); ++ } ++ ++ __ pop(thread); ++ map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, save_vectors); ++ ++ // The following is basically a call_VM. However, we need the precise ++ // address of the call in order to generate an oopmap. Hence, we do all the ++ // work outselvs. ++ ++ __ move(A0, thread); ++ __ set_last_Java_frame(NOREG, NOREG, NULL); ++ ++ //set last_Java_pc ++ if(UseAddpi){ ++ intptr_t patch_off = 1 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 2 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ __ stl(AT, thread, in_bytes(JavaThread::last_Java_pc_offset())); ++ ++ // do the call ++ __ patchable_call_setfpec1(call_ptr); ++ ++ // Set an oopmap for the call site. This oopmap will map all ++ // oop-registers and debug-info registers as callee-saved. This ++ // will allow deoptimization at this safepoint to find all possible ++ // debug-info recordings, as well as let GC find all oops. ++ // use jalr + setfpec1, so we should -4. 
++ oop_maps->add_gc_map(__ offset() - 4, map); ++ ++ Label noException; ++ ++ // Clear last_Java_sp again ++ __ reset_last_Java_frame(false); ++ ++ __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(AT, noException); ++ ++ // Exception pending ++ ++ RegisterSaver::restore_live_registers(masm, save_vectors); ++ //forward_exception_entry need return address on the stack ++ __ push(RA); ++ __ patchable_jump((address)StubRoutines::forward_exception_entry()); ++ ++ // No exception case ++ __ BIND(noException); ++ // Normal exit, register restoring and exit ++ RegisterSaver::restore_live_registers(masm, save_vectors); ++ __ ret(); ++ ++ masm->flush(); ++ ++ // Fill-out other meta info ++ return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); ++} ++ ++// ++// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss ++// ++// Generate a stub that calls into vm to find out the proper destination ++// of a java call. All the argument registers are live at this point ++// but since this is generic code we don't know what they are and the caller ++// must do any gc of the args. 
++// ++RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) { ++ assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before"); ++ ++ // allocate space for the code ++ ResourceMark rm; ++ ++ //CodeBuffer buffer(name, 1000, 512); ++ CodeBuffer buffer(name, 20000, 2048); ++ MacroAssembler* masm = new MacroAssembler(&buffer); ++ ++ int frame_size_words; ++ //we put the thread in A0 ++ ++ OopMapSet *oop_maps = new OopMapSet(); ++ OopMap* map = NULL; ++ ++ int start = __ offset(); ++ map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words); ++ ++ ++ int frame_complete = __ offset(); ++ ++ const Register thread = T11; ++ __ get_thread(thread); ++ ++ __ move(A0, thread); ++ __ set_last_Java_frame(noreg, FP, NULL); ++ //align the stack before invoke native ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + NativeCall::return_address_offset + 1 * BytesPerInstWord; ++ __ patchable_set48(AT, save_pc); ++ } ++#else ++ { ++ if(UseAddpi){ ++ intptr_t patch_off = 1 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 2 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, thread, in_bytes(JavaThread::last_Java_pc_offset())); ++ ++ __ patchable_call_setfpec1(destination); ++ ++ // Set an oopmap for the call site. ++ // We need this not only for callee-saved registers, but also for volatile ++ // registers that the compiler might be keeping live across a safepoint. ++ // use jalr + setfpec1, so we should -4. 
++ oop_maps->add_gc_map( __ offset() - start - 4, map); ++ ++ // V0 contains the address we are going to jump to assuming no exception got installed ++ __ get_thread(thread); ++ __ ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ // clear last_Java_sp ++ __ reset_last_Java_frame(true); ++ // check for pending exceptions ++ Label pending; ++ __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ bne(AT, pending); ++ ++ // get the returned Method* ++ __ get_vm_result_2(Rmethod, thread); // Refer to OpenJDK8 ++ __ st_ptr(Rmethod, SP, RegisterSaver::methodOffset() * wordSize); ++ __ st_ptr(V0, SP, RegisterSaver::v0Offset() * wordSize); ++ RegisterSaver::restore_live_registers(masm); ++ ++ // We are back the the original state on entry and ready to go the callee method. ++ __ jmp(V0); ++ ++ // Pending exception after the safepoint ++ ++ __ BIND(pending); ++ ++ RegisterSaver::restore_live_registers(masm); ++ ++ // exception pending => remove activation and forward to exception handler ++ //forward_exception_entry need return address on the stack ++ __ push(RA); ++ __ get_thread(thread); ++ __ st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset())); ++ __ ld_ptr(V0, thread, in_bytes(Thread::pending_exception_offset())); ++ __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); ++ // ++ // make sure all code is generated ++ masm->flush(); ++ ++ RuntimeStub* tmp= RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true); ++ return tmp; ++} ++ ++//ADD BY LJ ++ ++//------------------------------Montgomery multiplication------------------------ ++// ++ ++ ++#define ASM_SUBTRACT ++ ++#ifdef ASM_SUBTRACT ++typedef int __attribute__((mode(TI))) int128; ++ ++// Subtract 0:b from carry:a. Return carry. 
++static unsigned long ++sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) { ++ int128 tmp = 0; ++ int i; ++ for (i = 0; i < len; i++) { ++ tmp += a[i]; ++ tmp -= b[i]; ++ a[i] = tmp; ++ tmp >>= 64; ++ assert(-1 <= tmp && tmp <= 0, "invariant"); ++ } ++ return tmp + carry; ++} ++#endif // ! ASM_SUBTRACT ++ ++// Multiply (unsigned) Long A by Long B, accumulating the double- ++// length result into the accumulator formed of T0, T1, and T2. ++inline void MACC(unsigned long A, unsigned long B, unsigned long &t0, unsigned long &t1, unsigned long &t2) { ++ unsigned long hi, lo, carry = 0, t = 0; ++ __asm__ __volatile__( ++ "mull %[A], %[B] , %[lo] \n" ++ "umulh %[A], %[B] , %[hi] \n" ++ "addl %[lo], %[t0], %[t0] \n" ++ "cmpult %[t0], %[lo], %[carry] \n" ++ "addl %[t1], %[carry], %[t1] \n" ++ "cmpult %[t1], %[carry], %[t] \n" ++ "addl %[t1], %[hi], %[t1] \n" ++ "cmpult %[t1], %[hi], %[carry] \n" ++ "bis %[carry], %[t] , %[carry] \n" ++ "addl %[t2], %[carry], %[t2] \n" ++ : [hi]"=&r"(hi), [lo]"=&r"(lo), [t0]"+r"(t0), [t1]"+r"(t1), [t2]"+r"(t2), [carry]"+r"(carry), [t]"+r"(t) ++ : [A]"r"(A), [B]"r"(B) ++ : ++ ); ++} ++ ++// As above, but add twice the double-length result into the ++// accumulator. 
++inline void MACC2(unsigned long A, unsigned long B, unsigned long &t0, unsigned long &t1, unsigned long &t2) { ++ unsigned long hi, lo, carry = 0, t = 0; ++ __asm__ __volatile__( ++ "mull %[A], %[B] , %[lo] \n" ++ "umulh %[A], %[B] , %[hi] \n" ++ "addl %[t0], %[lo], %[t0] \n" ++ "cmpult %[t0], %[lo], %[carry] \n" ++ "addl %[t1], %[carry], %[t1] \n" ++ "cmpult %[t1], %[carry], %[t] \n" ++ "addl %[t1], %[hi], %[t1] \n" ++ "cmpult %[t1], %[hi], %[carry] \n" ++ "bis %[carry], %[t], %[carry] \n" ++ "addl %[t2], %[carry], %[t2] \n" ++ "addl %[t0], %[lo], %[t0] \n" ++ "cmpult %[t0], %[lo], %[carry] \n" ++ "addl %[t1], %[carry], %[t1] \n" ++ "cmpult %[t1], %[carry], %[t] \n" ++ "addl %[t1], %[hi], %[t1] \n" ++ "cmpult %[t1], %[hi], %[carry] \n" ++ "bis %[carry], %[t], %[carry] \n" ++ "addl %[t2], %[carry], %[t2] \n" ++ : [hi]"=&r"(hi), [lo]"=&r"(lo), [t0]"+r"(t0), [t1]"+r"(t1), [t2]"+r"(t2), [carry]"+r"(carry), [t]"+r"(t) ++ : [A]"r"(A), [B]"r"(B) ++ : ++ ); ++} ++ ++// Fast Montgomery multiplication. The derivation of the algorithm is ++// in A Cryptographic Library for the Motorola DSP56000, ++// Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237. 
++ ++static void __attribute__((noinline)) ++montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[], ++ unsigned long m[], unsigned long inv, int len) { ++ unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator ++ int i; ++ assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); ++/* ++static long count=0; ++count++; ++if( (count%1000)==0 ) ++ printf("montgomery_multiply,,,\n"); ++*/ ++ for (i = 0; i < len; i++) { ++ int j; ++ for (j = 0; j < i; j++) { ++ MACC(a[j], b[i-j], t0, t1, t2); ++ MACC(m[j], n[i-j], t0, t1, t2); ++ } ++ MACC(a[i], b[0], t0, t1, t2); ++ m[i] = t0 * inv; ++ MACC(m[i], n[0], t0, t1, t2); ++ ++ assert(t0 == 0, "broken Montgomery multiply"); ++ ++ t0 = t1; t1 = t2; t2 = 0; ++ } ++ ++ for (i = len; i < 2*len; i++) { ++ int j; ++ for (j = i-len+1; j < len; j++) { ++ MACC(a[j], b[i-j], t0, t1, t2); ++ MACC(m[j], n[i-j], t0, t1, t2); ++ } ++ m[i-len] = t0; ++ t0 = t1; t1 = t2; t2 = 0; ++ } ++ ++ while (t0) ++ t0 = sub(m, n, t0, len); ++} ++ ++// Fast Montgomery squaring. This uses asymptotically 25% fewer ++// multiplies so it should be up to 25% faster than Montgomery ++// multiplication. However, its loop control is more complex and it ++// may actually run slower on some machines. 
++ ++static void __attribute__((noinline)) ++montgomery_square(unsigned long a[], unsigned long n[], ++ unsigned long m[], unsigned long inv, int len) { ++ unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator ++ int i; ++ ++ assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply"); ++ ++ for (i = 0; i < len; i++) { ++ int j; ++ int end = (i+1)/2; ++ for (j = 0; j < end; j++) { ++ MACC2(a[j], a[i-j], t0, t1, t2); ++ MACC(m[j], n[i-j], t0, t1, t2); ++ } ++ if ((i & 1) == 0) { ++ MACC(a[j], a[j], t0, t1, t2); ++ } ++ for (; j < i; j++) { ++ MACC(m[j], n[i-j], t0, t1, t2); ++ } ++ m[i] = t0 * inv; ++ MACC(m[i], n[0], t0, t1, t2); ++ ++ assert(t0 == 0, "broken Montgomery square"); ++ ++ t0 = t1; t1 = t2; t2 = 0; ++ } ++ ++ for (i = len; i < 2*len; i++) { ++ int start = i-len+1; ++ int end = start + (len - start)/2; ++ int j; ++ for (j = start; j < end; j++) { ++ MACC2(a[j], a[i-j], t0, t1, t2); ++ MACC(m[j], n[i-j], t0, t1, t2); ++ } ++ if ((i & 1) == 0) { ++ MACC(a[j], a[j], t0, t1, t2); ++ } ++ for (; j < len; j++) { ++ MACC(m[j], n[i-j], t0, t1, t2); ++ } ++ m[i-len] = t0; ++ t0 = t1; t1 = t2; t2 = 0; ++ } ++ ++ while (t0) ++ t0 = sub(m, n, t0, len); ++} ++ ++// Swap words in a longword. ++static unsigned long swap(unsigned long x) { ++ return (x << 32) | (x >> 32); ++} ++ ++// Copy len longwords from s to d, word-swapping as we go. The ++// destination array is reversed. ++static void reverse_words(unsigned long *s, unsigned long *d, int len) { ++ d += len; ++ while(len-- > 0) { ++ d--; ++ *d = swap(*s); ++ s++; ++ } ++} ++ ++// The threshold at which squaring is advantageous was determined ++// experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz. 
++#define MONTGOMERY_SQUARING_THRESHOLD 64 ++ ++/* ========================================================================= */ ++static const int crc_table[8][256] = ++{ ++ { ++ 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, ++ 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, ++ 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, ++ 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, ++ 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, ++ 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, ++ 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, ++ 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, ++ 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, ++ 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, ++ 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, ++ 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, ++ 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, ++ 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, ++ 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, ++ 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, ++ 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, ++ 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, ++ 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, ++ 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, ++ 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, ++ 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, ++ 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, ++ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, ++ 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 
0x59b33d17UL, ++ 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, ++ 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, ++ 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, ++ 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, ++ 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, ++ 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, ++ 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, ++ 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, ++ 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, ++ 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, ++ 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, ++ 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, ++ 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, ++ 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, ++ 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, ++ 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, ++ 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, ++ 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, ++ 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, ++ 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, ++ 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, ++ 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, ++ 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, ++ 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, ++ 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, ++ 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, ++ 0x2d02ef8dUL ++ }, ++ { ++ 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 
0x646cc504UL, ++ 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL, ++ 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL, ++ 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL, ++ 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL, ++ 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL, ++ 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL, ++ 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL, ++ 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL, ++ 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL, ++ 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL, ++ 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL, ++ 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL, ++ 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL, ++ 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL, ++ 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL, ++ 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL, ++ 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL, ++ 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL, ++ 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL, ++ 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL, ++ 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL, ++ 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL, ++ 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL, ++ 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL, ++ 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL, ++ 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL, ++ 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL, ++ 
0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL, ++ 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL, ++ 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL, ++ 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL, ++ 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL, ++ 0xd37048acUL, 0xf85d1b6fUL, 0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL, ++ 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL, ++ 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL, ++ 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL, ++ 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL, ++ 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL, ++ 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL, ++ 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL, ++ 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL, ++ 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL, ++ 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL, ++ 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL, ++ 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL, ++ 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL, ++ 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL, ++ 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL, ++ 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL, ++ 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL, ++ 0x9324fd72UL ++ }, ++ { ++ 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL, ++ 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL, ++ 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL, ++ 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL, ++ 
0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL, ++ 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL, ++ 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL, ++ 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL, ++ 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL, ++ 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL, ++ 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL, ++ 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL, ++ 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL, ++ 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL, ++ 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL, ++ 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL, ++ 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL, ++ 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL, ++ 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL, ++ 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL, ++ 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL, ++ 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL, ++ 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL, ++ 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL, ++ 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL, ++ 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL, ++ 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL, ++ 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL, ++ 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL, ++ 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL, ++ 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL, ++ 0xf1465711UL, 0xf4094194UL, 
0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL, ++ 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 0xde71f5bcUL, ++ 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL, ++ 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL, ++ 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL, ++ 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL, ++ 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL, ++ 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL, ++ 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL, ++ 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL, ++ 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL, ++ 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL, ++ 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL, ++ 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL, ++ 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL, ++ 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL, ++ 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL, ++ 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL, ++ 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL, ++ 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL, ++ 0xbe9834edUL ++ }, ++ { ++ 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL, ++ 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL, ++ 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL, ++ 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL, ++ 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL, ++ 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL, ++ 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL, ++ 0xb28700d0UL, 0x2f503869UL, 
0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL, ++ 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL, ++ 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL, ++ 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL, ++ 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL, ++ 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL, ++ 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL, ++ 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL, ++ 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL, ++ 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL, ++ 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL, ++ 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL, ++ 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL, ++ 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL, ++ 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL, ++ 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL, ++ 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL, ++ 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL, ++ 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL, ++ 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL, ++ 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL, ++ 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL, ++ 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL, ++ 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL, ++ 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL, ++ 0x4d6b1905UL, 0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL, ++ 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL, ++ 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 
0xadb46e36UL, ++ 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL, ++ 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL, ++ 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL, ++ 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL, ++ 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL, ++ 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL, ++ 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL, ++ 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL, ++ 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL, ++ 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL, ++ 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL, ++ 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL, ++ 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL, ++ 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL, ++ 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL, ++ 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL, ++ 0xde0506f1UL ++ }, ++ { ++ 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL, ++ 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL, ++ 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL, ++ 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL, ++ 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL, ++ 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL, ++ 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL, ++ 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL, ++ 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL, ++ 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL, ++ 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 
0x9995bacfUL, ++ 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL, ++ 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL, ++ 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL, ++ 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL, ++ 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL, ++ 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL, ++ 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL, ++ 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL, ++ 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL, ++ 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL, ++ 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL, ++ 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL, ++ 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL, ++ 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL, ++ 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL, ++ 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL, ++ 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL, ++ 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL, ++ 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL, ++ 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL, ++ 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 0x43beb717UL, 0xd58eb060UL, ++ 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL, ++ 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL, ++ 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL, ++ 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL, ++ 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL, ++ 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL, ++ 
0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL, ++ 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL, ++ 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL, ++ 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL, ++ 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL, ++ 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL, ++ 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL, ++ 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL, ++ 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL, ++ 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL, ++ 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL, ++ 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL, ++ 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL, ++ 0x8def022dUL ++ }, ++ { ++ 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL, ++ 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL, ++ 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL, ++ 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL, ++ 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL, ++ 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL, ++ 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL, ++ 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL, ++ 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL, ++ 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL, ++ 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL, ++ 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL, ++ 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL, ++ 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL, ++ 
0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL, ++ 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL, ++ 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL, ++ 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL, ++ 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL, ++ 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL, ++ 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL, ++ 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL, ++ 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL, ++ 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL, ++ 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL, ++ 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL, ++ 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL, ++ 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL, ++ 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL, ++ 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL, ++ 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL, ++ 0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL, ++ 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL, ++ 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL, ++ 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL, ++ 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL, ++ 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL, ++ 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL, ++ 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL, ++ 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL, ++ 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL, ++ 0x03a9547eUL, 0xc0fa7955UL, 
0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL, ++ 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL, ++ 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL, ++ 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL, ++ 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL, ++ 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL, ++ 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL, ++ 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL, ++ 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL, ++ 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL, ++ 0x72fd2493UL ++ }, ++ { ++ 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL, ++ 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL, ++ 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL, ++ 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL, ++ 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL, ++ 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL, ++ 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL, ++ 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL, ++ 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL, ++ 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL, ++ 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL, ++ 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL, ++ 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL, ++ 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL, ++ 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL, ++ 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL, ++ 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL, ++ 0x5bec776aUL, 0x02523168UL, 
0x3538f369UL, 0x087faf62UL, 0x3f156d63UL, ++ 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL, ++ 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL, ++ 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL, ++ 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL, ++ 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL, ++ 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL, ++ 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL, ++ 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL, ++ 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL, ++ 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL, ++ 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL, ++ 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL, ++ 0x42c49ef9UL, 0x75ae5cf8UL, 0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL, ++ 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL, ++ 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL, ++ 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL, ++ 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL, ++ 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL, ++ 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL, ++ 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL, ++ 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL, ++ 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL, ++ 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL, ++ 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL, ++ 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL, ++ 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL, ++ 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 
0xa0d0e2a9UL, ++ 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL, ++ 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL, ++ 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL, ++ 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL, ++ 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL, ++ 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL, ++ 0xed3498beUL ++ }, ++ { ++ 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL, ++ 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL, ++ 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL, ++ 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL, ++ 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL, ++ 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL, ++ 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL, ++ 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL, ++ 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL, ++ 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL, ++ 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL, ++ 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL, ++ 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL, ++ 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL, ++ 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL, ++ 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL, ++ 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL, ++ 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL, ++ 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL, ++ 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL, ++ 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 
0xecdf92feUL, ++ 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL, ++ 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL, ++ 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL, ++ 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL, ++ 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL, ++ 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL, ++ 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL, ++ 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL, ++ 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 0x96119e8aUL, ++ 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL, ++ 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL, ++ 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL, ++ 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL, ++ 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL, ++ 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL, ++ 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL, ++ 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL, ++ 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL, ++ 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL, ++ 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL, ++ 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL, ++ 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL, ++ 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL, ++ 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL, ++ 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL, ++ 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL, ++ 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL, ++ 
0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL, ++ 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL, ++ 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL, ++ 0xf10605deUL ++ } ++}; ++/* ========================================================================= */ ++#define DOLIT4 c ^= *buf4++; \ ++ c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ ++ crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] ++#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 ++ ++unsigned int SharedRuntime::updateBytesCRC32(unsigned long crc, const unsigned char *buf, unsigned int len) { ++ if (buf == 0) return 0UL; ++ ++ register unsigned int c; ++ register const unsigned int *buf4; ++ c = (unsigned int)crc; ++ c = ~c; ++ while (len && ((ptrdiff_t)buf & 3)) { ++ c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); ++ len--; ++ } ++ ++ buf4 = (const unsigned int *) (const void *)buf; ++ while (len >= 32) { ++ DOLIT32; ++ len -= 32; ++ } ++ while (len >= 4) { ++ DOLIT4; ++ len -= 4; ++ } ++ buf = (const unsigned char *)buf4; ++ ++ if (len) do { ++ c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); ++ } while (--len); ++ c = ~c; ++ return (unsigned long)c; ++} ++ ++void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, ++ jint len, jlong inv, ++ jint *m_ints) { ++ assert(len % 2 == 0, "array length in montgomery_multiply must be even"); ++ int longwords = len/2; ++ ++ // Make very sure we don't use so much space that the stack might ++ // overflow. 512 jints corresponds to an 16384-bit integer and ++ // will use here a total of 8k bytes of stack space. 
++ int total_allocation = longwords * sizeof (unsigned long) * 4; ++ guarantee(total_allocation <= 8192, "must be"); ++ unsigned long *scratch = (unsigned long *)alloca(total_allocation); ++ ++ // Local scratch arrays ++ unsigned long ++ *a = scratch + 0 * longwords, ++ *b = scratch + 1 * longwords, ++ *n = scratch + 2 * longwords, ++ *m = scratch + 3 * longwords; ++ ++ reverse_words((unsigned long *)a_ints, a, longwords); ++ reverse_words((unsigned long *)b_ints, b, longwords); ++ reverse_words((unsigned long *)n_ints, n, longwords); ++ ++ ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords); ++ ++ reverse_words(m, (unsigned long *)m_ints, longwords); ++} ++ ++void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, ++ jint len, jlong inv, ++ jint *m_ints) { ++ assert(len % 2 == 0, "array length in montgomery_square must be even"); ++ int longwords = len/2; ++ ++ // Make very sure we don't use so much space that the stack might ++ // overflow. 512 jints corresponds to an 16384-bit integer and ++ // will use here a total of 6k bytes of stack space. ++ int total_allocation = longwords * sizeof (unsigned long) * 3; ++ guarantee(total_allocation <= 8192, "must be"); ++ unsigned long *scratch = (unsigned long *)alloca(total_allocation); ++ ++ // Local scratch arrays ++ unsigned long ++ *a = scratch + 0 * longwords, ++ *n = scratch + 1 * longwords, ++ *m = scratch + 2 * longwords; ++ ++ reverse_words((unsigned long *)a_ints, a, longwords); ++ reverse_words((unsigned long *)n_ints, n, longwords); ++ ++ //montgomery_square fails to pass BigIntegerTest on solaris amd64 ++ //on jdk7 and jdk8. 
++#ifndef SOLARIS ++ if (len >= MONTGOMERY_SQUARING_THRESHOLD) { ++#else ++ if (0) { ++#endif ++ ::montgomery_square(a, n, m, (unsigned long)inv, longwords); ++ } else { ++ ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords); ++ } ++ ++ reverse_words(m, (unsigned long *)m_ints, longwords); ++} ++ ++extern "C" int SpinPause() {return 0;} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/stubGenerator_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/stubGenerator_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/stubGenerator_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/stubGenerator_sw64.cpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,3894 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "interpreter/interpreter.hpp" ++#include "nativeInst_sw64.hpp" ++#include "oops/instanceOop.hpp" ++#include "oops/method.hpp" ++#include "oops/objArrayKlass.hpp" ++#include "oops/oop.inline.hpp" ++#include "prims/methodHandles.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/handles.inline.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubCodeGenerator.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/thread.inline.hpp" ++#include "utilities/top.hpp" ++#ifdef COMPILER2 ++#include "opto/runtime.hpp" ++#endif ++ ++// Declaration and definition of StubGenerator (no .hpp file). ++// For a more detailed description of the stub routine structure ++// see the comment in stubRoutines.hpp ++ ++#define __ _masm-> ++#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) ++//#define a__ ((Assembler*)_masm)-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024]; sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions ++ ++// Stub Code definitions ++ ++static address handle_unsafe_access() { ++ JavaThread* thread = JavaThread::current(); ++ address pc = thread->saved_exception_pc(); ++ // pc is the instruction which we must emulate ++ // doing a no-op is fine: return garbage from the load ++ // therefore, compute npc ++ address npc = (address)((unsigned long)pc + sizeof(unsigned int)); ++ ++ // request an async exception ++ thread->set_pending_unsafe_access_error(); ++ ++ // return address of next instruction to execute ++ return npc; ++} ++ ++class StubGenerator: public StubCodeGenerator { ++ private: ++ ++#ifdef PRODUCT ++#define inc_counter_np(counter) 
((void)0) ++#else ++ void inc_counter_np_(int& counter) { ++ __ li64(GP, (long )&counter); ++ __ ldw(AT, GP, 0); ++ __ addw(AT, AT, 1); ++ __ stw(AT, GP, 0); ++ } ++#define inc_counter_np(counter) \ ++ BLOCK_COMMENT("inc_counter " #counter); \ ++ inc_counter_np_(counter); ++#endif ++ // ABI sw64 ++ // This fig is not SW64 ABI. It is call Java from C ABI. ++ // Call stubs are used to call Java from C ++ // ++ // [ return_from_Java ] ++ // [ argument word n-1 ] <--- sp ++ // ... ++ // [ argument word 0 ] ++ // ... ++ //-10 [ S2 ] ++ // -9 [ S5 ] ++ // -8 [ S4 ] ++ // -7 [ S3 ] ++ // -6 [ S0 ] ++ // -5 [ TSR(T5err) ] ++ // -4 [ LVP(S1) ] ++ // -3 [ BCP(T10) ] ++ // -2 [ saved fp ] <--- fp_after_call ++ // -1 [ return address ] ++ // 0 [ ptr. to call wrapper ] <--- a0 (old sp -->)fp ++ // 1 [ result ] <--- a1 ++ // 2 [ result_type ] <--- a2 ++ // 3 [ method ] <--- a3 ++ // 4 [ entry_point ] <--- a4 ++ // 5 [ parameters ] <--- a5 ++ // 6 [ parameter_size ] <--- a6 ++ // 7 [ thread ] <--- a7 ++ ++ // ++ // _LP64: n64 does not save paras in sp. ++ // ++ // [ return_from_Java ] ++ // [ argument word n-1 ] <--- sp ++ // ... ++ // [ argument word 0 ] ++ // ... ++ //-14 [ thread ] ++ //-13 [ result_type ] <--- a2 ++ //-12 [ result ] <--- a1 ++ //-11 [ ptr. 
to call wrapper ] <--- a0 ++ //-10 [ S2 ] ++ // -9 [ S5 ] ++ // -8 [ S4 ] ++ // -7 [ S3 ] ++ // -6 [ S0 ] ++ // -5 [ TSR(T5err) ] ++ // -4 [ LVP(S1) ] ++ // -3 [ BCP(T10) ] ++ // -2 [ saved fp ] <--- fp_after_call ++ // -1 [ return address ] ++ // 0 [ ] <--- old sp ++ ++ // Call stubs are used to call Java from C ++ //SW64 Incoming arguments: ++ // ++ // AR0 : call wrapper address ++ // AR1 : result (address) ++ // AR2 : result type ++ // AR3 : method ++ // AR4 : (interpreter) entry point ++ // AR5 : parameters (address) ++ // [sp + 0x0] : parameter size (in words) ++ // [sp + 0x8] : thread ++ ++ ++ enum call_stub_layout { ++ RA_off = -1, ++ FP_off = -2, ++ S0_off = -3, ++ S1_off = -4, ++ S2_off = -5, ++ S3_off = -6, ++ S4_off = -7, ++ S5_off = -8, ++ F2_off = -9, ++ F3_off = -10, ++ F4_off = -11, ++ F5_off = -12, ++ F6_off = -13, ++ F7_off = -14, ++ F8_off = -15, ++ F9_off = -16, ++ result_off = -17, ++ result_type_off = -18, ++ total_off = result_type_off - 1, ++ }; ++ ++ address generate_call_stub(address& return_address) { ++ ++ StubCodeMark mark(this, "StubRoutines", "call_stub"); ++ address start = __ pc(); ++ const Address parameter_size(FP, BytesPerWord * 2); ++ const Address thread(FP, BytesPerWord * 3); ++ ++ // call Java method from C function, by LIX20170503 ++ if (Usesetfpec1) { ++ __ setfpec1(); ++ } ++ ++ //set FPCR in kernel ++// if (SetFPCR) { ++// __ rfpcr(F28); ++// __ fimovd(AT, F28); ++// __ sbt(AT, 45, AT); ++// __ ifmovd(F28, AT); ++// __ wfpcr(F28); ++//// __ setfpec0(); ++// } ++ ++ // same as in generate_catch_exception()! 
++ ++ // stub code ++ // save ra and fp ++ __ add_simm16(SP, SP, total_off * wordSize); ++ // save ra and fp ++ __ stl(RA, SP, (RA_off - total_off) * wordSize); ++ __ stl(FP, SP, (FP_off - total_off) * wordSize); ++ __ stl(S0, SP, (S0_off - total_off) * wordSize); ++ __ stl(S1, SP, (S1_off - total_off) * wordSize); ++ __ stl(S2, SP, (S2_off - total_off) * wordSize); ++ __ stl(S3, SP, (S3_off - total_off) * wordSize); ++ __ stl(S4, SP, (S4_off - total_off) * wordSize); ++ __ stl(S5, SP, (S5_off - total_off) * wordSize); ++ __ stl(A1, SP, (result_off - total_off) * wordSize); ++ __ stl(A2, SP, (result_type_off - total_off) * wordSize); ++ ++ __ fstd(F2, SP, (F2_off - total_off) * wordSize); ++ __ fstd(F3, SP, (F3_off - total_off) * wordSize); ++ __ fstd(F4, SP, (F4_off - total_off) * wordSize); ++ __ fstd(F5, SP, (F5_off - total_off) * wordSize); ++ __ fstd(F6, SP, (F6_off - total_off) * wordSize); ++ __ fstd(F7, SP, (F7_off - total_off) * wordSize); ++ __ fstd(F8, SP, (F8_off - total_off) * wordSize); ++ __ fstd(F9, SP, (F9_off - total_off) * wordSize); ++ ++ ++ // I think 14 is the max gap between argument and callee saved register ++ __ add_simm16(FP, SP, (FP_off - total_off) * wordSize); ++ ++ __ stl(A0, FP, frame::entry_frame_call_wrapper_offset * wordSize); ++ ++ ++ ++ // -9 [ ptr. 
to call wrapper]<--- a0<----SP ++ // -8 [ result_type ]<--- a2 ++ // -7 [ result ]<--- a1 ++ // -6 [ S5 ] ++ // -5 [ S4 ] ++ // -4 [ S3 ] ++ // -3 [ S2 ] ++ // -2 [ S1 ] ++ // -1 [ S0 ] ++ // -0 [ saved fp ] <--- fp_after_call ++ // 1 [ RA ] ++ // 2 [ parameter size ] <--- old_SP ++ // 3 [ thread ] ++ ++ __ ld_ptr(S2thread, thread); ++ ++ //add for compressedoops ++ __ reinit_heapbase(); ++ ++#ifdef ASSERT ++ // make sure we have no pending exceptions ++ { ++ Label L; ++ __ ldl(AT, S2thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(AT, L); ++ __ stop("StubRoutines::call_stub: entered with pending exception"); ++ __ BIND(L); ++ } ++#endif ++ ++ // pass parameters if any ++ // A5: parameter ++ // T6 parameter_size ++ // T0: parameter_size_tmp(--) ++ // T2: offset(++) ++ // T3: tmp ++ Label parameters_done; ++ // judge if the parameter_size equals 0 ++ __ ld_ptr(T6, parameter_size); ++ __ beq(T6, parameters_done); ++ __ slll(AT, T6, Interpreter::logStackElementSize); ++ __ subl(SP, SP, AT); ++ __ move(AT, -StackAlignmentInBytes); ++ __ and_reg(SP, SP , AT); ++ // Copy Java parameters in reverse order (receiver last) ++ // Note that the argument order is inverted in the process ++ Label loop; ++ __ move(T0, T6); ++ __ move(T2, R0); ++ __ BIND(loop); ++ ++ // get parameter ++ __ slll(T3, T0, LogBytesPerWord); ++ __ addl(T3, T3, A5); //A5 parameters (address) ++ __ ldl(AT, T3, -wordSize); ++ __ slll(T3, T2, LogBytesPerWord); ++ __ addl(T3, T3, SP); ++ __ stl(AT, T3, Interpreter::expr_offset_in_bytes(0)); ++ __ addl(T2, T2, 1); ++ __ subl(T0, T0, 1); ++ __ bne(T0, loop); ++ // advance to next parameter ++ ++ // call Java function ++ __ BIND(parameters_done); ++ ++ // receiver in V0, methodOop in Rmethod ++ ++ __ move(Rmethod, A3); //A3 method ++ __ move(Rsender, SP); //set sender sp ++ __ Assembler::call(RA, A4, 0);//A4 (interpreter) entry point ++ return_address = __ pc(); ++ ++ Label common_return; ++ __ BIND(common_return); ++ ++ // restore ++ __ 
add_simm16(SP, FP, 2 * wordSize ); ++ __ ldl(RA, SP, RA_off * wordSize); ++ __ ldl(FP, SP, FP_off * wordSize); ++ __ ldl(S0, SP, S0_off * wordSize); ++ __ ldl(S1, SP, S1_off * wordSize); ++ __ ldl(S2, SP, S2_off * wordSize); ++ __ ldl(S3, SP, S3_off * wordSize); ++ __ ldl(S4, SP, S4_off * wordSize); ++ __ ldl(S5, SP, S5_off * wordSize); ++ __ ldl(A1, SP, result_off * wordSize); ++ __ ldl(A2, SP, result_type_off * wordSize); ++ ++ ++ __ fldd(F2, SP, F2_off * wordSize); ++ __ fldd(F3, SP, F3_off * wordSize); ++ __ fldd(F4, SP, F4_off * wordSize); ++ __ fldd(F5, SP, F5_off * wordSize); ++ __ fldd(F6, SP, F6_off * wordSize); ++ __ fldd(F7, SP, F7_off * wordSize); ++ __ fldd(F8, SP, F8_off * wordSize); ++ __ fldd(F9, SP, F9_off * wordSize); ++ ++ // store result depending on type ++ // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) ++ const Register a1_resultAddr = A1; ++ const Register a2_resultType = A2; ++ Label is_long, is_float, is_double, exit; ++ __ add_simm16(T3, a2_resultType, (-1) * T_LONG); ++ __ beq(T3, is_long); ++ __ add_simm16(T3, a2_resultType, (-1) * T_FLOAT); ++ __ beq(T3, is_float); ++ __ add_simm16(T3, a2_resultType, (-1) * T_DOUBLE); ++ __ beq(T3, is_double); ++ ++ // handle T_INT case ++ __ stl(V0, a1_resultAddr, 0 * wordSize); ++ __ BIND(exit); ++ ++ // return ++ __ ret(); ++ ++ // handle return types different from T_INT ++ __ BIND(is_long); ++ __ stl(V0, a1_resultAddr, 0 * wordSize); ++ __ beq(R0, exit); ++ ++ __ BIND(is_float); ++ __ fsts(F0, a1_resultAddr, 0 * wordSize); ++ __ beq(R0, exit); ++ ++ __ BIND(is_double); ++ __ fstd(F0, a1_resultAddr, 0 * wordSize); ++ __ beq(R0, exit); ++ ++ StubRoutines::sw64::set_call_stub_compiled_return(__ pc()); ++ __ beq(R0, common_return); ++ return start; ++ } ++ ++ // Return point for a Java call if there's an exception thrown in ++ // Java code. 
The exception is caught and transformed into a ++ // pending exception stored in JavaThread that can be tested from ++ // within the VM. ++ // ++ // Note: Usually the parameters are removed by the callee. In case ++ // of an exception crossing an activation frame boundary, that is ++ // not the case if the callee is compiled code => need to setup the ++ // sp. ++ // ++ // V0: exception oop ++ ++ address generate_catch_exception() { ++ StubCodeMark mark(this, "StubRoutines", "catch_exception"); ++ address start = __ pc(); ++ ++ Register thread = S2thread; ++ ++ // get thread directly ++ ++#ifdef ASSERT ++ // verify that threads correspond ++ { Label L; ++ __ get_thread(T11); ++ __ beq(T11, thread, L); ++ __ stop("StubRoutines::catch_exception: threads must correspond"); ++ __ BIND(L); ++ } ++#endif ++ // set pending exception ++ __ verify_oop(V0); ++ __ stl(V0, thread, in_bytes(Thread::pending_exception_offset())); ++ __ li(AT, (long)__FILE__); ++ __ stl(AT, thread, in_bytes(Thread::exception_file_offset ())); ++ __ li(AT, (long)__LINE__); ++ __ stl(AT, thread, in_bytes(Thread::exception_line_offset ())); ++ ++ // complete return to VM ++ assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before"); ++ __ jmp(StubRoutines::_call_stub_return_address, relocInfo::none); ++ ++ return start; ++ } ++ ++ // Continuation point for runtime calls returning with a pending ++ // exception. The pending exception check happened in the runtime ++ // or native call stub. The pending exception in Thread is ++ // converted into a Java-level exception. ++ // ++ // Contract with Java-level exception handlers: ++ // V0: exception ++ // T4: throwing pc ++ // ++ // NOTE: At entry of this stub, exception-pc must be on stack !! 
++ ++ address generate_forward_exception() { ++ StubCodeMark mark(this, "StubRoutines", "forward exception"); ++ Register thread = S2thread; ++ address start = __ pc(); ++ ++ // Upon entry, the sp points to the return address returning into ++ // Java (interpreted or compiled) code; i.e., the return address ++ // throwing pc. ++ // ++ // Arguments pushed before the runtime call are still on the stack ++ // but the exception handler will reset the stack pointer -> ++ // ignore them. A potential result in registers can be ignored as ++ // well. ++ ++#ifdef ASSERT ++ // make sure this code is only executed if there is a pending exception ++ { ++ Label L; ++ __ ldl(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ bne(AT, L); ++ __ stop("StubRoutines::forward exception: no pending exception (1)"); ++ __ BIND(L); ++ } ++#endif ++ ++ // compute exception handler into T12 ++ __ ldl(A1, SP, 0); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, A1); ++ __ move(T12, V0); ++ __ pop(T4); ++ ++ __ ldl(V0, thread, in_bytes(Thread::pending_exception_offset())); ++ __ stl(R0, thread, in_bytes(Thread::pending_exception_offset())); ++ ++#ifdef ASSERT ++ // make sure exception is set ++ { ++ Label L; ++ __ bne(V0, L); ++ __ stop("StubRoutines::forward exception: no pending exception (2)"); ++ __ BIND(L); ++ } ++#endif ++ ++ // continue at exception handler (return address removed) ++ // V0: exception ++ // T12: exception handler ++ // T4: throwing pc ++ __ verify_oop(V0); ++ __ jmp(T12); ++ ++ return start; ++ } ++ ++ // Support for intptr_t get_previous_fp() ++ // ++ // This routine is used to find the previous frame pointer for the ++ // caller (current_frame_guess). This is used as part of debugging ++ // ps() is seemingly lost trying to find frames. ++ // This code assumes that caller current_frame_guess) has a frame. 
++ address generate_get_previous_fp() { ++ StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); ++ const Address old_fp (FP, 0); ++ const Address older_fp (V0, 0); ++ address start = __ pc(); ++ __ enter(); ++ __ ldw_signed(V0, old_fp); // callers fp ++ __ ldw_signed(V0, older_fp); // the frame for ps() ++ __ leave(); ++ __ ret(); ++ return start; ++ } ++ ++ // The following routine generates a subroutine to throw an ++ // asynchronous UnknownError when an unsafe access gets a fault that ++ // could not be reasonably prevented by the programmer. (Example: ++ // SIGBUS/OBJERR.) ++ address generate_handler_for_unsafe_access() { ++ StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); ++ address start = __ pc(); ++ __ pushad_except_RA(); // push registers ++ // Address next_pc(esp, RegisterImpl::number_of_registers * BytesPerWord); ++ __ call(CAST_FROM_FN_PTR(address, handle_unsafe_access), relocInfo::runtime_call_type); ++ __ move(RA,V0); ++ __ popad_except_RA(); ++ __ ret(); ++ return start; ++ } ++ ++ // Non-destructive plausibility checks for oops ++ // ++ address generate_verify_oop() { ++ StubCodeMark mark(this, "StubRoutines", "verify_oop"); ++ address start = __ pc(); ++ __ reinit_heapbase(); ++ __ verify_oop_subroutine(); ++ address end = __ pc(); ++ return start; ++ } ++ ++ // ++ // Generate overlap test for array copy stubs ++ // ++ // Input: ++ // A0 - array1 ++ // A1 - array2 ++ // A2 - element count ++ // ++ ++ // use T12 as temp ++ void array_overlap_test(address no_overlap_target, int log2_elem_size) { ++ int elem_size = 1 << log2_elem_size; ++ Address::ScaleFactor sf = Address::times_1; ++ ++ switch (log2_elem_size) { ++ case 0: sf = Address::times_1; break; ++ case 1: sf = Address::times_2; break; ++ case 2: sf = Address::times_4; break; ++ case 3: sf = Address::times_8; break; ++ } ++ ++ __ slll(AT, A2, sf); ++ __ addl(AT, AT, A0); ++ __ add_simm16(T12, AT, -elem_size); ++ __ subl(AT, A1, A0); ++ __ ble(AT, no_overlap_target); 
++ __ subl(AT, A1, T12); ++ __ bgt(AT, no_overlap_target); ++ ++ // If A0 = 0xf... and A1 = 0x0..., than goto no_overlap_target ++ Label L; ++ __ bge(A0, L); ++ __ bgt(A1, no_overlap_target); ++ __ BIND(L); ++ ++ } ++ ++ // ++ // Generate store check for array ++ // ++ // Input: ++ // T0 - starting address ++ // T1 - element count ++ // ++ // The 2 input registers are overwritten ++ // ++ ++//lsp to do check if array_store_check is necessary?? ++ void array_store_check(Register tmp) { ++ assert_different_registers(tmp, AT, T0, T1); ++ BarrierSet* bs = Universe::heap()->barrier_set(); ++ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); ++ CardTableModRefBS* ct = (CardTableModRefBS*)bs; ++ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); ++ Label l_0; ++ ++ if (UseConcMarkSweepGC) __ memb(); ++ ++ __ set64(tmp, (long)ct->byte_map_base); ++ ++ __ slll(AT, T1, TIMES_OOP); ++ __ addl(AT, T0, AT); ++ __ add_simm16(T1, AT, - BytesPerHeapOop); ++ ++ __ shr(T0, CardTableModRefBS::card_shift); ++ __ shr(T1, CardTableModRefBS::card_shift); ++ ++ __ subl(T1, T1, T0); // end --> cards count ++ __ BIND(l_0); ++ ++ __ addl(AT, tmp, T0); ++ __ addl(AT, AT, T1); ++ __ stb(R0, AT, 0); ++ ++ __ subl(T1, T1, 1); ++ __ bge(T1, l_0); ++ ++ } ++ ++ // Generate code for an array write pre barrier ++ // ++ // addr - starting address ++ // count - element count ++ // tmp - scratch register ++ // ++ // Destroy no registers! ++ // ++ void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { ++ BarrierSet* bs = Universe::heap()->barrier_set(); ++ switch (bs->kind()) { ++ case BarrierSet::G1SATBCT: ++ case BarrierSet::G1SATBCTLogging: ++ // With G1, don't generate the call if we statically know that the target in uninitialized ++ if (!dest_uninitialized) { ++ __ pushad(); // push registers ++ if (count == A0) { ++ if (addr == A1) { ++ // exactly backwards!! 
++ //__ xchgptr(c_rarg1, c_rarg0); ++ __ move(AT, A0); ++ __ move(A0, A1); ++ __ move(A1, AT); ++ } else { ++ __ move(A1, count); ++ __ move(A0, addr); ++ } ++ } else { ++ __ move(A0, addr); ++ __ move(A1, count); ++ } ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); ++ __ popad(); ++ } ++ break; ++ case BarrierSet::CardTableModRef: ++ case BarrierSet::CardTableExtension: ++ case BarrierSet::ModRef: ++ break; ++ default: ++ ShouldNotReachHere(); ++ ++ } ++ } ++ ++ // ++ // Generate code for an array write post barrier ++ // ++ // Input: ++ // start - register containing starting address of destination array ++ // count - elements count ++ // scratch - scratch register ++ // ++ // The input registers are overwritten. ++ // ++ void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { ++ Register tmp1 = GP; ++ assert_different_registers(start, count, scratch, AT); ++ BarrierSet* bs = Universe::heap()->barrier_set(); ++ switch (bs->kind()) { ++ case BarrierSet::G1SATBCT: ++ case BarrierSet::G1SATBCTLogging: ++ { ++ __ pushad(); // push registers (overkill) ++ if (count == A0) { ++ if (start == A1) { ++ // exactly backwards!! 
++ //__ xchgptr(c_rarg1, c_rarg0); ++ __ move(AT, A0); ++ __ move(A0, A1); ++ __ move(A1, AT); ++ } else { ++ __ move(A1, count); ++ __ move(A0, start); ++ } ++ } else { ++ __ move(A0, start); ++ __ move(A1, count); ++ } ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); ++ __ popad(); ++ } ++ break; ++ case BarrierSet::CardTableModRef: ++ case BarrierSet::CardTableExtension: ++ { ++ CardTableModRefBS* ct = (CardTableModRefBS*)bs; ++ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); ++ ++ Label L_loop; ++ const Register end = count; ++ ++ if (UseConcMarkSweepGC) ++ if(UseWmemb) ++ __ wmemb(); ++ else ++ __ memb(); ++ int64_t disp = (int64_t) ct->byte_map_base; ++ __ set64(scratch, disp); ++ ++ __ lea(end, Address(start, count, TIMES_OOP, 0)); ++ __ add_simm16(end, end, -BytesPerHeapOop); // end - 1 to make inclusive ++ __ shr(start, CardTableModRefBS::card_shift); ++ __ shr(end, CardTableModRefBS::card_shift); ++ __ subl(end, end, start); // end --> cards count ++ ++ __ addl(start, start, scratch); ++ ++ __ BIND(L_loop); ++ __ addl(AT, start, count); ++ __ stb(R0, AT, 0); ++ __ subl(count, count, 1); ++ __ cmplt(AT, count, R0); ++ __ beq(AT, L_loop); ++ } ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ } ++ ++ // conjoint large copy ++ // void generate_conjoint_large_copy(Label &entry, const char *name) { ++ void generate_conjoint_large_copy(const char *name) { ++// StubCodeMark mark(this, "StubRoutines", name); ++// __ align(CodeEntryAlignment); ++ ++ { ++ Label loop, le32, le16, le8, lt8; ++ ++ // __ bind(entry); ++ __ addl(A3, A1, A2); ++ __ addl(A2, A0, A2); ++ __ ldl(T6, A0, 0); ++ __ ldl(T7, A2, -8); ++ ++ __ and_imm8(T1, A2, 7); ++ __ subl(A2, A2, T1); ++ __ subl(A5, A3, T1); ++ ++ __ addl(A4, A0, 64); ++ __ bge(A4, A2, le32); ++ ++ __ bind(loop); ++ __ ldl(T0, A2, -8); ++ __ ldl(T1, A2, -16); ++ __ ldl(T2, A2, -24); ++ __ ldl(T3, A2, -32); ++ __ ldl(T4, A2, -40); ++ __ ldl(T5, A2, -48); 
++ __ ldl(T6, A2, -56); ++ __ ldl(T7, A2, -64); ++ __ ldi(A2, A2, -64); ++ __ stl(T0, A5, -8); ++ __ stl(T1, A5, -16); ++ __ stl(T2, A5, -24); ++ __ stl(T3, A5, -32); ++ __ stl(T4, A5, -40); ++ __ stl(T5, A5, -48); ++ __ stl(T6, A5, -56); ++ __ stl(T7, A5, -64); ++ __ ldi(A5, A5, -64); ++ __ blt(A4, A2, loop); ++ ++ __ bind(le32); ++ __ ldi(A4, A0, 32); ++ __ bge(A4, A2, le16); ++ __ ldl(T0, A2, -8); ++ __ ldl(T1, A2, -16); ++ __ ldl(T2, A2, -24); ++ __ ldl(T3, A2, -32); ++ __ ldi(A2, A2, -32); ++ __ stl(T0, A5, -8); ++ __ stl(T1, A5, -16); ++ __ stl(T2, A5, -24); ++ __ stl(T3, A5, -32); ++ __ ldi(A5, A5, -32); ++ ++ __ bind(le16); ++ __ ldi(A4, A0, 16); ++ __ bge(A4, A2, le8); ++ __ ldl(T0, A2, -8); ++ __ ldl(T1, A2, -16); ++ __ ldi(A2, A2, -16); ++ __ stl(T0, A5, -8); ++ __ stl(T1, A5, -16); ++ __ ldi(A5, A5, -16); ++ ++ __ bind(le8); ++ __ ldi(A4, A0, 8); ++ __ bge(A4, A2, lt8); ++ __ ldl(T0, A2, -8); ++ __ stl(T0, A5, -8); ++ ++ __ bind(lt8); ++ __ stl(T6, A1, 0); ++ __ stl(T7, A3, -8); ++ } ++ ++// __ ret(); ++ } ++ // conjoint large copy lasx ++ // void generate_conjoint_large_copy_simd(Label &entry, const char *name) { ++ void generate_conjoint_large_copy_simd( const char *name) { ++// StubCodeMark mark(this, "StubRoutines", name); ++// __ align(CodeEntryAlignment); ++ ++ { ++ Label loop, le128, le64, le32, lt32; ++ //__ bind(entry); ++ __ addl(A3, A1, A2); ++ __ addl(A2, A0, A2); ++ __ vldd(F20, A0, 0); ++ __ vldd(F21, A2, -32); ++ ++ __ and_imm8(T1, A2, 31); ++ __ subl(A2, A2, T1); ++ __ subl(A5, A3, T1); ++ ++ __ ldi(A4, A0, 256); ++ __ bge(A4, A2, le128); ++ ++ __ bind(loop); ++ __ vldd(F10, A2, -32); ++ __ vldd(F11, A2, -64); ++ __ vldd(F12, A2, -96); ++ __ vldd(F13, A2, -128); ++ __ vldd(F14, A2, -160); ++ __ vldd(F15, A2, -192); ++ __ vldd(F16, A2, -224); ++ __ vldd(F17, A2, -256); ++ __ ldi(A2, A2, -256); ++ __ vstd(F10, A5, -32); ++ __ vstd(F11, A5, -64); ++ __ vstd(F12, A5, -96); ++ __ vstd(F13, A5, -128); ++ __ vstd(F14, A5, -160); ++ __ vstd(F15, 
A5, -192); ++ __ vstd(F16, A5, -224); ++ __ vstd(F17, A5, -256); ++ __ ldi(A5, A5, -256); ++ __ blt(A4, A2, loop); ++ ++ __ bind(le128); ++ __ ldi(A4, A0, 128); ++ __ bge(A4, A2, le64); ++ __ vldd(F10, A2, -32); ++ __ vldd(F11, A2, -64); ++ __ vldd(F12, A2, -96); ++ __ vldd(F13, A2, -128); ++ __ ldi(A2, A2, -128); ++ __ vstd(F10, A5, -32); ++ __ vstd(F11, A5, -64); ++ __ vstd(F12, A5, -96); ++ __ vstd(F13, A5, -128); ++ __ ldi(A5, A5, -128); ++ ++ __ bind(le64); ++ __ ldi(A4, A0, 64); ++ __ bge(A4, A2, le32); ++ __ vldd(F10, A2, -32); ++ __ vldd(F11, A2, -64); ++ __ ldi(A2, A2, -64); ++ __ vstd(F10, A5, -32); ++ __ vstd(F11, A5, -64); ++ __ ldi(A5, A5, -64); ++ ++ __ bind(le32); ++ __ ldi(A4, A0, 32); ++ __ bge(A4, A2, lt32); ++ __ vldd(F10, A2, -32); ++ __ vstd(F10, A5, -32); ++ ++ __ bind(lt32); ++ __ vstd(F20, A1, 0); ++ __ vstd(F21, A3, -32); ++ } ++ ++ //__ ret(); ++ } ++ void array_overlap_test2(address no_overlap_target, int log2_elem_size) { ++ __ slll(T4, A2, log2_elem_size); ++ __ subl(AT, A1, A0); ++ __ bge(AT, T4, no_overlap_target); ++ } ++ // disjoint large copy ++ //void generate_disjoint_large_copy(Label &entry, const char *name) { ++ void generate_disjoint_large_copy(const char *name) { ++// StubCodeMark mark(this, "StubRoutines", name); ++// __ align(CodeEntryAlignment); ++ ++ { ++ Label loop, le32, le16, le8, lt8; ++ ++// __ BIND(entry); ++ __ addl(A3, A1, A2); ++ __ addl(A2, A0, A2); ++ __ ldl(T6, A0, 0); ++ __ ldl(T7, A2, -8); ++ ++ __ and_imm8(T1, A0, 7); ++ __ subl(T0, R0, T1); ++ __ addl(T0, T0, 8); ++ ++ __ addl(A0, A0, T0); ++ __ addl(A5, A1, T0); ++ ++ __ subl(A4, A2, 64); ++ __ bge(A0, A4, le32); ++ ++ __ BIND(loop); ++ __ ldl(T0, A0, 0); ++ __ ldl(T1, A0, 8); ++ __ ldl(T2, A0, 16); ++ __ ldl(T3, A0, 24); ++ __ ldl(T4, A0, 32); ++ __ ldl(T5, A0, 40); ++ __ ldl(T6, A0, 48); ++ __ ldl(T7, A0, 56); ++ __ addl(A0, A0, 64); ++ __ stl(T0, A5, 0); ++ __ stl(T1, A5, 8); ++ __ stl(T2, A5, 16); ++ __ stl(T3, A5, 24); ++ __ stl(T4, A5, 32); ++ __ 
stl(T5, A5, 40); ++ __ stl(T6, A5, 48); ++ __ stl(T7, A5, 56); ++ __ addl(A5, A5, 64); ++ __ blt(A0, A4, loop); ++ ++ __ BIND(le32); ++ __ subl(A4, A2, 32); ++ __ bge(A0, A4, le16); ++ __ ldl(T0, A0, 0); ++ __ ldl(T1, A0, 8); ++ __ ldl(T2, A0, 16); ++ __ ldl(T3, A0, 24); ++ __ addl(A0, A0, 32); ++ __ stl(T0, A5, 0); ++ __ stl(T1, A5, 8); ++ __ stl(T2, A5, 16); ++ __ stl(T3, A5, 24); ++ __ addl(A5, A5, 32); ++ ++ __ BIND(le16); ++ __ subl(A4, A2, 16); ++ __ bge(A0, A4, le8); ++ __ ldl(T0, A0, 0); ++ __ ldl(T1, A0, 8); ++ __ addl(A0, A0, 16); ++ __ stl(T0, A5, 0); ++ __ stl(T1, A5, 8); ++ __ addl(A5, A5, 16); ++ ++ __ BIND(le8); ++ __ subl(A4, A2, 8); ++ __ bge(A0, A4, lt8); ++ __ ldl(T0, A0, 0); ++ __ stl(T0, A5, 0); ++ ++ __ BIND(lt8); ++ __ stl(T6, A1, 0); ++ __ stl(T7, A3, -8); ++ } ++// __ ret(); ++ } ++ ++ // disjoint large copy lasx ++// void generate_disjoint_large_copy_simd(Label &entry, const char *name) { ++ void generate_disjoint_large_copy_simd( const char *name) { ++// StubCodeMark mark(this, "StubRoutines", name); ++// __ align(CodeEntryAlignment); ++ ++ { ++ Label loop, le128, le64, le32, lt32; ++ ++ // __ BIND(entry); ++ __ addl(A3, A1, A2); ++ __ addl(A2, A0, A2); ++ __ vldd(F20, A0, 0); ++ __ vldd(F21, A2, -32); ++ ++ __ and_imm8(T1, A0, 31); ++ __ subl(T0, R0, T1); ++ __ addl(T0, T0, 32); ++ ++ __ addl(A0, A0, T0); ++ __ addl(A5, A1, T0); ++ ++ __ ldi(A4, A2, -256); ++ __ bge(A0, A4, le128); ++ ++ __ BIND(loop); ++ __ vldd(F10, A0, 0); ++ __ vldd(F11, A0, 32); ++ __ vldd(F12, A0, 64); ++ __ vldd(F13, A0, 96); ++ __ vldd(F14, A0, 128); ++ __ vldd(F15, A0, 160); ++ __ vldd(F16, A0, 192); ++ __ vldd(F17, A0, 224); ++ __ ldi(A0, A0, 256); ++ __ vstd(F10, A5, 0); ++ __ vstd(F11, A5, 32); ++ __ vstd(F12, A5, 64); ++ __ vstd(F13, A5, 96); ++ __ vstd(F14, A5, 128); ++ __ vstd(F15, A5, 160); ++ __ vstd(F16, A5, 192); ++ __ vstd(F17, A5, 224); ++ __ ldi(A5, A5, 256); ++ __ blt( A0, A4, loop); ++ ++ __ BIND(le128); ++ __ ldi(A4, A2, -128); ++ __ bge(A0, A4, 
le64); ++ __ vldd(F10, A0, 0); ++ __ vldd(F11, A0, 32); ++ __ vldd(F12, A0, 64); ++ __ vldd(F13, A0, 96); ++ __ ldi(A0, A0, 128); ++ __ vstd(F10, A5, 0); ++ __ vstd(F11, A5, 32); ++ __ vstd(F12, A5, 64); ++ __ vstd(F13, A5, 96); ++ __ ldi(A5, A5, 128); ++ ++ __ BIND(le64); ++ __ ldi(A4, A2, -64); ++ __ bge(A0, A4, le32); ++ __ vldd(F10, A0, 0); ++ __ vldd(F11, A0, 32); ++ __ ldi(A0, A0, 64); ++ __ vstd(F10, A5, 0); ++ __ vstd(F11, A5, 32); ++ __ ldi(A5, A5, 64); ++ ++ __ BIND(le32); ++ __ ldi(A4, A2, -32); ++ __ bge(A0, A4, lt32); ++ __ vldd(F10, A0, 0); ++ __ vstd(F10, A5, 0); ++ ++ __ BIND(lt32); ++ __ vstd(F20, A1, 0); ++ __ vstd(F21, A3, -32); ++ } ++// __ ret(); ++ } ++ void generate_byte_small_copy(Label &entry, const char *name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ __ BIND(entry); ++ __ br(AT, 0); ++ __ addl(AT, AT, 16);//__ addpi(3, AT) ++ __ slll(A2, A2, 6); ++ __ addl(AT, AT, A2); ++ __ jmp(AT); ++ ++ // 0: ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 1: ++ __ ldbu(AT, A0, 0); ++ __ stb(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 2: ++ __ ldhu(AT, A0, 0); ++ __ sth(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 3: ++ __ ldhu(AT, A0, 0); ++ __ ldbu(T0, A0, 2); ++ __ sth(AT, A1, 0); ++ __ stb(T0, A1, 2); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 4: ++ __ ldw(AT, A0, 0); ++ __ stw(AT, A1, 0); ++ __ ret(); ++ 
__ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 5: ++ __ ldw(AT, A0, 0); ++ __ ldbu(T2, A0, 4); ++ __ stw(AT, A1, 0); ++ __ stb(T2, A1, 4); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 6: ++ __ ldw(AT, A0, 0); ++ __ ldhu(T2, A0, 4); ++ __ stw(AT, A1, 0); ++ __ sth(T2, A1, 4); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 7: ++ __ ldw(AT, A0, 0); ++ __ ldw(A2, A0, 3); ++ __ stw(AT, A1, 0); ++ __ stw(A2, A1, 3); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 8: ++ __ ldl(AT, A0, 0); ++ __ stl(AT, A1, 0); ++ __ ret(); ++// if (!UseUnaligned) ++// return; ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 9: ++ __ ldl(AT, A0, 0); ++ __ ldbu(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stb(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 10: ++ __ ldl(AT, A0, 0); ++ __ ldhu(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ sth(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 11: ++ __ ldl(AT, A0, 0); ++ __ ldw(A2, A0, 7); ++ __ stl(AT, A1, 0); ++ __ stw(A2, A1, 7); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 12: ++ 
__ ldl(AT, A0, 0); ++ __ ldw(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stw(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 13: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 5); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 5); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 14: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 6); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 6); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 15: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 7); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 7); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 16: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ ++ // 17: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldbu(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stb(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 18: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldhu(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ sth(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 19: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldw(A3, A0, 15); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stw(A3, A1, 15); ++ __ ret(); ++ __ 
nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 20: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldw(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stw(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 21: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 13); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 13); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 22: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 14); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 14); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 23: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 15); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 15); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 24: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 25: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldbu(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stb(A4, A1, 24); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 26: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldhu(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ sth(A4, A1, 24); ++ __ 
ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 27: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldw(A4, A0, 23); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stw(A4, A1, 23); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 28: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldw(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stw(A4, A1, 24); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 29: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldl(A4, A0, 21); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stl(A4, A1, 21); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ // 30: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldl(A4, A0, 22); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stl(A4, A1, 22); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 31: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldl(A4, A0, 23); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stl(A4, A1, 23); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 32: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldl(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stl(A4, A1, 24); ++ __ ret(); ++ ++ } ++ ++ // Short small copy: less than { int:9, lsx:9, lasx:17 } elements. 
++ void generate_short_small_copy(Label &entry, const char *name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ Label L; ++ __ BIND(entry); ++ __ br(AT, 0); ++ __ addl(AT, AT, 16);//__ addpi(3, AT) ++ __ slll(A2, A2, 6); ++ __ addl(AT, AT, A2); ++ __ jmp(AT); ++ ++ __ BIND(L); ++ // 0: ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 1: ++ __ ldhu(AT, A0, 0); ++ __ sth(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 2: ++ __ ldw(AT, A0, 0); ++ __ stw(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 3: ++ __ ldw(AT, A0, 0); ++ __ ldhu(A2, A0, 4); ++ __ stw(AT, A1, 0); ++ __ sth(A2, A1, 4); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 4: ++ __ ldl(AT, A0, 0); ++ __ stl(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 5: ++ __ ldl(AT, A0, 0); ++ __ ldhu(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ sth(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 6: ++ __ ldl(AT, A0, 0); ++ __ ldw(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stw(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ 
__ nop(); ++ __ nop(); ++ ++ // 7: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 6); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 6); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 8: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 9: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldhu(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ sth(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 10: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldw(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stw(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 11: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 14); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 14); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 12: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 13: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldhu(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ sth(A4, A1, 24); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 14: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); 
++ __ ldl(A3, A0, 16); ++ __ ldw(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stw(A4, A1, 24); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 15: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldl(A4, A0, 22); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stl(A4, A1, 22); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 16: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldl(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stl(A4, A1, 24); ++ __ ret(); ++ } ++// Int small copy: less than { int:7, lsx:7, lasx:9 } elements. ++ void generate_int_small_copy(Label &entry, const char *name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ ++ Label L; ++ __ BIND(entry); ++ __ br(AT, 0); ++ __ addl(AT, AT, 16);//__ addpi(3, AT) ++ __ slll(A2, A2, 6); ++ __ addl(AT, AT, A2); ++ __ jmp(AT); ++ ++ ++ __ BIND(L); ++ // 0: ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 1: ++ __ ldw(AT, A0, 0); ++ __ stw(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 2: ++ __ ldl(AT, A0, 0); ++ __ stl(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 3: ++ __ ldl(AT, A0, 0); ++ __ ldw(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stw(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ 
__ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 4: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 5: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldw(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stw(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 6: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 7: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ ldw(A4, A0, 24); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ stw(A4, A1, 24); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 8: ++ __ vldd(F10, A0, 0); ++ __ vstd(F10, A1, 0); ++ __ ret(); ++ } ++ ++ // Long small copy: less than { int:4, lsx:4, lasx:5 } elements. 
++ void generate_long_small_copy(Label &entry, const char *name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ ++// Label L; ++ __ BIND(entry); ++ __ br(AT, 0); ++ __ addl(AT, AT, 16);//__ addpi(3, AT) ++ __ slll(A2, A2, 5); ++ __ addl(AT, AT, A2); ++ __ jmp(AT); ++ ++// __ BIND(L); ++ // 0: ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 1: ++ __ ldl(AT, A0, 0); ++ __ stl(AT, A1, 0); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 2: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ ret(); ++ __ nop(); ++ __ nop(); ++ __ nop(); ++ ++ // 3: ++ __ ldl(AT, A0, 0); ++ __ ldl(A2, A0, 8); ++ __ ldl(A3, A0, 16); ++ __ stl(AT, A1, 0); ++ __ stl(A2, A1, 8); ++ __ stl(A3, A1, 16); ++ __ ret(); ++ __ nop(); ++ ++ // 4: ++ __ vldd(F10, A0, 0); ++ __ vstd(F10, A1, 0); ++ __ ret(); ++ } ++ // Arguments: ++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary ++ // ignored ++ // name - stub name string ++ // ++ // Inputs: ++ // c_rarg0 - source array address ++ // c_rarg1 - destination array address ++ // c_rarg2 - element count, treated as ssize_t, can be zero ++ // ++ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, ++ // we let the hardware handle it. The one to eight bytes within words, ++ // dwords or qwords that span cache line boundaries will still be loaded ++ // and stored atomically. ++ // ++ // Side Effects: ++ // disjoint_byte_copy_entry is set to the no-overlap entry point ++ // used by generate_conjoint_byte_copy(). 
++ // ++ address generate_disjoint_byte_copy(bool aligned, Label &small, Label &large, ++ Label &large_aligned, const char * name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ ++ __ cmplt(T0, A2, 33); ++ __ bne(T0, small); ++ ++ generate_disjoint_large_copy_simd("StubRoutines generate_disjoint_byte_copy"); ++ __ ret(); ++ return start; ++ } ++ ++ address generate_disjoint_short_copy(bool aligned, Label &small, Label &large, ++ Label &large_aligned, const char * name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ ++ __ cmpult(T0, A2, 17); ++ __ bne(T0, small); ++ ++ __ slll(A2, A2, 1); ++ generate_disjoint_large_copy_simd("StubRoutines generate_disjoint_short_copy"); ++ __ ret(); ++ return start; ++ } ++// Arguments: ++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary ++ // ignored ++ // name - stub name string ++ // ++ // Inputs: ++ // A0 - source array address ++ // A1 - destination array address ++ // A2 - element count, treated as ssize_t, can be zero ++ // ++ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, ++ // we let the hardware handle it. The one to eight bytes within words, ++ // dwords or qwords that span cache line boundaries will still be loaded ++ // and stored atomically. 
++ // ++ address generate_conjoint_byte_copy(bool aligned, Label &small, Label &large, ++ Label &large_aligned, const char *name) { ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ ++ array_overlap_test2(StubRoutines::jbyte_disjoint_arraycopy(), 0); ++ ++ ++ __ cmpult(T0, A2, 33); ++ __ bne(T0, small); ++ ++ generate_conjoint_large_copy_simd("StubRoutines generate_disjoint_short_copy"); ++ __ ret(); ++ ++ return start; ++ } ++ // Arguments: ++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary ++ // ignored ++ // name - stub name string ++ // ++ // Inputs: ++ // A0 - source array address ++ // A1 - destination array address ++ // A2 - element count, treated as ssize_t, can be zero ++ // ++ // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, ++ // we let the hardware handle it. The one to eight bytes within words, ++ // dwords or qwords that span cache line boundaries will still be loaded ++ // and stored atomically. ++ // ++ // Side Effects: ++ // disjoint_short_copy_entry is set to the no-overlap entry point ++ // used by generate_conjoint_short_copy(). 
  //
  // Short conjoint (memmove-style) fast path: tail-jumps to the disjoint
  // short stub when src/dst don't overlap, to `small` for counts below 17,
  // and otherwise scales the element count to bytes and runs the backward
  // SIMD bulk copy.  `large`/`large_aligned`/`aligned` are unused here.
  address generate_conjoint_short_copy(bool aligned, Label &small, Label &large,
                                       Label &large_aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    __ align(CodeEntryAlignment);
    address start = __ pc();
    array_overlap_test2(StubRoutines::jshort_disjoint_arraycopy(), 1);
    __ cmpult(T0, A2, 17);
    __ bne(T0, small);

    __ slll(A2, A2, 1);  // element count -> byte count
    generate_conjoint_large_copy_simd("StubRoutines generate_conjoint_short_copy");
    __ ret();
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it. The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  // Forward byte copy.  SIMD path: copy single bytes until dst is 32-byte
  // aligned, bulk-copy via copy_core_forward, then copy the byte tail.
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    __ align(CodeEntryAlignment);

    Register src   = T0;
    Register dst   = T1;
    Register count = T3;
    Register tmp1  = AT;
    Register tmp2  = GP;

    address start = __ pc();

    __ move(src, A0);
    __ move(dst, A1);
    __ move(count, A2);

    Label l_align_dst, l_align_src, l_tail_bytes, l_end, l_tail;

    if(UseSimdForward){
      __ beq(count, l_end);             // nothing to do for count == 0

      __ cmple(tmp1, count, 63);
      __ bne(tmp1, l_tail_bytes);       // when count <= 63, don't use simd

      __ BIND(l_align_dst);
      __ and_imm8(tmp1, dst, 31);       // is dst 0mod32?
      __ beq(tmp1, l_align_src);

      __ ldbu(tmp1, src, 0);            // grab 1 byte at a time, until dst is 0mod32
      __ stb(tmp1, dst, 0);
      __ subl(count, count, 1);
      __ addl(dst, dst, 1);
      __ addl(src, src, 1);
      __ beq(R0, l_align_dst);          // unconditional loop back

      __ BIND(l_align_src);
      copy_core_forward(32, src, dst, count, tmp1, tmp2);

      __ BIND(l_tail);
      __ ble(count, l_end);

      // copy tail bytes.
      __ BIND(l_tail_bytes);
      __ ldbu(tmp1, src, 0);
      __ stb(tmp1, dst, 0);
      __ addl(src, src, 1);
      __ addl(dst, dst, 1);
      __ subl(count, count, 1);
      __ bne(count, l_tail_bytes);

      __ BIND(l_end);

    }else{
      generate_disjoint_copy(0, src, dst, count);
    }
    __ ret();

    return start;
  }

  // Emits a forward (low-to-high) scalar copy of `count` BYTES (callers
  // pre-scale the element count).  widthInByte is log2 of the guaranteed
  // element alignment (0 = byte, 1 = short, 2 = int, 3 = long).  The first
  // loop widens the effective alignment one power of two at a time when src
  // and dst agree mod 2^(i+1); the second loop copies with the widest usable
  // access and falls through to progressively narrower tails.
  // Clobbers AT.  `load(i, ...)`/`store(i, ...)` are presumably
  // width-indexed MacroAssembler helpers -- confirm their definition.
  void generate_disjoint_copy(int widthInByte, Register src, Register dst, Register count) {
    // Label lblMissAlignInByte, lblMissAlignInShort, lblMissAlignInWord, lblMissAlignInLong;
    Label lblMissAlign[4];
    // Label lblSkipByte, lblSkipInShort, lblSkipInWord, lblSkipInLong;
    Label lblSkip[4];
    // Label lblCopyByte, lblCopyShort, lblCopyWord, lblCopyLong;
    Label lblCopy[4];

    // Very small copies skip part of the alignment cascade.
//    __ subl(count, 9, AT); //why the number is 9 ?
    if (widthInByte == 0) {__ subl(AT, count, 9); __ ble(AT, lblMissAlign[1]);}
    if (widthInByte == 1) {__ subl(AT, count, 9); __ ble(AT, lblMissAlign[2]);}

    for (int i = widthInByte; i < 3; i++) {
      __ xor_ins(AT, src, dst);
      __ and_imm8(AT, AT, 1 << i);      // if the backward ith bit of src and dst is the same
      __ bne(AT, lblMissAlign[i+1]);    // if arrays don't have the same alignment, ...

      __ and_imm8(AT, src, 1 << i);
      __ beq(AT, lblSkip[i]);           // have same alignment but extra byte/short/int

      __ load(i, AT, 0, src);
      __ store(i, AT, 0, dst);
      __ addl(src, src, 1 << i);
      __ addl(dst, dst, 1 << i);
      __ subl(count, count, 1 << i);

      __ BIND(lblSkip[i]);
    }

    for (int i = 3; i >= widthInByte; i--) { // FasterArrayCopy
      if(i == widthInByte){
        __ beq(count, lblMissAlign[i]);
      }else{
        __ cmplt(AT, count, 1 << i);
        __ bne(AT, lblMissAlign[i]);
      }
      __ BIND(lblCopy[i]);
      __ load(i, AT, 0, src);
      __ store(i, AT, 0, dst);
      __ addl(src, src, 1 << i);
      __ addl(dst, dst, 1 << i);
      __ subl(count, count, 1 << i);
      if(i == widthInByte){
        __ bne(count, lblCopy[i]);
      }else{
        __ subl(AT, count, 1 << i);
        __ bge(AT, lblCopy[i]);
      }
      __ BIND(lblMissAlign[i]);
    }
  }

  // Backward (high-to-low) twin of generate_disjoint_copy for overlapping
  // regions: `src`/`dst` point one past the end of each region and are
  // pre-decremented before every access.  Same widthInByte contract and
  // AT clobber as above.
  void generate_conjoint_copy(int widthInByte, Register src, Register dst, Register count) {
    // Label lblMissAlignInByte, lblMissAlignInShort, lblMissAlignInWord, lblMissAlignInLong;
    Label lblMissAlign[4];
    // Label lblSkipByte, lblSkipInShort, lblSkipInWord, lblSkipInLong;
    Label lblSkip[4];
    // Label lblCopyByte, lblCopyShort, lblCopyWord, lblCopyLong;
    Label lblCopy[4];

    if (widthInByte == 0) {__ subl(AT, count, 9); __ ble(AT, lblMissAlign[1]);}
    if (widthInByte == 1) {__ subl(AT, count, 9); __ ble(AT, lblMissAlign[2]);}

    for (int i = widthInByte; i < 3; i++) {
      __ xor_ins(AT, src, dst);
      __ and_imm8(AT, AT, 1 << i);      // if the backward ith bit of src and dst is the same
      __ bne(AT, lblMissAlign[i+1]);    // if arrays don't have the same alignment, ...

      __ and_imm8(AT, src, 1 << i);
      __ beq(AT, lblSkip[i]);           // have same alignment but extra byte/short/int

      __ subl(src, src, 1 << i);
      __ subl(dst, dst, 1 << i);
      __ load(i, AT, 0, src);
      __ store(i, AT, 0, dst);
      __ subl(count, count, 1 << i);

      __ BIND(lblSkip[i]);
    }

    for (int i = 3; i >= widthInByte; i--) { // FasterArrayCopy
      if(i == widthInByte){
        __ beq(count, lblMissAlign[i]);
      }else{
        __ cmplt(AT, count, 1 << i);
        __ bne(AT, lblMissAlign[i]);
      }

      __ BIND(lblCopy[i]);
      __ subl(src, src, 1 << i);
      __ subl(dst, dst, 1 << i);
      __ load(i, AT, 0, src);
      __ store(i, AT, 0, dst);
      __ subl(count, count, 1 << i);
      if(i == widthInByte){
        __ bne(count, lblCopy[i]);
      }else{
        __ subl(AT, count, 1 << i);
        __ bge(AT, lblCopy[i]);
      }
      __ BIND(lblMissAlign[i]);
    }
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   A0 - source array address
  //   A1 - destination array address
  //   A2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it. The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Backward byte copy for possibly-overlapping arrays; jumps to the
  // disjoint stub when the regions don't overlap.
  address generate_conjoint_byte_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);

    address start = __ pc();

    Label l_exit;
    Label l_copy_byte;
    Label l_align_dst, l_align_src, l_tail_bytes, l_end, l_tail;

    if (entry != NULL) {
      *entry = start;
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    address nooverlap_target = aligned ?
        StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
        StubRoutines::jbyte_disjoint_arraycopy();

    array_overlap_test(nooverlap_target, 0);

    Register src       = A0;  // source array address
    Register dst       = A1;  // destination array address
    Register count     = A2;  // elements count
    Register end_src   = T3;  // source array end address
    Register end_dst   = T0;  // destination array end address
    Register end_count = T1;  // destination array end address
    Register tmp1      = AT;
    Register tmp2      = GP;

    // copy from high to low
    __ move(end_count, count);
    __ addl(end_src, src, end_count);
    __ addl(end_dst, dst, end_count);

    if(UseSimdBackward){

      __ beq(count, l_end);             // nothing to do for count == 0

      __ cmple(tmp1, count, 63);
      __ bne(tmp1, l_tail_bytes);       // when count <= 63, don't use simd

      __ BIND(l_align_dst);
      __ and_imm8(tmp1, end_dst, 31);   // is dst 0mod32?
      __ beq(tmp1, l_align_src);

      __ ldbu(tmp2, end_src, -1);       // grab 1 bytes at a time, until dst is 0mod32
      __ stb(tmp2, end_dst, -1);
      __ subl(count, count, 1);
      __ subl(end_dst, end_dst, 1);
      __ subl(end_src, end_src, 1);
      __ beq(R0, l_align_dst);          // unconditional loop back

      __ BIND(l_align_src);
      copy_core_backward(32, end_src, end_dst, count, tmp1, tmp2);

      __ BIND(l_tail);
      __ ble(count, l_end);

      __ BIND(l_tail_bytes);
      __ ldbu(tmp1, end_src, -1);
      __ stb(tmp1, end_dst, -1);
      __ subl(end_src, end_src, 1);
      __ subl(end_dst, end_dst, 1);
      __ subl(count, count, 1);
      __ bne(count, l_tail_bytes);

      __ BIND(l_end);

    } else {
      generate_conjoint_copy(0, end_src, end_dst, end_count);
    }
    __ ret();
    return start;
  }
  //
  // Generate 'unsafe' array copy stub
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
++ // ++ // Input: ++ // c_rarg0 - source array address ++ // c_rarg1 - destination array address ++ // c_rarg2 - byte count, treated as ssize_t, can be zero ++ // ++ // Examines the alignment of the operands and dispatches ++ // to a long, int, short, or byte copy loop. ++ // ++ address generate_unsafe_copy(const char *name, ++ address byte_copy_entry, ++ address short_copy_entry, ++ address int_copy_entry, ++ address long_copy_entry) { ++ Label L_long_aligned, L_int_aligned, L_short_aligned; ++ Register s = A0, d = A1, count = A2; ++ ++ __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); ++ address start = __ pc(); ++ //__ enter(); // required for proper stackwalking of RuntimeStub frame ++ ++ // bump this on entry, not on exit: ++ // inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); ++ ++ __ bis(AT, s, d); ++ __ bis(AT, AT, count); ++ if (PrefetchUnsafeCopyInBytes > 0) ++ __ fillcs(s, PrefetchUnsafeCopyInBytes); ++ ++ __ and_imm8(AT, AT, BytesPerLong - 1); ++ __ beq(AT, L_long_aligned); ++ __ and_imm8(AT, AT, BytesPerInt - 1); ++ __ beq(AT, L_int_aligned); ++ __ and_imm8(AT, AT, BytesPerShort - 1); ++ __ beq(AT, L_short_aligned); ++ __ beq_a(R0, byte_copy_entry); ++ ++ __ BIND(L_short_aligned); ++ __ srll(count, count, LogBytesPerShort); // size => short_count ++ __ beq_a(R0, short_copy_entry); ++ __ BIND(L_int_aligned); ++ __ srll(count, count, LogBytesPerInt); // size => int_count ++ __ beq_a(R0, int_copy_entry); ++ __ BIND(L_long_aligned); ++ __ srll(count, count, LogBytesPerLong); // size => long_count ++ __ beq_a(R0, long_copy_entry); ++ ++ return start; ++ } ++ // ++ // Generate 'unsafe' array copy stub ++ // Though just as safe as the other stubs, it takes an unscaled ++ // size_t argument instead of an element count. 
++ // ++ // Input: ++ // A0 - source array address ++ // A1 - destination array address ++ // A2 - byte count, treated as ssize_t, can be zero ++ // ++ // Examines the alignment of the operands and dispatches ++ // to a long, int, short, or byte copy loop. ++ // ++ address generate_unsafe_copy(const char *name) { ++ Label L_long_aligned, L_int_aligned, L_short_aligned; ++ Register s = A0, d = A1, count = A2; ++ ++ __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", name); ++ address start = __ pc(); ++ ++ __ bis(AT, s, d); ++ __ bis(AT, AT, count); ++ ++ __ and_imm8(AT, AT, BytesPerLong-1); ++ __ beq(AT, L_long_aligned); ++ __ and_imm8(AT, AT, BytesPerInt-1); ++ __ beq(AT, L_int_aligned); ++ __ and_imm8(AT, AT, BytesPerShort-1); ++ __ beq(AT, L_short_aligned); ++ __ beq_a(R0, StubRoutines::_jbyte_arraycopy); ++ ++ __ bind(L_short_aligned); ++ __ srll(count, count, LogBytesPerShort); // size => short_count ++ __ beq_a(R0, StubRoutines::_jshort_arraycopy); ++ __ bind(L_int_aligned); ++ __ srll(count, count, LogBytesPerInt); // size => int_count ++ __ beq_a(R0, StubRoutines::_jint_arraycopy); ++ __ bind(L_long_aligned); ++ __ srll(count, count, LogBytesPerLong); // size => long_count ++ __ beq_a(R0, StubRoutines::_jlong_arraycopy); ++ ++ return start; ++ } ++ // Generate stub for disjoint short copy. If "aligned" is true, the ++ // "from" and "to" addresses are assumed to be heapword aligned. ++ // ++ // Arguments for generated stub: ++ // from: A0 ++ // to: A1 ++ // elm.count: A2 treated as signed ++ // one element: 2 bytes ++ // ++ // Strategy for aligned==true: ++ // ++ // If length <= 9: ++ // 1. copy 1 elements at a time (l_5) ++ // ++ // If length > 9: ++ // 1. copy 4 elements at a time until less than 4 elements are left (l_7) ++ // 2. copy 2 elements at a time until less than 2 elements are left (l_6) ++ // 3. copy last element if one was left in step 2. 
  //
  //
  // Strategy for aligned==false:
  //
  //  If length <= 9: same as aligned==true case
  //
  //  If length > 9:
  //     1. continue with step 7. if the alignment of from and to mod 4
  //        is different.
  //     2. align from and to to 4 bytes by copying 1 element if necessary
  //     3. at l_2 from and to are 4 byte aligned; continue with
  //        6. if they cannot be aligned to 8 bytes because they have
  //        got different alignment mod 8.
  //     4. at this point we know that both, from and to, have the same
  //        alignment mod 8, now copy one element if necessary to get
  //        8 byte alignment of from and to.
  //     5. copy 4 elements at a time until less than 4 elements are
  //        left; depending on step 3. all load/stores are aligned.
  //     6. copy 2 elements at a time until less than 2 elements are
  //        left. (l_6)
  //     7. copy 1 element at a time. (l_5)
  //     8. copy last element if one was left in step 6. (l_1)

  // Forward short copy.  SIMD path: copy 2-byte units until dst is 32-byte
  // aligned, bulk-copy via copy_core_forward, then copy the element tail.
  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    __ align(CodeEntryAlignment);

    Register src   = T0;
    Register dst   = T1;
    Register count = T3;
    Register tmp1  = GP;
    Register tmp2  = AT;

    // NOTE(review): tmp4/tmp5/tmp6 appear unused in this routine.
    Register tmp4 = T11;
    Register tmp5 = T12;
    Register tmp6 = T2;

    address start = __ pc();

    __ move(src, A0);
    __ move(dst, A1);
    __ move(count, A2);

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10, l_11, l_12, l_13, l_14;
    Label l_debug;
    Label l_align_dst, l_align_src, l_tail_2_bytes, l_end, l_tail;
    // don't try anything fancy if arrays don't have many elements

    if(UseSimdForward){

      __ cmple(tmp1, count, 31);   // if count < 32 (bytes < 64), then copy 2 bytes at a time
      __ bne(tmp1, l_tail);

      __ BIND(l_align_dst);
      __ and_imm8(tmp1, dst, 31);  // is dst 0mod32?
      __ beq(tmp1, l_align_src);

      __ ldhu(tmp2, src, 0);       // one element at a time until dst is 0mod32
      __ subl(count, count, 1);
      __ sth(tmp2, dst, 0);
      __ addl(src, src, 2);
      __ addl(dst, dst, 2);

      __ beq(R0, l_align_dst);     // unconditional loop back

      __ BIND(l_align_src);
      copy_core_forward(16, src, dst, count, tmp1, tmp2);

      __ BIND(l_tail);
      __ ble(count, l_end);

      __ BIND(l_tail_2_bytes);
      __ ldhu(tmp1, src, 0);
      __ sth(tmp1, dst, 0);
      __ addl(src, src, 2);
      __ addl(dst, dst, 2);
      __ subl(count, count, 1);
      __ bne(count, R0, l_tail_2_bytes);

      __ BIND(l_end);

    } else {
      __ slll(count, count, 1);    // element count -> byte count
      generate_disjoint_copy(1, src, dst, count);
    }
    __ ret();

    // unreachable: both paths above end in __ ret()
    __ stop("generate_disjoint_short_copy should not reach here");
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it. The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Backward short copy for possibly-overlapping arrays.
  address generate_conjoint_short_copy(bool aligned, address *entry, const char *name) {
    Label l_tail_2_bytes, l_align_dst, l_align_src, l_tail, l_end, l_exit, l_copy_2_bytes;
    StubCodeMark mark(this, "StubRoutines", name);
    __ align(CodeEntryAlignment);
    address start = __ pc();
    address nooverlap_target = aligned ?
        StubRoutines::arrayof_jshort_disjoint_arraycopy() :
        StubRoutines::jshort_disjoint_arraycopy();

    array_overlap_test(nooverlap_target, 1);

    if (entry != NULL) {
      *entry = start;
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    Register end_src = T3;
    Register end_dst = T0;
    Register count   = T1;
    Register tmp1    = AT;
    Register tmp2    = GP;

    __ move(T1, A2);
    __ move(T3, A0);
    __ move(T0, A1);

    if(UseSimdBackward){

      __ beq(count, R0, l_end);            // nothing to do for count == 0

      __ sllw_signed(tmp1, T1, Address::times_2);  // byte length
      __ addl(end_src, T3, tmp1);
      __ addl(end_dst, T0, tmp1);

      __ cmple(tmp1, count, 31);
      __ bne(tmp1, l_tail_2_bytes);        // when count <= 31, don't use simd

      __ BIND(l_align_dst);
      __ and_imm8(tmp1, end_dst, 31);      // is dst 0mod32?
      __ beq(tmp1, l_align_src);

      __ ldhu(tmp2, end_src, -2);          // grab 2 bytes at a time, until dst is 0mod32
      __ sth(tmp2, end_dst, -2);
      __ subl(count, count, 1);
      __ subl(end_dst, end_dst, 2);
      __ subl(end_src, end_src, 2);
      __ beq(R0, l_align_dst);             // unconditional loop back

      __ BIND(l_align_src);
      copy_core_backward(16, end_src, end_dst, count, tmp1, tmp2);

      __ BIND(l_tail);
      __ ble(count, l_end);

      __ BIND(l_tail_2_bytes);
      __ ldhu(tmp1, end_src, -2);
      __ sth(tmp1, end_dst, -2);
      __ subl(end_src, end_src, 2);
      __ subl(end_dst, end_dst, 2);
      __ subl(count, count, 1);
      __ bne(count, R0, l_tail_2_bytes);

      __ BIND(l_end);

    }else{
      __ slll(count, count, 1);            // element count -> byte count
      __ addl(end_src, T3, count);
      __ addl(end_dst, T0, count);
      generate_conjoint_copy(1, end_src, end_dst, count);
    }
    __ ret();
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
address ++ // c_rarg2 - element count, treated as ssize_t, can be zero ++ // ++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let ++ // the hardware handle it. The two dwords within qwords that span ++ // cache line boundaries will still be loaded and stored atomicly. ++ // ++ // Side Effects: ++ // disjoint_int_copy_entry is set to the no-overlap entry point ++ // used by generate_conjoint_int_oop_copy(). ++ // ++ address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name, bool dest_uninitialized = false) { ++ Label l_tail_4_bytes, l_align_dst, l_align_src, l_align_simd, l_misalign, l_misalign_simd, l_tail, l_before_tail, l_end; ++ StubCodeMark mark(this, "StubRoutines", name); ++ ++ Register src = T3; ++ Register dst = T0; ++ Register count = T1; ++ Register tmp1 = AT; ++ Register tmp2 = GP; ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ __ move(count, A2); ++ __ move(src, A0); ++ __ move(dst, A1); ++ ++ if (is_oop) { ++ gen_write_ref_array_pre_barrier(A1, A2, dest_uninitialized); ++ } ++ ++ if(UseSimdForward){ ++ ++ __ cmple(tmp1, count, 15); ++ __ bne(tmp1, l_tail); ++ ++ __ BIND(l_align_dst); ++ __ and_imm8(tmp1, dst, 31); ++ __ beq(tmp1, l_align_src); ++ ++ __ ldw(tmp1, src, 0); ++ __ subl(count, count, 1); ++ __ stw(tmp1, dst, 0); ++ __ addl(src, src, 4); ++ __ addl(dst, dst, 4); ++ __ beq(R0, l_align_dst); ++ ++ __ BIND(l_align_src); ++ copy_core_forward(8, src, dst, count, tmp1, tmp2); ++ ++ __ BIND(l_tail); ++ __ ble(count, l_end); ++ ++ __ BIND(l_tail_4_bytes); ++ __ ldw(tmp2, src, 0); ++ __ stw(tmp2, dst, 0); ++ __ addl(src, src, 4); ++ __ addl(dst, dst, 4); ++ __ subl(count, count, 1); ++ __ bne(count, R0, l_tail_4_bytes); ++ ++ ++ __ BIND(l_end); ++ ++ } else { ++ __ slll(count, count, 2); ++ generate_disjoint_copy(2, src, dst, count); ++ } ++ if (is_oop) { ++ gen_write_ref_array_post_barrier(A1, A2, T1); ++ } ++ __ ret(); ++ return start; ++ } ++ ++ // Arguments: ++ // aligned - true => Input 
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it. The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Backward int/oop copy for possibly-overlapping arrays; jumps to the
  // matching disjoint stub when the regions don't overlap.
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address *entry, const char *name, bool dest_uninitialized = false) {
    Label l_2, l_4;
    Label l_tail_4_bytes, l_align_dst, l_align_src, l_tail, l_end;
    StubCodeMark mark(this, "StubRoutines", name);
    __ align(CodeEntryAlignment);
    address start = __ pc();
    address nooverlap_target;

    if (entry != NULL) {
      *entry = start;
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    if (is_oop) {
      nooverlap_target = aligned ?
          StubRoutines::arrayof_oop_disjoint_arraycopy() :
          StubRoutines::oop_disjoint_arraycopy();
    }else {
      nooverlap_target = aligned ?
          StubRoutines::arrayof_jint_disjoint_arraycopy() :
          StubRoutines::jint_disjoint_arraycopy();
    }

    array_overlap_test(nooverlap_target, 2);

    Register end_src = T3;
    Register end_dst = T0;
    Register count   = T1;
    Register tmp1    = AT;
    Register tmp2    = GP;

    if (is_oop) {
      gen_write_ref_array_pre_barrier(A1, A2, dest_uninitialized);
    }

    __ move(T1, A2);
    __ move(T3, A0);
    __ move(T0, A1);

    // T3: source array address
    // T0: destination array address
    // T1: element count

    if(UseSimdBackward){

      __ beq(count, R0, l_end);           // nothing to do for count == 0

      __ sllw_signed(tmp1, T1, Address::times_4);  // byte length
      __ addl(end_src, T3, tmp1);
      __ addl(end_dst, T0, tmp1);

      __ cmple(tmp1, count, 15);
      __ bne(tmp1, l_tail_4_bytes);       // when count <= 15, don't use simd

      __ BIND(l_align_dst);
      __ and_imm8(tmp1, end_dst, 31);     // is dst 0mod32?
      __ beq(tmp1, l_align_src);

      __ ldw(tmp1, end_src, -4);          // grab 4 bytes at a time, until dst is 0mod32
      __ stw(tmp1, end_dst, -4);
      __ subl(count, count, 1);
      __ subl(end_dst, end_dst, 4);
      __ subl(end_src, end_src, 4);
      __ beq(R0, l_align_dst);            // unconditional loop back

      __ BIND(l_align_src);
      copy_core_backward(8, end_src, end_dst, count, tmp1, tmp2);

      __ BIND(l_tail);
      __ ble(count, l_end);

      __ BIND(l_tail_4_bytes);
      __ ldw(tmp1, end_src, -4);
      __ stw(tmp1, end_dst, -4);
      __ subl(end_src, end_src, 4);
      __ subl(end_dst, end_dst, 4);
      __ subl(count, count, 1);
      __ bne(count, R0, l_tail_4_bytes);

      __ BIND(l_end);

    }else{
      __ slll(count, count, 2);           // element count -> byte count
      __ addl(end_src, T3, count);
      __ addl(end_dst, T0, count);
      generate_conjoint_copy(2, end_src, end_dst, count);
    }

    if (is_oop) {
      gen_write_ref_array_post_barrier(A1, A2, T1);
    }
    __ ret();
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
string ++ // ++ // Inputs: ++ // c_rarg0 - source array address ++ // c_rarg1 - destination array address ++ // c_rarg2 - element count, treated as ssize_t, can be zero ++ // ++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let ++ // the hardware handle it. The two dwords within qwords that span ++ // cache line boundaries will still be loaded and stored atomically. ++ // ++ // Side Effects: ++ // disjoint_long_copy_entry is set to the no-overlap entry point ++ // used by generate_conjoint_long_oop_copy(). ++ // ++ address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name, bool dest_uninitialized = false) { ++ Label l_3, l_4; ++ Label l_tail_8_bytes, l_align_dst, l_align_src, l_tail, l_end; ++ ++ Register src = T3; ++ Register dst = T0; ++ Register count = T1; ++ Register tmp1 = AT; ++ Register tmp2 = GP; ++ ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ ++ if (is_oop) { ++ gen_write_ref_array_pre_barrier(A1, A2, dest_uninitialized); ++ } ++ ++ ++ __ move(T1, A2); ++ __ move(T3, A0); ++ __ move(T0, A1); ++ ++ // T3: source array address ++ // T0: destination array address ++ // T1: element count ++ if(UseSimdForward){ ++ __ align(16); ++ __ beq(count, R0, l_end); ++ ++ __ cmple(tmp1, count, 7); ++ __ bne(tmp1, l_tail_8_bytes); //when count <= 7, don't use simd ++ ++ __ BIND(l_align_dst); ++ __ and_imm8(tmp1, dst, 31); //is dst 0mod32?
++ __ beq(tmp1, l_align_src); ++ ++ __ ldl(tmp1, src, 0); //grab 8 bytes at a time, until dst is 0mod32 ++ __ stl(tmp1, dst, 0); ++ __ subl(count, count, 1); ++ __ addl(dst, dst, 8); ++ __ addl(src, src, 8); ++ __ beq(R0, l_align_dst); ++ ++ __ BIND(l_align_src); ++ copy_core_forward(4, src, dst, count, tmp1, tmp2); ++ ++ __ BIND(l_tail); ++ __ ble(count, l_end); ++ ++ __ BIND(l_tail_8_bytes); ++ __ ldl(tmp1, src, 0); ++ __ stl(tmp1, dst, 0); ++ __ addl(src, src, 8); ++ __ addl(dst, dst, 8); ++ __ subl(count, count, 1); ++ __ bne(count, R0, l_tail_8_bytes); ++ ++ __ BIND(l_end); ++ ++ }else{ ++ __ slll(count, count, 3); ++ generate_disjoint_copy(3, src, dst, count); ++ } ++ if (is_oop) { ++ gen_write_ref_array_post_barrier(A1, A2, T1); ++ } ++ __ ret(); ++ return start; ++ } ++ ++ // Arguments: ++ // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary ++ // ignored ++ // is_oop - true => oop array, so generate store check code ++ // name - stub name string ++ // ++ // Inputs: ++ // c_rarg0 - source array address ++ // c_rarg1 - destination array address ++ // c_rarg2 - element count, treated as ssize_t, can be zero ++ // ++ // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let ++ // the hardware handle it. The two dwords within qwords that span ++ // cache line boundaries will still be loaded and stored atomically. ++ // ++ address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name, bool dest_uninitialized = false) { ++ Label l_2, l_4; ++ Label l_tail_8_bytes, l_align_dst, l_align_src, l_tail, l_end; ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ address nooverlap_target; ++ ++ if (is_oop) { ++ nooverlap_target = aligned ? ++ StubRoutines::arrayof_oop_disjoint_arraycopy() : ++ StubRoutines::oop_disjoint_arraycopy(); ++ }else { ++ nooverlap_target = aligned ?
++ StubRoutines::arrayof_jlong_disjoint_arraycopy() : ++ StubRoutines::jlong_disjoint_arraycopy(); ++ } ++ ++ array_overlap_test(nooverlap_target, 3); ++ ++ Register end_src = T3; ++ Register end_dst = T0; ++ Register count = T1; ++ Register tmp1 = AT; ++ Register tmp2 = GP; ++ ++ if (is_oop) { ++ gen_write_ref_array_pre_barrier(A1, A2, dest_uninitialized); ++ } ++ ++ ++ __ move(T1, A2); ++ __ move(T3, A0); ++ __ move(T0, A1); ++ ++ if(UseSimdLongOop){ ++ __ align(16); ++ __ beq(count, R0, l_end); ++ ++ __ sllw_signed(tmp1, T1, Address::times_8); ++ __ addl(end_src, T3, tmp1); ++ __ addl(end_dst, T0, tmp1); ++ ++ __ cmple(tmp1, count, 7); ++ __ bne(tmp1, l_tail_8_bytes); //when count <= 7, don't use simd ++ ++ __ BIND(l_align_dst); ++ __ and_imm8(tmp1, end_dst, 31); //is dst 0mod32? ++ __ beq(tmp1, l_align_src); ++ ++ __ ldl(tmp1, end_src, -8); //grab 8 bytes at a time, until dst is 0mod32 ++ __ stl(tmp1, end_dst, -8); ++ __ subl(count, count, 1); ++ __ subl(end_dst, end_dst, 8); ++ __ subl(end_src, end_src, 8); ++ __ beq(R0, l_align_dst); ++ ++ __ BIND(l_align_src); ++ copy_core_backward(4, end_src, end_dst, count, tmp1, tmp2); ++ ++ __ BIND(l_tail); ++ __ ble(count, l_end); ++ ++ __ BIND(l_tail_8_bytes); ++ __ ldl(tmp1, end_src, -8); ++ __ stl(tmp1, end_dst, -8); ++ __ subl(end_src, end_src, 8); ++ __ subl(end_dst, end_dst, 8); ++ __ subl(count, count, 1); ++ __ bne(count, R0, l_tail_8_bytes); ++ ++ __ BIND(l_end); ++ ++ }else{ ++ __ slll(count, count, Address::times_8); ++ __ addl(end_src, T3, count); ++ __ addl(end_dst, T0, count); ++ generate_conjoint_copy(3, end_src, end_dst, count); ++ } ++ ++ if (is_oop) { ++ gen_write_ref_array_post_barrier(A1, A2, T1); ++ } ++ __ ret(); ++ return start; ++ } ++ ++ //FIXME ++ address generate_disjoint_long_copy(bool aligned, const char *name) { ++ Label l_1, l_2; ++ Label l_tail_8_bytes, l_align_dst, l_align_src, l_tail, l_end; ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address 
start = __ pc(); ++ ++ Register src = T3; ++ Register dst = T0; ++ Register count = T1; ++ Register tmp1 = AT; ++ Register tmp2 = GP; ++ ++ __ move(T1, A2); ++ __ move(T3, A0); ++ __ move(T0, A1); ++ ++ if(UseSimdForward){ ++ __ align(16); ++ __ beq(count, R0, l_end); ++ ++ __ cmple(tmp1, count, 7); ++ __ bne(tmp1, l_tail_8_bytes); //when count <= 7, don't use simd ++ ++ __ BIND(l_align_dst); ++ __ and_imm8(tmp1, dst, 31); //is dst 0mod32? ++ __ beq(tmp1, l_align_src); ++ ++ __ ldl(tmp1, src, 0); //grab 8 bytes at a time, until dst is 0mod32 ++ __ stl(tmp1, dst, 0); ++ __ subl(count, count, 1); ++ __ addl(dst, dst, 8); ++ __ addl(src, src, 8); ++ __ ble(count, l_end); ++ __ beq(R0, l_align_dst); ++ ++ __ BIND(l_align_src); ++ copy_core_forward(4, src, dst, count, tmp1, tmp2); ++ ++ __ BIND(l_tail); ++ __ ble(count, l_end); ++ ++ __ BIND(l_tail_8_bytes); ++ __ ldl(tmp1, src, 0); ++ __ stl(tmp1, dst, 0); ++ __ addl(src, src, 8); ++ __ addl(dst, dst, 8); ++ __ subl(count, count, 1); ++ __ bne(count, R0, l_tail); ++ ++ __ BIND(l_end); ++ ++ }else{ ++ __ slll(count, count, 3); ++ generate_disjoint_copy(3, src, dst, count); ++ } ++ __ ret(); ++ return start; ++ } ++ ++ ++ address generate_conjoint_long_copy(bool aligned, address *entry, const char *name) { ++ Label l_1, l_2; ++ Label l_tail_8_bytes, l_align_dst, l_align_src, l_tail, l_end; ++ ++ StubCodeMark mark(this, "StubRoutines", name); ++ __ align(CodeEntryAlignment); ++ address start = __ pc(); ++ ++ if (entry != NULL) { ++ *entry = start; ++ // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) ++ BLOCK_COMMENT("Entry:"); ++ } ++ ++ address nooverlap_target = aligned ? 
++ StubRoutines::arrayof_jlong_disjoint_arraycopy() : ++ StubRoutines::jlong_disjoint_arraycopy(); ++ array_overlap_test(nooverlap_target, 3); ++ ++ Register end_src = T3; ++ Register end_dst = T0; ++ Register count = T1; ++ Register tmp1 = AT; ++ Register tmp2 = GP; ++ ++ ++ __ move(T1, A2); ++ __ move(T3, A0); ++ __ move(T0, A1); ++ ++ if(UseSimdBackward){ ++ __ align(16); ++ __ beq(count, R0, l_end); ++ ++ __ sllw_signed(tmp1, T1, Address::times_8); ++ __ addl(end_src, T3, tmp1); ++ __ addl(end_dst, T0, tmp1); ++ ++ __ cmple(tmp1, count, 7); ++ __ bne(tmp1, l_tail_8_bytes); //when count <= 7, don't use simd ++ ++ __ BIND(l_align_dst); ++ __ and_imm8(tmp1, end_dst, 31); //is dst 0mod32? ++ __ beq(tmp1, l_align_src); ++ ++ __ ldl(tmp1, end_src, -8); //grab 8 bytes at a time, until dst is 0mod32 ++ __ stl(tmp1, end_dst, -8); ++ __ subl(count, count, 1); ++ __ subl(end_dst, end_dst, 8); ++ __ subl(end_src, end_src, 8); ++ __ ble(count, l_end); ++ __ beq(R0, l_align_dst); ++ ++ __ BIND(l_align_src); ++ copy_core_backward(4, end_src, end_dst, count, tmp1, tmp2); ++ ++ __ BIND(l_tail); ++ __ ble(count, l_end); ++ ++ __ BIND(l_tail_8_bytes); ++ __ ldl(tmp1, end_src, -8); ++ __ stl(tmp1, end_dst, -8); ++ __ subl(end_src, end_src, 8); ++ __ subl(end_dst, end_dst, 8); ++ __ subl(count, count, 1); ++ __ bne(count, R0, l_tail_8_bytes); ++ ++ __ BIND(l_end); ++ ++ }else{ ++ __ slll(count, count, Address::times_8); ++ __ addl(end_src, T3, count); ++ __ addl(end_dst, T0, count); ++ generate_conjoint_copy(3, end_src, end_dst, count); ++ } ++ __ ret(); ++ return start; ++ } ++ ++ void copy_core_forward(int limit, Register src, Register dst, Register count, Register tmp1, Register tmp2){ ++ Label l_misalign, l_misalign_simd, l_align_simd, l_before_tail, l_exit; ++ ++ ++ __ and_imm8(tmp1, src, 31); ++ __ beq(tmp1, l_align_simd); ++ ++ __ BIND(l_misalign); ++ __ and_imm8(tmp1, src, 31); //from low-5-bit = src mod 32 ++ __ slll(tmp1, tmp1, 3); ++ __ ifmovs(F15, tmp1); ++ __ ldi(tmp2, 
R0, 256); ++ __ subl(tmp1, tmp2, tmp1); ++ __ ifmovs(F17, tmp1); ++ __ andnot(tmp1, src, 31); ++ __ vldd(F10, tmp1, 0); //load 32 bytes from src ++ ++ __ BIND(l_misalign_simd); ++ __ srlow(F12, F10, F15);//get high field bytes of 32 bytes ++ __ vldd(F10, tmp1, 32); //load next 32 bytes from src+32 ++ __ sllow(F13, F10, F17);//get low field bytes of 32 bytes ++ __ vlog(0xfc, F12, F13, F31, F12); //merge F12, F13, into F12 ++ __ vstd(F12, dst, 0); ++ ++ __ addl(tmp1, tmp1, 32); ++ __ addl(dst, dst, 32); ++ __ subl(count, count, limit); ++ ++ __ cmple(tmp2, count, limit-1); //At least one more trip? ++ __ beq(tmp2, l_misalign_simd); ++ __ beq(R0, l_before_tail); ++ ++ __ BIND(l_align_simd); ++ __ vldd(F10, src, 0); ++ __ vstd(F10, dst, 0); ++ __ subl(count, count, limit); ++ __ addl(src, src, 32); ++ __ addl(dst, dst, 32); ++ __ cmple(tmp1, count, limit-1); //while count >=32, do simd ++ __ beq(tmp1, l_align_simd); ++ __ beq(R0, l_exit); ++ ++ __ BIND(l_before_tail); ++ __ and_imm8(src, src, 31); ++ __ addl(src, tmp1, src); ++ ++ __ BIND(l_exit); ++ } ++ ++ void copy_core_backward(int limit, Register end_src, Register end_dst, Register count, Register tmp1, Register tmp2){ ++ Label l_misalign, l_misalign_simd, l_align_simd, l_before_tail, l_exit; ++ ++ __ and_imm8(tmp1, end_src, 31); ++ __ beq(tmp1, l_align_simd); ++ ++ __ BIND(l_misalign); ++ __ and_imm8(tmp1, end_src, 31); //from low-5-bit = src mod 32 ++ __ slll(tmp1, tmp1, 3); ++ __ ifmovs(F15, tmp1); ++ __ ldi(tmp2, R0, 256); ++ __ subl(tmp1, tmp2, tmp1); ++ __ ifmovs(F17, tmp1); ++ __ andnot(tmp1, end_src, 31); ++ __ vldd(F10, tmp1, 0); //load 32 bytes from src ++ ++ __ BIND(l_misalign_simd); ++ __ sllow(F13, F10, F17);//get low field bytes of 32 bytes ++ __ vldd(F10, tmp1, -32); //load next 32 bytes from src+32 ++ __ srlow(F12, F10, F15);//get high field bytes of 32 bytes ++ __ vlog(0xfc, F12, F13, F31, F12); //merge F12, F13, into F12 ++ __ vstd(F12, end_dst, -32); ++ ++ __ subl(tmp1, tmp1, 32); ++ __
subl(end_dst, end_dst, 32); ++ __ subl(count, count, limit); ++ ++ __ cmple(tmp2, count, limit-1); //At least one more trip? ++ __ beq(tmp2, l_misalign_simd); ++ __ beq(R0, l_before_tail); ++ ++ __ BIND(l_align_simd); ++ __ vldd(F10, end_src, -32); ++ __ vstd(F10, end_dst, -32); ++ __ subl(count, count, limit); ++ __ subl(end_src, end_src, 32); ++ __ subl(end_dst, end_dst, 32); ++ __ cmple(tmp1, count, limit-1); //while count >=32, do simd ++ __ beq(tmp1, l_align_simd); ++ __ beq(R0, l_exit); ++ ++ __ BIND(l_before_tail); ++ __ and_imm8(end_src, end_src, 31); ++ __ addl(end_src, tmp1, end_src); ++ ++ __ BIND(l_exit); ++ } ++ ++ void generate_arraycopy_stubs() { ++ address entry; ++ address entry_jbyte_arraycopy; ++ address entry_jshort_arraycopy; ++ address entry_jint_arraycopy; ++ address entry_oop_arraycopy; ++ address entry_jlong_arraycopy; ++ address entry_checkcast_arraycopy; ++ Label byte_small_copy, short_small_copy, int_small_copy, long_small_copy; ++ Label disjoint_large_copy, conjoint_large_copy, disjoint_large_copy_simd, conjoint_large_copy_simd; ++ generate_disjoint_large_copy_simd("disjoint_large_copy_simd"); ++ generate_disjoint_large_copy("disjoint_large_copy"); ++ generate_conjoint_large_copy_simd("disjoint_large_copy_simd"); ++ generate_conjoint_large_copy("disjoint_large_copy"); ++ generate_byte_small_copy(byte_small_copy, "jbyte_small_copy"); ++ generate_short_small_copy(short_small_copy, "jshort_small_copy"); ++// generate_int_small_copy(int_small_copy, "jint_small_copy"); ++// generate_long_small_copy(long_small_copy, "jlong_small_copy"); ++ if (UseCompressedOops) { ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, ++ "oop_disjoint_arraycopy"); ++ StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, &entry_jint_arraycopy, ++ "oop_arraycopy"); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, ++ "oop_disjoint_arraycopy_uninit", true); ++ 
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, &entry_jint_arraycopy, ++ "oop_arraycopy_uninit", true); ++ } else { ++ StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, ++ "oop_disjoint_arraycopy"); ++ StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, ++ "oop_arraycopy"); ++ StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, ++ "oop_disjoint_arraycopy_uninit", true); ++ StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, ++ "oop_arraycopy_uninit", true); ++ } ++ if(SolveAlignment) { ++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); ++ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); ++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, &entry_jbyte_arraycopy, "jbyte_arraycopy"); ++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, &entry_jshort_arraycopy, "jshort_arraycopy"); ++ } else { ++ StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, byte_small_copy, disjoint_large_copy_simd, disjoint_large_copy, "jbyte_disjoint_arraycopy"); ++ StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, short_small_copy, disjoint_large_copy_simd, disjoint_large_copy, "jshort_disjoint_arraycopy"); ++ StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, byte_small_copy, conjoint_large_copy_simd, conjoint_large_copy, "jbyte_arraycopy"); ++ StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, short_small_copy, conjoint_large_copy_simd, conjoint_large_copy, "jshort_arraycopy"); ++ } ++ StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy"); ++ StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, 
"jlong_disjoint_arraycopy"); ++ StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, &entry_jint_arraycopy, "jint_arraycopy"); ++ StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, &entry_jlong_arraycopy, "jlong_arraycopy"); ++ ++ // We don't generate specialized code for HeapWord-aligned source ++ // arrays, so just use the code we've already generated ++ StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; ++ StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; ++ ++ StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; ++ StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; ++ ++ StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; ++ StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; ++ ++ StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; ++ StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; ++ ++ StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; ++ StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; ++ ++ StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; ++ StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; ++ if(UseUnsafeCopyIntrinsic){ ++ StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); ++ } ++ } ++ ++ // add a function to implement SafeFetch32 and SafeFetchN ++ void generate_safefetch(const char* name, int size, address* entry, ++ address* fault_pc, address* continuation_pc) { ++ // safefetch signatures: ++ // int SafeFetch32(int* adr, int errValue); ++ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); ++ // ++ // arguments: ++ // A0 = adr ++ // A1 = errValue ++ // ++ // result: ++ // PPC_RET = *adr or errValue ++ 
++ StubCodeMark mark(this, "StubRoutines", name); ++ ++ // Entry point, pc or function descriptor. ++ *entry = __ pc(); ++ ++ // Load *adr into A1, may fault. ++ *fault_pc = __ pc(); ++ switch (size) { ++ case 4: ++ // int32_t ++ __ ldw(A1, A0, 0); ++ break; ++ case 8: ++ // int64_t ++ __ ldl(A1, A0, 0); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ // return errValue or *adr ++ *continuation_pc = __ pc(); ++ __ addl(V0,A1,R0); ++ __ ret(); ++ } ++ ++ /** ++ * Arguments: ++ * ++ * Inputs: ++ * A0 - int crc ++ * A1 - byte* buf ++ * A2 - int length ++ * ++ * Output: ++ * V0 - int crc result ++ * ++ */ ++ address generate_updateBytesCRC32() { ++ assert(UseCRC32Intrinsics, "what are we doing here?"); ++ ++ __ align(CodeEntryAlignment); ++ StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); ++ ++ address start = __ pc(); ++ ++ const Register crc = A0; // crc ++ const Register buf = A1; // source java byte array address ++ const Register len = A2; // length ++ const Register table = A3; // crc_table address ++ const Register tmp = A4; ++ const Register tmp3 = A5; ++ ++ BLOCK_COMMENT("Entry:"); ++ __ enter(); // required for proper stackwalking of RuntimeStub frame ++ ++ __ kernel_crc32(crc, buf, len, tmp, tmp3); ++ ++ __ leave(); // required for proper stackwalking of RuntimeStub frame ++ __ ret(); ++ ++ return start; ++ } ++ ++#undef __ ++#define __ masm-> ++ ++ // Continuation point for throwing of implicit exceptions that are ++ // not handled in the current activation. Fabricates an exception ++ // oop and initiates normal exception dispatching in this ++ // frame. Since we need to preserve callee-saved values (currently ++ // only for C2, but done for C1 as well) we need a callee-saved oop ++ // map and therefore have to make these stubs into RuntimeStubs ++ // rather than BufferBlobs. 
If the compiler needs all registers to ++ // be preserved between the fault point and the exception handler ++ // then it must assume responsibility for that in ++ // AbstractCompiler::continuation_for_implicit_null_exception or ++ // continuation_for_implicit_division_by_zero_exception. All other ++ // implicit exceptions (e.g., NullPointerException or ++ // AbstractMethodError on entry) are either at call sites or ++ // otherwise assume that stack unwinding will be initiated, so ++ // caller saved registers were assumed volatile in the compiler. ++ address generate_throw_exception(const char* name, ++ address runtime_entry, ++ bool restore_saved_exception_pc) { ++ // Information about frame layout at time of blocking runtime call. ++ // Note that we only have to preserve callee-saved registers since ++ // the compilers are responsible for supplying a continuation point ++ // if they expect all registers to be preserved. ++ enum layout { ++ thread_off, // last_java_sp ++ S5_off, // callee saved register sp + 3 ++ S4_off, // callee saved register sp + 4 ++ S3_off, // callee saved register sp + 5 ++ S2_off, // callee saved register sp + 6 ++ S1_off, // callee saved register sp + 7 ++ S0_off, // callee saved register sp + 8 ++ FP_off, ++ ret_address, ++ framesize ++ }; ++ ++ int insts_size = 2048; ++ int locs_size = 32; ++ ++ // CodeBuffer* code = new CodeBuffer(insts_size, locs_size, 0, 0, 0, false, ++ // NULL, NULL, NULL, false, NULL, name, false); ++ CodeBuffer code (name , insts_size, locs_size); ++ OopMapSet* oop_maps = new OopMapSet(); ++ MacroAssembler* masm = new MacroAssembler(&code); ++ ++ address start = __ pc(); ++ ++ // This is an inlined and slightly modified version of call_VM ++ // which has the ability to fetch the return PC out of ++ // thread-local storage and also sets up last_Java_sp slightly ++ // differently than the real call_VM ++ Register java_thread = S2thread; ++ if (restore_saved_exception_pc) { ++ __ ldl(RA, java_thread, 
in_bytes(JavaThread::saved_exception_pc_offset())); ++ } ++ ++ __ enter(); // required for proper stackwalking of RuntimeStub frame ++ ++ __ add_simm16(SP, SP, (-1) * (framesize-2) * wordSize); // prolog ++ __ stl(S0, SP, S0_off * wordSize); ++ __ stl(S1, SP, S1_off * wordSize); ++ __ stl(S2, SP, S2_off * wordSize); ++ __ stl(S3, SP, S3_off * wordSize); ++ __ stl(S4, SP, S4_off * wordSize); ++ __ stl(S5, SP, S5_off * wordSize); ++ ++ int frame_complete = __ pc() - start; ++ // push java thread (becomes first argument of C function) ++ __ stl(java_thread, SP, thread_off * wordSize); ++ if (java_thread != A0) ++ __ move(A0, java_thread); ++ ++ // Set up last_Java_sp and last_Java_fp ++ __ set_last_Java_frame(java_thread, SP, FP, NULL); ++ // Align stack ++ __ set64(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); ++ ++#ifdef ZHJ20180909 ++ __ relocate(relocInfo::internal_pc_type); ++ { ++ // patchable_set48 (4) + sd (1) + call ++ intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + NativeCall::return_address_offset + 4; ++ __ patchable_set48(AT, save_pc); ++ } ++#else ++ { ++ if(UseAddpi){ ++ intptr_t patch_off = 1 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 2 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ } ++#endif ++ __ stl(AT, java_thread, in_bytes(JavaThread::last_Java_pc_offset())); ++ ++ // Call runtime ++ __ call(runtime_entry); ++ // Generate oop map ++ OopMap* map = new OopMap(framesize, 0); ++ oop_maps->add_gc_map(__ offset() - 4, map); ++ ++ // restore the thread (cannot use the pushed argument since arguments ++ // may be overwritten by C code generated by an optimizing compiler); ++ // however can use the register value directly if it is callee saved. 
++ ++ __ ldl(SP, java_thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ __ reset_last_Java_frame(java_thread, true); ++ ++ // Restore callee save registers. This must be done after resetting the Java frame ++ __ ldl(S0, SP, S0_off * wordSize); ++ __ ldl(S1, SP, S1_off * wordSize); ++ __ ldl(S2, SP, S2_off * wordSize); ++ __ ldl(S3, SP, S3_off * wordSize); ++ __ ldl(S4, SP, S4_off * wordSize); ++ __ ldl(S5, SP, S5_off * wordSize); ++ ++ // discard arguments ++ __ add_simm16(SP, SP, (framesize-2) * wordSize); // epilog ++ __ add_simm16(SP, FP, wordSize); ++ __ ldl(FP, SP, -1*wordSize); ++ // check for pending exceptions ++#ifdef ASSERT ++ Label L; ++ __ ldw(AT, java_thread, in_bytes(Thread::pending_exception_offset())); ++ __ bne(AT, L); ++ __ should_not_reach_here(); ++ __ BIND(L); ++#endif //ASSERT ++ __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); ++ RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, ++ &code, ++ frame_complete, ++ framesize, ++ oop_maps, false); ++ return stub->entry_point(); ++ } ++ ++ // Initialization ++ void generate_initial() { ++ // Generates all stubs and initializes the entry points ++ ++ //------------------------------------------------------------- ++ //----------------------------------------------------------- ++ // entry points that exist in all platforms ++ // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller ++ // than the disadvantage of having a much more complicated generator structure. ++ // See also comment in stubRoutines.hpp. 
++ StubRoutines::_forward_exception_entry = generate_forward_exception(); ++ StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); ++ // is referenced by megamorphic call ++ StubRoutines::_catch_exception_entry = generate_catch_exception(); ++ ++ StubRoutines::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access(); ++ ++ StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", ++ CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false); ++ // platform dependent ++ StubRoutines::sw64::_get_previous_fp_entry = generate_get_previous_fp(); ++ ++ if (UseCRC32Intrinsics) { ++ // set table address before stub generation which use it ++ StubRoutines::_crc_table_adr = (address)StubRoutines::sw64::_crc_table; ++ if (UseCRC32) { ++ StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); ++ } else { ++ StubRoutines::_updateBytesCRC32 = CAST_FROM_FN_PTR(address, SharedRuntime::updateBytesCRC32); ++ } ++ } ++ } ++ ++ void generate_all() { ++ // Generates all stubs and initializes the entry points ++ ++ // These entry points require SharedInfo::stack0 to be set up in ++ // non-core builds and need to be relocatable, so they each ++ // fabricate a RuntimeStub internally. 
++ StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", ++ CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false); ++ ++ StubRoutines::_throw_IncompatibleClassChangeError_entry = generate_throw_exception("IncompatibleClassChangeError throw_exception", ++ CAST_FROM_FN_PTR(address, SharedRuntime:: throw_IncompatibleClassChangeError), false); ++ ++ StubRoutines::_throw_NullPointerException_at_call_entry = generate_throw_exception("NullPointerException at call throw_exception", ++ CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false); ++ ++ // entry points that are platform specific ++ ++ // support for verify_oop (must happen after universe_init) ++ StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); ++#ifndef CORE ++ // arraycopy stubs used by compilers ++ generate_arraycopy_stubs(); ++#endif ++ ++ // Safefetch stubs. ++ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, ++ &StubRoutines::_safefetch32_fault_pc, ++ &StubRoutines::_safefetch32_continuation_pc); ++ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, ++ &StubRoutines::_safefetchN_fault_pc, ++ &StubRoutines::_safefetchN_continuation_pc); ++ if (UseMontgomeryMultiplyIntrinsic) { ++ StubRoutines::_montgomeryMultiply ++ = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); ++ } ++ if (UseMontgomerySquareIntrinsic) { ++ StubRoutines::_montgomerySquare ++ = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); ++ } ++ ++ } ++ ++ public: ++ StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { ++ if (all) { ++ generate_all(); ++ } else { ++ generate_initial(); ++ } ++ } ++}; // end class declaration ++ ++void StubGenerator_generate(CodeBuffer* code, bool all) { ++ StubGenerator g(code, all); ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.cpp 
afu8u/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.cpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,91 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "runtime/deoptimization.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/thread.inline.hpp" ++ ++// a description of how to extend it, see the stubRoutines.hpp file. 
++ ++//find the last fp value ++address StubRoutines::sw64::_get_previous_fp_entry = NULL; ++address StubRoutines::sw64::_call_stub_compiled_return = NULL; ++ ++juint StubRoutines::sw64::_crc_table[] = ++{ ++ 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, ++ 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, ++ 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, ++ 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, ++ 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, ++ 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, ++ 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, ++ 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, ++ 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, ++ 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, ++ 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, ++ 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, ++ 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, ++ 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, ++ 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, ++ 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, ++ 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, ++ 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, ++ 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, ++ 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, ++ 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, ++ 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, ++ 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, ++ 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, ++ 0x5edef90eUL, 0x29d9c998UL, 
0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, ++ 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, ++ 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, ++ 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, ++ 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, ++ 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, ++ 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, ++ 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, ++ 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, ++ 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, ++ 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, ++ 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, ++ 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, ++ 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, ++ 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, ++ 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, ++ 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, ++ 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, ++ 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, ++ 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, ++ 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, ++ 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, ++ 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, ++ 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, ++ 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, ++ 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, ++ 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, ++ 0x2d02ef8dUL ++}; +diff -uNr 
openjdk/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/stubRoutines_sw64.hpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,65 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_STUBROUTINES_SW64_HPP ++#define CPU_SW64_VM_STUBROUTINES_SW64_HPP ++ ++// This file holds the platform specific parts of the StubRoutines ++// definition. See stubRoutines.hpp for a description on how to ++// extend it. 
++ ++static bool returns_to_call_stub(address return_pc) { ++ return return_pc == _call_stub_return_address || return_pc == sw64::get_call_stub_compiled_return(); ++} ++ ++enum platform_dependent_constants { ++ code_size1 = 20000, // simply increase if too small (assembler will crash if too small) ++ code_size2 = 40000 // simply increase if too small (assembler will crash if too small) ++}; ++ ++class sw64 { ++ friend class StubGenerator; ++ friend class VMStructs; ++ private: ++ // If we call compiled code directly from the call stub we will ++ // need to adjust the return back to the call stub to a specialized ++ // piece of code that can handle compiled results and cleaning the fpu ++ // stack. The variable holds that location. ++ static address _call_stub_compiled_return; ++ static address _get_previous_fp_entry; ++ static address _verify_mxcsr_entry; ++ // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers ++ static address _key_shuffle_mask_addr; ++ // masks and table for CRC32 ++ static uint64_t _crc_by128_masks[]; ++ static juint _crc_table[]; ++public: ++ // Call back points for traps in compiled code ++ static address get_previous_fp_entry() { return _get_previous_fp_entry; } ++ static address get_call_stub_compiled_return() { return _call_stub_compiled_return; } ++ static void set_call_stub_compiled_return(address ret) { _call_stub_compiled_return = ret; } ++ ++}; ++ ++#endif // CPU_SW64_VM_STUBROUTINES_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/sw64.ad afu8u/hotspot/src/cpu/sw64/vm/sw64.ad +--- openjdk/hotspot/src/cpu/sw64/vm/sw64.ad 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/sw64.ad 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,13510 @@ ++// ++// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++// ++// This code is free software; you can redistribute it and/or modify it ++// under the terms of the GNU General Public License version 2 only, as ++// published by the Free Software Foundation. ++// ++// This code is distributed in the hope that it will be useful, but WITHOUT ++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++// version 2 for more details (a copy is included in the LICENSE file that ++// accompanied this code). ++// ++// You should have received a copy of the GNU General Public License version ++// 2 along with this work; if not, write to the Free Software Foundation, ++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++// ++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++// or visit www.oracle.com if you need additional information or have any ++// questions. ++// ++// ++ ++// SW64 Architecture Description File ++ ++//----------REGISTER DEFINITION BLOCK------------------------------------------ ++// This information is used by the matcher and the register allocator to ++// describe individual registers and classes of registers within the target ++// archtecture. 
++ ++// format: ++// reg_def name (call convention, c-call convention, ideal type, encoding); ++// call convention : ++// NS = No-Save ++// SOC = Save-On-Call ++// SOE = Save-On-Entry ++// AS = Always-Save ++// ideal type : ++// see opto/opcodes.hpp for more info ++// reg_class name (reg, ...); ++// alloc_class name (reg, ...); ++ ++register %{ ++ ++// General Registers ++// Integer Registers ++reg_def V0 (SOC, SOC, Op_RegI, 0, V0->as_VMReg()); ++reg_def V0_H (SOC, SOC, Op_RegI, 0, V0->as_VMReg()->next()); ++ ++reg_def T0 (SOC, SOC, Op_RegI, 1, T0->as_VMReg()); ++reg_def T0_H (SOC, SOC, Op_RegI, 1, T0->as_VMReg()->next()); ++reg_def T1 (SOC, SOC, Op_RegI, 2, T1->as_VMReg()); ++reg_def T1_H (SOC, SOC, Op_RegI, 2, T1->as_VMReg()->next()); ++reg_def T2 (SOC, SOC, Op_RegI, 3, T2->as_VMReg()); ++reg_def T2_H (SOC, SOC, Op_RegI, 3, T2->as_VMReg()->next()); ++reg_def T3 (SOC, SOC, Op_RegI, 4, T3->as_VMReg()); ++reg_def T3_H (SOC, SOC, Op_RegI, 4, T3->as_VMReg()->next()); ++reg_def T4 (SOC, SOC, Op_RegI, 5, T4->as_VMReg()); ++reg_def T4_H (SOC, SOC, Op_RegI, 5, T4->as_VMReg()->next()); ++reg_def T5 (SOC, SOC, Op_RegI, 6, T5->as_VMReg()); ++reg_def T5_H (SOC, SOC, Op_RegI, 6, T5->as_VMReg()->next()); ++reg_def T6 (SOC, SOC, Op_RegI, 7, T6->as_VMReg()); ++reg_def T6_H (SOC, SOC, Op_RegI, 7, T6->as_VMReg()->next()); ++reg_def T7 (SOC, SOC, Op_RegI, 8, T7->as_VMReg()); ++reg_def T7_H (SOC, SOC, Op_RegI, 8, T7->as_VMReg()->next()); ++ ++reg_def S0 (SOC, SOE, Op_RegI, 9, S0->as_VMReg()); ++reg_def S0_H (SOC, SOE, Op_RegI, 9, S0->as_VMReg()->next()); ++reg_def S1 (SOC, SOE, Op_RegI, 10, S1->as_VMReg()); ++reg_def S1_H (SOC, SOE, Op_RegI, 10, S1->as_VMReg()->next()); ++reg_def S2 (SOC, SOE, Op_RegI, 11, S2->as_VMReg()); ++reg_def S2_H (SOC, SOE, Op_RegI, 11, S2->as_VMReg()->next()); ++reg_def S3 (SOC, SOE, Op_RegI, 12, S3->as_VMReg()); ++reg_def S3_H (SOC, SOE, Op_RegI, 12, S3->as_VMReg()->next()); ++reg_def S4 (SOC, SOE, Op_RegI, 13, S4->as_VMReg()); ++reg_def S4_H (SOC, SOE, 
Op_RegI, 13, S4->as_VMReg()->next()); ++reg_def S5 (SOC, SOE, Op_RegI, 14, S5->as_VMReg()); ++reg_def S5_H (SOC, SOE, Op_RegI, 14, S5->as_VMReg()->next()); ++reg_def FP ( NS, NS, Op_RegI, 15, FP->as_VMReg()); ++reg_def FP_H ( NS, NS, Op_RegI, 15, FP->as_VMReg()->next()); ++ ++reg_def A0 (SOC, SOC, Op_RegI, 16, A0->as_VMReg()); ++reg_def A0_H (SOC, SOC, Op_RegI, 16, A0->as_VMReg()->next()); ++reg_def A1 (SOC, SOC, Op_RegI, 17, A1->as_VMReg()); ++reg_def A1_H (SOC, SOC, Op_RegI, 17, A1->as_VMReg()->next()); ++reg_def A2 (SOC, SOC, Op_RegI, 18, A2->as_VMReg()); ++reg_def A2_H (SOC, SOC, Op_RegI, 18, A2->as_VMReg()->next()); ++reg_def A3 (SOC, SOC, Op_RegI, 19, A3->as_VMReg()); ++reg_def A3_H (SOC, SOC, Op_RegI, 19, A3->as_VMReg()->next()); ++reg_def A4 (SOC, SOC, Op_RegI, 20, A4->as_VMReg()); ++reg_def A4_H (SOC, SOC, Op_RegI, 20, A4->as_VMReg()->next()); ++reg_def A5 (SOC, SOC, Op_RegI, 21, A5->as_VMReg()); ++reg_def A5_H (SOC, SOC, Op_RegI, 21, A5->as_VMReg()->next()); ++ ++reg_def T8 (SOC, SOC, Op_RegI, 22, T8->as_VMReg()); ++reg_def T8_H (SOC, SOC, Op_RegI, 22, T8->as_VMReg()->next()); ++reg_def T9 (SOC, SOC, Op_RegI, 23, T9->as_VMReg()); ++reg_def T9_H (SOC, SOC, Op_RegI, 23, T9->as_VMReg()->next()); ++reg_def T10 (SOC, SOC, Op_RegI, 24, T10->as_VMReg()); ++reg_def T10_H (SOC, SOC, Op_RegI, 24, T10->as_VMReg()->next()); ++reg_def T11 (SOC, SOC, Op_RegI, 25, T11->as_VMReg()); ++reg_def T11_H (SOC, SOC, Op_RegI, 25, T11->as_VMReg()->next()); ++reg_def RA ( NS, NS, Op_RegI, 26, RA->as_VMReg()); ++reg_def RA_H ( NS, NS, Op_RegI, 26, RA->as_VMReg()->next()); ++reg_def T12 (SOC, SOC, Op_RegI, 27, T12->as_VMReg()); ++reg_def T12_H (SOC, SOC, Op_RegI, 27, T12->as_VMReg()->next()); ++reg_def AT ( NS, NS, Op_RegI, 28, AT->as_VMReg()); ++reg_def AT_H ( NS, NS, Op_RegI, 28, AT->as_VMReg()->next()); ++reg_def GP ( NS, NS, Op_RegI, 29, GP->as_VMReg()); ++reg_def GP_H ( NS, NS, Op_RegI, 29, GP->as_VMReg()->next()); ++reg_def SP ( NS, NS, Op_RegI, 30, SP->as_VMReg()); ++reg_def 
SP_H ( NS, NS, Op_RegI, 30, SP->as_VMReg()->next()); ++reg_def R0 ( NS, NS, Op_RegI, 31, VMRegImpl::Bad()); ++ ++// Floating registers. ++reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()); ++reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next()); ++reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()); ++reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next()); ++reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()); ++reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next()); ++reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()); ++reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next()); ++reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()); ++reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next()); ++reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()); ++reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next()); ++reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()); ++reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next()); ++reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()); ++reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next()); ++reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()); ++reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next()); ++reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()); ++reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next()); ++reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()); ++reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next()); ++reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()); ++reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next()); ++reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()); ++reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next()); ++reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()); ++reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next()); ++reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()); ++reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next()); ++reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()); 
++reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next()); ++reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()); ++reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next()); ++reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()); ++reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next()); ++reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()); ++reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next()); ++reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()); ++reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next()); ++reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()); ++reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next()); ++reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()); ++reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next()); ++reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()); ++reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next()); ++reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()); ++reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next()); ++reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()); ++reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next()); ++reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()); ++reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next()); ++reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()); ++reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next()); ++reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()); ++reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next()); ++reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()); ++reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next()); ++reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()); ++reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next()); ++reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()); ++reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next()); ++reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()); ++reg_def F31_H 
( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next()); ++ ++ ++// ---------------------------- ++// Special Registers ++// Condition Codes Flag Registers ++reg_def SW64_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg()); ++ ++//S2 is used for get_thread(S2) ++//S5 is uesd for heapbase of compressed oop ++alloc_class chunk0( ++ S0, S0_H, ++ S1, S1_H, ++ S3, S3_H, ++ S4, S4_H, ++ S5, S5_H, ++ S2, S2_H, ++ T2, T2_H, ++ T3, T3_H, ++ T11, T11_H, ++ T12, T12_H, ++ T1, T1_H, // inline_cache_reg ++ A5, A5_H, ++ A4, A4_H, ++ V0, V0_H, ++ A3, A3_H, ++ A2, A2_H, ++ A1, A1_H, ++ A0, A0_H, ++ T0, T0_H, ++ T4, T4_H, ++ T5, T5_H, ++ T6, T6_H, ++ T7, T7_H, ++ T8, T8_H, ++ T9, T9_H, ++ T10, T10_H, ++ GP, GP_H ++ RA, RA_H, ++ SP, SP_H, // stack_pointer ++ FP, FP_H // frame_pointer ++ ); ++ ++alloc_class chunk1( F0, F0_H, ++ F1, F1_H, ++ F2, F2_H, ++ F3, F3_H, ++ F4, F4_H, ++ F5, F5_H, ++ F6, F6_H, ++ F7, F7_H, ++ F8, F8_H, ++ F9, F9_H, ++ F10, F10_H, ++ F11, F11_H, ++ F20, F20_H, ++ F21, F21_H, ++ F22, F22_H, ++ F23, F23_H, ++ F24, F24_H, ++ F25, F25_H, ++ F26, F26_H, ++ F27, F27_H, ++ F28, F28_H, ++ F19, F19_H, ++ F18, F18_H, ++ F17, F17_H, ++ F16, F16_H, ++ F15, F15_H, ++ F14, F14_H, ++ F13, F13_H, ++ F12, F12_H, ++ F29, F29_H, ++ F30, F30_H, ++ F31, F31_H); ++ ++alloc_class chunk2(SW64_FLAG); ++ ++reg_class s_reg( S0, S1, S2, S3, S4, S5 ); ++reg_class s0_reg( S0 ); ++reg_class s1_reg( S1 ); ++reg_class s2_reg( S2 ); ++reg_class s3_reg( S3 ); ++reg_class s4_reg( S4 ); ++reg_class s5_reg( S5 ); ++ ++reg_class t_reg( T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12 ); ++reg_class t0_reg( T0 ); ++reg_class t1_reg( T1 ); ++reg_class t2_reg( T2 ); ++reg_class t3_reg( T3 ); ++reg_class t4_reg( T4 ); ++reg_class t5_reg( T5 ); ++reg_class t6_reg( T6 ); ++reg_class t7_reg( T7 ); ++reg_class t8_reg( T8 ); ++reg_class t9_reg( T9 ); ++reg_class t10_reg( T10 ); ++reg_class t11_reg( T11 ); ++reg_class t12_reg( T12 ); ++ ++reg_class a_reg( A0, A1, A2, A3, A4, A5 ); ++reg_class a0_reg( 
A0 ); ++reg_class a1_reg( A1 ); ++reg_class a2_reg( A2 ); ++reg_class a3_reg( A3 ); ++reg_class a4_reg( A4 ); ++reg_class a5_reg( A5 ); ++ ++reg_class v0_reg( V0 ); ++ ++reg_class sp_reg( SP, SP_H ); ++reg_class fp_reg( FP, FP_H ); ++ ++reg_class sw64_flags(SW64_FLAG); ++ ++reg_class v0_long_reg( V0, V0_H ); ++ ++reg_class t0_long_reg( T0, T0_H ); ++reg_class t1_long_reg( T1, T1_H ); ++reg_class t2_long_reg( T2, T2_H ); ++reg_class t3_long_reg( T3, T3_H ); ++reg_class t4_long_reg( T4, T4_H ); ++reg_class t5_long_reg( T5, T5_H ); ++reg_class t6_long_reg( T6, T6_H ); ++reg_class t7_long_reg( T7, T7_H ); ++reg_class t8_long_reg( T8, T8_H ); ++reg_class t9_long_reg( T9, T9_H ); ++reg_class t10_long_reg( T10, T10_H ); ++reg_class t11_long_reg( T11, T11_H ); ++reg_class t12_long_reg( T12, T12_H ); ++ ++reg_class a0_long_reg( A0, A0_H ); ++reg_class a1_long_reg( A1, A1_H ); ++reg_class a2_long_reg( A2, A2_H ); ++reg_class a3_long_reg( A3, A3_H ); ++reg_class a4_long_reg( A4, A4_H ); ++reg_class a5_long_reg( A5, A5_H ); ++ ++reg_class s0_long_reg( S0, S0_H ); ++reg_class s1_long_reg( S1, S1_H ); ++reg_class s2_long_reg( S2, S2_H ); ++reg_class s3_long_reg( S3, S3_H ); ++reg_class s4_long_reg( S4, S4_H ); ++reg_class s5_long_reg( S5, S5_H ); ++ ++reg_class int_reg( S1, S0, S4, S3, T11, T2, T3, T1, A5, A4, V0, A3, A2, A1, A0, T0, T4, T5, T6, T7, T8, T9, T10 ); ++ ++reg_class no_Ax_int_reg( S1, S0, S4, S3, T11, T2, T3, T1, V0, T0, T4, T5, T6, T7, T8, T9, T10 ); ++ ++reg_class p_reg( ++ S1, S1_H, ++ S0, S0_H, ++ S4, S4_H, ++ S3, S3_H, ++ T11, T11_H, ++ T2, T2_H, ++ T3, T3_H, ++ T1, T1_H, ++ A5, A5_H, ++ A4, A4_H, ++ A3, A3_H, ++ A2, A2_H, ++ A1, A1_H, ++ A0, A0_H, ++ T0, T0_H, ++ T4, T4_H, ++ T5, T5_H, ++ T6, T6_H, ++ T7, T7_H, ++ T8, T8_H, ++ T9, T9_H, ++ T10, T10_H ++ ); ++ ++reg_class no_T11_p_reg( ++ S1, S1_H, ++ S0, S0_H, ++ S4, S4_H, ++ S3, S3_H, ++ T2, T2_H, ++ T3, T3_H, ++ T1, T1_H, ++ A5, A5_H, ++ A4, A4_H, ++ A3, A3_H, ++ A2, A2_H, ++ A1, A1_H, ++ A0, A0_H, ++ T0, 
T0_H, ++ T4, T4_H, ++ T5, T5_H, ++ T6, T6_H, ++ T7, T7_H, ++ T8, T8_H, ++ T9, T9_H, ++ T10, T10_H ++ ); ++ ++reg_class long_reg( ++ S1, S1_H, ++ S0, S0_H, ++ S4, S4_H, ++ S3, S3_H, ++ T11, T11_H, ++ T2, T2_H, ++ T3, T3_H, ++ T1, T1_H, ++ A5, A5_H, ++ A4, A4_H, ++ A3, A3_H, ++ A2, A2_H, ++ A1, A1_H, ++ A0, A0_H, ++ T0, T0_H, ++ T4, T4_H, ++ T5, T5_H, ++ T6, T6_H, ++ T7, T7_H, ++ T8, T8_H, ++ T9, T9_H, ++ T10, T10_H ++ ); ++ ++ ++// Floating point registers. ++// 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I ++//2017/9/6 zyh: F28&F29 are used as temporary registers in float cmp instructs ++reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27); ++reg_class dbl_reg( F0, F0_H, ++ F1, F1_H, ++ F2, F2_H, ++ F3, F3_H, ++ F4, F4_H, ++ F5, F5_H, ++ F6, F6_H, ++ F7, F7_H, ++ F8, F8_H, ++ F9, F9_H, ++ F10, F10_H, ++ F11, F11_H, ++ F12, F12_H, ++ F13, F13_H, ++ F14, F14_H, ++ F15, F15_H, ++ F16, F16_H, ++ F17, F17_H, ++ F18, F18_H, ++ F19, F19_H, ++ F20, F20_H, ++ F21, F21_H, ++ F22, F22_H, ++ F23, F23_H, ++ F24, F24_H, ++ F25, F25_H, ++ F26, F26_H, ++ F27, F27_H, ++// F28, F28_H, ++// F29, F29_H ++ ); ++ ++reg_class flt_arg0( F16 ); ++reg_class dbl_arg0( F16, F16_H ); ++reg_class dbl_arg1( F17, F17_H ); ++ ++%} ++ ++//----------DEFINITION BLOCK--------------------------------------------------- ++// Define name --> value mappings to inform the ADLC of an integer valued name ++// Current support includes integer values in the range [0, 0x7FFFFFFF] ++// Format: ++// int_def ( , ); ++// Generated Code in ad_.hpp ++// #define () ++// // value == ++// Generated code in ad_.cpp adlc_verification() ++// assert( == , "Expect () to equal "); ++// ++definitions %{ ++ int_def DEFAULT_COST ( 100, 100); ++ int_def HUGE_COST (1000000, 1000000); ++ ++ // Memory refs are twice as expensive as run-of-the-mill. 
++ int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2); ++ ++ // Branches are even more expensive. ++ int_def BRANCH_COST ( 300, DEFAULT_COST * 3); ++ // we use jr instruction to construct call, so more expensive ++ int_def CALL_COST ( 500, DEFAULT_COST * 5); ++%} ++ ++ ++ ++//----------SOURCE BLOCK------------------------------------------------------- ++// This is a block of C++ code which provides values, functions, and ++// definitions necessary in the rest of the architecture description ++ ++source_hpp %{ ++// Header information of the source block. ++// Method declarations/definitions which are used outside ++// the ad-scope can conveniently be defined here. ++// ++// To keep related declarations/definitions/uses close together, ++// we switch between source %{ }% and source_hpp %{ }% freely as needed. ++ ++class CallStubImpl { ++ ++ //-------------------------------------------------------------- ++ //---< Used for optimization in Compile::shorten_branches >--- ++ //-------------------------------------------------------------- ++ ++ public: ++ // Size of call trampoline stub. ++ static uint size_call_trampoline() { ++ return 0; // no call trampolines on this platform ++ } ++ ++ // number of relocations needed by a call trampoline stub ++ static uint reloc_call_trampoline() { ++ return 0; // no call trampolines on this platform ++ } ++}; ++ ++class HandlerImpl { ++ ++ public: ++ ++ static int emit_exception_handler(CodeBuffer &cbuf); ++ static int emit_deopt_handler(CodeBuffer& cbuf); ++ ++ static uint size_exception_handler() { ++ // NativeCall instruction size is the same as NativeJump. ++ // exception handler starts out as jump and can be patched to ++ // a call be deoptimization. (4932387) ++ // Note that this value is also credited (in output.cpp) to ++ // the size of the code section. 
++ int size = NativeCall::instruction_size; ++ return round_to(size, 16); ++ } ++ ++ static uint size_deopt_handler() { ++ int size = NativeCall::instruction_size; ++ return round_to(size, 16); ++ } ++}; ++ ++ bool is_CAS(int opcode); ++ bool unnecessary_release(const Node *barrier); ++ // predicate controlling translation of StoreCM ++ bool unnecessary_storestore(const Node *storecm); ++ ++%} // end source_hpp ++ ++source %{ ++ ++#define NO_INDEX 0 ++#define RELOC_IMM64 Assembler::imm_operand ++#define RELOC_DISP32 Assembler::disp32_operand ++ ++ ++#define __ _masm. ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++// Emit exception handler code. ++// Stuff framesize into a register and call a VM stub routine. ++int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) { ++ // Note that the code buffer's insts_mark is always relative to insts. ++ // That's why we must use the macroassembler to generate a handler. ++ MacroAssembler _masm(&cbuf); ++ address base = __ start_a_stub(size_exception_handler()); ++ if (base == NULL) { ++ ciEnv::current()->record_failure("CodeCache is full"); ++ return 0; // CodeBuffer::expand failed ++ } ++ ++ int offset = __ offset(); ++ ++ __ block_comment("; emit_exception_handler"); ++ ++ cbuf.set_insts_mark(); ++ __ relocate(relocInfo::runtime_call_type); ++ __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point()); ++ __ align(16); ++ assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); ++ __ end_a_stub(); ++ return offset; ++} ++ ++// Emit deopt handler code. ++int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { ++ // Note that the code buffer's insts_mark is always relative to insts. ++ // That's why we must use the macroassembler to generate a handler. 
++ MacroAssembler _masm(&cbuf); ++ address base = __ start_a_stub(size_deopt_handler()); ++ if (base == NULL) { ++ ciEnv::current()->record_failure("CodeCache is full"); ++ return 0; // CodeBuffer::expand failed ++ } ++ ++ int offset = __ offset(); ++ ++ __ block_comment("; emit_deopt_handler"); ++ ++ cbuf.set_insts_mark(); ++ __ relocate(relocInfo::runtime_call_type); ++ __ patchable_call_setfpec1(SharedRuntime::deopt_blob()->unpack()); ++ __ align(16); ++ assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); ++ __ end_a_stub(); ++ return offset; ++} ++ ++ ++const bool Matcher::match_rule_supported(int opcode) { ++ if (!has_match_rule(opcode)) ++ return false; ++ ++ switch (opcode) { ++ case Op_CountLeadingZerosI: ++ case Op_CountLeadingZerosL: ++ if (!UseCountLeadingZerosInstruction) ++ return false; ++ break; ++ case Op_CountTrailingZerosI: ++ case Op_CountTrailingZerosL: ++ if (!UseCountTrailingZerosInstruction) ++ return false; ++ break; ++ } ++ ++ return true; // Per default match rules are supported. ++} ++ ++// emit call stub, compiled java to interpreter ++void emit_java_to_interp(CodeBuffer &cbuf ) { ++ // Stub is fixed up when the corresponding call is converted from calling ++ // compiled code to calling interpreted code. ++ // mov S3,0 ++ // jmp -1 ++ ++ address mark = cbuf.insts_mark(); // get mark within main instrs section ++ ++ // Note that the code buffer's insts_mark is always relative to insts. ++ // That's why we must use the macroassembler to generate a stub. ++ MacroAssembler _masm(&cbuf); ++ ++ address base = __ start_a_stub(Compile::MAX_stubs_size); ++ if (base == NULL) { // CodeBuffer::expand failed ++ ciEnv::current()->record_failure("CodeCache is full"); ++ } ++ ++ // static stub relocation stores the instruction address of the call ++ ++ __ relocate(static_stub_Relocation::spec(mark), 0); ++ ++ // static stub relocation also tags the methodOop in the code-stream. 
++ __ patchable_set48(S3, (long)0); ++ // This is recognized as unresolved by relocs/nativeInst/ic code ++ ++ __ relocate(relocInfo::runtime_call_type); ++ ++ cbuf.set_insts_mark(); ++ address call_pc = (address)-1; ++ __ patchable_jump(call_pc); ++ __ align(16); ++ __ end_a_stub(); ++ // Update current stubs pointer and restore code_end. ++} ++ ++// size of call stub, compiled java to interpretor ++uint size_java_to_interp() { ++ int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size ++ return round_to(size, 16); ++} ++ ++// relocation entries for call stub, compiled java to interpreter ++uint reloc_java_to_interp() { ++ return 16; // in emit_java_to_interp + in Java_Static_Call ++} ++ ++bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { ++ int offs = offset - br_size + 4; ++ // To be conservative on SW64 ++ // branch node should be end with branch inst ++ const int safety_zone = 3 * BytesPerInstWord; ++ return Assembler::is_simm16((offs<0 ? offs-safety_zone : offs+safety_zone) >> 2); ++ } ++ ++ ++// No additional cost for CMOVL. ++const int Matcher::long_cmove_cost() { return 0; } ++ ++// No CMOVF/CMOVD with SSE2 ++const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; } ++ ++// Does the CPU require late expand (see block.cpp for description of late expand)? ++const bool Matcher::require_postalloc_expand = false; ++ ++// Should the Matcher clone shifts on addressing modes, expecting them ++// to be subsumed into complex addressing expressions or compute them ++// into registers? True for Intel but false for most RISCs ++const bool Matcher::clone_shift_expressions = false; ++ ++// Do we need to mask the count passed to shift instructions or does ++// the cpu only look at the lower 5/6 bits anyway? 
++const bool Matcher::need_masked_shift_count = false; ++ ++bool Matcher::narrow_oop_use_complex_address() { ++ NOT_LP64(ShouldNotCallThis()); ++ assert(UseCompressedOops, "only for compressed oops code"); ++ return false; ++} ++ ++bool Matcher::narrow_klass_use_complex_address() { ++ NOT_LP64(ShouldNotCallThis()); ++ assert(UseCompressedClassPointers, "only for compressed klass code"); ++ return false; ++} ++ ++// This is UltraSparc specific, true just means we have fast l2f conversion ++const bool Matcher::convL2FSupported(void) { ++ return true; ++} ++ ++// Max vector size in bytes. 0 if not supported. ++const int Matcher::vector_width_in_bytes(BasicType bt) { ++ if (MaxVectorSize == 0) ++ return 0; ++ assert(MaxVectorSize == 8, ""); ++ return 8; ++} ++ ++// Vector ideal reg ++const uint Matcher::vector_ideal_reg(int size) { ++ assert(MaxVectorSize == 8, ""); ++ switch(size) { ++ case 8: return Op_VecD; ++ } ++ ShouldNotReachHere(); ++ return 0; ++} ++ ++// Only lowest bits of xmm reg are used for vector shift count. ++const uint Matcher::vector_shift_count_ideal_reg(int size) { ++ fatal("vector shift is not supported"); ++ return Node::NotAMachineReg; ++} ++ ++// Limits on vector size (number of elements) loaded into vector. ++const int Matcher::max_vector_size(const BasicType bt) { ++ assert(is_java_primitive(bt), "only primitive type vectors"); ++ return vector_width_in_bytes(bt)/type2aelembytes(bt); ++} ++ ++const int Matcher::min_vector_size(const BasicType bt) { ++ return max_vector_size(bt); // Same as max. 
++} ++ ++// SW64 supports misaligned vectors store/load ++const bool Matcher::misaligned_vectors_ok() { ++ return false; ++} ++ ++// Register for DIVI projection of divmodI ++RegMask Matcher::divI_proj_mask() { ++ ShouldNotReachHere(); ++ return RegMask(); ++} ++ ++// Register for MODI projection of divmodI ++RegMask Matcher::modI_proj_mask() { ++ ShouldNotReachHere(); ++ return RegMask(); ++} ++ ++// Register for DIVL projection of divmodL ++RegMask Matcher::divL_proj_mask() { ++ ShouldNotReachHere(); ++ return RegMask(); ++} ++ ++int Matcher::regnum_to_fpu_offset(int regnum) { ++ return regnum - 32; // The FP registers are in the second chunk ++} ++ ++ ++const bool Matcher::isSimpleConstant64(jlong value) { ++ // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?. ++ return true; ++} ++ ++ // is_CAS(int opcode) ++ // ++ // return true if opcode is one of the possible CompareAndSwapX ++ // values otherwise false. ++ ++bool is_CAS(int opcode) ++{ ++ switch(opcode) { ++ // We handle these ++ case Op_CompareAndSwapI: ++ case Op_CompareAndSwapL: ++ case Op_CompareAndSwapP: ++ case Op_CompareAndSwapN: ++ case Op_GetAndSetI: ++ case Op_GetAndSetL: ++ case Op_GetAndSetP: ++ case Op_GetAndSetN: ++ case Op_GetAndAddI: ++ case Op_GetAndAddL: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++bool unnecessary_release(const Node *n) ++{ ++ assert((n->is_MemBar() && ++ n->Opcode() == Op_MemBarRelease), ++ "expecting a release membar"); ++ ++ MemBarNode *barrier = n->as_MemBar(); ++ ++ if (!barrier->leading()) { ++ return false; ++ } else { ++ Node* trailing = barrier->trailing_membar(); ++ MemBarNode* trailing_mb = trailing->as_MemBar(); ++ assert(trailing_mb->trailing(), "Not a trailing membar?"); ++ assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars"); ++ ++ Node* mem = trailing_mb->in(MemBarNode::Precedent); ++ if (!mem->is_Store()) { ++ assert(mem->is_LoadStore(), ""); ++ assert(trailing_mb->Opcode() == Op_MemBarAcquire, ""); 
++ return is_CAS(mem->Opcode()); ++ } ++ } ++ ++ return false; ++} ++ ++bool unnecessary_storestore(const Node *storecm) ++{ ++ assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM"); ++ // we need to generate a dmb ishst between an object put and the ++ // associated card mark when we are using CMS without conditional ++ // card marking ++ if (UseConcMarkSweepGC && !UseCondCardMark) { ++ return false; ++ } ++ // a storestore is unnecesary in all other cases ++ return true; ++} ++ ++// Return whether or not this register is ever used as an argument. This ++// function is used on startup to build the trampoline stubs in generateOptoStub. ++// Registers not mentioned will be killed by the VM call in the trampoline, and ++// arguments in those registers not be available to the callee. ++bool Matcher::can_be_java_arg( int reg ) { ++ /* Refer to: [sharedRuntime_sw64.cpp] SharedRuntime::java_calling_convention() */ ++ if ( /* reg == T0_num || reg == T0_H_num ++ || */ reg == A0_num || reg == A0_H_num ++ || reg == A1_num || reg == A1_H_num ++ || reg == A2_num || reg == A2_H_num ++ || reg == A3_num || reg == A3_H_num ++ || reg == A4_num || reg == A4_H_num ++ || reg == A5_num || reg == A5_H_num ) ++ return true; ++ ++ if ( reg == F16_num || reg == F16_H_num ++ || reg == F17_num || reg == F17_H_num ++ || reg == F18_num || reg == F18_H_num ++ || reg == F19_num || reg == F19_H_num ++ || reg == F20_num || reg == F20_H_num ++ || reg == F21_num || reg == F21_H_num ) ++ return true; ++ ++ return false; ++} ++ ++bool Matcher::is_spillable_arg( int reg ) { ++ return can_be_java_arg(reg); ++} ++ ++bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { ++ return false; ++} ++ ++// Register for MODL projection of divmodL ++RegMask Matcher::modL_proj_mask() { ++ ShouldNotReachHere(); ++ return RegMask(); ++} ++ ++const RegMask Matcher::method_handle_invoke_SP_save_mask() { ++ return FP_REG_mask(); ++} ++ ++// SW64 doesn't support AES intrinsics ++const bool 
Matcher::pass_original_key_for_aes() { ++ return false; ++} ++ ++#ifdef ZHJPAD ++// The address of the call instruction needs to be 16-byte aligned to ++// ensure that it does not span a cache line so that it can be patched. ++ ++int CallStaticJavaDirectNode::compute_padding(int current_offset) const { ++ //ldi ++ //sll ++ //ldih ++ //lsi ++ //call ++ //nop ++ return round_to(current_offset, alignment_required()) - current_offset; ++} ++ ++// The address of the call instruction needs to be 16-byte aligned to ++// ensure that it does not span a cache line so that it can be patched. ++int CallDynamicJavaDirectNode::compute_padding(int current_offset) const { ++ //li48 <--- skip ++ ++ //ldi ++ //slll ++ //ldih ++ //ldi ++ //call ++ //nop ++ ++//ZHJ current_offset += 4 * 6; // skip li64 ++ current_offset += 4 * BytesPerInstWord; // skip li48 ++ return round_to(current_offset, alignment_required()) - current_offset; ++} ++#endif ++ ++int CallLeafNoFPDirectNode::compute_padding(int current_offset) const { ++ //ldi ++ //slll ++ //ldih ++ //ldi ++ //call ++ //nop ++ return round_to(current_offset, alignment_required()) - current_offset; ++} ++ ++int CallLeafDirectNode::compute_padding(int current_offset) const { ++ return round_to(current_offset, alignment_required()) - current_offset; ++} ++ ++int CallRuntimeDirectNode::compute_padding(int current_offset) const { ++ return round_to(current_offset, alignment_required()) - current_offset; ++} ++ ++// If CPU can load and store mis-aligned doubles directly then no fixup is ++// needed. Else we split the double into 2 integer pieces and move it ++// piece-by-piece. Only happens when passing doubles into C code as the ++// Java calling convention forces doubles to be aligned. ++const bool Matcher::misaligned_doubles_ok = false; ++// Do floats take an entire double register or just half? ++bool Matcher::float_in_double() { return true; } //TODO: ZHJ20180613 ++// Threshold size for cleararray. 
++const int Matcher::init_array_short_size = 8 * BytesPerLong; ++// Do ints take an entire long register or just half? ++const bool Matcher::int_in_long = true; ++// Is it better to copy float constants, or load them directly from memory? ++// Intel can load a float constant from a direct address, requiring no ++// extra registers. Most RISCs will have to materialize an address into a ++// register first, so they would do better to copy the constant from stack. ++const bool Matcher::rematerialize_float_constants = false; ++// Advertise here if the CPU requires explicit rounding operations ++// to implement the UseStrictFP mode. ++const bool Matcher::strict_fp_requires_explicit_rounding = false; ++// false => size gets scaled to BytesPerLong, ok. ++const bool Matcher::init_array_count_is_in_bytes = false; ++ ++// Indicate if the safepoint node needs the polling page as an input. ++// Since SW64 doesn't have absolute addressing, it needs. ++bool SafePointNode::needs_polling_address_input() { ++ //TODO: ZHJ20180613 ++ return true; ++} ++ ++// Special hack to get all type of calls to specify the byte offset ++// from the start of the call to the point where the return address ++// will point. 
++int MachCallStaticJavaNode::ret_addr_offset() { ++ if (SafePatch) { ++ assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset"); ++ } else { ++ assert(NativeCall::instruction_size == 20, "in MachCallStaticJavaNode::ret_addr_offset"); ++ } ++ return NativeCall::instruction_size; ++} ++ ++int MachCallDynamicJavaNode::ret_addr_offset() { ++ if (SafePatch) { ++ assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset"); ++ } else { ++ assert(NativeCall::instruction_size == 20, "in MachCallStaticJavaNode::ret_addr_offset"); // don't consider setfpec1 ++ } ++ //li48 IC_Klass, ++ ++ //ldi T12 ++ //slll T12 ++ //ldih T12 ++ //ldi T12 ++ //call T12 ++ //nop ++ return 4 * BytesPerInstWord + NativeCall::instruction_size; // don't consider setfpec1 ++} ++ ++//============================================================================= ++ ++// Figure out which register class each belongs in: rc_int, rc_float, rc_stack ++enum RC { rc_bad, rc_int, rc_float, rc_stack }; ++static enum RC rc_class( OptoReg::Name reg ) { ++ if( !OptoReg::is_valid(reg) ) return rc_bad; ++ if (OptoReg::is_stack(reg)) return rc_stack; ++ VMReg r = OptoReg::as_VMReg(reg); ++ if (r->is_Register()) return rc_int; ++ assert(r->is_FloatRegister(), "must be"); ++ return rc_float; ++} ++ ++uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const { ++ // Get registers to move ++ OptoReg::Name src_second = ra_->get_reg_second(in(1)); ++ OptoReg::Name src_first = ra_->get_reg_first(in(1)); ++ OptoReg::Name dst_second = ra_->get_reg_second(this ); ++ OptoReg::Name dst_first = ra_->get_reg_first(this ); ++ ++ enum RC src_second_rc = rc_class(src_second); ++ enum RC src_first_rc = rc_class(src_first); ++ enum RC dst_second_rc = rc_class(dst_second); ++ enum RC dst_first_rc = rc_class(dst_first); ++ ++ assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 
register" ); ++ ++ // Generate spill code! ++ int size = 0; ++ ++ if( src_first == dst_first && src_second == dst_second ) ++ return 0; // Self copy, no move ++ ++ if (src_first_rc == rc_stack) { ++ // mem -> ++ if (dst_first_rc == rc_stack) { ++ // mem -> mem ++ assert(src_second != dst_first, "overlap"); ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ int src_offset = ra_->reg2offset(src_first); ++ int dst_offset = ra_->reg2offset(dst_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ ldl(AT, Address(SP, src_offset)); ++ __ stl(AT, Address(SP, dst_offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("ldl AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t" ++ "stl AT, [SP + #%d]", ++ src_offset, dst_offset); ++ } ++#endif ++ } ++ size += 8; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ int src_offset = ra_->reg2offset(src_first); ++ int dst_offset = ra_->reg2offset(dst_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ ldw_signed(AT, Address(SP, src_offset)); ++ __ stw(AT, Address(SP, dst_offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("ldw AT, [SP + #%d] spill 2\n\t" ++ "stw AT, [SP + #%d]\n\t", ++ src_offset, dst_offset); ++ } ++#endif ++ } ++ size += 8; ++ } ++ return size; ++ } else if (dst_first_rc == rc_int) { ++ // mem -> gpr ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ int offset = ra_->reg2offset(src_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ ldl(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); 
++ st->print("ldl %s, [SP + #%d]\t# spill 3", ++ Matcher::regName[dst_first], ++ offset); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ int offset = ra_->reg2offset(src_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ if (this->ideal_reg() == Op_RegI) ++ __ ldw_signed(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset)); ++ else ++ __ ldw_unsigned(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ if (this->ideal_reg() == Op_RegI) ++ st->print("ldw %s, [SP + #%d]\t# spill 4", ++ Matcher::regName[dst_first], ++ offset); ++ else ++ st->print("ldwu %s, [SP + #%d]\t# spill 5", ++ Matcher::regName[dst_first], ++ offset); ++ } ++#endif ++ } ++ if (this->ideal_reg() == Op_RegI) { ++ size += 4; ++ } else { ++ size += 8; ++ } ++ } ++ return size; ++ } else if (dst_first_rc == rc_float) { ++ // mem-> xmm ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ int offset = ra_->reg2offset(src_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ fldd( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fldd %s, [SP + #%d]\t# spill 6", ++ Matcher::regName[dst_first], ++ offset); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ int offset = ra_->reg2offset(src_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ flds( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset)); 
++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("flds %s, [SP + #%d]\t# spill 7", ++ Matcher::regName[dst_first], ++ offset); ++ } ++#endif ++ } ++ size += 4; ++ } ++ return size; ++ } ++ } else if (src_first_rc == rc_int) { ++ // gpr -> ++ if (dst_first_rc == rc_stack) { ++ // gpr -> mem ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ int offset = ra_->reg2offset(dst_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ stl(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("stl %s, [SP + #%d] # spill 8", ++ Matcher::regName[src_first], ++ offset); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ int offset = ra_->reg2offset(dst_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ stw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset)); ++#ifndef PRODUCT ++ } else { ++ if (!do_size) { ++ if (size != 0) st->print("\n\t"); ++ st->print("stl %s, [SP + #%d]\t# spill 9", ++ Matcher::regName[src_first], offset); ++ } ++#endif ++ } ++ size += 4; ++ } ++ return size; ++ } else if (dst_first_rc == rc_int) { ++ // gpr -> gpr ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ addl(as_Register(Matcher::_regEncode[dst_first]), ++ as_Register(Matcher::_regEncode[src_first]), R0); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("move(64bit) %s <-- %s\t# spill 10", ++ Matcher::regName[dst_first], ++ Matcher::regName[src_first]); ++ } ++#endif ++ } ++ 
size += 4; ++ return size; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ if (this->ideal_reg() == Op_RegI) ++ __ addw(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0); ++ else ++ __ addl(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0); ++#ifndef PRODUCT ++ } else { ++ if (!do_size) { ++ if (size != 0) st->print("\n\t"); ++ st->print("move(32-bit) %s <-- %s\t# spill 11", ++ Matcher::regName[dst_first], ++ Matcher::regName[src_first]); ++ } ++#endif ++ } ++ size += 4; ++ return size; ++ } ++ } else if (dst_first_rc == rc_float) { ++ // gpr -> xmm ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ ifmovd(as_FloatRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first])); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("ifmovd %s, %s\t# spill 12", ++ Matcher::regName[dst_first], ++ Matcher::regName[src_first]); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ ifmovs(as_FloatRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first])); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("ifmovs %s, %s\t# spill 13", ++ Matcher::regName[dst_first], ++ Matcher::regName[src_first]); ++ } ++#endif ++ } ++ size += 4; ++ } ++ return size; ++ } ++ } else if (src_first_rc == rc_float) { ++ // xmm -> 
++ if (dst_first_rc == rc_stack) { ++ // xmm -> mem ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ int offset = ra_->reg2offset(dst_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ fstd( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) ); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fstd %s, [SP + #%d]\t# spill 14", ++ Matcher::regName[src_first], ++ offset); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ int offset = ra_->reg2offset(dst_first); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ fsts(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset)); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fsts %s, [SP + #%d]\t# spill 15", ++ Matcher::regName[src_first], ++ offset); ++ } ++#endif ++ } ++ size += 4; ++ } ++ return size; ++ } else if (dst_first_rc == rc_int) { ++ // xmm -> gpr ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ fimovd(as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first])); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fimovd %s, %s\t# spill 16", ++ Matcher::regName[src_first], ++ Matcher::regName[dst_first]); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ 
fimovs(as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first])); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fimovs %s, %s\t# spill 17", ++ Matcher::regName[src_first], ++ Matcher::regName[dst_first]); ++ } ++#endif ++ } ++ size += 4; ++ } ++ return size; ++ } else if (dst_first_rc == rc_float) { ++ // xmm -> xmm ++ if ((src_first & 1) == 0 && src_first + 1 == src_second && ++ (dst_first & 1) == 0 && dst_first + 1 == dst_second) { ++ // 64-bit ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ fmovd( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first])); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fmovd %s <-- %s\t# spill 18", ++ Matcher::regName[dst_first], ++ Matcher::regName[src_first]); ++ } ++#endif ++ } ++ size += 4; ++ } else { ++ // 32-bit ++ assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform"); ++ assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform"); ++ if (cbuf) { ++ MacroAssembler _masm(cbuf); ++ __ fmovs( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first])); ++#ifndef PRODUCT ++ } else { ++ if(!do_size){ ++ if (size != 0) st->print("\n\t"); ++ st->print("fmovs %s <-- %s\t# spill 19", ++ Matcher::regName[dst_first], ++ Matcher::regName[src_first]); ++ } ++#endif ++ } ++ size += 4; ++ } ++ return size; ++ } ++ } ++ ++ assert(0," foo "); ++ Unimplemented(); ++ return size; ++ ++} ++ ++#ifndef PRODUCT ++void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { ++ implementation( NULL, ra_, false, st ); ++} ++#endif ++ ++void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { ++ implementation( &cbuf, ra_, false, NULL ); ++} ++ ++uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { ++ return implementation( NULL, ra_, true, NULL 
); ++} ++ ++//============================================================================= ++# ++ ++#ifndef PRODUCT ++void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const { ++ st->print("INT3"); ++} ++#endif ++ ++void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const { ++ MacroAssembler _masm(&cbuf); ++ __ int3(); ++} ++ ++uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const { ++ return MachNode::size(ra_); ++} ++ ++ ++//============================================================================= ++#ifndef PRODUCT ++void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { ++ Compile *C = ra_->C; ++ int framesize = C->frame_size_in_bytes(); ++ ++ assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); ++ ++ st->print("ldl RA, SP, %d # Restore RA @ MachEpilogNode", framesize - wordSize); ++ st->cr(); st->print("\t"); ++ st->print("ldl FP, SP, %d # Restore FP @ MachEpilogNode", framesize - wordSize*2); ++ st->cr(); st->print("\t"); ++ st->print("add_simm16 SP, SP, %d # Rlease stack @ MachEpilogNode",framesize); ++ st->cr(); st->print("\t"); ++ ++ if( do_polling() && C->is_method_compilation() ) { ++ st->print("\t"); ++ st->print_cr("Poll Safepoint # MachEpilogNode"); ++ } ++} ++#endif ++ ++void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { ++ Compile *C = ra_->C; ++ MacroAssembler _masm(&cbuf); ++ int framesize = C->frame_size_in_bytes(); ++ ++ assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); ++ ++ __ ldl(RA, SP, framesize - wordSize); ++ __ ldl(FP, SP, framesize - wordSize * 2 ); ++ __ add_simm16(SP, SP, framesize); ++ ++ if( do_polling() && C->is_method_compilation() ) { ++ __ set64(AT, (long)os::get_polling_page()); ++ __ relocate(relocInfo::poll_return_type); ++ __ ldw(AT, AT, 0); ++ } ++} ++ ++uint MachEpilogNode::size(PhaseRegAlloc *ra_) const { ++ return MachNode::size(ra_); ++} ++ ++int MachEpilogNode::reloc() const { ++ 
return 0; ++} ++ ++const Pipeline * MachEpilogNode::pipeline() const { ++ return MachNode::pipeline_class(); ++} ++ ++int MachEpilogNode::safepoint_offset() const { return 0; } ++ ++//============================================================================= ++ ++#ifndef PRODUCT ++void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { ++ int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); ++ int reg = ra_->get_reg_first(this); ++ st->print("addl %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset); ++} ++#endif ++ ++ ++uint BoxLockNode::size(PhaseRegAlloc *ra_) const { ++ return 8; // TODO: Why use 8? ++} ++ ++void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { ++ MacroAssembler _masm(&cbuf); ++ int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); ++ int reg = ra_->get_encode(this); ++ ++ __ add_simm16(as_Register(reg), SP, offset); ++} ++ ++ ++//static int sizeof_FFree_Float_Stack_All = -1; ++ ++int MachCallRuntimeNode::ret_addr_offset() { ++ //li48 ++ //call ++ ++ if (SafePatch) { ++ assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()"); ++ } else { ++ assert(NativeCall::instruction_size == 20, "in MachCallRuntimeNode::ret_addr_offset()"); ++ } ++ return 4 * BytesPerInstWord + NativeCall::instruction_size; // don't consider setfpec1 ++} ++ ++ ++//============================================================================= ++#ifndef PRODUCT ++void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const { ++ st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count); ++} ++#endif ++ ++void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const { ++ MacroAssembler _masm(&cbuf); ++ int i = 0; ++ for(i = 0; i < _count; i++) ++ __ nop(); ++} ++ ++uint MachNopNode::size(PhaseRegAlloc *) const { ++ return 4 * _count; ++} ++ ++const Pipeline* MachNopNode::pipeline() const { ++ return MachNode::pipeline_class(); ++} ++ 
++//============================================================================= ++ ++//============================================================================= ++#ifndef PRODUCT ++void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { ++ st->print_cr("load_klass(T12, A1)"); ++ st->print_cr("\tbeq(T12, iCache, L)"); ++ st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)"); ++ st->print_cr("\tnop"); ++ st->print_cr(" L:"); ++} ++#endif ++ ++ ++void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { ++ MacroAssembler _masm(&cbuf); ++#ifdef ASSERT ++ //uint code_size = cbuf.code_size(); ++#endif ++ int ic_reg = Matcher::inline_cache_reg_encode(); ++ Label L; ++ Register receiver = A1; ++ Register iCache = as_Register(ic_reg); ++ __ load_klass(T12, receiver); ++ __ beq(T12, iCache, L); ++ ++ __ relocate(relocInfo::runtime_call_type); ++ __ patchable_jump((address)SharedRuntime::get_ic_miss_stub()); ++ ++ // WARNING these NOPs are critical so that verified entry point is properly ++ // 8 bytes aligned for patching by NativeJump::patch_verified_entry() ++ __ align(CodeEntryAlignment); ++ __ BIND(L); ++} ++ ++uint MachUEPNode::size(PhaseRegAlloc *ra_) const { ++ return MachNode::size(ra_); ++} ++ ++ ++ ++//============================================================================= ++ ++const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask(); ++ ++int Compile::ConstantTable::calculate_table_base_offset() const { ++ return 0; // absolute addressing, no offset ++} ++ ++bool MachConstantBaseNode::requires_postalloc_expand() const { return false; } ++void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, PhaseRegAlloc *ra_) { ++ ShouldNotReachHere(); ++} ++ ++void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { ++ Compile* C = ra_->C; ++ Compile::ConstantTable& constant_table = C->constant_table(); ++ MacroAssembler _masm(&cbuf); ++ ++ Register Rtoc = 
as_Register(ra_->get_encode(this)); ++ CodeSection* consts_section = __ code()->consts(); ++ int consts_size = consts_section->align_at_start(consts_section->size()); ++ assert(constant_table.size() == consts_size, "must be equal"); ++ ++ if (consts_section->size()) { ++ // Materialize the constant table base. ++ address baseaddr = consts_section->start() + -(constant_table.table_base_offset()); ++ // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr); ++ __ relocate(relocInfo::internal_word_type); ++ __ patchable_set48(Rtoc, (long)baseaddr); ++ } ++} ++ ++uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { ++ // patchable_set48 (4 insts) ++ return 4 * 4; ++} ++ ++#ifndef PRODUCT ++void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { ++ Register r = as_Register(ra_->get_encode(this)); ++ st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name()); ++} ++#endif ++ ++ ++//============================================================================= ++#ifndef PRODUCT ++void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { ++ Compile* C = ra_->C; ++ ++ int framesize = C->frame_size_in_bytes(); ++ int bangsize = C->bang_size_in_bytes(); ++ assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); ++ ++ // Calls to C2R adapters often do not accept exceptional returns. ++ // We require that their callers must bang for them. But be careful, because ++ // some VM calls (such as call site linkage) can use several kilobytes of ++ // stack. But the stack safety zone should account for that. ++ // See bugs 4446381, 4468289, 4497237. 
++ if (C->need_stack_bang(bangsize)) { ++ st->print_cr("# stack bang"); st->print("\t"); ++ } ++ st->print("add_simm16 SP, SP, -%d \t",framesize); ++ st->print("stl RA, %d(SP) @ MachPrologNode\n\t", framesize-wordSize); ++ st->print("stl FP, %d(SP) @ MachPrologNode\n\t", framesize-wordSize*2); ++ st->print("add_simm16 FP, SP, %d \n\t", framesize-wordSize*2); ++} ++#endif ++ ++ ++void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { ++ Compile* C = ra_->C; ++ MacroAssembler _masm(&cbuf); ++ ++ int framesize = C->frame_size_in_bytes(); ++ int bangsize = C->bang_size_in_bytes(); ++ ++ assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); ++ ++ if (C->need_stack_bang(bangsize)) { ++ __ generate_stack_overflow_check(bangsize); ++ } ++ ++ __ add_simm16(SP, SP, -framesize); ++ __ stl(RA, SP, framesize - wordSize); ++ __ stl(FP, SP, framesize - wordSize*2); ++ __ add_simm16(FP, SP, framesize - wordSize*2); ++ __ nop(); // Make enough room for patch_verified_entry() ++ __ nop(); ++ ++ C->set_frame_complete(cbuf.insts_size()); ++ if (C->has_mach_constant_base_node()) { ++ // NOTE: We set the table base offset here because users might be ++ // emitted before MachConstantBaseNode. ++ Compile::ConstantTable& constant_table = C->constant_table(); ++ constant_table.set_table_base_offset(constant_table.calculate_table_base_offset()); ++ } ++ ++} ++ ++ ++uint MachPrologNode::size(PhaseRegAlloc *ra_) const { ++ return MachNode::size(ra_); // too many variables; just compute it the hard way ++} ++ ++int MachPrologNode::reloc() const { ++ return 0; // a large enough number ++} ++ ++%} ++ ++//----------ENCODING BLOCK----------------------------------------------------- ++// This block specifies the encoding classes used by the compiler to output ++// byte streams. Encoding classes generate functions which are called by ++// Machine Instruction Nodes in order to generate the bit encoding of the ++// instruction. 
Operands specify their base encoding interface with the ++// interface keyword. There are currently supported four interfaces, ++// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an ++// operand to generate a function which returns its register number when ++// queried. CONST_INTER causes an operand to generate a function which ++// returns the value of the constant when queried. MEMORY_INTER causes an ++// operand to generate four functions which return the Base Register, the ++// Index Register, the Scale Value, and the Offset Value of the operand when ++// queried. COND_INTER causes an operand to generate six functions which ++// return the encoding code (ie - encoding bits for the instruction) ++// associated with each basic boolean condition for a conditional instruction. ++// Instructions specify two basic values for encoding. They use the ++// ins_encode keyword to specify their encoding class (which must be one of ++// the class names specified in the encoding block), and they use the ++// opcode keyword to specify, in order, their primary, secondary, and ++// tertiary opcode. Only the opcode sections which a particular instruction ++// needs for encoding need to be specified. 
++encode %{ ++ ++ //Load byte signed ++ enc_class load_B_enc (mRegI dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldb_signed(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldb_signed(as_Register(dst), AT, 0); ++ } ++ %} ++ ++ //Load byte unsigned ++ enc_class load_UB_enc (mRegI dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldbu(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldbu(as_Register(dst), AT, 0); ++ } ++ %} ++ ++ enc_class store_B_reg_enc (memory mem, mRegI src) %{ ++ MacroAssembler _masm(&cbuf); ++ int src = $src$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stb(as_Register(src), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stb(as_Register(src), AT, 0); ++ } ++ %} ++ ++ /* no necessary ++ enc_class store_B_reg_enc_sync (memory mem, mRegI src) %{ ++ MacroAssembler _masm(&cbuf); ++ int src = $src$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stb(as_Register(src), as_Register(base), disp); ++ } else { ++ Unimplemented(); ++ } ++ __ memb(); ++ %}*/ ++ ++ enc_class store_B_immI0_enc_sync (memory mem, immI0 src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ if(UseWmemb) ++ __ wmemb(); ++ else ++ __ memb(); ++ if( Assembler::is_simm16(disp) ) { ++ __ stb(R0, as_Register(base), disp); ++ } else { ++ Unimplemented(); ++ } ++ %} ++ ++ enc_class store_B_immI0 (memory mem, immI0 src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = 
$mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stb(R0, as_Register(base), disp); ++ } else { ++ Unimplemented(); ++ } ++ %} ++ ++ // Load Short (16bit signed) ++ enc_class load_S_enc (mRegI dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldh_signed(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldh_signed(as_Register(dst), AT, 0); ++ } ++ %} ++ ++ // Load Char (16bit unsigned) ++ enc_class load_C_enc (mRegI dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldhu(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldhu(as_Register(dst), AT, 0); ++ } ++ %} ++ ++ // Store Char (16bit unsigned) ++ enc_class store_C_reg_enc (memory mem, mRegI src) %{ ++ MacroAssembler _masm(&cbuf); ++ int src = $src$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ sth(as_Register(src), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ sth(as_Register(src), AT, 0); ++ } ++ %} ++ ++ enc_class store_C0_enc (memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ sth(R0, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ sth(R0, AT, 0); ++ } ++ %} ++ ++ enc_class load_I_enc (mRegI dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldw(as_Register(dst), as_Register(base), disp); ++ } 
else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldw(as_Register(dst), AT, 0); ++ } ++ ++ %} ++ ++ enc_class store_I_reg_enc (memory mem, mRegI src) %{ ++ MacroAssembler _masm(&cbuf); ++ int src = $src$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stw(as_Register(src), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stw(as_Register(src), AT, 0); ++ } ++ %} ++ ++ enc_class store_I_immI0_enc (memory mem, immI0 src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if (Assembler::is_simm16(disp) ) { ++ __ stw(R0, as_Register(base), disp); ++ } else { ++ Unimplemented(); ++ } ++ %} ++ ++ enc_class load_N_enc (mRegN dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ relocInfo::relocType disp_reloc = $mem->disp_reloc(); ++ assert(disp_reloc == relocInfo::none, "cannot have disp"); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldw_unsigned(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ set64(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldw_unsigned(as_Register(dst), AT, 0); ++ } ++ ++ %} ++ ++ ++ enc_class load_P_enc (mRegP dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ relocInfo::relocType disp_reloc = $mem->disp_reloc(); ++ assert(disp_reloc == relocInfo::none, "cannot have disp"); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldl(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ set64(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldl(as_Register(dst), AT, 0); ++ } ++ %} ++ ++ // Load acquire. 
++ // load_P_enc + sync ++ enc_class load_P_enc_ac (mRegP dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int dst = $dst$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ relocInfo::relocType disp_reloc = $mem->disp_reloc(); ++ assert(disp_reloc == relocInfo::none, "cannot have disp"); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldl(as_Register(dst), as_Register(base), disp); ++ } else { ++ __ set64(T9, disp); ++ __ addl(AT, as_Register(base), T9); ++ __ ldl(as_Register(dst), AT, 0); ++ } ++ __ memb(); ++ %} ++ ++ enc_class store_P_reg_enc (memory mem, mRegP src) %{ ++ MacroAssembler _masm(&cbuf); ++ int src = $src$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stl(as_Register(src), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stl(as_Register(src), AT, 0); ++ } ++ %} ++ ++ enc_class store_N_reg_enc (memory mem, mRegN src) %{ ++ MacroAssembler _masm(&cbuf); ++ int src = $src$$reg; ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stw(as_Register(src), as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stw(as_Register(src), AT, 0); ++ } ++ %} ++ ++ enc_class store_P_immP0_enc (memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stl(R0, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stl(R0, AT, 0); ++ } ++ %} ++ ++ enc_class storeImmN0_enc(memory mem, ImmN0 src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stw(R0, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stw(R0, AT, 0); ++ } ++ %} ++ ++ enc_class load_L_enc (mRegL dst, 
memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ Register dst_reg = as_Register($dst$$reg); ++ ++ // For implicit null check ++ __ ldb_signed(AT, as_Register(base), 0); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldl(dst_reg, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ ldl(dst_reg, AT, 0); ++ } ++ %} ++ ++ enc_class store_L_reg_enc (memory mem, mRegL src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ Register src_reg = as_Register($src$$reg); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stl(src_reg, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stl(src_reg, AT, 0); ++ } ++ %} ++ ++ enc_class store_L_immL0_enc (memory mem, immL0 src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ stl(R0, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ stl(R0, AT, 0); ++ } ++ %} ++ ++ enc_class load_F_enc (regF dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ flds(dst, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ flds(dst, AT, 0); ++ } ++ %} ++ ++ enc_class store_F_reg_enc (memory mem, regF src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ FloatRegister src = $src$$FloatRegister; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ fsts(src, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ fsts(src, AT, 0); ++ } ++ %} ++ ++ enc_class load_D_enc (regD dst, memory mem) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = 
$mem$$base; ++ int disp = $mem$$disp; ++ FloatRegister dst_reg = as_FloatRegister($dst$$reg); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ fldd(dst_reg, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ fldd(dst_reg, AT, 0); ++ } ++ %} ++ ++ enc_class store_D_reg_enc (memory mem, regD src) %{ ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ FloatRegister src_reg = as_FloatRegister($src$$reg); ++ if( Assembler::is_simm16(disp) ) { ++ __ fstd(src_reg, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ fstd(src_reg, AT, 0); ++ } ++ %} ++ ++ enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf ++ MacroAssembler _masm(&cbuf); ++ // This is the instruction starting address for relocation info. ++ __ block_comment("Java_To_Runtime"); ++ if(UseAddpi){ ++ intptr_t patch_off = 2 + (NativeCall::return_address_offset)/BytesPerInstWord; ++ __ addpi(patch_off, AT); ++ }else { ++ intptr_t patch_off = 3 * BytesPerInstWord + NativeCall::return_address_offset; ++ __ br(AT, 0); ++ __ addl(AT, AT, patch_off); ++ } ++ __ stl(AT, S2thread, in_bytes(JavaThread::last_Java_pc_offset())); ++ __ nop(); // need it by zhj. ++ ++ cbuf.set_insts_mark(); ++ __ relocate(relocInfo::runtime_call_type); ++ ++ __ patchable_call_setfpec1((address)$meth$$method); ++ %} ++ ++ enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL ++ // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine ++ // who we intended to call. 
++ MacroAssembler _masm(&cbuf); ++ cbuf.set_insts_mark(); ++ ++ if ( !_method ) { ++ __ relocate(relocInfo::runtime_call_type); ++ } else if(_optimized_virtual) { ++ __ relocate(relocInfo::opt_virtual_call_type); ++ } else { ++ __ relocate(relocInfo::static_call_type); ++ } ++ ++ __ patchable_call((address)($meth$$method)); ++ if( _method ) { // Emit stub for static call ++ emit_java_to_interp(cbuf); ++ } ++ %} ++ ++ ++ // ++ // [Ref: LIR_Assembler::ic_call() ] ++ // ++ enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL ++ MacroAssembler _masm(&cbuf); ++ __ block_comment("Java_Dynamic_Call"); ++ __ ic_call((address)$meth$$method); ++ %} ++ ++ ++ enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{ ++ Register flags = $cr$$Register; ++ Label L; ++ ++ MacroAssembler _masm(&cbuf); ++ ++ __ addl(flags, R0, R0); ++ __ beq(AT, L); ++ __ move(flags, 0xFFFFFFFF); ++ __ BIND(L); ++ %} ++ ++ enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{ ++ Register result = $result$$Register; ++ Register sub = $sub$$Register; ++ Register super = $super$$Register; ++ Register length = $tmp$$Register; ++ Register tmp = T12; ++ Label miss; ++ ++ ++ MacroAssembler _masm(&cbuf); ++ Label done; ++ __ check_klass_subtype_slow_path(sub, super, length, tmp, ++ NULL, &miss, ++ /*set_cond_codes:*/ true); ++ // Refer to X86_64's RDI ++ __ move(result, 0); ++ __ beq(R0, done); ++ ++ __ BIND(miss); ++ __ move(result, 1); ++ __ BIND(done); ++ %} ++ ++%} ++ ++ ++//---------SW64 FRAME-------------------------------------------------------------- ++// Definition of frame structure and management information. 
++// ++// S T A C K L A Y O U T Allocators stack-slot number ++// | (to get allocators register number ++// G Owned by | | v add SharedInfo::stack0) ++// r CALLER | | ++// o | +--------+ pad to even-align allocators stack-slot ++// w V | pad0 | numbers; owned by CALLER ++// t -----------+--------+----> Matcher::_in_arg_limit, unaligned ++// h ^ | in | 5 ++// | | args | 4 Holes in incoming args owned by SELF ++// | | old | | 3 ++// | | SP-+--------+----> Matcher::_old_SP, even aligned ++// v | | ret | 3 return address ++// Owned by +--------+ ++// Self | pad2 | 2 pad to align old SP ++// | +--------+ 1 ++// | | locks | 0 ++// | +--------+----> SharedInfo::stack0, even aligned ++// | | pad1 | 11 pad to align new SP ++// | +--------+ ++// | | | 10 ++// | | spills | 9 spills ++// V | | 8 (pad0 slot for callee) ++// -----------+--------+----> Matcher::_out_arg_limit, unaligned ++// ^ | out | 7 ++// | | args | 6 Holes in outgoing args owned by CALLEE ++// Owned by new | | ++// Callee SP-+--------+----> Matcher::_new_SP, even aligned ++// | | ++// ++// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is ++// known from SELF's arguments and the Java calling convention. ++// Region 6-7 is determined per call site. ++// Note 2: If the calling convention leaves holes in the incoming argument ++// area, those holes are owned by SELF. Holes in the outgoing area ++// are owned by the CALLEE. Holes should not be nessecary in the ++// incoming area, as the Java calling convention is completely under ++// the control of the AD file. Doubles can be sorted and packed to ++// avoid holes. Holes in the outgoing arguments may be nessecary for ++// varargs C calling conventions. ++// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is ++// even aligned with pad0 as needed. ++// Region 6 is even aligned. 
Region 6-7 is NOT even aligned; ++// region 6-11 is even aligned; it may be padded out more so that ++// the region from SP to FP meets the minimum stack alignment. ++// Note 4: For I2C adapters, the incoming FP may not meet the minimum stack ++// alignment. Region 11, pad1, may be dynamically extended so that ++// SP meets the minimum alignment. ++ ++ ++frame %{ ++ ++ stack_direction(TOWARDS_LOW); ++ ++ // These two registers define part of the calling convention ++ // between compiled code and the interpreter. ++ // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention ++ // for more information. ++ ++ inline_cache_reg(T1); // Inline Cache Register ++ interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter ++ ++ // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset] ++ cisc_spilling_operand_name(indOffset32); ++ ++ // Number of stack slots consumed by locking an object ++ // generate Compile::sync_stack_slots ++ sync_stack_slots(2); ++ ++ frame_pointer(SP); ++ ++ // Interpreter stores its frame pointer in a register which is ++ // stored to the stack by I2CAdaptors. ++ // I2CAdaptors convert from interpreted java to compiled java. ++ ++ interpreter_frame_pointer(FP); ++ ++ // generate Matcher::stack_alignment ++ stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*); ++ ++ // Number of stack slots between incoming argument block and the start of ++ // a new frame. The PROLOG must add this many slots to the stack. The ++ // EPILOG must remove this many slots. ++ in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp ++ ++ // Number of outgoing stack slots killed above the out_preserve_stack_slots ++ // for calls to C. Supports the var-args backing area for register parms. ++ varargs_C_out_slots_killed(0); ++ ++ // The after-PROLOG location of the return address. 
Location of ++ // return address specifies a type (REG or STACK) and a number ++ // representing the register number (i.e. - use a register name) or ++ // stack slot. ++ // Ret Addr is on stack in slot 0 if no locks or verification or alignment. ++ // Otherwise, it is above the locks and verification slot and alignment word ++ //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong)); ++ return_addr(REG RA); ++ ++ // Body of function which returns an integer array locating ++ // arguments either in registers or in stack slots. Passed an array ++ // of ideal registers called "sig" and a "length" count. Stack-slot ++ // offsets are based on outgoing arguments, i.e. a CALLER setting up ++ // arguments for a CALLEE. Incoming stack arguments are ++ // automatically biased by the preserve_stack_slots field above. ++ ++ ++ // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing) ++ // StartNode::calling_convention call this. ++ calling_convention %{ ++ SharedRuntime::java_calling_convention(sig_bt, regs, length, false); ++ %} ++ ++ ++ ++ ++ // Body of function which returns an integer array locating ++ // arguments either in registers or in stack slots. Passed an array ++ // of ideal registers called "sig" and a "length" count. Stack-slot ++ // offsets are based on outgoing arguments, i.e. a CALLER setting up ++ // arguments for a CALLEE. Incoming stack arguments are ++ // automatically biased by the preserve_stack_slots field above. ++ ++ ++ // SEE CallRuntimeNode::calling_convention for more information. ++ c_calling_convention %{ ++ (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length); ++ %} ++ ++ ++ // Location of C & interpreter return values ++ // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR. ++ // SEE Matcher::match. 
++ c_return_value %{ ++ assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); ++ /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */ ++ static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num }; ++ static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num }; ++ return OptoRegPair(hi[ideal_reg],lo[ideal_reg]); ++ %} ++ ++ // Location of return values ++ // register(s) contain(s) return value for Op_StartC2I and Op_Start. ++ // SEE Matcher::match. ++ ++ return_value %{ ++ assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); ++ /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */ ++ static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num }; ++ static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num}; ++ return OptoRegPair(hi[ideal_reg],lo[ideal_reg]); ++ %} ++ ++%} ++ ++//----------ATTRIBUTES--------------------------------------------------------- ++//----------Operand Attributes------------------------------------------------- ++op_attrib op_cost(0); // Required cost attribute ++ ++//----------Instruction Attributes--------------------------------------------- ++ins_attrib ins_cost(100); // Required cost attribute ++ins_attrib ins_size(32); // Required size attribute (in bits) ++ins_attrib ins_pc_relative(0); // Required PC Relative flag ++ins_attrib ins_short_branch(0); // Required flag: is this instruction a ++ // non-matching short branch variant of some ++ // long branch? ++ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2) ++ // specifies the alignment that some part of the instruction (not ++ // necessarily the start) requires. 
If > 1, a compute_padding() ++ // function must be provided for the instruction ++ ++//----------OPERANDS----------------------------------------------------------- ++// Operand definitions must precede instruction definitions for correct parsing ++// in the ADLC because operands constitute user defined types which are used in ++// instruction definitions. ++ ++// Vectors ++operand vecD() %{ ++ constraint(ALLOC_IN_RC(dbl_reg)); ++ match(VecD); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++// Flags register, used as output of compare instructions ++operand FlagsReg() %{ ++ constraint(ALLOC_IN_RC(sw64_flags)); ++ match(RegFlags); ++ ++ format %{ "AT" %} ++ interface(REG_INTER); ++%} ++ ++//----------Simple Operands---------------------------------------------------- ++//TODO: Should we need to define some more special immediate number ? ++// Immediate Operands ++// Integer Immediate ++operand immI() %{ ++ match(ConI); ++ ++ op_cost(20); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate 8-bit ++operand immL8() ++%{ ++ predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L); ++ match(ConL); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Constant for test vs zero ++operand immI0() %{ ++ predicate(n->get_int() == 0); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Constant for increment ++operand immI1() %{ ++ predicate(n->get_int() == 1); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Constant for decrement ++operand immI_M1() %{ ++ predicate(n->get_int() == -1); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_M1_255() %{ ++ predicate(-255 <= n->get_int() && (n->get_int() <= -1)); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_MaxI() %{ ++ predicate(n->get_int() == 2147483647); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ 
%} ++ interface(CONST_INTER); ++%} ++ ++// Valid scale values for addressing modes ++operand immI2() %{ ++ predicate(0 <= n->get_int() && (n->get_int() <= 3)); ++ match(ConI); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI16() %{ ++ predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767)); ++ match(ConI); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Constant for long shifts ++operand immI_32() %{ ++ predicate( n->get_int() == 32 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_64() %{ ++ predicate(n->get_int() == 64); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_0_31() %{ ++ predicate( n->get_int() >= 0 && n->get_int() <= 31 ); ++ match(ConI); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_0_63() %{ ++ predicate( n->get_int() >= 0 && n->get_int() <= 63 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Operand for non-negtive integer mask ++ ++operand immI16_sub() %{ ++ predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768)); ++ match(ConI); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_0_255() %{ ++ predicate( n->get_int() >= 0 && n->get_int() <= 255 ); ++ match(ConI); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_32_63() %{ ++ predicate(n->get_int() >= 32 && n->get_int() <= 63); ++ match(ConI); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_1() %{ ++ predicate( n->get_int() == 1 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_2() %{ ++ predicate( n->get_int() == 2 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_3() %{ ++ predicate( n->get_int() == 3 ); ++ match(ConI); ++ ++ op_cost(0); ++ format 
%{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_7() %{ ++ predicate( n->get_int() == 7 ); ++ match(ConI); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Immediates for special shifts (sign extend) ++ ++// Constants for increment ++operand immI_16() %{ ++ predicate( n->get_int() == 16 ); ++ match(ConI); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_24() %{ ++ predicate( n->get_int() == 24 ); ++ match(ConI); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_31() %{ ++ predicate( n->get_int() == 31 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_63() %{ ++ predicate( n->get_int() == 63 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Constant for byte-wide masking ++operand immI_255() %{ ++ predicate( n->get_int() == 255 ); ++ match(ConI); ++ ++ op_cost(0); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_65535() %{ ++ predicate( n->get_int() == 65535 ); ++ match(ConI); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_65536() %{ ++ predicate( n->get_int() == 65536 ); ++ match(ConI); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immI_M65536() %{ ++ predicate( n->get_int() == -65536 ); ++ match(ConI); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Pointer Immediate ++operand immP() %{ ++ match(ConP); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// NULL Pointer Immediate ++operand immP0() %{ ++ predicate( n->get_ptr() == 0 ); ++ match(ConP); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Pointer Immediate: 64-bit ++operand immP_set() %{ ++ match(ConP); ++ ++ op_cost(5); ++ // formats are generated automatically for constants and base registers ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Pointer Immediate: 64-bit ++operand immP_load() %{ 
++ predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3)); ++ match(ConP); ++ ++ op_cost(5); ++ // formats are generated automatically for constants and base registers ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Pointer Immediate: 64-bit ++operand immP_no_oop_cheap() %{ ++ predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3)); ++ match(ConP); ++ ++ op_cost(5); ++ // formats are generated automatically for constants and base registers ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Pointer for polling page ++operand immP_poll() %{ ++ predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); ++ match(ConP); ++ op_cost(5); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Pointer Immediate ++operand immN() %{ ++ match(ConN); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immNKlass() %{ ++ match(ConNKlass); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// NULL Pointer Immediate ++operand immN0() %{ ++ predicate(n->get_narrowcon() == 0); ++ match(ConN); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate ++operand immL() %{ ++ match(ConL); ++ ++ op_cost(20); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate zero ++operand immL0() %{ ++ predicate( n->get_long() == 0L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immL7() %{ ++ predicate( n->get_long() == 7L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immL_M1() %{ ++ predicate( n->get_long() == -1L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// bit 0..2 zero ++operand immL_M8() %{ ++ predicate( n->get_long() == -8L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// bit 2 zero 
++operand immL_M5() %{ ++ predicate( n->get_long() == -5L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// bit 1..2 zero ++operand immL_M7() %{ ++ predicate( n->get_long() == -7L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// bit 0..1 zero ++operand immL_M4() %{ ++ predicate( n->get_long() == -4L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// bit 3..6 zero ++operand immL_M121() %{ ++ predicate( n->get_long() == -121L ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long immediate from 0 to 127. ++// Used for a shorter form of long mul by 10. ++operand immL_127() %{ ++ predicate((0 <= n->get_long()) && (n->get_long() <= 127)); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immL_0_255() %{ ++ predicate( n->get_long() >= 0 && n->get_long() <= 255 ); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate: cheap (materialize in <= 3 instructions) ++operand immL_cheap() %{ ++ predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate: expensive (materialize in > 3 instructions) ++operand immL_expensive() %{ ++ predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3); ++ match(ConL); ++ op_cost(0); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immL16() %{ ++ predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767)); ++ match(ConL); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++operand immL16_sub() %{ ++ predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768)); ++ match(ConL); ++ ++ op_cost(10); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate: low 32-bit mask ++operand immL_32bits() %{ ++ 
predicate(n->get_long() == 0xFFFFFFFFL); ++ match(ConL); ++ op_cost(20); ++ ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Long Immediate 32-bit signed ++operand immL32() ++%{ ++ predicate(n->get_long() == (int) (n->get_long())); ++ match(ConL); ++ ++ op_cost(15); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++ ++//single-precision floating-point zero ++operand immF0() %{ ++ predicate(jint_cast(n->getf()) == 0); ++ match(ConF); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++//single-precision floating-point immediate ++operand immF() %{ ++ match(ConF); ++ ++ op_cost(20); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++//double-precision floating-point zero ++operand immD0() %{ ++ predicate(jlong_cast(n->getd()) == 0); ++ match(ConD); ++ ++ op_cost(5); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++//double-precision floating-point immediate ++operand immD() %{ ++ match(ConD); ++ ++ op_cost(20); ++ format %{ %} ++ interface(CONST_INTER); ++%} ++ ++// Register Operands ++// Integer Register ++operand mRegI() %{ ++ constraint(ALLOC_IN_RC(int_reg)); ++ match(RegI); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand no_Ax_mRegI() %{ ++ constraint(ALLOC_IN_RC(no_Ax_int_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand mS0RegI() %{ ++ constraint(ALLOC_IN_RC(s0_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "S0" %} ++ interface(REG_INTER); ++%} ++ ++operand mS1RegI() %{ ++ constraint(ALLOC_IN_RC(s1_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "S1" %} ++ interface(REG_INTER); ++%} ++ ++operand mS2RegI() %{ ++ constraint(ALLOC_IN_RC(s2_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "S2" %} ++ interface(REG_INTER); ++%} ++ ++operand mS3RegI() %{ ++ constraint(ALLOC_IN_RC(s3_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "S3" %} ++ interface(REG_INTER); ++%} ++ ++operand mS4RegI() %{ ++ constraint(ALLOC_IN_RC(s4_reg)); ++ 
match(RegI); ++ match(mRegI); ++ ++ format %{ "S4" %} ++ interface(REG_INTER); ++%} ++ ++operand mS5RegI() %{ ++ constraint(ALLOC_IN_RC(s5_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "S5" %} ++ interface(REG_INTER); ++%} ++ ++operand mT0RegI() %{ ++ constraint(ALLOC_IN_RC(t0_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T0" %} ++ interface(REG_INTER); ++%} ++ ++operand mT1RegI() %{ ++ constraint(ALLOC_IN_RC(t1_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T1" %} ++ interface(REG_INTER); ++%} ++ ++operand mT2RegI() %{ ++ constraint(ALLOC_IN_RC(t2_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T2" %} ++ interface(REG_INTER); ++%} ++ ++operand mT3RegI() %{ ++ constraint(ALLOC_IN_RC(t3_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T3" %} ++ interface(REG_INTER); ++%} ++ ++operand mT4RegI() %{ ++ constraint(ALLOC_IN_RC(t4_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T4" %} ++ interface(REG_INTER); ++%} ++ ++operand mT5RegI() %{ ++ constraint(ALLOC_IN_RC(t5_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T5" %} ++ interface(REG_INTER); ++%} ++ ++operand mT6RegI() %{ ++ constraint(ALLOC_IN_RC(t6_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T6" %} ++ interface(REG_INTER); ++%} ++ ++operand mT7RegI() %{ ++ constraint(ALLOC_IN_RC(t7_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T7" %} ++ interface(REG_INTER); ++%} ++ ++operand mT8RegI() %{ ++ constraint(ALLOC_IN_RC(t8_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T8" %} ++ interface(REG_INTER); ++%} ++ ++operand mT9RegI() %{ ++ constraint(ALLOC_IN_RC(t9_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T9" %} ++ interface(REG_INTER); ++%} ++ ++operand mT10RegI() %{ ++ constraint(ALLOC_IN_RC(t10_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T10" %} ++ interface(REG_INTER); ++%} ++ ++operand mT11RegI() %{ ++ constraint(ALLOC_IN_RC(t11_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T11" %} ++ 
interface(REG_INTER); ++%} ++ ++operand mT12RegI() %{ ++ constraint(ALLOC_IN_RC(t12_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "T12" %} ++ interface(REG_INTER); ++%} ++ ++operand mA0RegI() %{ ++ constraint(ALLOC_IN_RC(a0_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "A0" %} ++ interface(REG_INTER); ++%} ++ ++operand mA1RegI() %{ ++ constraint(ALLOC_IN_RC(a1_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "A1" %} ++ interface(REG_INTER); ++%} ++ ++operand mA2RegI() %{ ++ constraint(ALLOC_IN_RC(a2_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "A2" %} ++ interface(REG_INTER); ++%} ++ ++operand mA3RegI() %{ ++ constraint(ALLOC_IN_RC(a3_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "A3" %} ++ interface(REG_INTER); ++%} ++ ++operand mA4RegI() %{ ++ constraint(ALLOC_IN_RC(a4_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "A4" %} ++ interface(REG_INTER); ++%} ++ ++operand mA5RegI() %{ ++ constraint(ALLOC_IN_RC(a5_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "A5" %} ++ interface(REG_INTER); ++%} ++ ++operand mV0RegI() %{ ++ constraint(ALLOC_IN_RC(v0_reg)); ++ match(RegI); ++ match(mRegI); ++ ++ format %{ "V0" %} ++ interface(REG_INTER); ++%} ++ ++operand mRegN() %{ ++ constraint(ALLOC_IN_RC(int_reg)); ++ match(RegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t0_RegN() %{ ++ constraint(ALLOC_IN_RC(t0_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t1_RegN() %{ ++ constraint(ALLOC_IN_RC(t1_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t2_RegN() %{ ++ constraint(ALLOC_IN_RC(t2_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t3_RegN() %{ ++ constraint(ALLOC_IN_RC(t3_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t4_RegN() %{ ++ constraint(ALLOC_IN_RC(t4_reg)); ++ 
match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t5_RegN() %{ ++ constraint(ALLOC_IN_RC(t5_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t6_RegN() %{ ++ constraint(ALLOC_IN_RC(t6_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t7_RegN() %{ ++ constraint(ALLOC_IN_RC(t7_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t8_RegN() %{ ++ constraint(ALLOC_IN_RC(t8_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t9_RegN() %{ ++ constraint(ALLOC_IN_RC(t9_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t10_RegN() %{ ++ constraint(ALLOC_IN_RC(t10_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t11_RegN() %{ ++ constraint(ALLOC_IN_RC(t11_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t12_RegN() %{ ++ constraint(ALLOC_IN_RC(t12_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a0_RegN() %{ ++ constraint(ALLOC_IN_RC(a0_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a1_RegN() %{ ++ constraint(ALLOC_IN_RC(a1_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a2_RegN() %{ ++ constraint(ALLOC_IN_RC(a2_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a3_RegN() %{ ++ constraint(ALLOC_IN_RC(a3_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a4_RegN() %{ ++ constraint(ALLOC_IN_RC(a4_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a5_RegN() %{ ++ 
constraint(ALLOC_IN_RC(a5_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s0_RegN() %{ ++ constraint(ALLOC_IN_RC(s0_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s1_RegN() %{ ++ constraint(ALLOC_IN_RC(s1_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s2_RegN() %{ ++ constraint(ALLOC_IN_RC(s2_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s3_RegN() %{ ++ constraint(ALLOC_IN_RC(s3_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s4_RegN() %{ ++ constraint(ALLOC_IN_RC(s4_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s5_RegN() %{ ++ constraint(ALLOC_IN_RC(s5_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand v0_RegN() %{ ++ constraint(ALLOC_IN_RC(v0_reg)); ++ match(RegN); ++ match(mRegN); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++// Pointer Register ++operand mRegP() %{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ match(RegP); ++ match(a0_RegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand no_T11_mRegP() %{ ++ constraint(ALLOC_IN_RC(no_T11_p_reg)); ++ match(RegP); ++ match(mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s0_RegP() ++%{ ++ constraint(ALLOC_IN_RC(s0_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s1_RegP() ++%{ ++ constraint(ALLOC_IN_RC(s1_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s2_RegP() ++%{ ++ constraint(ALLOC_IN_RC(s2_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s3_RegP() ++%{ ++ 
constraint(ALLOC_IN_RC(s3_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s4_RegP() ++%{ ++ constraint(ALLOC_IN_RC(s4_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s5_RegP() ++%{ ++ constraint(ALLOC_IN_RC(s5_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t0_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t0_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t1_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t1_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t2_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t2_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t3_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t3_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t4_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t4_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t5_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t5_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t6_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t6_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t7_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t7_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t8_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t8_long_reg)); ++ match(RegP); ++ match(mRegP); ++ 
match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t9_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t9_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t10_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t10_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t11_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t11_long_reg)); ++ match(RegP); ++ match(mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t12_RegP() ++%{ ++ constraint(ALLOC_IN_RC(t12_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a0_RegP() ++%{ ++ constraint(ALLOC_IN_RC(a0_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a1_RegP() ++%{ ++ constraint(ALLOC_IN_RC(a1_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a2_RegP() ++%{ ++ constraint(ALLOC_IN_RC(a2_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a3_RegP() ++%{ ++ constraint(ALLOC_IN_RC(a3_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a4_RegP() ++%{ ++ constraint(ALLOC_IN_RC(a4_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++ ++operand a5_RegP() ++%{ ++ constraint(ALLOC_IN_RC(a5_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand v0_RegP() ++%{ ++ constraint(ALLOC_IN_RC(v0_long_reg)); ++ match(RegP); ++ match(mRegP); ++ match(no_T11_mRegP); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand 
mRegL() %{ ++ constraint(ALLOC_IN_RC(long_reg)); ++ match(RegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand mRegI2L(mRegI reg) %{ ++ match(ConvI2L reg); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand v0RegL() %{ ++ constraint(ALLOC_IN_RC(v0_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t0RegL() %{ ++ constraint(ALLOC_IN_RC(t0_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t1RegL() %{ ++ constraint(ALLOC_IN_RC(t1_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t2RegL() %{ ++ constraint(ALLOC_IN_RC(t2_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t3RegL() %{ ++ constraint(ALLOC_IN_RC(t3_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t4RegL() %{ ++ constraint(ALLOC_IN_RC(t4_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t5RegL() %{ ++ constraint(ALLOC_IN_RC(t5_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t6RegL() %{ ++ constraint(ALLOC_IN_RC(t6_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t7RegL() %{ ++ constraint(ALLOC_IN_RC(t7_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s0RegL() %{ ++ constraint(ALLOC_IN_RC(s0_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s1RegL() %{ ++ constraint(ALLOC_IN_RC(s1_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s2RegL() %{ ++ constraint(ALLOC_IN_RC(s2_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ 
++operand s3RegL() %{ ++ constraint(ALLOC_IN_RC(s3_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s4RegL() %{ ++ constraint(ALLOC_IN_RC(s4_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand s5RegL() %{ ++ constraint(ALLOC_IN_RC(s5_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a0RegL() %{ ++ constraint(ALLOC_IN_RC(a0_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ "A0" %} ++ interface(REG_INTER); ++%} ++ ++operand a1RegL() %{ ++ constraint(ALLOC_IN_RC(a1_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a2RegL() %{ ++ constraint(ALLOC_IN_RC(a2_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a3RegL() %{ ++ constraint(ALLOC_IN_RC(a3_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a4RegL() %{ ++ constraint(ALLOC_IN_RC(a4_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand a5RegL() %{ ++ constraint(ALLOC_IN_RC(a5_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand indOffset16(mRegP reg, immL16 off) ++%{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ match(AddP reg off); ++ ++ op_cost(10); ++ format %{ "[$reg + $off (16-bit)] @ indOffset16" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); /* NO_INDEX */ ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++operand t8RegL() %{ ++ constraint(ALLOC_IN_RC(t8_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t9RegL() %{ ++ constraint(ALLOC_IN_RC(t9_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t10RegL() %{ ++ constraint(ALLOC_IN_RC(t10_long_reg)); 
++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++operand t11RegL() %{ ++ constraint(ALLOC_IN_RC(t11_long_reg)); ++ match(RegL); ++ match(mRegL); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++// Floating register operands ++operand regF() %{ ++ constraint(ALLOC_IN_RC(flt_reg)); ++ match(RegF); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++//Double Precision Floating register operands ++operand regD() %{ ++ constraint(ALLOC_IN_RC(dbl_reg)); ++ match(RegD); ++ ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++//----------Memory Operands---------------------------------------------------- ++// Indirect Memory Operand ++operand indirect(mRegP reg) %{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ match(reg); ++ ++ format %{ "[$reg] @ indirect" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); /* NO_INDEX */ ++ scale(0x0); ++ disp(0x0); ++ %} ++%} ++ ++// Indirect Memory Plus Short Offset Operand ++operand indOffset8(mRegP reg, immL8 off) ++%{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ match(AddP reg off); ++ ++ op_cost(10); ++ format %{ "[$reg + $off (8-bit)] @ indOffset8" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); /* NO_INDEX */ ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++ ++// Indirect Memory Plus Long Offset Operand ++operand indOffset32(mRegP reg, immL32 off) %{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(20); ++ match(AddP reg off); ++ ++ format %{ "[$reg + $off (32-bit)] @ indOffset32" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); /* NO_INDEX */ ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++operand indirectNarrowKlass(mRegN reg) ++%{ ++ predicate(Universe::narrow_klass_shift() == 0); ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(10); ++ match(DecodeNKlass reg); ++ ++ format %{ "[$reg] @ indirectNarrowKlass" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp(0x0); ++ %} ++%} ++ ++operand indOffset8NarrowKlass(mRegN reg, immL8 off) ++%{ 
++ predicate(Universe::narrow_klass_shift() == 0); ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(10); ++ match(AddP (DecodeNKlass reg) off); ++ ++ format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++operand indOffset32NarrowKlass(mRegN reg, immL32 off) ++%{ ++ predicate(Universe::narrow_klass_shift() == 0); ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(10); ++ match(AddP (DecodeNKlass reg) off); ++ ++ format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++// Indirect Memory Operand ++operand indirectNarrow(mRegN reg) ++%{ ++ predicate(Universe::narrow_oop_shift() == 0); ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(10); ++ match(DecodeN reg); ++ ++ format %{ "[$reg] @ indirectNarrow" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp(0x0); ++ %} ++%} ++ ++operand indOffset16Narrow(mRegN reg, immL16 off) ++%{ ++ predicate(Universe::narrow_oop_shift() == 0); ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(10); ++ match(AddP (DecodeN reg) off); ++ ++ format %{ "[$reg + $off (16-bit)] @ indOffset16Narrow" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++// Indirect Memory Plus Short Offset Operand ++operand indOffset8Narrow(mRegN reg, immL8 off) ++%{ ++ predicate(Universe::narrow_oop_shift() == 0); ++ constraint(ALLOC_IN_RC(p_reg)); ++ op_cost(10); ++ match(AddP (DecodeN reg) off); ++ ++ format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++//----------Load Long Memory Operands------------------------------------------ ++// The load-long idiom will use it's address expression again after loading ++// the first word of the long. 
If the load-long destination overlaps with ++// registers used in the addressing expression, the 2nd half will be loaded ++// from a clobbered address. Fix this by requiring that load-long use ++// address registers that do not overlap with the load-long target. ++ ++// load-long support ++operand load_long_RegP() %{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ match(RegP); ++ match(mRegP); ++ op_cost(100); ++ format %{ %} ++ interface(REG_INTER); ++%} ++ ++// Indirect Memory Operand Long ++operand load_long_indirect(load_long_RegP reg) %{ ++ constraint(ALLOC_IN_RC(p_reg)); ++ match(reg); ++ ++ format %{ "[$reg]" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp(0x0); ++ %} ++%} ++ ++operand load_long_indOffset16(load_long_RegP reg, immL16 off) %{ ++ match(AddP reg off); ++ ++ format %{ "[$reg + $off(16-bit)]" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++// Indirect Memory Plus Long Offset Operand ++operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{ ++ match(AddP reg off); ++ ++ format %{ "[$reg + $off]" %} ++ interface(MEMORY_INTER) %{ ++ base($reg); ++ index(0x0); ++ scale(0x0); ++ disp($off); ++ %} ++%} ++ ++//----------Conditional Branch Operands---------------------------------------- ++// Comparison Op - This is the operation of the comparison, and is limited to ++// the following set of codes: ++// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) ++// ++// Other attributes of the comparison, such as unsignedness, are specified ++// by the comparison instruction that sets a condition code flags register. ++// That result is represented by a flags operand whose subtype is appropriate ++// to the unsignedness (etc.) of the comparison. 
++// ++// Later, the instruction which matches both the Comparison Op (a Bool) and ++// the flags (produced by the Cmp) specifies the coding of the comparison op ++// by matching a specific subtype of Bool operand below, such as cmpOpU. ++ ++// Comparision Code ++operand cmpOp() %{ ++ match(Bool); ++ ++ format %{ "" %} ++ interface(COND_INTER) %{ ++ equal(0x01); ++ not_equal(0x02); ++ greater(0x03); ++ greater_equal(0x04); ++ less(0x05); ++ less_equal(0x06); ++ overflow(0x7); ++ no_overflow(0x8); ++ %} ++%} ++ ++ ++// Comparision Code ++// Comparison Code, unsigned compare. Used by FP also, with ++// C2 (unordered) turned into GT or LT already. The other bits ++// C0 and C3 are turned into Carry & Zero flags. ++operand cmpOpU() %{ ++ match(Bool); ++ ++ format %{ "" %} ++ interface(COND_INTER) %{ ++ equal(0x01); ++ not_equal(0x02); ++ greater(0x03); ++ greater_equal(0x04); ++ less(0x05); ++ less_equal(0x06); ++ overflow(0x7); ++ no_overflow(0x8); ++ %} ++%} ++ ++ ++//----------Special Memory Operands-------------------------------------------- ++// Stack Slot Operand - This operand is used for loading and storing temporary ++// values on the stack where a match requires a value to ++// flow through memory. 
++operand stackSlotP(sRegP reg) %{ ++ constraint(ALLOC_IN_RC(stack_slots)); ++ // No match rule because this operand is only generated in matching ++ op_cost(50); ++ format %{ "[$reg]" %} ++ interface(MEMORY_INTER) %{ ++ base(0x1d); // SP ++ index(0x0); // No Index ++ scale(0x0); // No Scale ++ disp($reg); // Stack Offset ++ %} ++%} ++ ++operand stackSlotI(sRegI reg) %{ ++ constraint(ALLOC_IN_RC(stack_slots)); ++ // No match rule because this operand is only generated in matching ++ op_cost(50); ++ format %{ "[$reg]" %} ++ interface(MEMORY_INTER) %{ ++ base(0x1d); // SP ++ index(0x0); // No Index ++ scale(0x0); // No Scale ++ disp($reg); // Stack Offset ++ %} ++%} ++ ++operand stackSlotF(sRegF reg) %{ ++ constraint(ALLOC_IN_RC(stack_slots)); ++ // No match rule because this operand is only generated in matching ++ op_cost(50); ++ format %{ "[$reg]" %} ++ interface(MEMORY_INTER) %{ ++ base(0x1d); // SP ++ index(0x0); // No Index ++ scale(0x0); // No Scale ++ disp($reg); // Stack Offset ++ %} ++%} ++ ++operand stackSlotD(sRegD reg) %{ ++ constraint(ALLOC_IN_RC(stack_slots)); ++ // No match rule because this operand is only generated in matching ++ op_cost(50); ++ format %{ "[$reg]" %} ++ interface(MEMORY_INTER) %{ ++ base(0x1d); // SP ++ index(0x0); // No Index ++ scale(0x0); // No Scale ++ disp($reg); // Stack Offset ++ %} ++%} ++ ++operand stackSlotL(sRegL reg) %{ ++ constraint(ALLOC_IN_RC(stack_slots)); ++ // No match rule because this operand is only generated in matching ++ op_cost(50); ++ format %{ "[$reg]" %} ++ interface(MEMORY_INTER) %{ ++ base(0x1d); // SP ++ index(0x0); // No Index ++ scale(0x0); // No Scale ++ disp($reg); // Stack Offset ++ %} ++%} ++ ++ ++//------------------------OPERAND CLASSES-------------------------------------- ++//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset ); ++//opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, load_long_indirect, load_long_indOffset32, 
indOffset8Narrow); ++opclass memory( indirect, indirectNarrow, indOffset16, load_long_indirect, load_long_indOffset16, indOffset16Narrow); ++opclass mRegLorI2L(mRegI2L, mRegL); ++//----------PIPELINE----------------------------------------------------------- ++// Rules which define the behavior of the target architectures pipeline. ++ ++pipeline %{ ++ ++//----------ATTRIBUTES--------------------------------------------------------- ++attributes %{ ++ fixed_size_instructions; // Fixed size instructions ++ max_instructions_per_bundle = 1; // 1 instruction per bundle ++ max_bundles_per_cycle = 4; // Up to 4 bundles per cycle ++ bundle_unit_size=4; ++ instruction_unit_size = 4; // An instruction is 4 bytes long ++ instruction_fetch_unit_size = 16; // The processor fetches one line ++ instruction_fetch_units = 1; // of 16 bytes ++ ++ // List of nop instructions ++ nops( MachNop ); ++ %} ++ ++ //----------RESOURCES---------------------------------------------------------- ++ // Resources are the functional units available to the machine ++ ++ resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR); ++ ++ //----------PIPELINE DESCRIPTION----------------------------------------------- ++ // Pipeline Description specifies the stages in the machine's pipeline ++ ++ // IF: fetch ++ // ID: decode ++ // RD: read ++ // CA: caculate ++ // WB: write back ++ // CM: commit ++ ++ pipe_desc(IF, ID, RD, CA, WB, CM); ++ ++ ++ //----------PIPELINE CLASSES--------------------------------------------------- ++ // Pipeline Classes describe the stages in which input and output are ++ // referenced by the hardware pipeline. 
++ ++ //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2 ++ pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{ ++ single_instruction; ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write)+1; ++ DECODE : ID; ++ ALU : CA; ++ %} ++ ++ //No.19 Integer mult operation : dst <-- reg1 mult reg2 ++ pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{ ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write)+5; ++ DECODE : ID; ++ ALU2 : CA; ++ %} ++ ++ pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{ ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write)+10; ++ DECODE : ID; ++ ALU2 : CA; ++ %} ++ ++ pipe_class ialu_mult_imm(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ src1 : RD(read); ++ dst : WB(write)+5; ++ DECODE : ID; ++ ALU2 : CA; ++ %} ++ ++ pipe_class mulL_reg_imm(mRegL dst, mRegL src1, immL_0_255 src2) %{ ++ src1 : RD(read); ++ dst : WB(write)+10; ++ DECODE : ID; ++ ALU2 : CA; ++ %} ++ ++ //No.19 Integer div operation : dst <-- reg1 div reg2 ++ pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{ ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write)+10; ++ DECODE : ID; ++ ALU2 : CA; ++ %} ++ ++ //No.19 Integer mod operation : dst <-- reg1 mod reg2 ++ pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{ ++ instruction_count(2); ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write)+10; ++ DECODE : ID; ++ ALU2 : CA; ++ %} ++ ++ //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2 ++ pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{ ++ instruction_count(2); ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ ALU : CA; ++ %} ++ ++ //No.18 Long ALU reg-imm operation : dst <-- reg1 op immL_0_255 ++ pipe_class ialu_regL_imm(mRegL dst, mRegL src) %{ ++ instruction_count(2); ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ ALU : CA; ++ %} ++ ++ //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16 ++ pipe_class ialu_regL_imm16(mRegL dst, mRegL 
src) %{ ++ instruction_count(2); ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ ALU : CA; ++ %} ++ ++ //no.16 load Long from memory : ++ pipe_class ialu_loadL(mRegL dst, memory mem) %{ ++ instruction_count(2); ++ mem : RD(read); ++ dst : WB(write)+5; ++ DECODE : ID; ++ MEM : RD; ++ %} ++ ++ //No.17 Store Long to Memory : ++ pipe_class ialu_storeL(mRegL src, memory mem) %{ ++ instruction_count(2); ++ mem : RD(read); ++ src : RD(read); ++ DECODE : ID; ++ MEM : RD; ++ %} ++ ++ //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16 ++ pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{ ++ single_instruction; ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ ALU : CA; ++ %} ++ ++ //No.3 Integer move operation : dst <-- reg ++ pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{ ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ ALU : CA; ++ %} ++ ++ //No.4 No instructions : do nothing ++ pipe_class empty( ) %{ ++ instruction_count(0); ++ %} ++ ++ //No.5 UnConditional branch : ++ pipe_class pipe_jump( label labl ) %{ ++ multiple_bundles; ++ DECODE : ID; ++ BR : RD; ++ %} ++ ++ //No.6 ALU Conditional branch : ++ pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{ ++ multiple_bundles; ++ src1 : RD(read); ++ src2 : RD(read); ++ DECODE : ID; ++ BR : RD; ++ %} ++ ++ //no.7 load integer from memory : ++ pipe_class ialu_loadI(mRegI dst, memory mem) %{ ++ mem : RD(read); ++ dst : WB(write)+3; ++ DECODE : ID; ++ MEM : RD; ++ %} ++ ++ //No.8 Store Integer to Memory : ++ pipe_class ialu_storeI(mRegI src, memory mem) %{ ++ mem : RD(read); ++ src : RD(read); ++ DECODE : ID; ++ MEM : RD; ++ %} ++ ++ ++ //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2 ++ pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{ ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ FPU : CA; ++ %} ++ ++ //No.22 Floating div operation : dst <-- reg1 div reg2 ++ pipe_class fpu_div(regF dst, regF src1, regF src2) %{ ++ 
src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ FPU2 : CA; ++ %} ++ ++ pipe_class fcvt_I2D(regD dst, mRegI src) %{ ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ FPU1 : CA; ++ %} ++ ++ pipe_class fcvt_D2I(mRegI dst, regD src) %{ ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ FPU1 : CA; ++ %} ++ ++ ++ //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2 ++ pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{ ++ multiple_bundles; ++ src1 : RD(read); ++ src2 : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ FPU2 : CA; ++ %} ++ ++ //No.11 Load Floating from Memory : ++ pipe_class fpu_loadF(regF dst, memory mem) %{ ++ instruction_count(1); ++ mem : RD(read); ++ dst : WB(write)+3; ++ DECODE : ID; ++ MEM : RD; ++ %} ++ ++ //No.12 Store Floating to Memory : ++ pipe_class fpu_storeF(regF src, memory mem) %{ ++ instruction_count(1); ++ mem : RD(read); ++ src : RD(read); ++ DECODE : ID; ++ MEM : RD; ++ %} ++ ++ //No.13 FPU Conditional branch : ++ pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{ ++ multiple_bundles; ++ src1 : RD(read); ++ src2 : RD(read); ++ DECODE : ID; ++ BR : RD; ++ %} ++ ++//No.14 Floating FPU reg operation : dst <-- op reg ++ pipe_class fpu1_regF(regF dst, regF src) %{ ++ src : RD(read); ++ dst : WB(write); ++ DECODE : ID; ++ FPU : CA; ++ %} ++ ++ pipe_class long_memory_op() %{ ++ instruction_count(10); multiple_bundles; force_serialization; ++ fixed_latency(30); ++ %} ++ ++ pipe_class simple_call() %{ ++ instruction_count(10); multiple_bundles; force_serialization; ++ fixed_latency(200); ++ BR : RD; ++ %} ++ ++ pipe_class call() %{ ++ instruction_count(10); multiple_bundles; force_serialization; ++ fixed_latency(200); ++ %} ++ ++ ++ //No.9 Piple slow : for multi-instructions ++ pipe_class pipe_slow( ) %{ ++ instruction_count(20); ++ force_serialization; ++ multiple_bundles; ++ fixed_latency(50); ++ %} ++ ++%} ++ ++ ++ 
++//----------INSTRUCTIONS------------------------------------------------------- ++// ++// match -- States which machine-independent subtree may be replaced ++// by this instruction. ++// ins_cost -- The estimated cost of this instruction is used by instruction ++// selection to identify a minimum cost tree of machine ++// instructions that matches a tree of machine-independent ++// instructions. ++// format -- A string providing the disassembly for this instruction. ++// The value of an instruction's operand may be inserted ++// by referring to it with a '$' prefix. ++// opcode -- Three instruction opcodes may be provided. These are referred ++// to within an encode class as $primary, $secondary, and $tertiary ++// respectively. The primary opcode is commonly used to ++// indicate the type of machine instruction, while secondary ++// and tertiary are often used for prefix options or addressing ++// modes. ++// ins_encode -- A list of encode classes with parameters. The encode class ++// name must have been defined in an 'enc_class' specification ++// in the encode section of the architecture description. 
++ ++instruct s4AddLp(mRegP dst, mRegI index, immI_2 dis,mRegP base) %{ ++ match(Set dst (AddP base (LShiftL (ConvI2L index) dis))); ++ ins_cost(10); ++ format %{ " s4addl $index,$base,$dst @ s4AddLp " %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register op1 = as_Register($index$$reg); ++ Register op2 = as_Register($base$$reg); ++ __ s4addl(dst, op1, op2); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct s8AddLp(mRegP dst, mRegI index, immI_3 scale,mRegP base) %{ ++ match(Set dst (AddP base (LShiftL (ConvI2L index) scale))); ++ ins_cost(10); ++ format %{ " s8addl $index,$base,$dst @ s8AddLp " %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register op1 = as_Register($index$$reg); ++ Register op2 = as_Register($base$$reg); ++ __ s8addl(dst, op1, op2); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct s4AddWp(mRegI dst, mRegI index, immI_2 scale, mRegI base) %{ ++ match(Set dst (AddI base (LShiftI index scale))); ++ ins_cost(10); ++ format %{ " s4addw $index,$base,$dst @ s4AddWp " %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register op1 = as_Register($index$$reg); ++ Register op2 = as_Register($base$$reg); ++ __ s4addw(dst, op1, op2); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct s8AddWp(mRegI dst, mRegI index, immI_3 scale, mRegI base) %{ ++ match(Set dst (AddI base (LShiftI index scale))); ++ ins_cost(10); ++ format %{ " s8addw $index,$base,$dst @ s8AddWp " %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register op1 = as_Register($index$$reg); ++ Register op2 = as_Register($base$$reg); ++ __ s8addw(dst, op1, op2); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++ ++// Load Integer ++instruct loadI(mRegI dst, memory mem) %{ ++ match(Set dst (LoadI mem)); ++ ++ ins_cost(125); ++ format %{ "ldw $dst, $mem #@loadI" %} ++ ins_encode (load_I_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct loadI_convI2L(mRegL dst, memory mem) %{ ++ match(Set dst (ConvI2L (LoadI mem))); ++ ++ ins_cost(125); ++ 
format %{ "ldw $dst, $mem #@loadI_convI2L" %} ++ ins_encode (load_I_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load Integer (32 bit signed) to Byte (8 bit signed) ++instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{ ++ match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); ++ ++ ins_cost(125); ++ format %{ "ldb_signed $dst, $mem\t# int -> byte #@loadI2B" %} ++ ins_encode(load_B_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned) ++instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{ ++ match(Set dst (AndI (LoadI mem) mask)); ++ ++ ins_cost(125); ++ format %{ "ldbu $dst, $mem\t# int -> ubyte #@loadI2UB" %} ++ ins_encode(load_UB_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++// Load Integer (32 bit signed) to Short (16 bit signed) ++instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{ ++ match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); ++ ++ ins_cost(125); ++ format %{ "ldh $dst, $mem\t# int -> short #@loadI2S" %} ++ ins_encode(load_S_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned) ++instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{ ++ match(Set dst (AndI (LoadI mem) mask)); ++ ++ ins_cost(125); ++ format %{ "ldhu $dst, $mem\t# int -> ushort/char #@loadI2US" %} ++ ins_encode(load_C_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++// Load Long. 
++instruct loadL(mRegL dst, memory mem) %{ ++// predicate(!((LoadLNode*)n)->require_atomic_access()); ++ match(Set dst (LoadL mem)); ++ ++ ins_cost(250); ++ format %{ "ldl $dst, $mem #@loadL" %} ++ ins_encode %{ ++ if (UseGetLongIntrinsic) { ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ Register dst_reg = as_Register($dst$$reg); ++ Register tem1 = AT; ++ Register tem2 = GP; ++ ++ // For implicit null check ++ __ ldbu(tem1, as_Register(base), disp); ++ ++ __ li32(tem2, disp); ++ __ addl(tem1, as_Register(base), tem2); ++ __ ldl_u(tem2, tem1, 0); // load long whether the data is aligned or not ++ __ ldl_u(dst_reg, tem1, 7); ++ __ extll(tem2, tem2, tem1); ++ __ exthl(dst_reg, dst_reg, tem1); ++ __ or_ins(dst_reg, tem2, dst_reg); ++ } else { ++ MacroAssembler _masm(&cbuf); ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ Register dst_reg = as_Register($dst$$reg); ++ ++ // For implicit null check ++ __ ldbu(AT, as_Register(base), disp); ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ ldl(dst_reg, as_Register(base), disp); ++ } else { ++ Unimplemented(); ++ } ++ } ++ %} ++ ins_pipe( ialu_loadL ); ++%} ++ ++// Load Long - UNaligned ++instruct loadL_unaligned(mRegL dst, memory mem) %{ ++ match(Set dst (LoadL_unaligned mem)); ++ ++ ins_cost(450); ++ format %{ "ldl $dst, $mem #@loadL_unaligned\n\t" %} ++ ins_encode(load_L_enc(dst, mem)); ++ ins_pipe( ialu_loadL ); ++%} ++ ++// Store Long ++instruct storeL_reg(memory mem, mRegL src) %{ ++ match(Set mem (StoreL mem src)); ++ ++ ins_cost(200); ++ format %{ "stl $mem, $src #@storeL_reg\n" %} ++ ins_encode(store_L_reg_enc(mem, src)); ++ ins_pipe( ialu_storeL ); ++%} ++ ++instruct storeL_immL0(memory mem, immL0 zero) %{ ++ match(Set mem (StoreL mem zero)); ++ ++ ins_cost(180); ++ format %{ "stl zero, $mem #@storeL_immL0" %} ++ ins_encode(store_L_immL0_enc(mem, zero)); ++ ins_pipe( ialu_storeL ); ++%} ++ ++// Load Compressed Pointer ++instruct loadN(mRegN dst, memory mem) ++%{ ++ 
match(Set dst (LoadN mem)); ++ ++ ins_cost(125); // XXX ++ format %{ "ldwu $dst, $mem\t# compressed ptr @ loadN" %} ++ ins_encode (load_N_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); // XXX ++%} ++ ++instruct loadN2P(mRegP dst, memory mem) ++%{ ++ match(Set dst (DecodeN (LoadN mem))); ++ predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0); ++ ++ ins_cost(125); // XXX ++ format %{ "ldwu $dst, $mem\t# @ loadN2P" %} ++ ins_encode (load_N_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); // XXX ++%} ++ ++// Load Pointer ++instruct loadP(mRegP dst, memory mem) %{ ++ match(Set dst (LoadP mem)); ++ ++ ins_cost(125); ++ format %{ "ldl $dst, $mem #@loadP" %} ++ ins_encode (load_P_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load Klass Pointer ++instruct loadKlass(mRegP dst, memory mem) %{ ++ match(Set dst (LoadKlass mem)); ++ ++ ins_cost(125); ++ format %{ "MOV $dst,$mem @ loadKlass" %} ++ ins_encode (load_P_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load narrow Klass Pointer ++instruct loadNKlass(mRegN dst, memory mem) ++%{ ++ match(Set dst (LoadNKlass mem)); ++ ++ ins_cost(125); // XXX ++ format %{ "ldwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %} ++ ins_encode (load_N_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); // XXX ++%} ++ ++instruct loadN2PKlass(mRegP dst, memory mem) ++%{ ++ match(Set dst (DecodeNKlass (LoadNKlass mem))); ++ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0); ++ ++ ins_cost(125); // XXX ++ format %{ "ldwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %} ++ ins_encode (load_N_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); // XXX ++%} ++ ++// Load Constant ++instruct loadConI(mRegI dst, immI src) %{ ++ match(Set dst src); ++ ++ ins_cost(150); ++ format %{ "mov $dst, $src #@loadConI" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ int value = $src$$constant; ++ __ move(dst, value); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++ ++instruct 
loadConL_set64(mRegL dst, immL src) %{ ++ match(Set dst src); ++ ins_cost(120); ++ format %{ "li $dst, $src @ loadConL_set64" %} ++ ins_encode %{ ++ __ set64($dst$$Register, $src$$constant); ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++ ++instruct loadConL16(mRegL dst, immL16 src) %{ ++ match(Set dst src); ++ ins_cost(105); ++ format %{ "mov $dst, $src #@loadConL16" %} ++ ins_encode %{ ++ Register dst_reg = as_Register($dst$$reg); ++ int value = $src$$constant; ++ __ add_simm16(dst_reg, R0, value); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++instruct loadConL0(mRegL dst, immL0 src) %{ ++ match(Set dst src); ++ ins_cost(100); ++ format %{ "mov $dst, zero #@loadConL0" %} ++ ins_encode %{ ++ Register dst_reg = as_Register($dst$$reg); ++ __ addl(dst_reg, R0, R0); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Load Range ++instruct loadRange(mRegI dst, memory mem) %{ ++ match(Set dst (LoadRange mem)); ++ ++ ins_cost(125); ++ format %{ "MOV $dst,$mem @ loadRange" %} ++ ins_encode(load_I_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++ ++instruct storeP(memory mem, mRegP src ) %{ ++ match(Set mem (StoreP mem src)); ++ ++ ins_cost(125); ++ format %{ "stl $src, $mem #@storeP" %} ++ ins_encode(store_P_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Store NULL Pointer, mark word, or other simple pointer constant. 
++instruct storeImmP0(memory mem, immP0 zero) %{ ++ match(Set mem (StoreP mem zero)); ++ ++ ins_cost(125); ++ format %{ "mov $mem, $zero #@storeImmP0" %} ++ ins_encode(store_P_immP0_enc(mem)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Store Compressed Pointer ++instruct storeN(memory mem, mRegN src) ++%{ ++ match(Set mem (StoreN mem src)); ++ ++ ins_cost(125); // XXX ++ format %{ "stw $mem, $src\t# compressed ptr @ storeN" %} ++ ins_encode(store_N_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeP2N(memory mem, mRegP src) ++%{ ++ match(Set mem (StoreN mem (EncodeP src))); ++ predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0); ++ ++ ins_cost(125); // XXX ++ format %{ "stw $mem, $src\t# @ storeP2N" %} ++ ins_encode(store_N_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeNKlass(memory mem, mRegN src) ++%{ ++ match(Set mem (StoreNKlass mem src)); ++ ++ ins_cost(125); // XXX ++ format %{ "stw $mem, $src\t# compressed klass ptr @ storeNKlass" %} ++ ins_encode(store_N_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeP2NKlass(memory mem, mRegP src) ++%{ ++ match(Set mem (StoreNKlass mem (EncodePKlass src))); ++ predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0); ++ ++ ins_cost(125); // XXX ++ format %{ "stw $mem, $src\t# @ storeP2NKlass" %} ++ ins_encode(store_N_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeImmN0(memory mem, immN0 zero) ++%{ ++ match(Set mem (StoreN mem zero)); ++ ++ ins_cost(125); // XXX ++ format %{ "storeN0 zero, $mem\t# compressed ptr" %} ++ ins_encode(storeImmN0_enc(mem, zero)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Store Byte ++instruct storeB(memory mem, mRegI src) %{ ++ match(Set mem (StoreB mem src)); ++ ++ ins_cost(125); ++ format %{ "stb $src, $mem #@storeB" %} ++ ins_encode(store_B_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeB_convL2I(memory mem, mRegL 
src) %{ ++ match(Set mem (StoreB mem (ConvL2I src))); ++ ++ ins_cost(125); ++ format %{ "stb $src, $mem #@storeB_convL2I" %} ++ ins_encode(store_B_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Load Byte (8bit signed) ++instruct loadB(mRegI dst, memory mem) %{ ++ match(Set dst (LoadB mem)); ++ ++ ins_cost(125); ++ format %{ "ldbu $dst, $mem #@loadB" %} ++ ins_encode(load_B_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct loadB_convI2L(mRegL dst, memory mem) %{ ++ match(Set dst (ConvI2L (LoadB mem))); ++ ++ ins_cost(125); ++ format %{ "ldb_signed $dst, $mem #@loadB_convI2L" %} ++ ins_encode(load_B_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load Byte (8bit UNsigned) ++instruct loadUB(mRegI dst, memory mem) %{ ++ match(Set dst (LoadUB mem)); ++ ++ ins_cost(125); ++ format %{ "lbu $dst, $mem #@loadUB" %} ++ ins_encode(load_UB_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct loadUB_convI2L(mRegL dst, memory mem) %{ ++ match(Set dst (ConvI2L (LoadUB mem))); ++ ++ ins_cost(125); ++ format %{ "ldbu $dst, $mem #@loadUB_convI2L" %} ++ ins_encode(load_UB_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load Short (16bit signed) ++instruct loadS(mRegI dst, memory mem) %{ ++ match(Set dst (LoadS mem)); ++ ++ ins_cost(125); ++ format %{ "ldh $dst, $mem #@loadS" %} ++ ins_encode(load_S_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load Short (16 bit signed) to Byte (8 bit signed) ++instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{ ++ match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour)); ++ ++ ins_cost(125); ++ format %{ "ldb_signed $dst, $mem\t# short -> byte #@loadS2B" %} ++ ins_encode(load_B_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++instruct loadS_convI2L(mRegL dst, memory mem) %{ ++ match(Set dst (ConvI2L (LoadS mem))); ++ ++ ins_cost(125); ++ format %{ "ldh $dst, $mem #@loadS_convI2L" %} ++ ins_encode(load_S_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Store 
Integer Immediate 0 ++instruct storeImmI0(memory mem, immI0 src) %{ ++ match(Set mem (StoreI mem src)); ++ ++ ins_cost(150); ++ format %{ "mov $mem, $src #@storeImmI0" %} ++ ins_encode(store_I_immI0_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Store Integer ++instruct storeI(memory mem, mRegI src) %{ ++ match(Set mem (StoreI mem src)); ++ ++ ins_cost(125); ++ format %{ "stw $mem, $src #@storeI" %} ++ ins_encode(store_I_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeI_convL2I(memory mem, mRegL src) %{ ++ match(Set mem (StoreI mem (ConvL2I src))); ++ ++ ins_cost(125); ++ format %{ "stw $mem, $src #@storeI_convL2I" %} ++ ins_encode(store_I_reg_enc(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Load Float ++instruct loadF(regF dst, memory mem) %{ ++ match(Set dst (LoadF mem)); ++ ++ ins_cost(150); ++ format %{ "loadF $dst, $mem #@loadF" %} ++ ins_encode(load_F_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct loadConP_general(mRegP dst, immP src) %{ ++ match(Set dst src); ++ ++ ins_cost(120); ++ format %{ "li $dst, $src #@loadConP_general" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ long* value = (long*)$src$$constant; ++ ++ if($src->constant_reloc() == relocInfo::metadata_type){ ++ int klass_index = __ oop_recorder()->find_index((Klass*)value); ++ RelocationHolder rspec = metadata_Relocation::spec(klass_index); ++ ++ __ relocate(rspec); ++ __ patchable_set48(dst, (long)value); ++ }else if($src->constant_reloc() == relocInfo::oop_type){ ++ int oop_index = __ oop_recorder()->find_index((jobject)value); ++ RelocationHolder rspec = oop_Relocation::spec(oop_index); ++ ++ __ relocate(rspec); ++ __ patchable_set48(dst, (long)value); ++ } else if ($src->constant_reloc() == relocInfo::none) { ++ __ set64(dst, (long)value); ++ } ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++ ++instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{ ++ match(Set dst src); ++ ++ ins_cost(80); ++ format %{ "li 
$dst, $src @ loadConP_no_oop_cheap" %} ++ ++ ins_encode %{ ++ __ set64($dst$$Register, $src$$constant); ++ %} ++ ++ ins_pipe(ialu_regI_regI); ++%} ++ ++ ++instruct loadConP_poll(mRegP dst, immP_poll src) %{ ++ match(Set dst src); ++ ++ ins_cost(50); ++ format %{ "li $dst, $src #@loadConP_poll" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ intptr_t value = (intptr_t)$src$$constant; ++ ++ __ set64(dst, (jlong)value); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct loadConP0(mRegP dst, immP0 src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(50); ++ format %{ "mov $dst, R0\t# ptr" %} ++ ins_encode %{ ++ Register dst_reg = $dst$$Register; ++ __ addl(dst_reg, R0, R0); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct loadConN0(mRegN dst, immN0 src) %{ ++ match(Set dst src); ++ format %{ "move $dst, R0\t# compressed NULL ptr" %} ++ ins_encode %{ ++ __ move($dst$$Register, R0); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct loadConN(mRegN dst, immN src) %{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "li $dst, $src\t# compressed ptr @ loadConN" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ __ set_narrow_oop(dst, (jobject)$src$$constant); ++ %} ++ ins_pipe( ialu_regI_regI ); // XXX ++%} ++ ++instruct loadConNKlass(mRegN dst, immNKlass src) %{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ __ set_narrow_klass(dst, (Klass*)$src$$constant); ++ %} ++ ins_pipe( ialu_regI_regI ); // XXX ++%} ++ ++ ++// Tail Call; Jump from runtime stub to Java code. ++// Also known as an 'interprocedural jump'. ++// Target of jump will eventually return to caller. ++// TailJump below removes the return address. 
++instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{ ++ match(TailCall jump_target method_oop ); ++ ins_cost(300); ++ format %{ "JMP $jump_target \t# @TailCalljmpInd" %} ++ ++ ins_encode %{ ++ Register target = $jump_target$$Register; ++ Register oop = $method_oop$$Register; ++ ++ // RA will be used in generate_forward_exception() ++ __ push(RA); ++ ++ __ move(S3, oop); ++ __ jmp(target); ++ %} ++ ++ ins_pipe( pipe_jump ); ++%} ++ ++// Create exception oop: created by stack-crawling runtime code. ++// Created exception is now available to this handler, and is setup ++// just prior to jumping to this handler. No code emitted. ++instruct CreateException( a0_RegP ex_oop ) ++%{ ++ match(Set ex_oop (CreateEx)); ++ ++ // use the following format syntax ++ format %{ "# exception oop is in A0; no code emitted @CreateException" %} ++ ins_encode %{ ++ // SW64 leaves this function empty ++ __ block_comment("CreateException is empty in SW64"); ++ %} ++ ins_pipe( empty ); ++%} ++ ++// The exception oop will come in the first argument position. ++// Then JUMP (not call) to the rethrow stub code. 
++instruct RethrowException() ++%{ ++ match(Rethrow); ++ ++ // use the following format syntax ++ format %{ "JMP rethrow_stub #@RethrowException" %} ++ ins_encode %{ ++ __ block_comment("@ RethrowException"); ++ ++ cbuf.set_insts_mark(); ++ cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec()); ++ ++ // call OptoRuntime::rethrow_stub to get the exception handler in parent method ++ __ patchable_jump((address)OptoRuntime::rethrow_stub()); ++ %} ++ ins_pipe( pipe_jump ); ++%} ++ ++//SW64:OKOK: ++instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{ ++ match(If cmp (CmpP op1 zero)); ++ effect(USE labl); ++ ++ ins_cost(180); ++ format %{ "b$cmp $op1, R0, $labl #@branchConP_zero_short" %} ++ ++ ins_encode %{ ++ Register op1 = $op1$$Register; ++ Register op2 = R0; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConN2P_zero_short(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{ ++ match(If cmp (CmpP (DecodeN op1) zero)); ++ predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0); ++ effect(USE labl); ++ ++ ins_cost(180); ++ format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero_short" %} ++ ++ ins_encode %{ ++ Register op1 = $op1$$Register; ++ Register op2 = R0; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch 
); ++%} ++ ++ ++instruct branchConP_short(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{ ++ match(If cmp (CmpP op1 op2)); ++// predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); ++ effect(USE labl); ++ ++ ins_cost(200); ++ format %{ "b$cmp $op1, $op2, $labl #@branchConP_short" %} ++ ++ ins_encode %{ ++ Register op1 = $op1$$Register; ++ Register op2 = $op2$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct cmpN_null_branch_short(cmpOp cmp, mRegN op1, immN0 null, label labl) %{ ++ match(If cmp (CmpN op1 null)); ++ effect(USE labl); ++ ++ ins_cost(180); ++ format %{ "CMP $op1,0\t! 
compressed ptr\n\t" ++ "BP$cmp $labl @ cmpN_null_branch_short" %} ++ ins_encode %{ ++ Register op1 = $op1$$Register; ++ Register op2 = R0; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++//TODO: pipe_branchP or create pipe_branchN LEE ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct cmpN_reg_branch_short(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{ ++ match(If cmp (CmpN op1 op2)); ++ effect(USE labl); ++ ++ ins_cost(180); ++ format %{ "CMP $op1,$op2\t! compressed ptr\n\t" ++ "BP$cmp $labl @ cmpN_reg_branch_short" %} ++ ins_encode %{ ++ Register op1_reg = $op1$$Register; ++ Register op2_reg = $op2$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1_reg, op2_reg, L); ++ else ++ __ beq(op1_reg, op2_reg, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1_reg, op2_reg, L); ++ else ++ __ bne(op1_reg, op2_reg, (int)0); ++ break; ++ case 0x03: //above ++ __ cmpult(AT, op2_reg, op1_reg); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ __ cmpult(AT, op1_reg, op2_reg); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //below ++ __ cmpult(AT, op1_reg, op2_reg); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ __ cmpult(AT, op2_reg, op1_reg); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConIU_reg_reg_short(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{ ++ match( If cmp 
(CmpU src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Register op2 = $src2$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++ ++instruct branchConIU_reg_imm_short(cmpOpU cmp, mRegI src1, immI src2, label labl) %{ ++ match( If cmp (CmpU src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ int val = $src2$$constant; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ __ move(AT, val); ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, AT, L); ++ else ++ __ beq(op1, AT, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, AT, L); ++ else ++ __ bne(op1, AT, (int)0); ++ break; ++ case 0x03: //above ++ __ cmpult(AT, AT, op1); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, AT); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //below ++ __ cmpult(AT, op1, 
AT); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ __ cmpult(AT, AT, op1); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConI_reg_reg_short(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{ ++ match( If cmp (CmpI src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Register op2 = $src2$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ case 0x03: //above ++ __ cmplt(AT, op2, op1); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ __ cmplt(AT, op1, op2); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //below ++ __ cmplt(AT, op1, op2); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ __ cmplt(AT, op2, op1); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConI_reg_imm0_short(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{ ++ match( If cmp (CmpI src1 src2) ); ++ effect(USE labl); ++ ins_cost(170); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, L); ++ else ++ __ beq(op1, (int)0); ++ break; ++ case 0x02: //not_equal ++ if 
(&L) ++ __ bne(op1, L); ++ else ++ __ bne(op1, (int)0); ++ break; ++ case 0x03: //greater ++ if(&L) ++ __ bgt(op1, L); ++ else ++ __ bgt(op1, (int)0); ++ break; ++ case 0x04: //greater_equal ++ if(&L) ++ __ bge(op1, L); ++ else ++ __ bge(op1, (int)0); ++ break; ++ case 0x05: //less ++ if(&L) ++ __ blt(op1, L); ++ else ++ __ blt(op1, (int)0); ++ break; ++ case 0x06: //less_equal ++ if(&L) ++ __ ble(op1, L); ++ else ++ __ ble(op1, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++ ++instruct branchConI_reg_imm_short(cmpOp cmp, mRegI src1, immI src2, label labl) %{ ++ match( If cmp (CmpI src1 src2) ); ++ effect(USE labl); ++ ins_cost(200); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ int val = $src2$$constant; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ __ move(AT, val); ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, AT, L); ++ else ++ __ beq(op1, AT, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, AT, L); ++ else ++ __ bne(op1, AT, (int)0); ++ break; ++ case 0x03: //greater ++ __ cmplt(AT, AT, op1); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //greater_equal ++ __ cmplt(AT, op1, AT); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //less ++ __ cmplt(AT, op1, AT); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //less_equal ++ __ cmplt(AT, AT, op1); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConIU_reg_imm0_short(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{ ++ match( If cmp (CmpU src1 zero) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0_short" 
%} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, L); ++ else ++ __ beq(op1, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, L); ++ else ++ __ bne(op1, (int)0); ++ break; ++ case 0x03: //above ++ if(&L) ++ __ bne(op1, L); ++ else ++ __ bne(op1, (int)0); ++ break; ++ case 0x04: //above_equal ++ if(&L) ++ __ beq(R0, L); ++ else ++ __ beq(R0, (int)0); ++ break; ++ case 0x05: //below ++ return; ++ break; ++ case 0x06: //below_equal ++ if(&L) ++ __ beq(op1, L); ++ else ++ __ beq(op1, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++////instruct branchConIU_reg_imm_0_255(cmpOpU cmp, mRegI src1, immI_0_255 src2, label labl) %{ ++//// match( If cmp (CmpU src1 src2) ); ++//// effect(USE labl); ++//// ins_cost(180); ++//// format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm_0_255" %} ++//// ++//// ins_encode %{ ++//// Register op1 = $src1$$Register; ++//// int val = $src2$$constant; ++//// Label &L = *($labl$$label); ++//// int flag = $cmp$$cmpcode; ++//// ++//// __ subl(AT, op1, val); ++//// switch(flag) ++//// { ++//// case 0x01: //equal ++//// if (&L) ++//// __ beq(AT, L); ++//// else ++//// __ beq(AT, (int)0); ++//// break; ++//// case 0x02: //not_equal ++//// if (&L) ++//// __ bne(AT, L); ++//// else ++//// __ bne(AT, (int)0); ++//// break; ++//// case 0x03: //above ++//// if(&L) ++//// __ bgtz(AT, L); ++//// else ++//// __ bgt(AT, (int)0); ++//// break; ++//// case 0x04: //above_equal ++//// if(&L) ++//// __ bgez(AT, L); ++//// else ++//// __ bge(AT, (int)0); ++//// break; ++//// case 0x05: //below ++//// if(&L) ++//// __ bltz(AT, L); ++//// else ++//// __ blt(AT, (int)0); ++//// break; ++//// case 0x06: //below_equal ++//// if(&L) ++//// __ blez(AT, L); ++//// else ++//// __ ble(AT, (int)0); ++//// break; ++//// default: 
++//// Unimplemented(); ++//// } ++//// %} ++//// ++//// ins_pc_relative(1); ++//// ins_pipe( pipe_alu_branch ); ++////%} ++ ++instruct branchConIU_reg_immI16_short(cmpOpU cmp, mRegI src1, immI16_sub src2, label labl) %{ ++ match( If cmp (CmpU src1 src2) ); ++ effect(USE labl); ++ ins_cost(180); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ int val = $src2$$constant; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ __ add_simm16(AT, op1, -1 * val); ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x03: //above ++ if(&L) ++ __ bgt(AT, L); ++ else ++ __ bgt(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ if(&L) ++ __ bge(AT, L); ++ else ++ __ bge(AT, (int)0); ++ break; ++ case 0x05: //below ++ if(&L) ++ __ blt(AT, L); ++ else ++ __ blt(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ if(&L) ++ __ ble(AT, L); ++ else ++ __ ble(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++ ++instruct branchConL_regL_regL_short(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{ ++ match( If cmp (CmpL src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL_short" %} ++ ins_cost(250); ++ ++ ins_encode %{ ++ Register opr1_reg = as_Register($src1$$reg); ++ Register opr2_reg = as_Register($src2$$reg); ++ ++ Label &target = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&target) ++ __ beq(opr1_reg, opr2_reg, target); ++ else ++ __ beq(opr1_reg, opr2_reg, (int)0); ++ break; ++ ++ case 0x02: //not_equal ++ if(&target) ++ __ bne(opr1_reg, opr2_reg, target); ++ else ++ __ bne(opr1_reg, opr2_reg, (int)0); ++ break; ++ ++ case 0x03: //greater ++ 
__ cmplt(AT, opr2_reg, opr1_reg); ++ if(&target) ++ __ bne(AT, target); ++ else ++ __ bne(AT, (int)0); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1_reg, opr2_reg); ++ if(&target) ++ __ beq(AT, target); ++ else ++ __ beq(AT, (int)0); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1_reg, opr2_reg); ++ if(&target) ++ __ bne(AT, target); ++ else ++ __ bne(AT, (int)0); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2_reg, opr1_reg); ++ if(&target) ++ __ beq(AT, target); ++ else ++ __ beq(AT, (int)0); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++////instruct branchConI_reg_imm_0_255_sub(cmpOp cmp, mRegI src1, immI_0_255 src2, label labl) %{ ++//// match( If cmp (CmpI src1 src2) ); ++//// effect(USE labl); ++//// ins_cost(180); ++//// format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm_0_255_sub" %} ++//// ++//// ins_encode %{ ++//// Register op1 = $src1$$Register; ++//// int val = $src2$$constant; ++//// Label &L = *($labl$$label); ++//// int flag = $cmp$$cmpcode; ++//// ++//// __ subl(AT, op1, val); ++//// switch(flag) ++//// { ++//// case 0x01: //equal ++//// if (&L) ++//// __ beq(AT, L); ++//// else ++//// __ beq(AT, (int)0); ++//// break; ++//// case 0x02: //not_equal ++//// if (&L) ++//// __ bne(AT, L); ++//// else ++//// __ bne(AT, (int)0); ++//// break; ++//// case 0x03: //greater ++//// if(&L) ++//// __ bgtz(AT, L); ++//// else ++//// __ bgt(AT, (int)0); ++//// break; ++//// case 0x04: //greater_equal ++//// if(&L) ++//// __ bgez(AT, L); ++//// else ++//// __ bge(AT, (int)0); ++//// break; ++//// case 0x05: //less ++//// if(&L) ++//// __ bltz(AT, L); ++//// else ++//// __ blt(AT, (int)0); ++//// break; ++//// case 0x06: //less_equal ++//// if(&L) ++//// __ blez(AT, L); ++//// else ++//// __ ble(AT, (int)0); ++//// break; ++//// default: ++//// Unimplemented(); ++//// } ++//// %} ++//// ++//// ins_pc_relative(1); ++//// ins_pipe( 
pipe_alu_branch ); ++////%} ++ ++instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{ ++ match( If cmp (CmpI src1 src2) ); ++ effect(USE labl); ++ ins_cost(180); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ int val = $src2$$constant; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ __ add_simm16(AT, op1, -1 * val); ++ switch(flag) ++ { ++ case 0x01: //equal ++ if (&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x03: //greater ++ if(&L) ++ __ bgt(AT, L); ++ else ++ __ bgt(AT, (int)0); ++ break; ++ case 0x04: //greater_equal ++ if(&L) ++ __ bge(AT, L); ++ else ++ __ bge(AT, (int)0); ++ break; ++ case 0x05: //less ++ if(&L) ++ __ blt(AT, L); ++ else ++ __ blt(AT, (int)0); ++ break; ++ case 0x06: //less_equal ++ if(&L) ++ __ ble(AT, L); ++ else ++ __ ble(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConL_regL_immL0_short(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{ ++ match( If cmp (CmpL src1 zero) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0_short" %} ++ ins_cost(150); ++ ++ ins_encode %{ ++ Register opr1_reg = as_Register($src1$$reg); ++ Label &target = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&target) ++ __ beq(opr1_reg, target); ++ else ++ __ beq(opr1_reg, int(0)); ++ break; ++ ++ case 0x02: //not_equal ++ if(&target) ++ __ bne(opr1_reg, target); ++ else ++ __ bne(opr1_reg, (int)0); ++ break; ++ ++ case 0x03: //greater ++ if(&target) ++ __ bgt(opr1_reg, target); ++ else ++ __ bgt(opr1_reg, (int)0); ++ break; ++ ++ case 0x04: //greater_equal ++ if(&target) ++ __ bge(opr1_reg, target); ++ else ++ __ 
bge(opr1_reg, (int)0); ++ break; ++ ++ case 0x05: //less ++ if (&target) ++ __ blt(opr1_reg, target); ++ else ++ __ blt(opr1_reg, int(0)); ++ break; ++ ++ case 0x06: //less_equal ++ if (&target) ++ __ ble(opr1_reg, target); ++ else ++ __ ble(opr1_reg, int(0)); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++instruct branchConL_regL_immL_short(cmpOp cmp, mRegL src1, immL src2, label labl) %{ ++ match( If cmp (CmpL src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL_short" %} ++ ins_cost(180); ++ ++ ins_encode %{ ++ Register opr1_reg = as_Register($src1$$reg); ++ Register opr2_reg = AT; ++ ++ Label &target = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ __ set64(opr2_reg, $src2$$constant); ++ switch(flag) { ++ case 0x01: //equal ++ if (&target) ++ __ beq(opr1_reg, opr2_reg, target); ++ else ++ __ beq(opr1_reg, opr2_reg, (int)0); ++ break; ++ ++ case 0x02: //not_equal ++ if(&target) ++ __ bne(opr1_reg, opr2_reg, target); ++ else ++ __ bne(opr1_reg, opr2_reg, (int)0); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2_reg, opr1_reg); ++ if(&target) ++ __ bne(AT, target); ++ else ++ __ bne(AT, (int)0); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1_reg, opr2_reg); ++ if(&target) ++ __ beq(AT, target); ++ else ++ __ beq(AT, (int)0); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1_reg, opr2_reg); ++ if(&target) ++ __ bne(AT, target); ++ else ++ __ bne(AT, (int)0); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2_reg, opr1_reg); ++ if(&target) ++ __ beq(AT, target); ++ else ++ __ beq(AT, (int)0); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe( pipe_alu_branch ); ++%} ++ ++ ++ ++instruct branchConF_reg_reg_short(cmpOp cmp, regF src1, regF src2, label labl) %{ ++ match( If cmp (CmpF src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl 
#@branchConF_reg_reg_short" %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $src1$$FloatRegister; ++ FloatRegister reg_op2 = $src2$$FloatRegister; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ if (!&L) ++ Unimplemented(); ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fbne(FcmpRES, L); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fbeq(FcmpRES, L); ++ break; ++ case 0x03: //greater ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fbne(FcmpRES, L); ++ break; ++ case 0x04: //greater_equal ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fbne(FcmpRES, L); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fbeq(FcmpRES, L); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fbeq(FcmpRES, L); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe(pipe_slow); ++%} ++ ++instruct branchConD_reg_reg_short(cmpOp cmp, regD src1, regD src2, label labl) %{ ++ match( If cmp (CmpD src1 src2) ); ++ effect(USE labl); ++ format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg_short" %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $src1$$FloatRegister; ++ FloatRegister reg_op2 = $src2$$FloatRegister; ++ Label &L = *($labl$$label); ++ int flag = $cmp$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ if (&L) ++ __ fbne(FcmpRES, L); ++ else ++ __ fbne(FcmpRES, (int)0); ++ break; ++ case 0x02: //not_equal ++ // c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs. 
++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ if (&L) ++ __ fbeq(FcmpRES, L); ++ else ++ __ fbeq(FcmpRES, (int)0); ++ break; ++ case 0x03: //greater ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ if(&L) ++ __ fbne(FcmpRES, L); ++ else ++ __ fbne(FcmpRES, (int)0); ++ break; ++ case 0x04: //greater_equal ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ if(&L) ++ __ fbne(FcmpRES, L); ++ else ++ __ fbne(FcmpRES, (int)0); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ if(&L) ++ __ fbeq(FcmpRES, L); ++ else ++ __ fbeq(FcmpRES, (int)0); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ if(&L) ++ __ fbeq(FcmpRES, L); ++ else ++ __ fbeq(FcmpRES, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pc_relative(1); ++ ins_pipe(pipe_slow); ++%} ++ ++// =================== End of branch instructions ========================== ++ ++// Call Runtime Instruction ++instruct CallRuntimeDirect(method meth) %{ ++ match(CallRuntime ); ++ effect(USE meth); ++ ++ ins_cost(300); ++ format %{ "CALL,runtime #@CallRuntimeDirect" %} ++ ins_encode( Java_To_Runtime( meth ) ); ++ ins_pipe( pipe_slow ); ++ ins_alignment(16); ++%} ++ ++//------------------------BSWAP Instructions------------------------------- ++instruct bytes_reverse_int(mRegI dst, mRegI src) %{ ++ match(Set dst (ReverseBytesI src)); ++ predicate(UseSW8A); ++ ins_cost(10); ++ format %{ "revbw $dst, $src" %} ++ ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ if(UseSW8A) { ++ __ revbw(src, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct bytes_reverse_long(mRegL dst, mRegL src) %{ ++ match(Set dst (ReverseBytesL src)); ++ predicate(UseSW8A); ++ ins_cost(10); ++ format %{ "revbl $dst, $src" %} ++ ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ if(UseSW8A) { ++ __ revbl(src, dst); ++ }else{ ++ Unimplemented(); 
++ } ++ %} ++ ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct bytes_reverse_unsigned_short(mRegI dst, mRegI src) %{ ++ match(Set dst (ReverseBytesUS src)); ++ predicate(UseSW8A); ++ ins_cost(10); ++ format %{ "revbh $dst, $src" %} ++ ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ if(UseSW8A) { ++ __ revbh(src, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct bytes_reverse_short(mRegI dst, mRegI src) %{ ++ match(Set dst (ReverseBytesS src)); ++ predicate(UseSW8A); ++ ins_cost(10); ++ format %{ "revbh $dst, $src\n\t" ++ "sexth $dst, $dst" %} ++ ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ if(UseSW8A) { ++ __ revbh(src, dst); ++ __ sexth(dst, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe(ialu_regI_regI); ++%} ++ ++//------------------------MemBar Instructions------------------------------- ++//Memory barrier flavors ++ ++instruct membar_acquire() %{ ++ match(MemBarAcquire); ++ ins_cost(0); ++ ++ size(4); ++ format %{ "MEMBAR-acquire @ membar_acquire" %} ++ ins_encode %{ ++ if (UseNecessaryMembar) { ++ __ memb(); ++ } ++ %} ++ ins_pipe(empty); ++%} ++ ++instruct load_fence() %{ ++ match(LoadFence); ++ ins_cost(400); ++ ++ format %{ "MEMBAR @ load_fence" %} ++ ins_encode %{ ++ __ memb(); ++ %} ++ ins_pipe(pipe_slow); ++%} ++ ++instruct membar_acquire_lock() %{ ++ match(MemBarAcquireLock); ++ ins_cost(0); ++ ++ size(0); ++ format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %} ++ ins_encode(); ++ ins_pipe(empty); ++%} ++ ++instruct unnecessary_membar_release() %{ ++ predicate(unnecessary_release(n)); ++ match(MemBarRelease); ++ ins_cost(0); ++ ++ format %{ "membar_release (elided)" %} ++ ++ ins_encode %{ ++ __ block_comment("membar_release (elided)"); ++ %} ++ ins_pipe(pipe_slow); ++%} ++ ++instruct membar_release() %{ ++ 
match(MemBarRelease); ++ ins_cost(400); ++ ++ format %{ "MEMBAR-release @ membar_release" %} ++ ++ ins_encode %{ ++ // Attention: DO NOT DELETE THIS GUY! ++ __ memb(); ++ %} ++ ++ ins_pipe(pipe_slow); ++%} ++ ++instruct store_fence() %{ ++ match(StoreFence); ++ ins_cost(400); ++ ++ format %{ "MEMBAR @ store_fence" %} ++ ++ ins_encode %{ ++ __ memb(); ++ %} ++ ++ ins_pipe(pipe_slow); ++%} ++ ++instruct membar_release_lock() %{ ++ match(MemBarReleaseLock); ++ ins_cost(0); ++ ++ size(0); ++ format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %} ++ ins_encode(); ++ ins_pipe(empty); ++%} ++ ++ ++instruct membar_volatile() %{ ++ match(MemBarVolatile); ++ ins_cost(400); ++ ++ format %{ "MEMBAR-volatile" %} ++ ins_encode %{ ++ if( !os::is_MP() ) return; // Not needed on single CPU ++ __ memb(); ++ ++ %} ++ ins_pipe(pipe_slow); ++%} ++ ++instruct unnecessary_membar_volatile() %{ ++ match(MemBarVolatile); ++ predicate(Matcher::post_store_load_barrier(n)); ++ ins_cost(0); ++ ++ size(0); ++ format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %} ++ ins_encode( ); ++ ins_pipe(empty); ++%} ++ ++instruct membar_storestore() %{ ++ match(MemBarStoreStore); ++ ++ ins_cost(0); ++ size(4); ++ format %{ "MEMBAR-storestore @ membar_storestore" %} ++ ins_encode %{ ++ if (UseWmemb && UseNecessaryMembar) { ++ __ wmemb(); ++ } else if (UseNecessaryMembar) { ++ __ memb(); ++ } ++ %} ++ ins_pipe(empty); ++%} ++ ++//----------Move Instructions-------------------------------------------------- ++instruct castX2P(mRegP dst, mRegL src) %{ ++ match(Set dst (CastX2P src)); ++ format %{ "castX2P $dst, $src @ castX2P" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ ++ if(src != dst) ++ __ move(dst, src); ++ %} ++ ins_cost(10); ++ ins_pipe( ialu_regI_mov ); ++%} ++ ++instruct castP2X(mRegL dst, mRegP src ) %{ ++ match(Set dst (CastP2X src)); ++ ++ format %{ "mov $dst, $src\t #@castP2X" %} 
++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ ++ if(src != dst) ++ __ move(dst, src); ++ %} ++ ins_pipe( ialu_regI_mov ); ++%} ++ ++instruct MoveF2I_reg_reg(mRegI dst, regF src) %{ ++ match(Set dst (MoveF2I src)); ++ effect(DEF dst, USE src); ++ ins_cost(85); ++ format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ ++ __ fimovs(dst, src); ++ __ addw(dst, dst, 0); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct MoveI2F_reg_reg(regF dst, mRegI src) %{ ++ match(Set dst (MoveI2F src)); ++ effect(DEF dst, USE src); ++ ins_cost(85); ++ format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %} ++ ins_encode %{ ++ Register src = as_Register($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ ifmovs(dst, src); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct MoveD2L_reg_reg(mRegL dst, regD src) %{ ++ match(Set dst (MoveD2L src)); ++ effect(DEF dst, USE src); ++ ins_cost(85); ++ format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ ++ __ fimovd(dst, src); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct MoveL2D_reg_reg(regD dst, mRegL src) %{ ++ match(Set dst (MoveL2D src)); ++ effect(DEF dst, USE src); ++ ins_cost(85); ++ format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %} ++ ins_encode %{ ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ __ ifmovd(dst, src); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++//----------Conditional Move--------------------------------------------------- ++// Conditional move ++instruct cmovI_cmpI_reg_reg2(mRegI dst, mRegI src1, mRegI src2, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t 
@cmovI_cmpI_reg_reg\n" ++ "\tCMOV $dst,$src1,$src2 \t @cmovI_cmpI_reg_reg" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpI_reg_imm(mRegI dst, immI_0_255 src1, mRegI src2, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n" ++ "\tCMOV $dst,$src1,$src2 \t @cmovI_cmpI_reg_imm" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src2 = $src2$$Register; ++ int src1 = $src1$$constant & ((1<<8)-1); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src1,src2,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src1,src2,dst); ++ break; ++ ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ selne(AT,src1,src2,dst); ++ break; ++ ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT,src1,src2,dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, op1, 
op2); ++ __ selne(AT,src1,src2,dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT,src1,src2,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovI_cmpP_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpP_reg_imm(mRegI dst, immI_0_255 src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovI_cmpP_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ 
seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++ %} ++ ++instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovI_cmpN_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpN_reg_imm(mRegI dst, immI_0_255 src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t 
@cmovI_cmpN_reg_imm\n\t" ++ "CMOV $dst,$src\t @cmovI_cmpN_reg_imm" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpU_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpU tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpU_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovP_cmpU_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ 
seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpF_reg_reg(mRegP dst, mRegP src, regF tmp1, regF tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpF tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovP_cmpF_reg_reg\n" ++ "\tCMOV $dst,$src \t @cmovP_cmpF_reg_reg" ++ %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $tmp1$$FloatRegister; ++ FloatRegister reg_op2 = $tmp2$$FloatRegister; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src, dst, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovP_cmpN_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int 
flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpP_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpP_reg_imm(mRegN dst, immI_0_255 src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveN (Binary 
cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_imm\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpP_reg_imm" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n" ++ "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src, dst, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, 
dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src, dst, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpN_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpN_reg_imm(mRegN dst, immI_0_255 src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_imm\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpN_reg_imm" ++ %} ++ ins_encode %{ ++ 
Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovI_cmpU_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ 
ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpU_reg_imm(mRegI dst, immI_0_255 src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_imm\n\t" ++ "CMOV $dst,$src\t @cmovI_cmpU_reg_imm" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n" ++ "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg" ++ %} ++ ins_encode %{ ++ Register opr1 = as_Register($tmp1$$reg); ++ Register opr2 = as_Register($tmp2$$reg); ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, opr1, opr2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, opr1, opr2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2, opr1); ++ __ selne(AT, src, 
dst, dst); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1, opr2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1, opr2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2, opr1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpL_reg_imm(mRegI dst, immI_0_255 src1, mRegI src2, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_imm\n" ++ "\tCMOV $src2,$src1 \t @cmovI_cmpL_reg_imm" ++ %} ++ ins_encode %{ ++ Register opr1 = as_Register($tmp1$$reg); ++ Register opr2 = as_Register($tmp2$$reg); ++ Register dst = $dst$$Register; ++ int src1 = $src1$$constant & ((1<<8)-1); // Mask to 8 bits ++ Register src2 = $src2$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, opr1, opr2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, opr1, opr2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2, opr1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1, opr2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1, opr2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2, opr1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n" ++ "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg" ++ %} ++ 
ins_encode %{ ++ Register opr1 = as_Register($tmp1$$reg); ++ Register opr2 = as_Register($tmp2$$reg); ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, opr1, opr2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, opr1, opr2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2, opr1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1, opr2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1, opr2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2, opr1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src1, mRegI src2, regD tmp1, regD tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n" ++ "\tCMOV $dst,$src1,$src2 \t @cmovI_cmpD_reg_reg" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ Register src1 = as_Register($src1$$reg); ++ Register src2 = as_Register($src2$$reg); ++ ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ // See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change. 
++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpD_reg_imm(mRegI dst, immI_0_255 src1, mRegI src2, regD tmp1, regD tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_imm\n" ++ "\tCMOV $dst,$src1,$src2 \t @cmovI_cmpD_reg_imm" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ Register src2 = as_Register($src2$$reg); ++ int src1 = $src1$$constant & ((1<<8)-1); // Mask to 8 bits ++ ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmplt(FcmpRES, 
reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovP_cmpP_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovP_cmpI_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ 
__ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmplt(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmplt(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpP_reg_reg(mRegL dst, mRegL src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpP_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovL_cmpP_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpU_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveN (Binary cop (CmpU tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ 
"CMPU$cop $tmp1,$tmp2\t @cmovN_cmpU_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpU_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpL_reg_reg(mRegN dst, mRegN src, mRegL tmp1, mRegL tmp2, cmpOp cop) %{ ++ match(Set dst (CMoveN (Binary cop (CmpL tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovN_cmpL_reg_reg\n" ++ "\tCMOV $dst,$src \t @cmovN_cmpL_reg_reg" ++ %} ++ ins_encode %{ ++ Register opr1 = as_Register($tmp1$$reg); ++ Register opr2 = as_Register($tmp2$$reg); ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, opr1, opr2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, opr1, opr2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2, opr1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1, opr2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1, opr2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: 
//less_equal ++ __ cmplt(AT, opr2, opr1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpI_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmplt(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmplt(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovN_cmpI_reg_imm(mRegN dst, immI_0_255 src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_imm\n\t" ++ "CMOV $dst,$src\t @cmovN_cmpI_reg_imm" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, 
op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmplt(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmplt(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpU_reg_reg(mRegL dst, mRegL src1, mRegL src2, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpU tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpU_reg_reg\n\t" ++ "CMOV $src2,$src1\t @cmovL_cmpU_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpF_reg_reg(mRegL dst, mRegL src1, mRegL src2, regF tmp1, regF tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpF tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovL_cmpF_reg_reg\n" ++ 
"\tCMOV $src2,$src1 \t @cmovL_cmpF_reg_reg" ++ %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $tmp1$$FloatRegister; ++ FloatRegister reg_op2 = $tmp2$$FloatRegister; ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src1, mRegL src2, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n" ++ "\tCMOV $src2,$src1 \t @cmovL_cmpI_reg_reg" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = as_Register($dst$$reg); ++ Register src1 = as_Register($src1$$reg); ++ Register src2 = as_Register($src2$$reg); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ 
break; ++ ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpI_reg_imm(mRegL dst, immI_0_255 src1, mRegL src2, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_imm\n" ++ "\tCMOV $src2,$src1 \t @cmovL_cmpI_reg_imm" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = as_Register($dst$$reg); ++ int src1 = $src1$$constant & ((1<<8)-1); // Mask to 8 bits ++ Register src2 = as_Register($src2$$reg); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, op1, op2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src1, mRegL src2, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t 
@cmovL_cmpL_reg_reg\n" ++ "\tCMOV $src2,$src1 \t @cmovL_cmpL_reg_reg" ++ %} ++ ins_encode %{ ++ Register opr1 = as_Register($tmp1$$reg); ++ Register opr2 = as_Register($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ Register src1 = as_Register($src1$$reg); ++ Register src2 = as_Register($src2$$reg); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subl(AT, opr1, opr2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, opr1, opr2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2, opr1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //greater_equal ++ __ cmplt(AT, opr1, opr2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1, opr2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2, opr1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpL_reg_imm(mRegL dst, immI_0_255 src1, mRegL src2, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_imm\n" ++ "\tCMOV $src2,$src1 \t @cmovL_cmpL_reg_imm" ++ %} ++ ins_encode %{ ++ Register opr1 = as_Register($tmp1$$reg); ++ Register opr2 = as_Register($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ int src1 = $src1$$constant & ((1<<8)-1); // Mask to 8 bits ++ Register src2 = as_Register($src2$$reg); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, opr1, opr2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, opr1, opr2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x03: //greater ++ __ cmplt(AT, opr2, opr1); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x04: //greater_equal 
++ __ cmplt(AT, opr1, opr2); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ case 0x05: //less ++ __ cmplt(AT, opr1, opr2); ++ __ selne(AT, src1, src2, dst); ++ break; ++ ++ case 0x06: //less_equal ++ __ cmplt(AT, opr2, opr1); ++ __ seleq(AT, src1, src2, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t" ++ "CMOV $dst,$src\t @cmovL_cmpN_reg_reg" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ subw(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subw(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT, src, dst, dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT, src, dst, dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpN_reg_imm(mRegL dst, immI_0_255 src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src))); ++ ins_cost(80); ++ format %{ ++ "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_imm\n\t" ++ "CMOV $dst,$src\t @cmovL_cmpN_reg_imm" ++ %} ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ Register dst = $dst$$Register; ++ int src = $src$$constant & ((1<<8)-1); // Mask to 8 
bits ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ subl(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x02: //not_equal ++ __ subl(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x03: //above ++ __ cmpult(AT, op2, op1); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x04: //above_equal ++ __ cmpult(AT, op1, op2); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ case 0x05: //below ++ __ cmpult(AT, op1, op2); ++ __ selne(AT,src,dst,dst); ++ break; ++ ++ case 0x06: //below_equal ++ __ cmpult(AT, op2, op1); ++ __ seleq(AT,src,dst,dst); ++ break; ++ ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src1, mRegL src2, regD tmp1, regD tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n" ++ "\tCMOV $src2,$src1 \t @cmovL_cmpD_reg_reg" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ Register src1 = as_Register($src1$$reg); ++ Register src2 = as_Register($src2$$reg); ++ ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, 
dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovL_cmpD_reg_imm(mRegL dst, immI_0_255 src1, mRegL src2, regD tmp1, regD tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_imm\n" ++ "\tCMOV $src2,$src1 \t @cmovL_cmpD_reg_imm" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ Register dst = as_Register($dst$$reg); ++ int src1 = $src1$$constant & ((1<<8)-1); // Mask to 8 bits ++ Register src2 = as_Register($src2$$reg); ++ ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovD_cmpD_reg_reg(regD dst, regD src1, regD src2, regD tmp1, regD tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(200); ++ format %{ ++ 
"CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n" ++ "\tCMOV $dst,$src1 \t @cmovD_cmpD_reg_reg" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fselne(FcmpRES, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovD_cmpF_reg_reg(regD dst, regD src1, regD src2, regF tmp1, regF tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveD (Binary cop (CmpF tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(200); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovD_cmpF_reg_reg\n" ++ "\tCMOV $src2,$src1 \t @cmovD_cmpF_reg_reg" ++ %} ++ ins_encode %{ ++ FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg); ++ FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ ++ int flag = $cop$$cmpcode; ++ ++ Label L; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fselne(FcmpRES, src1, src2, dst); ++ 
break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++ ++ %} ++ ++instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src))); ++ ins_cost(200); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n" ++ "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ int flag = $cop$$cmpcode; ++ Label L; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ bne(op1, op2, L); ++ __ fmovs(dst, src); ++ __ BIND(L); ++ break; ++ case 0x02: //not_equal ++ __ beq(op1, op2, L); ++ __ fmovs(dst, src); ++ __ BIND(L); ++ break; ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ beq(AT, L); ++ __ fmovs(dst, src); ++ __ BIND(L); ++ break; ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ bne(AT, L); ++ __ fmovs(dst, src); ++ __ BIND(L); ++ break; ++ case 0x05: //less ++ __ cmplt(AT, op1, op2); ++ __ beq(AT, L); ++ __ fmovs(dst, src); ++ __ BIND(L); ++ break; ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ bne(AT, L); ++ __ fmovs(dst, src); ++ __ BIND(L); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct 
cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src))); ++ ins_cost(200); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n" ++ "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ int flag = $cop$$cmpcode; ++ Label L; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ bne(op1, op2, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x02: //not_equal ++ __ beq(op1, op2, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ beq(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ bne(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x05: //less ++ __ cmplt(AT, op1, op2); ++ __ beq(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ bne(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src))); ++ ins_cost(200); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n" ++ "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg" ++ %} ++ ++ ins_encode %{ ++ Register op1 = $tmp1$$Register; ++ Register op2 = $tmp2$$Register; ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ int flag = $cop$$cmpcode; ++ Label L; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ bne(op1, op2, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x02: //not_equal ++ __ beq(op1, op2, L); ++ __ fmovd(dst, 
src); ++ __ BIND(L); ++ break; ++ case 0x03: //great ++ __ cmplt(AT, op2, op1); ++ __ beq(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x04: //great_equal ++ __ cmplt(AT, op1, op2); ++ __ bne(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x05: //less ++ __ cmplt(AT, op1, op2); ++ __ beq(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ case 0x06: //less_equal ++ __ cmplt(AT, op2, op1); ++ __ bne(AT, L); ++ __ fmovd(dst, src); ++ __ BIND(L); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src1, mRegI src2, regF tmp1, regF tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n" ++ "\tCMOV $src2,$src1 \t @cmovI_cmpF_reg_reg" ++ %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $tmp1$$FloatRegister; ++ FloatRegister reg_op2 = $tmp2$$FloatRegister; ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ 
default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovI_cmpF_reg_imm(mRegI dst, immI_0_255 src1, mRegI src2, regF tmp1, regF tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(80); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_imm\n" ++ "\tCMOV $src2,$src1 \t @cmovI_cmpF_reg_imm" ++ %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $tmp1$$FloatRegister; ++ FloatRegister reg_op2 = $tmp2$$FloatRegister; ++ Register dst = $dst$$Register; ++ int src1 = $src1$$constant & ((1<<8)-1); // Mask to 8 bits ++ Register src2 = $src2$$Register; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fimovd(GP, FcmpRES); ++ __ seleq(GP, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmovF_cmpF_reg_reg(regF dst, regF src1, regF src2, regF tmp1, regF tmp2, cmpOp cop ) %{ ++ match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary src2 src1))); ++ ins_cost(200); ++ format %{ ++ "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n" ++ "\tCMOV $src2,$src1 \t @cmovF_cmpF_reg_reg" ++ %} ++ ++ ins_encode %{ ++ FloatRegister reg_op1 = $tmp1$$FloatRegister; ++ FloatRegister reg_op2 = 
$tmp2$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister src2 = $src2$$FloatRegister; ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fselne(FcmpRES, src1, src2, dst); ++ break; ++ case 0x02: //not_equal ++ __ fcmpeq(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x03: //greater ++ __ fcmple(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x04: //greater_equal ++ __ fcmplt(FcmpRES, reg_op1, reg_op2); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x05: //less ++ __ fcmple(FcmpRES, reg_op2, reg_op1); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ case 0x06: //less_equal ++ __ fcmplt(FcmpRES, reg_op2, reg_op1); ++ __ fseleq(FcmpRES, src1, src2, dst); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++// Manifest a CmpL result in an integer register. Very painful. ++// This is the test to avoid. 
++instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{ ++ match(Set dst (CmpL3 src1 src2)); ++ ins_cost(1000); ++ format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %} ++ ins_encode %{ ++ Register opr1 = as_Register($src1$$reg); ++ Register opr2 = as_Register($src2$$reg); ++ Register dst = as_Register($dst$$reg); ++ ++ Label done; ++ ++ __ subl(AT, opr1, opr2); ++ __ subl(dst, R0, 1); ++ __ blt(AT, done); ++ ++ __ selgt(AT, 1, R0, dst); ++ __ BIND(done); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++// ++// less_rsult = -1 ++// greater_result = 1 ++// equal_result = 0 ++// nan_result = -1 ++// ++instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{ ++ match(Set dst (CmpF3 src1 src2)); ++ ins_cost(1000); ++ format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ Register dst = as_Register($dst$$reg); ++ ++ Label Done; ++ ++ __ move(dst, -1); ++ __ fcmple(FcmpRES, src2, src1); ++ __ fbeq(FcmpRES, Done); ++ ++ __ fcmpeq(FcmpRES, src1, src2); ++ __ move(dst, 1); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, R0, dst, dst); ++ ++ __ BIND(Done); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{ ++ match(Set dst (CmpD3 src1 src2)); ++ ins_cost(1000); ++ format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ Register dst = as_Register($dst$$reg); ++ ++ Label Done; ++ ++ __ move(dst, -1); ++ __ fcmple(FcmpRES, src2, src1); ++ __ fbeq(FcmpRES, Done); ++ ++ __ fcmpeq(FcmpRES, src1, src2); ++ __ move(dst, 1); ++ __ fimovd(GP, FcmpRES); ++ __ selne(GP, R0, dst, dst); ++ ++ __ BIND(Done); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{ ++ match(Set dummy (ClearArray cnt base)); ++ format %{ "CLEAR_ARRAY base = 
$base, cnt = $cnt # Clear doublewords" %} ++ ins_encode %{ ++ //Assume cnt is the number of bytes in an array to be cleared, ++ //and base points to the starting address of the array. ++ Register base = $base$$Register; ++ Register num = $cnt$$Register; ++ Label Loop, done; ++ ++ __ move(T12, num); /* T12 = words */ ++ __ beq(T12, done); ++ __ move(AT, base); ++ ++ __ BIND(Loop); ++ __ stl(R0, Address(AT, 0)); ++ __ addl(AT, AT, wordSize); ++ __ subl(T12, T12, 1); ++ __ bne(T12, Loop); ++ __ BIND(done); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct string_compare(a4_RegP str1, mA5RegI cnt1, t6_RegP str2, mT7RegI cnt2, mT3RegI result) %{ ++ match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); ++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2); ++ ++ format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %} ++ ins_encode %{ ++ // Get the first character position in both strings ++ // [8] char array, [12] offset, [16] count ++ Register str1 = $str1$$Register; ++ Register str2 = $str2$$Register; ++ Register cnt1 = $cnt1$$Register; ++ Register cnt2 = $cnt2$$Register; ++ Register result = $result$$Register; ++ ++ Label L, Loop, haveResult, done; ++ ++ // compute the and difference of lengths (in result) ++ __ subl(result, cnt1, cnt2); // result holds the difference of two lengths ++ ++ // compute the shorter length (in cnt1) ++ __ cmplt(AT, cnt2, cnt1); ++ __ selne(AT, cnt2, cnt1, cnt1); ++ ++ // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register ++ __ BIND(Loop); // Loop begin ++ __ ldhu(AT, str1, 0); ++ __ beq(cnt1, done); ++ ++ // compare current character ++ __ ldhu(cnt2, str2, 0); ++ __ addl(str1, str1, 2); ++ __ bne(AT, cnt2, haveResult); ++ __ addl(str2, str2, 2); ++ __ subl(cnt1, cnt1, 1); ++ __ beq(R0, Loop); ++ ++ __ BIND(haveResult); ++ __ subl(result, AT, cnt2); ++ ++ __ BIND(done); ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++// intrinsic optimization ++instruct 
string_equals(a4_RegP str1, a5_RegP str2, mT6RegI cnt, mT7RegI temp, no_Ax_mRegI result) %{ ++ match(Set result (StrEquals (Binary str1 str2) cnt)); ++ effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp); ++ ++ format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %} ++ ins_encode %{ ++ // Get the first character position in both strings ++ // [8] char array, [12] offset, [16] count ++ Register str1 = $str1$$Register; ++ Register str2 = $str2$$Register; ++ Register cnt = $cnt$$Register; ++ Register tmp = $temp$$Register; ++ Register result = $result$$Register; ++ ++ Label L, Loop, haveResult, done; ++ ++ __ cmpeq(result, str1, str2); //same char[] ? ++ __ bne(result, done); ++ ++ __ or_ins(result, R0, 1); ++ __ beq(cnt,done); ++ ++ //check for alignment and position the pointers to the ends ++ __ or_ins(result, str1, str2); ++ __ and_imm8(result, result, 0x3); ++ // notZero means at least one not 4-byte aligned. ++ // We could optimize the case when both arrays are not aligned ++ // but it is not frequent case and it requires additional checks. ++ __ slll(cnt, cnt, exact_log2(sizeof(jchar))); // set byte count ++ __ bne(result, Loop); // char by char compare ++ ++ // Compare char[] arrays aligned to 4 bytes. 
++ __ char_arrays_equals(str1, str2, cnt, result, ++ result, tmp, done); ++ __ or_ins(result, R0, 1); ++ __ beq(R0, done); ++ ++ // char by char compare ++ __ BIND(Loop); ++ __ ldhu(AT, str1, 0); // Loop begin ++ __ beq(cnt, done); // count == 0 ++ ++ // compare current character ++ __ ldhu(tmp, str2, 0); ++ __ addl(str1, str1, 2); ++ __ bne(AT, tmp, haveResult); ++ __ addl(str2, str2, 2); ++ __ subl(cnt, cnt, 2); ++ __ beq(R0, Loop); ++ ++ __ BIND(haveResult); ++ __ subl(result, AT, tmp); ++ ++ __ BIND(done); ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct array_equals(a4_RegP ary1, a5_RegP ary2, no_Ax_mRegI result) %{ ++ match(Set result (AryEq ary1 ary2)); ++ effect(USE_KILL ary1, USE_KILL ary2); ++ ++ format %{ "Array Equals $ary1,$ary2 -> $result @ array_equals" %} ++ ins_encode %{ ++ ++ Register ary1 = $ary1$$Register; ++ Register ary2 = $ary2$$Register; ++ Register result = $result$$Register; ++ ++ Label Lvector, Ldone, Lloop; ++ Label Ldone_hop, Lloop_hop, Ldone1; ++ ++ int length_offset = arrayOopDesc::length_offset_in_bytes(); ++ int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); ++ ++ // return true if the same array ++ __ cmpeq(GP, ary1, ary2); ++ __ bne(GP, Ldone); ++ ++ __ beq(ary1, Ldone1); ++ __ beq(ary2, Ldone1); ++ ++ //load the lengths of arrays ++ __ ldw_signed(AT, Address(ary1, length_offset)); ++ __ ldw_signed(GP, Address(ary2, length_offset)); ++ ++ // return false if the two arrays are not equal length ++ __ cmpeq(GP, AT, GP); ++ __ beq(GP, Ldone1); ++ __ beq(AT, Ldone); ++ ++ // load array addresses ++ __ ldi(ary1, ary1, base_offset); ++ __ ldi(ary2, ary2, base_offset); ++ ++ // set byte count ++ __ slll(AT, AT, exact_log2(sizeof(jchar))); ++ ++ // Compare char[] arrays aligned to 4 bytes. 
++ __ char_arrays_equals(ary1, ary2, AT, result, ++ result, GP, Ldone_hop); ++ __ or_ins(result, R0, 1); // equals ++ __ beq(R0, Ldone_hop); ++ ++ __ BIND(Ldone); ++ __ or_ins(result, R0, 1); // equal ++ __ beq(R0, Ldone_hop); ++ ++ __ BIND(Ldone1); ++ __ or_ins(result, R0, 0); // not equal ++ ++ __ BIND(Ldone_hop); ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++//----------Arithmetic Instructions------------------------------------------- ++//----------Addition Instructions--------------------------------------------- ++instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ ++ match(Set dst (AddI src1 src2)); ++ ++ format %{ "addw $dst, $src1, $src2 #@addI_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ addw(dst, src1, src2); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct addI_Reg_imm_0_255(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ match(Set dst (AddI src1 src2)); ++ ++ ins_cost(40); ++ format %{ "addw $dst, $src1, $src2 #@addI_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ addw(dst, src1, imm); ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++//instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{ ++// match(Set dst (AddI src1 src2)); ++// ++// format %{ "addw $dst, $src1, $src2 #@addI_Reg_imm" %} ++// ins_encode %{ ++// Register dst = $dst$$Register; ++// Register src1 = $src1$$Register; ++// int imm = $src2$$constant; ++// ++// if(Assembler::is_uimm8(imm)) { ++// __ addw(dst, src1, imm); ++// } else { ++// __ move(AT, imm); ++// __ addw(dst, src1, AT); ++// } ++// %} ++// ins_pipe( ialu_regI_regI ); ++//%} ++ ++instruct addP_reg_reg(mRegP dst, mRegP src1, mRegLorI2L src2) %{ ++ match(Set dst (AddP src1 src2)); ++ ++ format %{ "ADDP $dst, $src1, $src2 #@addP_reg_reg" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; 
++ Register src2 = $src2$$Register; ++ __ addl(dst, src1, src2); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{ ++ match(Set dst (AddP src1 (ConvI2L src2))); ++ ++ format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ addl(dst, src1, src2); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct addP_reg_imm_0_255(mRegP dst, mRegP src1, immL_0_255 src2) %{ ++ match(Set dst (AddP src1 src2)); ++ ++ ins_cost(40); ++ format %{ "dadd $dst, $src1, $src2 #@addP_reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ addl(dst, src1, imm); ++ %} ++ ins_pipe( ialu_regL_imm16 ); ++%} ++ ++instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{ ++ match(Set dst (AddP src1 src2)); ++ ++ format %{ "add_simm16 $dst, $src1, $src2 #@addP_reg_imm" %} ++ ins_encode %{ ++ Register src1 = $src1$$Register; ++ long src2 = $src2$$constant; ++ Register dst = $dst$$Register; ++ ++ if(Assembler::is_simm16(src2)) { ++ __ add_simm16(dst, src1, src2); ++ } else { ++ __ set64(AT, src2); ++ __ addl(dst, src1, AT); ++ } ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++// Add Long Register with Register ++instruct addL_Reg_Reg(mRegL dst, mRegLorI2L src1, mRegLorI2L src2) %{ ++ match(Set dst (AddL src1 src2)); ++ ins_cost(200); ++ format %{ "ADDL $dst, $src1, $src2 #@addL_Reg_Reg\t" %} ++ ++ ins_encode %{ ++ Register dst_reg = as_Register($dst$$reg); ++ Register src1_reg = as_Register($src1$$reg); ++ Register src2_reg = as_Register($src2$$reg); ++ ++ __ addl(dst_reg, src1_reg, src2_reg); ++ %} ++ ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct addL_Reg_imm(mRegL dst, mRegLorI2L src1, immL16 src2) ++%{ ++ match(Set dst (AddL src1 src2)); ++ ++ format %{ "ADDL $dst, $src1, $src2 #@addL_Reg_immL16 " %} 
++ ins_encode %{ ++ Register dst_reg = as_Register($dst$$reg); ++ Register src1_reg = as_Register($src1$$reg); ++ int src2_imm = $src2$$constant; ++ ++ __ add_simm16(dst_reg, src1_reg, src2_imm); ++ %} ++ ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++instruct addI_Reg_immI_M1_255(mRegI dst, mRegI src1, immI_M1_255 src2) %{ ++ match(Set dst (AddI src1 src2)); ++ ++ ins_cost(10); ++ format %{ "addw $dst, $src1, $src2 #@addI_Reg_immI_M1_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ subw(dst, src1, -imm); ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++//----------Subtraction Instructions------------------------------------------- ++// Integer Subtraction Instructions ++instruct subI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1_255 src2) %{ ++ match(Set dst (SubI src1 src2)); ++ ++ ins_cost(10); ++ format %{ "subw $dst, $src1, $src2 #@subI_Reg_immI_M1" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ addw(dst, src1, -imm); ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++instruct subL_Reg_immL16(mRegL dst, mRegL src1, immL16_sub src2) %{ ++ match(Set dst (SubL src1 src2)); ++ ++ ins_cost(10); ++ format %{ "sul $dst, $src1, $src2 #@subL_Reg_immI_M1" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ ldi(dst, src1, -1 * imm); ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ ++ match(Set dst (SubI src1 src2)); ++ ins_cost(100); ++ ++ format %{ "subw $dst, $src1, $src2 #@subI_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ subw(dst, src1, src2); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct subI_Reg_imm_0_255(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ match(Set dst (SubI src1 src2)); 
++ ins_cost(80); ++ ++ format %{ "subw $dst, $src1, $src2 #@subI_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ subw(dst, src1, imm); ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++instruct negI_reg(mRegI dst, immI0 zero, mRegI src2) %{ ++ match(Set dst (SubI zero src2)); ++ ins_cost(80); ++ ++ format %{ "NEG $src2,$dst #@negI_reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src2$$Register; ++ __ subw (dst, R0, src); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct negL_Reg(mRegL dst, immL0 zero, mRegLorI2L src) %{ ++ match(Set dst (SubL zero src)); ++ ins_cost(80); ++ ++ format %{ "neg $dst, $src #@negL_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ __ subl(dst, R0, src); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct subL_Reg_imm_0_255(mRegL dst, mRegL src1, immI_0_255 src2) %{ ++ match(Set dst (SubL src1 src2)); ++ ins_cost(80); ++ format %{ "SubL $dst, $src1, $src2 #@subL_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int imm = $src2$$constant; ++ __ subl(dst, src1, imm); ++ %} ++ ins_pipe( ialu_regL_imm16 ); ++%} ++ ++ ++// Subtract Long Register with Register. 
++instruct subL_Reg_Reg(mRegL dst, mRegLorI2L src1, mRegLorI2L src2) %{ ++ match(Set dst (SubL src1 src2)); ++ ins_cost(100); ++ format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src1 = as_Register($src1$$reg); ++ Register src2 = as_Register($src2$$reg); ++ ++ __ subl(dst, src1, src2); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++// Integer MOD with Register ++instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mT12RegI scratch) %{ ++ match(Set dst (ModI src1 src2)); ++ ins_cost(300); ++ format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ Register scratch = $scratch$$Register; ++ ++ if (UseSW8A) { ++ __ remw(src1, src2, dst); ++ } else if (FastIntRem) { ++ __ irem_sw(src1, src2, dst); ++ } else { ++ __ saveTRegisters(); ++ __ move(A0, src2); ++ if(src1==A0) ++ __ ldl(A1, SP, 96); ++ else ++ __ move(A1, src1); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::srem), 2); ++ __ move(scratch, V0); ++ __ restoreTRegisters(); ++ __ move(dst, scratch); ++ } ++ %} ++ ++ //ins_pipe( ialu_mod ); ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct modL_reg_reg(mRegL dst, mRegLorI2L src1, mRegLorI2L src2, t12_RegP scratch) %{ ++ match(Set dst (ModL src1 src2)); ++ format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %} ++ ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register op1 = as_Register($src1$$reg); ++ Register op2 = as_Register($src2$$reg); ++ Register scratch = $scratch$$Register; ++ ++ if (UseSW8A) { ++ __ reml(op1, op2, dst); ++ } else if (FastLongRem) { ++ Label lrem, exit; ++ Register tem = AT; ++ ++ __ slll(tem, op1, 0xb); ++ __ sral(tem, tem, 0xb); ++ __ cmpeq(tem, op1, tem); ++ __ bne(tem, lrem); ++ ++ __ saveTRegisters(); ++ __ move(A0, op2); ++ if(op1==A0) ++ __ ldl(A1, SP, 96); ++ else ++ __ move(A1, op1); ++ __ 
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), 2); ++ __ move(scratch, V0); ++ __ restoreTRegisters(); ++ __ move(dst, scratch); ++ __ beq(R0, exit); ++ ++ __ BIND(lrem); ++ __ lrem_sw(op1, op2, dst); ++ ++ __ BIND(exit); ++ } else { ++ __ saveTRegisters(); ++ __ move(A0, op2); ++ if(op1==A0) ++ __ ldl(A1, SP, 96); ++ else ++ __ move(A1, op1); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), 2); ++ __ move(scratch, V0); ++ __ restoreTRegisters(); ++ __ move(dst, scratch); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ ++ match(Set dst (MulI src1 src2)); ++ ++ ins_cost(300); ++ format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %} ++ ins_encode %{ ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ Register dst = $dst$$Register; ++ ++ __ mulw(dst, src1, src2); ++ %} ++ ins_pipe( ialu_mult ); ++%} ++ ++instruct mulI_Reg_imm_0_255(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ match(Set dst (MulI src1 src2)); ++ ++ ins_cost(300); ++ format %{ "mul $dst, $src1, $src2 @ mulI_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register src1 = $src1$$Register; ++ int src2 = $src2$$constant; ++ Register dst = $dst$$Register; ++ ++ __ mulw(dst, src1, src2); ++ %} ++ ins_pipe( ialu_mult_imm ); ++%} ++ ++instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{ ++ match(Set dst (AddI (MulI src1 src2) src3)); ++ ++ ins_cost(999); ++ format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %} ++ ins_encode %{ ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ Register src3 = $src3$$Register; ++ Register dst = $dst$$Register; ++ ++ __ mulw(src1, src2, AT); ++ __ addw(dst, AT, src3); ++ %} ++ ins_pipe( ialu_mult ); ++%} ++ ++//SW64:OKOK:lix 20170526 ++instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mT12RegI scratch) %{ ++ match(Set dst (DivI src1 src2)); ++ ++ ++ format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %} ++ ins_encode %{ 
++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ Register dst = $dst$$Register; ++ Register scratch = $scratch$$Register; ++ ++ if (UseSW8A) { ++ __ corrected_idivw(src1, src2, dst); ++ } else if (FastIntDiv) { ++ __ idiv_sw(src1, src2, dst); ++ } else { ++ __ saveTRegisters(); ++ __ move(A0, src2); ++ if(src1==A0) ++ __ ldl(A1, SP, 96); ++ else ++ __ move(A1, src1); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::sdiv), 2); ++ __ move(scratch, V0); ++ __ restoreTRegisters(); ++ __ move(dst, scratch); ++ } ++ %} ++ ins_pipe( ialu_mod ); ++%} ++ ++instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{ ++ match(Set dst (DivF src1 src2)); ++ ++ ins_cost(300); ++ format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %} ++ ins_encode %{ ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister src2 = $src2$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ /* Here do we need to trap an exception manually ? */ ++ __ div_s(dst, src1, src2); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{ ++ match(Set dst (DivD src1 src2)); ++ ++ ins_cost(300); ++ format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %} ++ ins_encode %{ ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister src2 = $src2$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ /* Here do we need to trap an exception manually ? 
*/ ++ __ div_d(dst, src1, src2); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct mulL_reg_reg(mRegL dst, mRegLorI2L src1, mRegLorI2L src2) %{ ++ match(Set dst (MulL src1 src2)); ++ format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register op1 = as_Register($src1$$reg); ++ Register op2 = as_Register($src2$$reg); ++ ++ __ mull(op1, op2, dst); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{ ++ match(Set dst (MulL src1 (ConvI2L src2))); ++ format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register op1 = as_Register($src1$$reg); ++ Register op2 = as_Register($src2$$reg); ++ ++ __ mull(op1, op2, dst); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct mulL_reg_imm_0_255(mRegL dst, mRegL src1, immL_0_255 src2) %{ ++ match(Set dst (MulI src1 src2)); ++ ins_cost(80); ++ format %{ "mul $dst, $src1, $src2 @ mulL_reg_imm_0_255" %} ++ ins_encode %{ ++ Register src1 = $src1$$Register; ++ int src2 = $src2$$constant; ++ Register dst = $dst$$Register; ++ ++ __ mull(src1, src2, dst); ++ %} ++ ins_pipe( mulL_reg_imm ); ++%} ++ ++instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2, t12_RegP scratch) %{ ++ match(Set dst (DivL src1 src2)); ++ format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %} ++ ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register op1 = as_Register($src1$$reg); ++ Register op2 = as_Register($src2$$reg); ++ Register scratch = $scratch$$Register; ++ ++ if (UseSW8A) { ++ __ corrected_idivl(op1, op2, dst); ++ } else if (FastLongDiv) { ++ Label ldiv, exit; ++ Register tem = AT; ++ ++ __ slll(tem, op1, 0xb); ++ __ sral(tem, tem, 0xb); ++ __ cmpeq(tem, op1, tem); ++ __ bne(tem, ldiv); ++ ++ __ saveTRegisters(); ++ __ move(A0, op2); ++ if(op1 == A0) ++ __ ldl(A1, SP, 96); ++ else ++ __ move(A1, op1); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, 
SharedRuntime::ldiv), 2); ++ __ move(scratch, V0); ++ __ restoreTRegisters(); ++ __ move(dst, scratch); ++ __ beq(R0, exit); ++ ++ __ BIND(ldiv); ++ __ ldiv_sw(op1, op2, dst); ++ ++ __ BIND(exit); ++ } else { ++ __ saveTRegisters(); ++ __ move(A0, op2); ++ if(op1 == A0) ++ __ ldl(A1, SP, 96); ++ else ++ __ move(A1, op1); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), 2); ++ __ move(scratch, V0); ++ __ restoreTRegisters(); ++ __ move(dst, scratch); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ ++ match(Set dst (AddF src1 src2)); ++ format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ add_s(dst, src1, src2); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ ++ match(Set dst (SubF src1 src2)); ++ format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ sub_s(dst, src1, src2); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ ++ match(Set dst (AddD src1 src2)); ++ format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = as_FloatRegister($src2$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ add_d(dst, src1, src2); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ ++ match(Set dst (SubD src1 src2)); ++ format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = as_FloatRegister($src1$$reg); ++ FloatRegister src2 = 
as_FloatRegister($src2$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ sub_d(dst, src1, src2); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct negF_reg(regF dst, regF src) %{ ++ match(Set dst (NegF src)); ++ format %{ "negF $dst, $src @negF_reg" %} ++ ins_encode %{ ++ FloatRegister src = as_FloatRegister($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ fneg(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct negD_reg(regD dst, regD src) %{ ++ match(Set dst (NegD src)); ++ format %{ "negD $dst, $src @negD_reg" %} ++ ins_encode %{ ++ FloatRegister src = as_FloatRegister($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ fneg(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++ ++instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ ++ match(Set dst (MulF src1 src2)); ++ format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister src2 = $src2$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ __ mul_s(dst, src1, src2); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{ ++ match(Set dst (AddF (MulF src1 src2) src3)); ++ ins_cost(10000); ++ format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister src2 = $src2$$FloatRegister; ++ FloatRegister src3 = $src3$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ __ mul_s(F29, src1, src2); //F29 as FcmpRes, here use as tmp FloatRegister ++ __ add_s(dst, F29, src3); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++// Mul two double precision floating piont number ++instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ ++ match(Set dst (MulD src1 src2)); ++ format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister 
src2 = $src2$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ __ mul_d(dst, src1, src2); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{ ++ match(Set dst (AddD (MulD src1 src2) src3)); ++ ins_cost(10000); ++ format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %} ++ ins_encode %{ ++ FloatRegister src1 = $src1$$FloatRegister; ++ FloatRegister src2 = $src2$$FloatRegister; ++ FloatRegister src3 = $src3$$FloatRegister; ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ __ mul_d(F29, src1, src2); //F29 as FcmpRes, here use as tmp FloatRegister ++ __ add_d(dst, F29, src3); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct absF_reg(regF dst, regF src) %{ ++ match(Set dst (AbsF src)); ++ ins_cost(100); ++ format %{ "absF $dst, $src @absF_reg" %} ++ ins_encode %{ ++ FloatRegister src = as_FloatRegister($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ fabs(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++ ++// intrinsics for math_native. 
++// AbsD SqrtD CosD SinD TanD LogD Log10D ++ ++instruct absD_reg(regD dst, regD src) %{ ++ match(Set dst (AbsD src)); ++ ins_cost(100); ++ format %{ "absD $dst, $src @absD_reg" %} ++ ins_encode %{ ++ FloatRegister src = as_FloatRegister($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ fabs(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct sqrtD_reg(regD dst, regD src) %{ ++ match(Set dst (SqrtD src)); ++ ins_cost(100); ++ format %{ "SqrtD $dst, $src @sqrtD_reg" %} ++ ins_encode %{ ++ FloatRegister src = as_FloatRegister($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ sqrt_d(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct sqrtF_reg(regF dst, regF src) %{ ++ match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); ++ ins_cost(100); ++ format %{ "SqrtF $dst, $src @sqrtF_reg" %} ++ ins_encode %{ ++ FloatRegister src = as_FloatRegister($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ sqrt_s(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++//----------------------------------Logical Instructions---------------------- ++//__________________________________Integer Logical Instructions------------- ++ ++//And Instuctions ++// And Register with Immediate ++instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{ ++ match(Set dst (AndI src1 src2)); ++ ++ format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ int val = $src2$$constant; ++ ++ __ move(AT, val); ++ __ and_reg(dst, src, AT); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct andI_Reg_imm_0_255(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ match(Set dst (AndI src1 src2)); ++ ins_cost(60); ++ ++ format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ int val = $src2$$constant; ++ ++ __ and_imm8(dst, src, val); ++ %} ++ 
ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct xorI_Reg_imm_0_255(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ match(Set dst (XorI src1 src2)); ++ ins_cost(60); ++ ++ format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ int val = $src2$$constant; ++ ++ __ xor_ins(dst, src, val); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct xorL_Reg_imm_0_255(mRegL dst, mRegL src1, immL_0_255 src2) %{ ++ match(Set dst (XorL src1 src2)); ++ ins_cost(60); ++ ++ format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ int val = $src2$$constant; ++ ++ __ xor_ins(dst, src, val); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++ ++instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{ ++ match(Set dst (AndI mask (LoadB mem))); ++ ins_cost(60); ++ ++ format %{ "lhu $dst, $mem #@lbu_and_lmask" %} ++ ins_encode(load_UB_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{ ++ match(Set dst (AndI (LoadB mem) mask)); ++ ins_cost(60); ++ ++ format %{ "lhu $dst, $mem #@lbu_and_rmask" %} ++ ins_encode(load_UB_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ ++ match(Set dst (AndI src1 src2)); ++ ++ format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ and_reg(dst, src1, src2); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++// And Long Register with Register ++instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegLorI2L src2) %{ ++ match(Set dst (AndL src1 src2)); ++ format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %} ++ ins_encode %{ ++ Register dst_reg = as_Register($dst$$reg); ++ Register src1_reg = as_Register($src1$$reg); ++ Register 
src2_reg = as_Register($src2$$reg); ++ ++ __ and_reg(dst_reg, src1_reg, src2_reg); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct andL_Reg_imm_0_255(mRegL dst, mRegL src1, immL_0_255 src2) %{ ++ match(Set dst (AndL src1 src2)); ++ ins_cost(60); ++ ++ format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ long val = $src2$$constant; ++ ++ __ and_imm8(dst, src, val); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct andL2I_Reg_imm_0_255(mRegI dst, mRegL src1, immL_0_255 src2) %{ ++ match(Set dst (ConvL2I (AndL src1 src2))); ++ ins_cost(60); ++ ++ format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ long val = $src2$$constant; ++ ++ __ and_imm8(dst, src, val); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++// Or Long Register with Register ++instruct orL_Reg_Reg(mRegL dst, mRegLorI2L src1, mRegLorI2L src2) %{ ++ match(Set dst (OrL src1 src2)); ++ format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %} ++ ins_encode %{ ++ Register dst_reg = $dst$$Register; ++ Register src1_reg = $src1$$Register; ++ Register src2_reg = $src2$$Register; ++ ++ __ or_ins(dst_reg, src1_reg, src2_reg); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegLorI2L src2) %{ ++ match(Set dst (OrL (CastP2X src1) src2)); ++ format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %} ++ ins_encode %{ ++ Register dst_reg = $dst$$Register; ++ Register src1_reg = $src1$$Register; ++ Register src2_reg = $src2$$Register; ++ ++ __ or_ins(dst_reg, src1_reg, src2_reg); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct orL_Reg_imm_0_255(mRegL dst, mRegL src1, immL_0_255 src2) %{ ++ match(Set dst (OrL src1 src2)); ++ ins_cost(80); ++ format %{ "OR $dst, $src1, $src2 @ orL_Reg_imm_0_255\t" %} ++ ins_encode %{ ++ Register dst_reg = $dst$$Register; ++ Register 
src1_reg = $src1$$Register; ++ int src2 = $src2$$constant; ++ ++ __ or_ins(dst_reg, src1_reg, src2); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Xor Long Register with Register ++instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{ ++ match(Set dst (XorL src1 src2)); ++ format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %} ++ ins_encode %{ ++ Register dst_reg = as_Register($dst$$reg); ++ Register src1_reg = as_Register($src1$$reg); ++ Register src2_reg = as_Register($src2$$reg); ++ ++ __ xor_ins(dst_reg, src1_reg, src2_reg); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Bic Int Register with Register ++instruct andI_Reg_not_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 m1) %{ ++ match(Set dst (AndI src1 (XorI src2 m1))); ++ ++ format %{ "ANDNOT $dst, $src1, $src2 #@bicI_Reg_Reg\t" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ bic(dst, src1, src2); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++// Bic long Register with Register ++instruct andL_Reg_not_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 m1) %{ ++ match(Set dst (AndL src1 (XorL src2 m1))); ++ ++ format %{ "ANDNOT $dst, $src1, $src2 #@bicL_Reg_Reg\t" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ bic(dst, src1, src2); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Shift Left by 8-bit immediate ++instruct salI_Reg_imm(mRegI dst, mRegI src, immI_0_255 shift) %{ ++ match(Set dst (LShiftI src shift)); ++ ++ format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ int shamt = $shift$$constant; ++ if(UseSW8A) { ++ __ sllw(src, shamt, dst); ++ } else { ++ __ sllw_signed(dst, src, shamt); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen) ++%{ ++ match(Set dst (RShiftI 
(LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen)); ++ ++ format %{ "andi $dst, $src, 7\t# @land7_2_s" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ ++ __ and_imm8(dst, src, 7); ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16. ++// This idiom is used by the compiler the i2s bytecode. ++instruct i2s(mRegI dst, mRegI src, immI_16 sixteen) ++%{ ++ match(Set dst (RShiftI (LShiftI src sixteen) sixteen)); ++ ++ format %{ "i2s $dst, $src\t# @i2s" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ ++ __ sexth(dst, src); // __ seh(dst, src); ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24. ++// This idiom is used by the compiler for the i2b bytecode. ++instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour) ++%{ ++ match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour)); ++ ++ format %{ "i2b $dst, $src\t# @i2b" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ ++ __ sextb(dst, src); // __ seb(dst, src); ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++ ++instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI_0_255 shift) %{ ++ match(Set dst (LShiftI (ConvL2I src) shift)); ++ ++ format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ int shamt = $shift$$constant; ++ if(UseSW8A) { ++ __ sllw(src, shamt, dst); ++ } else { ++ __ sllw_signed(dst, src, shamt); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++// Shift Left by 8-bit immediate ++instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{ ++ match(Set dst (LShiftI src shift)); ++ ++ format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ Register shamt = $shift$$Register; ++ ++ if 
(UseSW8A) { ++ __ sllw(src, shamt, dst); ++ } else { ++ __ and_imm8(GP, shamt, 0x1f); ++ __ slll(dst, src, GP); ++ __ addw(dst, dst, 0x0); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++ ++// Shift Left Long ++instruct salL_Reg_imm(mRegL dst, mRegLorI2L src, immI_0_255 shift) %{ ++ //predicate(UseNewLongLShift); ++ match(Set dst (LShiftL src shift)); ++ ins_cost(100); ++ format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %} ++ ins_encode %{ ++ Register src_reg = as_Register($src$$reg); ++ Register dst_reg = as_Register($dst$$reg); ++ int shamt = $shift$$constant; ++ ++ __ slll(dst_reg, src_reg, shamt); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Shift Left Long ++instruct salL_Reg_Reg(mRegL dst, mRegLorI2L src, mRegI shift) %{ ++ //predicate(UseNewLongLShift); ++ match(Set dst (LShiftL src shift)); ++ ins_cost(100); ++ format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %} ++ ins_encode %{ ++ Register src_reg = as_Register($src$$reg); ++ Register dst_reg = as_Register($dst$$reg); ++ ++ __ slll(dst_reg, src_reg, $shift$$Register); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Shift Right Long ++instruct sarL_Reg_imm(mRegL dst, mRegLorI2L src, immI_0_255 shift) %{ ++ match(Set dst (RShiftL src shift)); ++ ins_cost(100); ++ format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %} ++ ins_encode %{ ++ __ sral($dst$$Register, $src$$Register, $shift$$constant); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Shift Right Long arithmetically ++instruct sarL_Reg_Reg(mRegL dst, mRegLorI2L src, mRegI shift) %{ ++ ++ match(Set dst (RShiftL src shift)); ++ ins_cost(100); ++ format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %} ++ ins_encode %{ ++ Register src_reg = as_Register($src$$reg); ++ Register dst_reg = as_Register($dst$$reg); ++ ++ __ sral(dst_reg, src_reg, $shift$$Register); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Shift Right Long logically ++instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{ ++ match(Set dst (URShiftL src shift)); ++ 
ins_cost(100); ++ format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %} ++ ins_encode %{ ++ Register src_reg = as_Register($src$$reg); ++ Register dst_reg = as_Register($dst$$reg); ++ ++ __ srll(dst_reg, src_reg, $shift$$Register); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct slrL_Reg_immI_0_63(mRegL dst, mRegLorI2L src, immI_0_63 shift) %{ ++ match(Set dst (URShiftL src shift)); ++ ins_cost(80); ++ format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_63" %} ++ ins_encode %{ ++ Register src_reg = as_Register($src$$reg); ++ Register dst_reg = as_Register($dst$$reg); ++ int shamt = $shift$$constant; ++ ++ __ srll(dst_reg, src_reg, shamt); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct slrL_P2XReg_immI_0_63(mRegL dst, mRegP src, immI_0_63 shift) %{ ++ match(Set dst (URShiftL (CastP2X src) shift)); ++ ins_cost(80); ++ format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_63" %} ++ ins_encode %{ ++ Register src_reg = as_Register($src$$reg); ++ Register dst_reg = as_Register($dst$$reg); ++ int shamt = $shift$$constant; ++ ++ __ srll(dst_reg, src_reg, shamt); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++// Xor Instructions ++// Xor Register with Register ++instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ ++ match(Set dst (XorI src1 src2)); ++ ++ format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ xor_ins(dst, src1, src2); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++// Or Instructions ++// Or Register with Register ++instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{ ++ match(Set dst (OrI src1 src2)); ++ ++ format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ or_ins(dst, src1, src2); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct orI_Reg_castP2X(mRegL dst, 
mRegL src1, mRegP src2) %{ ++ match(Set dst (OrI src1 (CastP2X src2))); ++ ++ format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ Register src2 = $src2$$Register; ++ __ or_ins(dst, src1, src2); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct orI_Reg_imm_0_255(mRegI dst, mRegI src1, immI_0_255 src2) %{ ++ match(Set dst (OrI src1 src2)); ++ ins_cost(80); ++ ++ format %{ "OR $dst, $src1, $src2 #@orI_Reg_imm_0_255" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src1 = $src1$$Register; ++ int src2 = $src2$$constant; ++ __ or_ins(dst, src1, src2); ++ %} ++ ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++// Logical Shift Right by 8-bit immediate ++instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI_0_255 shift) %{ ++ match(Set dst (URShiftI src shift)); ++ // effect(KILL cr); ++ ++ format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ int shift = $shift$$constant; ++ if(UseSW8A) { ++ __ srlw(src, shift, dst); ++ } else { ++ __ zapnot(dst, src, 0xf); ++ __ srll(dst, dst, shift&0x1f); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++// Logical Shift Right ++instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{ ++ match(Set dst (URShiftI src shift)); ++ ++ format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ Register shift = $shift$$Register; ++ if(UseSW8A) { ++ __ srlw(src, shift, dst); ++ } else { ++ __ and_imm8(GP, shift, 0x1f); ++ __ zapnot(dst, src, 0xf); ++ __ srll(dst, dst, GP); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++ ++instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI_0_255 shift) %{ ++ match(Set dst (RShiftI src shift)); ++ // effect(KILL cr); ++ ++ format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %} ++ ins_encode %{ ++ 
Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ int shift = $shift$$constant; ++ if(UseSW8A) { ++ __ sraw(src, shift, dst); ++ } else { ++ __ sral(dst, src, shift&0x1f); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{ ++ match(Set dst (RShiftI src shift)); ++ // effect(KILL cr); ++ ++ format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ Register shift = $shift$$Register; ++ if(UseSW8A) { ++ __ sraw(src, shift, dst); ++ } else { ++ __ and_imm8(GP, shift, 0x1f); ++ __ addw(dst, src, 0); ++ __ sral(dst, dst, GP); ++ } ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct signExtract(mRegI dst, mRegI src1, immI_31 div1, immI_31 div2) %{ ++ match(Set dst (URShiftI (RShiftI src1 div1) div2)); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "srlw $dst, $src1, $div1" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ int shamt = $div1$$constant; ++ if (UseSW8A) { ++ __ srlw(src, shamt, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct signExtractL(mRegL dst, mRegL src1, immI_63 div1, immI_63 div2) %{ ++ match(Set dst (URShiftL (RShiftL src1 div1) div2)); ++ ins_cost(100); ++ format %{ "srll $dst, $src1, $div1" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src1$$Register; ++ int shamt = $div1$$constant; ++ __ srll(dst, src, shamt); ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct rolOrI(mRegI dst, mRegI src, immI lshift, immI rshift) %{ ++ predicate((0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f))&&UseSW8A); ++ match(Set dst (OrI (LShiftI src lshift) (URShiftI src rshift))); ++ ++ ins_cost(100); ++ format %{ "rolI $dst, $src, $lshift #@rolOrI" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int sa = 
$lshift$$constant & 0x1f; ++ ++ if (UseSW8A) { ++ __ rolw(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct rolOrL(mRegL dst, mRegL src, immL lshift, immL rshift) %{ ++ predicate((0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f))&&UseSW8A); ++ match(Set dst (OrL (LShiftL src lshift) (URShiftL src rshift))); ++ ++ ins_cost(100); ++ format %{ "rolL $dst, $src, $lshift #@rolOrL" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int sa = $lshift$$constant & 0x3f; ++ ++ if (UseSW8A) { ++ __ roll(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct rolAddI(mRegI dst, mRegI src, immI lshift, immI rshift) %{ ++ predicate((0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f))&&UseSW8A); ++ match(Set dst (AddI (LShiftI src lshift) (URShiftI src rshift))); ++ ++ ins_cost(100); ++ format %{ "rolI $dst, $src, $lshift #@rolAddI" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int sa = $lshift$$constant & 0x1f; ++ ++ if (UseSW8A) { ++ __ rolw(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct rolAddL(mRegL dst, mRegL src, immL lshift, immL rshift) %{ ++ predicate((0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f))&&UseSW8A); ++ match(Set dst (AddL (LShiftL src lshift) (URShiftL src rshift))); ++ ++ ins_cost(100); ++ format %{ "rolL $dst, $src, $lshift #@rolAddL" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ int sa = $lshift$$constant & 0x3f; ++ ++ if (UseSW8A) { ++ __ roll(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct rolI_Reg_Var_c_0(mRegI dst, mRegI src, mRegI shift, immI0 c_0) %{ ++ match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_0 shift)))); ++ 
predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rolI $dst, $src, $shift #@rolI_Reg_Var_c_0" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ rolw(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct rolL_Reg_Var_c_0(mRegL dst, mRegL src, mRegI shift, immI0 c_0) %{ ++ match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_0 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rolL $dst, $src, $shift #@rolL_Reg_Var_c_0" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ roll(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct rolI_Reg_Var_c_32(mRegI dst, mRegI src, mRegI shift, immI_32 c_32) %{ ++ match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rolI $dst, $src, $shift #@rolI_Reg_Var_c_32" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ rolw(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct rolL_Reg_Var_c_64(mRegL dst, mRegL src, mRegI shift, immI_64 c_64) %{ ++ match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rolL $dst, $src, $shift #@rolL_Reg_Var_c_64" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ roll(src, sa, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct rorI_Reg_Var_c_0(mRegI dst, mRegI src, mRegI shift, immI0 c_0) %{ ++ match(Set dst (OrI (URShiftI src shift) (LShiftI 
src (SubI c_0 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rorI $dst, $src, $shift #@rorI_Reg_Var_c_0" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ and_imm8(GP, sa, 0x1f); ++ __ subw(GP, R0, GP); ++ __ rolw(src, GP, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct rorL_Reg_Var_c_0(mRegL dst, mRegL src, mRegI shift, immI0 c_0) %{ ++ match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_0 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rorL $dst, $src, $shift #@rorL_Reg_Var_c_0" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ and_imm8(GP, sa, 0x3f); ++ __ subw(GP, R0, GP); ++ __ roll(src, GP, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct rorI_Reg_Var_c_32(mRegI dst, mRegI src, mRegI shift, immI_32 c_32) %{ ++ match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rorI $dst, $src, $shift #@rorI_Reg_Var_c_32" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ and_imm8(GP, sa, 0x1f); ++ __ subw(GP, R0, GP); ++ __ rolw(src, GP, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct rorL_Reg_Var_c_64(mRegL dst, mRegL src, mRegI shift, immI_64 c_64) %{ ++ match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift)))); ++ predicate(UseSW8A); ++ ins_cost(100); ++ format %{ "rorL $dst, $src, $shift #@rorL_Reg_Var_c_64" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Register sa = $shift$$Register; ++ ++ if (UseSW8A) { ++ __ and_imm8(GP, sa, 0x3f); ++ __ 
subw(GP, R0, GP); ++ __ roll(src, GP, dst); ++ }else{ ++ Unimplemented(); ++ } ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++//----------Convert Int to Boolean--------------------------------------------- ++ ++instruct movI_nocopy(mRegI dst, mRegI src) %{ ++ effect( DEF dst, USE src ); ++ format %{ "MOV $dst, $src @ movI_nocopy" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ __ move(dst, src); ++ %} ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct ci2b(mRegI dst, mRegI src) %{ ++ effect( USE_DEF dst, USE src ); ++ ++ format %{ "NEG $dst @ ci2b\n\t" ++ "ADC $dst,$src @ ci2b" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ Label L; ++//If ( dst != 0 ) CF = 1; ++ guarantee(dst != src, "in ci2b"); ++ __ move(AT, src); ++ __ beq(dst, L); ++ __ addl(AT, AT, 1); ++ __ BIND(L); ++// __ neg(dst); ++ __ subw(dst, R0, dst); ++ __ addl(dst, dst, AT); ++ %} ++ ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++instruct convI2B(mRegI dst, mRegI src) %{ ++ match(Set dst (Conv2B src)); ++ ++ ins_cost(100); ++ format %{ "convI2B $dst, $src @ convI2B" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ __ selne(src, 1, src, dst); ++ %} ++ ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct convI2L_reg( mRegL dst, mRegI src) %{ ++ match(Set dst (ConvI2L src)); ++ ++ ins_cost(100); ++ format %{ "SLL $dst, $src @ convI2L_reg\t" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ if(dst != src) __ addw(dst, src, 0); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++instruct convL2I_reg( mRegI dst, mRegLorI2L src ) %{ ++ match(Set dst (ConvL2I src)); ++ ++ format %{ "MOV $dst, $src @ convL2I_reg" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ __ addw(dst, src, 0); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct 
convL2I2L_reg( mRegL dst, mRegL src ) %{ ++ match(Set dst (ConvI2L (ConvL2I src))); ++ ++ format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ __ sllw_signed(dst, src, 0); ++ %} ++ ++ ins_pipe( ialu_regI_regI ); ++%} ++ ++instruct convL2D_reg( regD dst, mRegL src ) %{ ++ match(Set dst (ConvL2D src)); ++ format %{ "convL2D $dst, $src @ convL2D_reg" %} ++ ins_encode %{ ++ Register src = as_Register($src$$reg); ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ if (UseSW8A) { ++ __ cmovld(dst, src); ++ } else { ++ __ ifmovd(F30, src); ++ __ fcvtld(dst, F30); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct convD2L_reg_fast( mRegL dst, regD src ) %{ ++ match(Set dst (ConvD2L src)); ++ ins_cost(150); ++ format %{ "convD2L $dst, $src @ convD2L_reg_fast" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ if (UseSW8A) { ++ __ cmovdl_z(dst, src); ++ } else { ++ FloatRegister temp_float_reg = F30; ++ FloatRegister temp_float_reg1 = F28; ++ assert((temp_float_reg1 != $src$$FloatRegister), "can not use F28"); ++ assert((temp_float_reg1 != $dst$$FloatRegister), "can not use F28"); ++ ++ Label Convert,Overflow,Done; ++ __ fcmpun(temp_float_reg, src, src); ++ __ fbne(temp_float_reg, Convert); //If Unorder,Jump to Convert Label ++ ++ __ or_ins(T12, R0, 1); ++ __ slll(T12, T12, 63); ++ ++ __ ifmovd(temp_float_reg, T12); ++ __ fcvtld(temp_float_reg1, temp_float_reg); ++ __ fcmple(temp_float_reg, src, temp_float_reg1); ++ __ fbne(temp_float_reg, Overflow); //If less than min_long(0x8000000000000000),jump to Skip Label ++ ++ __ subl(T12, T12, 0x1); ++ __ ifmovd(temp_float_reg, T12); ++ __ fcvtld(temp_float_reg1, temp_float_reg); ++ __ fcmple(temp_float_reg, temp_float_reg1, src); ++ __ fbne (temp_float_reg, Overflow); //If >= max_long(0x7fffffffffffffff),jump to Skip Label ++ ++ //Label Convert ++ __ 
BIND(Convert); ++ __ fcvtdl_z(temp_float_reg, src);//lx20121018,result is rounded toward zero ++ __ fimovd(dst, temp_float_reg); ++ __ beq (R0, Done); ++ //Labe Skip ++ __ BIND(Overflow); ++ __ move(dst,T12); ++ __ BIND(Done); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct convD2L_reg_slow( mRegL dst, regD src ) %{ ++ match(Set dst (ConvD2L src)); ++ ins_cost(250); ++ format %{ "convD2L $dst, $src @ convD2L_reg_slow" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ FloatRegister src = as_FloatRegister($src$$reg); ++ ++ __ saveTRegisters(); ++ __ fmovd(F16, src); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); ++ __ move(T12, V0); ++ __ restoreTRegisters(); ++ __ move(dst, T12); ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct convF2I_reg_fast( mRegI dst, regF src ) %{ ++ match(Set dst (ConvF2I src)); ++ ins_cost(150); ++ format %{ "convf2i $dst, $src @ convF2I_reg_fast" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ FloatRegister src = $src$$FloatRegister; ++ if (UseSW8A) { ++ __ cmovdw_z(dst, src); ++ } else { ++ Register temp_reg = T12; ++ FloatRegister temp_float_reg = F30; ++ FloatRegister temp_float_reg1 = F28; ++ Label Convert,Overflow,Done; ++ ++ __ fcmpun(temp_float_reg, src, src); ++ __ fbne(temp_float_reg,Convert); //If Unorder,Jump to Convert Label ++ ++ __ or_ins(temp_reg, R0, 1); ++ __ slll(temp_reg, temp_reg, 31); ++ ++ __ ifmovs(temp_float_reg, temp_reg); ++ __ fcvtwl(temp_float_reg1, temp_float_reg); ++ __ fcvtls(temp_float_reg, temp_float_reg1); ++ __ fcmple(temp_float_reg1, src, temp_float_reg); ++ __ fbne(temp_float_reg1,Overflow); //If less than min_int(0x80000000),jump to Skip Label ++ ++ __ subw(temp_reg, temp_reg,0x1); ++ __ ifmovs(temp_float_reg, temp_reg); ++ __ fcvtwl(temp_float_reg1, temp_float_reg); ++ __ fcvtls(temp_float_reg, temp_float_reg1); ++ __ fcmple(temp_float_reg1, temp_float_reg, src); ++ __ fbne(temp_float_reg1,Overflow); //If >= max_int(0x7fffffff),jump 
to Skip Label ++ ++ //Lable Convert ++ __ BIND(Convert); ++ __ fcvtsd(temp_float_reg, src); ++ __ fcvtdl_z(temp_float_reg1, temp_float_reg); ++ __ fcvtlw(temp_float_reg, temp_float_reg1); ++ __ fimovs(dst, temp_float_reg); ++ __ addw(dst, dst, 0); ++ __ beq(R0,Done); ++ ++ //Lable Skip ++ __ BIND(Overflow) ++ __ addw(dst, temp_reg, 0); ++ __ BIND(Done); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++ ++instruct convF2I_reg_slow( mRegI dst, regF src ) %{ ++ match(Set dst (ConvF2I src)); ++ ins_cost(250); ++ format %{ "convf2i $dst, $src @ convF2I_reg_slow" %} ++ ins_encode %{ ++ Register dreg = $dst$$Register; ++ FloatRegister fval = $src$$FloatRegister; ++ ++ __ saveTRegisters(); ++ __ fmovs(F16, fval); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); ++ __ move(T12, V0); ++ __ restoreTRegisters(); ++ __ move(dreg, T12); ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct convF2L_reg_fast( mRegL dst, regF src ) %{ ++ match(Set dst (ConvF2L src)); ++ ins_cost(150); ++ format %{ "convf2l $dst, $src @ convF2L_reg_fast" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ FloatRegister src = $src$$FloatRegister; ++ if(UseSW8A) { ++ __ cmovdl_z(dst, src); ++ }else { ++ Label Convert,Overflow,Done; ++ FloatRegister temp_float_reg = F30; ++ FloatRegister temp_float_reg1 = F28; ++ ++ __ fcmpun(temp_float_reg, src, src); ++ __ fbne(temp_float_reg,Convert); ++ ++ __ or_ins(T12, R0, 1); ++ __ slll(T12, T12, 63); ++ ++ __ ifmovd(temp_float_reg, T12); ++ __ fcvtls(temp_float_reg1, temp_float_reg); ++ __ fcmple(temp_float_reg, src,temp_float_reg1); ++ __ fbne(temp_float_reg,Overflow); //if less than min_long(0x8000000000000000),jump to Skip Lable ++ ++ __ subl(T12,T12,1); ++ __ ifmovd(temp_float_reg, T12); ++ __ fcvtls(temp_float_reg1, temp_float_reg); ++ __ fcmple(temp_float_reg, temp_float_reg1,src); ++ __ fbne(temp_float_reg,Overflow); // if >=max_long(0x7fffffffffffffff),jump to Skip Lable ++ ++ //Lable Convert ++ __ BIND(Convert); 
++ __ fcvtsd(temp_float_reg, src); ++ __ fcvtdl_z(temp_float_reg1, temp_float_reg); ++ __ fimovd(dst, temp_float_reg1); ++ __ beq(R0,Done); ++ ++ //Lable Skip ++ __ BIND(Overflow); ++ __ move(dst,T12); ++ __ BIND(Done); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct convF2L_reg_slow( mRegL dst, regF src ) %{ ++ match(Set dst (ConvF2L src)); ++ ins_cost(250); ++ format %{ "convf2l $dst, $src @ convF2L_reg_slow" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ FloatRegister fval = $src$$FloatRegister; ++ ++ __ saveTRegisters(); ++ __ fmovs(F16, fval); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); ++ __ move(T12, V0); ++ __ restoreTRegisters(); ++ __ move(dst, T12); ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct convL2F_reg( regF dst, mRegL src ) %{ ++ match(Set dst (ConvL2F src)); ++ format %{ "convl2f $dst, $src @ convL2F_reg" %} ++ ins_encode %{ ++ FloatRegister dst = $dst$$FloatRegister; ++ Register src = as_Register($src$$reg); ++ if (UseSW8A){ ++ __ cmovls(dst, src); ++ } else { ++ __ ifmovd(F30, src); ++ __ fcvtls(dst, F30); ++ } ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct convI2F_reg( regF dst, mRegI src ) %{ ++ match(Set dst (ConvI2F src)); ++ format %{ "convi2f $dst, $src @ convI2F_reg" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ FloatRegister dst = $dst$$FloatRegister; ++ if (UseSW8A) { ++ __ cmovws(dst, src); ++ } else { ++ __ ifmovs(F30, src); ++ __ fcvtwl(F28, F30); ++ __ fcvtls(dst, F28); ++ } ++ ++ %} ++ ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{ ++ match(Set dst (CmpLTMask p zero)); ++ ins_cost(100); ++ ++ format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %} ++ ins_encode %{ ++ Register src = $p$$Register; ++ Register dst = $dst$$Register; ++ if(UseSW8A) { ++ __ sraw(src, 31, dst); ++ } else { ++ __ sral(dst, src, 31&0x1f); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct cmpLTMask( mRegI dst, mRegI p, 
mRegI q ) %{ ++ match(Set dst (CmpLTMask p q)); ++ ins_cost(400); ++ ++ format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %} ++ ins_encode %{ ++ Register p = $p$$Register; ++ Register q = $q$$Register; ++ Register dst = $dst$$Register; ++ ++ __ cmplt(dst, p, q); ++ __ subl(dst, R0, dst); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct convP2B(mRegI dst, mRegP src) %{ ++ match(Set dst (Conv2B src)); ++ ++ ins_cost(100); ++ format %{ "convP2B $dst, $src @convP2B" %} ++ ins_encode %{ ++ Register dst = as_Register($dst$$reg); ++ Register src = as_Register($src$$reg); ++ ++ __ selne(src, 1, src, dst); ++ %} ++ ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++instruct convI2D_reg_reg(regD dst, mRegI src) %{ ++ match(Set dst (ConvI2D src)); ++ format %{ "conI2D $dst, $src @convI2D_reg" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ FloatRegister dst = $dst$$FloatRegister; ++ if (UseSW8A){ ++ __ cmovwd(dst, src); ++ } else { ++ __ ifmovd(F30, src); ++ __ fcvtld(dst, F30); ++ } ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct convF2D_reg_reg(regD dst, regF src) %{ ++ match(Set dst (ConvF2D src)); ++ format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %} ++ ins_encode %{ ++ FloatRegister dst = $dst$$FloatRegister; ++ FloatRegister src = $src$$FloatRegister; ++ ++ __ fcvtD2S(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++instruct convD2F_reg_reg(regF dst, regD src) %{ ++ match(Set dst (ConvD2F src)); ++ format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %} ++ ins_encode %{ ++ FloatRegister dst = $dst$$FloatRegister; ++ FloatRegister src = $src$$FloatRegister; ++ ++ __ fcvtS2D(dst, src); ++ %} ++ ins_pipe( fpu_regF_regF ); ++%} ++ ++ ++// Convert a double to an int. If the double is a NAN, stuff a zero in instead. 
++instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{ ++ match(Set dst (ConvD2I src)); ++ ++ ins_cost(150); ++ format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %} ++ ++ ins_encode %{ ++ FloatRegister src = $src$$FloatRegister; ++ Register dst = $dst$$Register; ++ if (UseSW8A) { ++ __ cmovdw_z(dst, src); ++ } else { ++ FloatRegister temp_float_reg = F30; ++ FloatRegister temp_float_reg1 = F28; ++ FloatRegister tmp = F27; ++ ++ assert( (temp_float_reg1 != src), "can not use F28"); ++ assert( (temp_float_reg != src), "can not use F28"); ++ Label Convert,Overflow,Done; ++ __ fcmpun(temp_float_reg, src, src); ++ __ fbne (temp_float_reg, Convert); //If Unorder,Jump to Convert Label ++ ++ __ bis (T12, R0, 1); ++ __ slll(T12, T12, 31); ++ __ ifmovs(temp_float_reg, T12); ++ __ fcvtwl(temp_float_reg1, temp_float_reg); ++ __ fcvtld(temp_float_reg, temp_float_reg1); ++ __ fcmple(temp_float_reg1, src, temp_float_reg); ++ __ fbne (temp_float_reg1, Overflow); //If less than min_int(0x80000000),jump to Skip Label ++ ++ __ subw(T12, T12, 0x1); ++ __ ifmovs(temp_float_reg, T12); ++ __ fcvtwl(temp_float_reg1, temp_float_reg); ++ __ fcvtld(temp_float_reg, temp_float_reg1); ++ __ fcmple(temp_float_reg1, temp_float_reg, src); ++ __ fbne (temp_float_reg1, Overflow); //If >= max_int(0x7fffffff),jump to Skip Label ++ ++ //Label Convert ++ __ BIND(Convert); ++ __ fcvtdl_z(temp_float_reg, src);//lx20121018,result is rounded toward zero ++ __ fcvtlw(tmp, temp_float_reg); ++ __ fimovs(dst, tmp); ++ __ addw(dst, dst, 0); ++ __ beq (R0, Done); ++ //Labe Overflow ++ __ BIND(Overflow); ++ __ addw(dst, T12, 0); ++ __ BIND(Done); ++ } ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{ ++ match(Set dst (ConvD2I src)); ++ ++ ins_cost(250); ++ format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %} ++ ++ ins_encode %{ ++ FloatRegister src = $src$$FloatRegister; ++ Register dst = $dst$$Register; ++ ++ __ saveTRegisters(); ++ __ fmovd(F16, 
src); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); ++ __ move(T12, V0); ++ __ restoreTRegisters(); ++ __ move(dst, T12); ++ %} ++ ins_pipe( pipe_slow ); ++%} ++ ++// Convert oop pointer into compressed form ++instruct encodeHeapOop(mRegN dst, mRegP src) %{ ++ predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); ++ match(Set dst (EncodeP src)); ++ format %{ "encode_heap_oop $dst,$src" %} ++ ins_encode %{ ++ Register src = $src$$Register; ++ Register dst = $dst$$Register; ++ ++ __ encode_heap_oop(dst, src); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{ ++ predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); ++ match(Set dst (EncodeP src)); ++ format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %} ++ ins_encode %{ ++ __ encode_heap_oop_not_null($dst$$Register, $src$$Register); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct decodeHeapOop(mRegP dst, mRegN src) %{ ++ predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull && ++ n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant); ++ match(Set dst (DecodeN src)); ++ format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %} ++ ins_encode %{ ++ Register s = $src$$Register; ++ Register d = $dst$$Register; ++ ++ __ decode_heap_oop(d, s); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{ ++ predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull || ++ n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant); ++ match(Set dst (DecodeN src)); ++ format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %} ++ ins_encode %{ ++ Register s = $src$$Register; ++ Register d = $dst$$Register; ++ if (s != d) { ++ __ decode_heap_oop_not_null(d, s); ++ } else { ++ __ decode_heap_oop_not_null(d); ++ } ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct encodeKlass_not_null(mRegN dst, mRegP src) %{ ++ match(Set 
dst (EncodePKlass src)); ++ format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %} ++ ins_encode %{ ++ __ encode_klass_not_null($dst$$Register, $src$$Register); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct decodeKlass_not_null(mRegP dst, mRegN src) %{ ++ match(Set dst (DecodeNKlass src)); ++ format %{ "decode_heap_klass_not_null $dst,$src" %} ++ ins_encode %{ ++ Register s = $src$$Register; ++ Register d = $dst$$Register; ++ if (s != d) { ++ __ decode_klass_not_null(d, s); ++ } else { ++ __ decode_klass_not_null(d); ++ } ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++ ++instruct tlsLoadP(mRegP dst) %{ ++ match(Set dst (ThreadLocal)); ++ ++ ins_cost(0); ++ format %{ " get_thread in $dst #@tlsLoadP" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ __ move(dst, S2thread); ++ %} ++ ++ ins_pipe( ialu_loadI ); ++%} ++ ++ ++instruct checkCastPP( mRegP dst ) %{ ++ match(Set dst (CheckCastPP dst)); ++ ++ format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %} ++ ins_encode( /*empty encoding*/ ); ++ ins_pipe( empty ); ++%} ++ ++instruct castPP(mRegP dst) ++%{ ++ match(Set dst (CastPP dst)); ++ ++ size(0); ++ format %{ "# castPP of $dst" %} ++ ins_encode(/* empty encoding */); ++ ins_pipe(empty); ++%} ++ ++instruct castII( mRegI dst ) %{ ++ match(Set dst (CastII dst)); ++ format %{ "#castII of $dst empty encoding" %} ++ ins_encode( /*empty encoding*/ ); ++ ins_cost(0); ++ ins_pipe( empty ); ++%} ++ ++// Return Instruction ++// Remove the return address & jump to it. ++instruct Ret() %{ ++ match(Return); ++ format %{ "RET #@Ret" %} ++ ++ ins_encode %{ ++ __ ret(); ++ %} ++ ++ ins_pipe( pipe_jump ); ++%} ++ ++/* ++// Jr seems too slow, so this rule shouldn't be imported. 
++instruct jumpXtnd(mRegL switch_val) %{ ++ match(Jump switch_val); ++ ++ ins_cost(350); ++ ++ format %{ "load T12 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t" ++ "jr T12\n\t" %} ++ ins_encode %{ ++ Register table_base = $constanttablebase; ++ int con_offset = $constantoffset; ++ Register switch_reg = $switch_val$$Register; ++ ++ if (Assembler::is_simm16(con_offset)) { ++ __ addl(T12, table_base, switch_reg); ++ __ ldl(T12, T12, con_offset); ++ } else { ++ __ move(T12, con_offset); ++ __ addl(AT, table_base, switch_reg); ++ __ addl(AT, T12, AT); ++ __ ldl(T12, AT, 0); ++ } ++ ++ __ jmp(T12); ++ ++ %} ++ ins_pipe(pipe_jump); ++%} ++*/ ++ ++// Jump Direct - Label defines a relative address from JMP ++instruct jmpDir(label labl) %{ ++ match(Goto); ++ effect(USE labl); ++ ins_cost(300); ++ format %{ "JMP $labl #@jmpDir" %} ++ ins_encode %{ ++ Label &L = *($labl$$label); ++ if(&L) ++ __ beq(R0, L); ++ else ++ __ beq(R0, int(0)); ++ %} ++ ins_pipe( pipe_jump ); ++ ins_pc_relative(1); ++%} ++ ++// Tail Jump; remove the return address; jump to target. ++// TailCall above leaves the return address around. ++// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2). ++// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a ++// "restore" before this instruction (in Epilogue), we need to materialize it ++// in %i0. 
++ ++instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{ ++ match( TailJump jump_target ex_oop ); ++ ins_cost(200); ++ format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %} ++ ins_encode %{ ++ Register target = $jump_target$$Register; ++ ++ // V0, T4 are indicated in: ++ // [stubGenerator_sw64.cpp] generate_forward_exception() ++ // [runtime_sw64.cpp] OptoRuntime::generate_exception_blob() ++ // ++ Register oop = $ex_oop$$Register; ++ Register exception_oop = V0; ++ Register exception_pc = T4; ++ ++ __ move(exception_pc, RA); ++ __ move(exception_oop, oop); ++ ++ __ jmp(target); ++ %} ++ ins_pipe( pipe_jump ); ++%} ++ ++// ============================================================================ ++// Procedure Call/Return Instructions ++// Call Java Static Instruction ++// Note: If this code changes, the corresponding ret_addr_offset() and ++// compute_padding() functions will have to be adjusted. ++instruct CallStaticJavaDirect(method meth) %{ ++ match(CallStaticJava); ++ effect(USE meth); ++ ++ ins_cost(300); ++ format %{ "CALL,static #@CallStaticJavaDirect " %} ++ ins_encode( Java_Static_Call( meth ) ); ++ ins_pipe( pipe_slow ); ++ ins_pc_relative(1); ++%} ++ ++// Call Java Dynamic Instruction ++// Note: If this code changes, the corresponding ret_addr_offset() and ++// compute_padding() functions will have to be adjusted. ++instruct CallDynamicJavaDirect(method meth) %{ ++ match(CallDynamicJava); ++ effect(USE meth); ++ ++ ins_cost(300); ++ format %{"MOV IC_Klass, #Universe::non_oop_word()\n\t" ++ "CallDynamic @ CallDynamicJavaDirect" %} ++ ins_encode( Java_Dynamic_Call( meth ) ); ++ ins_pipe( pipe_slow ); ++ ins_pc_relative(1); ++%} ++ ++instruct CallLeafNoFPDirect(method meth) %{ ++ match(CallLeafNoFP); ++ effect(USE meth); ++ ++ ins_cost(300); ++ format %{ "CALL_LEAF_NOFP,runtime " %} ++ ins_encode(Java_To_Runtime(meth)); ++ ins_pipe( pipe_slow ); ++ ins_pc_relative(1); ++ ins_alignment(16); ++%} ++ ++// Prefetch instructions. 
++ ++instruct prefetchrNTA( memory mem ) %{ ++ match(PrefetchRead mem); ++ ins_cost(125); ++ ++ format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %} ++ ins_encode %{ ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ add_simm16(AT, as_Register(base), disp); ++ } else { ++ __ move(T9, disp); ++ __ addl(AT, as_Register(base), T9); ++ } ++ __ fillcs(AT, 0); ++ ++ %} ++ ins_pipe(pipe_slow); ++%} ++ ++instruct prefetchwNTA( memory mem ) %{ ++ match(PrefetchWrite mem); ++ ins_cost(125); ++ format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %} ++ ins_encode %{ ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ add_simm16(AT, as_Register(base), disp); ++ } else { ++ __ move(T9, disp); ++ __ addl(AT, as_Register(base), T9); ++ } ++ __ fillde(AT, 0); ++ ++ %} ++ ins_pipe(pipe_slow); ++%} ++ ++// Prefetch instructions for allocation. ++ ++instruct prefetchAllocNTA( memory mem ) %{ ++ match(PrefetchAllocation mem); ++ ins_cost(125); ++ format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %} ++ ins_encode %{ ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ Register dst = R0; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ fillde(as_Register(base), disp); ++ } else { ++ __ move(T9, disp); ++ __ addl(AT, as_Register(base), T9); ++ __ fillde(AT, 0); ++ } ++// } ++ %} ++ ins_pipe(pipe_slow); ++%} ++ ++ ++// Call runtime without safepoint ++instruct CallLeafDirect(method meth) %{ ++ match(CallLeaf); ++ effect(USE meth); ++ ++ ins_cost(300); ++ format %{ "CALL_LEAF,runtime #@CallLeafDirect " %} ++ ins_encode(Java_To_Runtime(meth)); ++ ins_pipe( pipe_slow ); ++ ins_pc_relative(1); ++ ins_alignment(16); ++%} ++ ++// Load Char (16bit unsigned) ++instruct loadUS(mRegI dst, memory mem) %{ ++ match(Set dst (LoadUS mem)); ++ ++ ins_cost(125); ++ format %{ "loadUS $dst,$mem @ loadC" %} ++ ins_encode(load_C_enc(dst, 
mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct loadUS_convI2L(mRegL dst, memory mem) %{ ++ match(Set dst (ConvI2L (LoadUS mem))); ++ ++ ins_cost(125); ++ format %{ "loadUS $dst,$mem @ loadUS_convI2L" %} ++ ins_encode(load_C_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Store Char (16bit unsigned) ++instruct storeC(memory mem, mRegI src) %{ ++ match(Set mem (StoreC mem src)); ++ ++ ins_cost(125); ++ format %{ "storeC $src, $mem @ storeC" %} ++ ins_encode(store_C_reg_enc(mem, src)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct storeC0(memory mem, immI0 zero) %{ ++ match(Set mem (StoreC mem zero)); ++ ++ ins_cost(125); ++ format %{ "storeC $zero, $mem @ storeC0" %} ++ ins_encode(store_C0_enc(mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++ ++instruct loadConF0(regF dst, immF0 zero) %{ ++ match(Set dst zero); ++ ins_cost(100); ++ ++ format %{ "mov $dst, zero @ loadConF0\n"%} ++ ins_encode %{ ++ FloatRegister dst = $dst$$FloatRegister; ++ ++ __ ifmovs(dst, R0); ++ %} ++ ins_pipe( fpu_loadF ); ++%} ++ ++ ++instruct loadConF(regF dst, immF src) %{ ++ match(Set dst src); ++ ins_cost(125); ++ ++ format %{ "flds $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %} ++ ins_encode %{ ++ int con_offset = $constantoffset($src); ++ ++ if (Assembler::is_simm16(con_offset)) { ++ __ flds($dst$$FloatRegister, $constanttablebase, con_offset); ++ } else { ++ __ set64(AT, con_offset); ++ __ addl(AT, $constanttablebase, AT); ++ __ flds($dst$$FloatRegister, AT, 0); ++ } ++ %} ++ ins_pipe( fpu_loadF ); ++%} ++ ++ ++instruct loadConD0(regD dst, immD0 zero) %{ ++ match(Set dst zero); ++ ins_cost(100); ++ ++ format %{ "mov $dst, zero @ loadConD0"%} ++ ins_encode %{ ++ FloatRegister dst = as_FloatRegister($dst$$reg); ++ ++ __ ifmovd(dst, R0); ++ %} ++ ins_pipe( fpu_loadF ); ++%} ++ ++instruct loadConD(regD dst, immD src) %{ ++ match(Set dst src); ++ ins_cost(125); ++ ++ format %{ "fldd $dst, $constantoffset[$constanttablebase] # load DOUBLE $src 
from table @ loadConD" %} ++ ins_encode %{ ++ int con_offset = $constantoffset($src); ++ ++ if (Assembler::is_simm16(con_offset)) { ++ __ fldd($dst$$FloatRegister, $constanttablebase, con_offset); ++ } else { ++ __ set64(AT, con_offset); ++ __ addl(AT, $constanttablebase, AT); ++ __ fldd($dst$$FloatRegister, AT, 0); ++ } ++ %} ++ ins_pipe( fpu_loadF ); ++%} ++ ++// Store register Float value (it is faster than store from FPU register) ++instruct storeF_reg( memory mem, regF src) %{ ++ match(Set mem (StoreF mem src)); ++ ++ ins_cost(50); ++ format %{ "store $mem, $src\t# store float @ storeF_reg" %} ++ ins_encode(store_F_reg_enc(mem, src)); ++ ins_pipe( fpu_storeF ); ++%} ++ ++instruct storeF_imm0( memory mem, immF0 zero) %{ ++ match(Set mem (StoreF mem zero)); ++ ++ ins_cost(40); ++ format %{ "store $mem, zero\t# store float @ storeF_imm0" %} ++ ins_encode %{ ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ fsts(F31, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ fsts(F31, AT, 0); ++ } ++ %} ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Load Double ++instruct loadD(regD dst, memory mem) %{ ++ match(Set dst (LoadD mem)); ++ ++ ins_cost(150); ++ format %{ "loadD $dst, $mem #@loadD" %} ++ ins_encode(load_D_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Load Double - UNaligned ++instruct loadD_unaligned(regD dst, memory mem ) %{ ++ match(Set dst (LoadD_unaligned mem)); ++ ins_cost(250); ++ format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %} ++ ins_encode(load_D_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++instruct storeD_reg( memory mem, regD src) %{ ++ match(Set mem (StoreD mem src)); ++ ++ ins_cost(50); ++ format %{ "store $mem, $src\t# store float @ storeD_reg" %} ++ ins_encode(store_D_reg_enc(mem, src)); ++ ins_pipe( fpu_storeF ); ++%} ++ ++instruct storeD_imm0( memory mem, immD0 zero) %{ ++ match(Set mem (StoreD mem zero)); ++ ++ 
ins_cost(40); ++ format %{ "store $mem, zero\t# store float @ storeD_imm0" %} ++ ins_encode %{ ++ int base = $mem$$base; ++ int disp = $mem$$disp; ++ ++ if( Assembler::is_simm16(disp) ) { ++ __ fstd(F31, as_Register(base), disp); ++ } else { ++ __ move(T12, disp); ++ __ addl(AT, as_Register(base), T12); ++ __ fstd(F31, AT, 0); ++ } ++ %} ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct loadSSI(mRegI dst, stackSlotI src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "ldw $dst, $src\t# int stk @ loadSSI" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !"); ++ __ ldw($dst$$Register, SP, $src$$disp); ++ %} ++ ins_pipe(ialu_loadI); ++%} ++ ++instruct storeSSI(stackSlotI dst, mRegI src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(100); ++ format %{ "stw $dst, $src\t# int stk @ storeSSI" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !"); ++ __ stw($src$$Register, SP, $dst$$disp); ++ %} ++ ins_pipe(ialu_storeI); ++%} ++ ++instruct loadSSL(mRegL dst, stackSlotL src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "ld $dst, $src\t# long stk @ loadSSL" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !"); ++ __ ldl($dst$$Register, SP, $src$$disp); ++ %} ++ ins_pipe(ialu_loadI); ++%} ++ ++instruct storeSSL(stackSlotL dst, mRegL src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(100); ++ format %{ "stl $dst, $src\t# long stk @ storeSSL" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !"); ++ __ stl($src$$Register, SP, $dst$$disp); ++ %} ++ ins_pipe(ialu_storeI); ++%} ++ ++instruct loadSSP(mRegP dst, stackSlotP src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !"); ++ __ ldl($dst$$Register, SP, $src$$disp); ++ %} ++ 
ins_pipe(ialu_loadI); ++%} ++ ++instruct storeSSP(stackSlotP dst, mRegP src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(100); ++ format %{ "stl $dst, $src\t# ptr stk @ storeSSP" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !"); ++ __ stl($src$$Register, SP, $dst$$disp); ++ %} ++ ins_pipe(ialu_storeI); ++%} ++ ++instruct loadSSF(regF dst, stackSlotF src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "flds $dst, $src\t# float stk @ loadSSF" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !"); ++ __ flds($dst$$FloatRegister, SP, $src$$disp); ++ %} ++ ins_pipe(ialu_loadI); ++%} ++ ++instruct storeSSF(stackSlotF dst, regF src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(100); ++ format %{ "fsts $dst, $src\t# float stk @ storeSSF" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !"); ++ __ fsts($src$$FloatRegister, SP, $dst$$disp); ++ %} ++ ins_pipe(fpu_storeF); ++%} ++ ++// Use the same format since predicate() can not be used here. 
++instruct loadSSD(regD dst, stackSlotD src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(125); ++ format %{ "fldd $dst, $src\t# double stk @ loadSSD" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !"); ++ __ fldd($dst$$FloatRegister, SP, $src$$disp); ++ %} ++ ins_pipe(ialu_loadI); ++%} ++ ++instruct storeSSD(stackSlotD dst, regD src) ++%{ ++ match(Set dst src); ++ ++ ins_cost(100); ++ format %{ "fstd $dst, $src\t# double stk @ storeSSD" %} ++ ins_encode %{ ++ guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !"); ++ __ fstd($src$$FloatRegister, SP, $dst$$disp); ++ %} ++ ins_pipe(fpu_storeF); ++%} ++ ++instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr, t10RegL tmpT10) %{ ++ match( Set cr (FastLock object box) ); ++ effect( TEMP tmp, TEMP scr, TEMP tmpT10, USE_KILL box ); ++ ins_cost(300); ++ format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %} ++ ins_encode %{ ++ __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register); ++ %} ++ ++ ins_pipe( pipe_slow ); ++ ins_pc_relative(1); ++%} ++ ++instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp, t10RegL tmpT10) %{ ++ match( Set cr (FastUnlock object box) ); ++ effect( TEMP tmp, TEMP tmpT10, USE_KILL box ); ++ ins_cost(300); ++ //size(144); //ZHJ20170905 ++ format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %} ++ ins_encode %{ ++ __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register); ++ %} ++ ++ ins_pipe( pipe_slow ); ++ ins_pc_relative(1); ++%} ++ ++/* no necessary // Store CMS card-mark ++instruct storeImmCM(memory mem, mRegI src) %{ ++ match(Set mem (StoreCM mem src)); ++ ++ ins_cost(200); ++ format %{ "MOV8 $mem,$src\t! 
CMS card-mark" %} ++ ins_encode(store_B_reg_enc_sync(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++*/ ++ ++instruct storeimmCM0_ordered(memory mem, immI0 src) %{ ++ match(Set mem (StoreCM mem src)); ++ ins_cost(200); ++ format %{ "MOV8 $mem,$src\t! CMS card-mark imm0 ordered" %} ++ ins_encode(store_B_immI0_enc_sync(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++instruct storeImm0CM(memory mem, immI0 src) %{ ++ match(Set mem (StoreCM mem src)); ++ predicate(unnecessary_storestore(n)); ++ ins_cost(100); ++ format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %} ++ ins_encode(store_B_immI0(mem, src)); ++ ins_pipe( ialu_storeI ); ++%} ++ ++// Die now ++instruct ShouldNotReachHere( ) ++%{ ++ match(Halt); ++ ins_cost(300); ++ ++ // Use the following format syntax ++ format %{ "ILLTRAP ;#@ShouldNotReachHere" %} ++ ins_encode %{ ++ // Here we should emit illtrap ! ++ ++ __ stop("in ShoudNotReachHere"); ++ ++ %} ++ ins_pipe( pipe_jump ); ++%} ++ ++instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem) ++%{ ++ predicate(Universe::narrow_oop_shift() == 0); ++ match(Set dst mem); ++ ++ ins_cost(110); ++ format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register base = as_Register($mem$$base); ++ int disp = $mem$$disp; ++ ++ __ add_simm16(dst, base, disp); ++ %} ++ ins_pipe( ialu_regI_imm16 ); ++%} ++ ++// Jump Direct Conditional - Label defines a relative address from Jcc+1 ++instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{ ++ match(CountedLoopEnd cop (CmpI src1 src2)); ++ effect(USE labl); ++ ++ ins_cost(300); ++ format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %} ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Register op2 = $src2$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) ++ { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, op2, L); ++ else ++ __ beq(op1, op2, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ 
bne(op1, op2, L); ++ else ++ __ bne(op1, op2, (int)0); ++ break; ++ case 0x03: //above ++ __ cmplt(AT, op2, op1); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ __ cmplt(AT, op1, op2); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x05: //below ++ __ cmplt(AT, op1, op2); ++ if(&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ __ cmplt(AT, op2, op1); ++ if(&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_jump ); ++ ins_pc_relative(1); ++%} ++ ++ instruct jmpLoopEnd_reg_imm16(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{ ++ match(CountedLoopEnd cop (CmpI src1 src2)); ++ effect(USE labl); ++ ++ ins_cost(150); ++ format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm_16_sub" %} ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ int val = $src2$$constant; ++ Label &L = *($labl$$label); ++ int flag = $cop$$cmpcode; ++ ++ __ add_simm16(AT, op1, -1 * val); ++ switch(flag) ++ { ++ case 0x01: //equal ++ if (&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x03: //above ++ if(&L) ++ __ bgt(AT, L); ++ else ++ __ bgt(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ if(&L) ++ __ bge(AT, L); ++ else ++ __ bge(AT,(int)0); ++ break; ++ case 0x05: //below ++ if(&L) ++ __ blt(AT, L); ++ else ++ __ blt(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ if(&L) ++ __ ble(AT, L); ++ else ++ __ ble(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_jump ); ++ ins_pc_relative(1); ++%} ++ ++ instruct jmpLoopEnd_reg_immI(cmpOp cop, mRegI src1, immI src2, label labl) %{ ++ match(CountedLoopEnd cop (CmpI src1 src2)); ++ effect(USE labl); ++ ++ ins_cost(300); ++ format %{ "J$cop $src1, $src2, $labl\t# Loop end @ 
jmpLoopEnd_reg_immI" %} ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Register op2 = AT; ++ Label &L = *($labl$$label); ++ int flag = $cop$$cmpcode; ++ ++ __ move(op2, $src2$$constant); ++ __ subl(AT, op1, AT); ++ switch(flag) ++ { ++ case 0x01: //equal ++ if (&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x03: //above ++ if(&L) ++ __ bgt(AT, L); ++ else ++ __ bgt(AT, (int)0); ++ break; ++ case 0x04: //above_equal ++ if(&L) ++ __ bge(AT, L); ++ else ++ __ bge(AT,(int)0); ++ break; ++ case 0x05: //below ++ if(&L) ++ __ blt(AT, L); ++ else ++ __ blt(AT, (int)0); ++ break; ++ case 0x06: //below_equal ++ if(&L) ++ __ ble(AT, L); ++ else ++ __ ble(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ins_pipe( pipe_jump ); ++ ins_pc_relative(1); ++%} ++ ++ instruct jmpLoopEnd_reg_immI0(cmpOp cop, mRegI src1, immI0 src2, label labl) %{ ++ match( CountedLoopEnd cop (CmpI src1 src2) ); ++ effect(USE labl); ++ ins_cost(170); ++ format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm0_short" %} ++ ++ ins_encode %{ ++ Register op1 = $src1$$Register; ++ Label &L = *($labl$$label); ++ int flag = $cop$$cmpcode; ++ ++ switch(flag) { ++ case 0x01: //equal ++ if (&L) ++ __ beq(op1, L); ++ else ++ __ beq(op1, (int)0); ++ break; ++ case 0x02: //not_equal ++ if (&L) ++ __ bne(op1, L); ++ else ++ __ bne(op1, (int)0); ++ break; ++ case 0x03: //greater ++ if(&L) ++ __ bgt(op1, L); ++ else ++ __ bgt(op1, (int)0); ++ break; ++ case 0x04: //greater_equal ++ if(&L) ++ __ bge(op1, L); ++ else ++ __ bge(op1, (int)0); ++ break; ++ case 0x05: //less ++ if(&L) ++ __ blt(op1, L); ++ else ++ __ blt(op1, (int)0); ++ break; ++ case 0x06: //less_equal ++ if(&L) ++ __ ble(op1, L); ++ else ++ __ ble(op1, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_jump ); ++ ins_pc_relative(1); ++//ZLONG ins_short_branch(1); 
++%} ++ ++instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{ ++ match(If cop cr); ++ effect(USE labl); ++ ++ ins_cost(300); ++ format %{ "J$cop $labl #sw64 uses AT as eflag @jmpCon_flags" %} ++ ++ ins_encode %{ ++ Label &L = *($labl$$label); ++ switch($cop$$cmpcode) ++ { ++ case 0x01: //equal ++ if (&L) ++ __ bne(AT, L); ++ else ++ __ bne(AT, (int)0); ++ break; ++ case 0x02: //not equal ++ if (&L) ++ __ beq(AT, L); ++ else ++ __ beq(AT, (int)0); ++ break; ++ default: ++ Unimplemented(); ++ } ++ %} ++ ++ ins_pipe( pipe_jump ); ++ ins_pc_relative(1); ++%} ++ ++ ++// ============================================================================ ++// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass ++// array for an instance of the superklass. Set a hidden internal cache on a ++// hit (cache is checked with exposed code in gen_subtype_check()). Return ++// NZ for a miss or zero for a hit. The encoding ALSO sets flags. ++instruct partialSubtypeCheck( mRegP result, no_T11_mRegP sub, no_T11_mRegP super, mT11RegI tmp ) %{ ++ match(Set result (PartialSubtypeCheck sub super)); ++ effect(KILL tmp); ++ ins_cost(1100); // slightly larger than the next version ++ format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %} ++ ++ ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) ); ++ ins_pipe( pipe_slow ); ++%} ++ ++// Conditional-store of an int value. ++// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel. 
++instruct storeIConditional( indirect mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{ ++ match(Set cr (StoreIConditional mem (Binary oldval newval))); ++// effect(KILL oldval); ++ format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %} ++ ++ ins_encode %{ ++ Register oldval = $oldval$$Register; ++ Register newval = $newval$$Register; ++ Address addr(as_Register($mem$$base), $mem$$disp); ++ Label again, nequal; ++ ++ int index = $mem$$index; ++ int disp = $mem$$disp; ++ SizedScope sc(&_masm, 40); ++ guarantee(disp == 0, "impossible encoding storeIConditional"); ++ if (UseSW8A) { ++ if (UseCAS) { ++ __ move(GP, newval); ++ __ casw(oldval, addr.base(), GP); ++ __ cmpeq(AT, GP, oldval); ++ } else { ++ __ BIND(again); ++ __ lldw(GP, addr.base(), addr.disp()); ++ __ cmpeq(AT, GP, oldval); ++ __ beq(AT, nequal); ++ __ move(AT, newval); ++ __ lstw(AT, addr.base(), addr.disp()); ++ __ beq(AT, again); ++ __ BIND(nequal); ++ } ++ } else { ++ __ BIND(again); ++ __ lldw(AT, addr.base(), addr.disp()); ++ __ cmpeq(GP, AT, oldval); ++ __ wr_f(GP); ++ __ move(AT, newval); ++ __ align(8); ++ __ lstw(AT, addr.base(), addr.disp()); ++ __ rd_f(AT); ++ __ beq(GP, nequal); ++ __ beq(AT, again); ++ __ BIND(nequal); ++ } ++%} ++ ins_pipe( long_memory_op ); ++%} ++ ++// LoadP-locked same as a regular LoadP when used with compare-swap ++instruct loadPLocked(mRegP dst, memory mem) ++%{ ++ match(Set dst (LoadPLocked mem)); ++ ++ ins_cost(125); ++ format %{ "ld $dst, $mem #@loadPLocked" %} ++ ins_encode (load_P_enc(dst, mem)); ++ ins_pipe( ialu_loadI ); ++%} ++ ++// Conditional-store of a long value. ++// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG. 
++instruct storeLConditional(indirect mem, t2RegL oldval, mRegL newval, FlagsReg cr ) %{ ++ match(Set cr (StoreLConditional mem (Binary oldval newval))); ++ effect(KILL oldval); ++ format %{ "StoreLConditional cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %} ++ ins_encode%{ ++ Register oldval = $oldval$$Register; ++ Register newval = $newval$$Register; ++ Address addr(as_Register($mem$$base), $mem$$disp); ++ Label again, nequal; ++ ++ int index = $mem$$index; ++ int disp = $mem$$disp; ++ ++ guarantee(disp == 0, "impossible encoding storeLConditional"); ++ SizedScope sc(&_masm, 40); ++ if (UseSW8A) { ++ if (UseCAS) { ++ __ move(GP, newval); ++ __ casl(oldval, addr.base(), GP); ++ __ cmpeq(AT, GP, oldval); ++ } else { ++ __ BIND(again); ++ __ lldl(GP, addr.base(), addr.disp()); ++ __ cmpeq(AT, GP, oldval); ++ __ beq(AT, nequal); ++ __ move(AT, newval); ++ __ lstl(AT, addr.base(), addr.disp()); ++ __ beq(AT, again); ++ __ BIND(nequal); ++ } ++ } else { ++ __ BIND(again); ++ __ lldl(AT, addr.base(), addr.disp()); ++ __ cmpeq(GP, AT, oldval); ++ __ wr_f(GP); ++ __ move(AT, newval); ++ __ align(8); ++ __ lstl(AT, addr.base(), addr.disp()); ++ __ rd_f(AT); ++ __ beq(GP, nequal); ++ __ beq(AT, again); ++ __ BIND(nequal); ++ } ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct storePConditional(indirect mem, t2_RegP oldval, mRegP newval, FlagsReg cr ) %{ ++ match(Set cr (StorePConditional mem (Binary oldval newval))); ++ effect(KILL oldval); ++ format %{ "StorePConditional cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %} ++ ins_encode%{ ++ Register oldval = $oldval$$Register; ++ Register newval = $newval$$Register; ++ Address addr(as_Register($mem$$base), $mem$$disp); ++ Label again, nequal; ++ ++ int index = $mem$$index; ++ int disp = $mem$$disp; ++ ++ guarantee(disp == 0, "impossible encoding storePConditional"); ++ SizedScope sc(&_masm, 40); ++ if (UseSW8A) { ++ if (UseCAS) { ++ __ move(GP, newval); ++ __ 
casl(oldval, addr.base(), GP); ++ __ cmpeq(AT, GP, oldval); ++ } else { ++ __ BIND(again); ++ __ lldl(GP, addr.base(), addr.disp()); ++ __ cmpeq(AT, GP, oldval); ++ __ beq(AT, nequal); ++ __ move(AT, newval); ++ __ lstl(AT, addr.base(), addr.disp()); ++ __ beq(AT, again); ++ __ BIND(nequal); ++ } ++ } else { ++ __ BIND(again); ++ __ lldl(AT, addr.base(), addr.disp()); ++ __ cmpeq(GP, AT, oldval); ++ __ wr_f(GP); ++ __ move(AT, newval); ++ __ align(8); ++ __ lstl(AT, addr.base(), addr.disp()); ++ __ rd_f(AT); ++ __ beq(GP, nequal); ++ __ beq(AT, again); ++ __ BIND(nequal); ++ } ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mT5RegI oldval, mRegI newval, mT10RegI tmpt10) %{ ++ match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); ++ effect(TEMP tmpt10, USE_KILL oldval); ++ // size(60); ++// match(CompareAndSwapI mem_ptr (Binary oldval newval)); ++ format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t" ++ "MOV $res, 1 @ compareAndSwapI\n\t" ++ "BNE AT, R0 @ compareAndSwapI\n\t" ++ "MOV $res, 0 @ compareAndSwapI\n" ++ "L:" %} ++ ins_encode %{ ++ Register newval = $newval$$Register; ++ Register oldval = $oldval$$Register; ++ Register res = $res$$Register; ++ Address addr($mem_ptr$$Register, 0); ++ Label L; ++ ++ __ cmpxchg32(newval, addr, oldval); ++ __ move(res, AT); ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct compareAndSwapL( mRegI res, mRegP mem_ptr, t5RegL oldval, mRegL newval, t10_RegP tmpt10) %{ ++ predicate(VM_Version::supports_cx8()); ++ match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval))); ++ effect(TEMP tmpt10, USE_KILL oldval); ++ // size(60); //TODO: ZHJ20180613 ++ format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapL\n\t" ++ "MOV $res, AT @ compareAndSwapL\n\t" ++ "L:" %} ++ ins_encode %{ ++ Register newval = $newval$$Register; ++ Register oldval = $oldval$$Register; ++ Register res = $res$$Register; ++ Address addr($mem_ptr$$Register, 0); ++ 
Label L; ++ ++ __ cmpxchg(newval, addr, oldval); ++ __ move(res, AT); ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct compareAndSwapP( mRegI res, mRegP mem_ptr, t5_RegP oldval, mRegP newval, t10_RegP tmpt10) %{ ++ match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); ++ effect(TEMP tmpt10, USE_KILL oldval); ++ // size(60); ++ format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t" ++ "MOV $res, AT @ compareAndSwapP\n\t" ++ "L:" %} ++ ins_encode %{ ++ Register newval = $newval$$Register; ++ Register oldval = $oldval$$Register; ++ Register res = $res$$Register; ++ Address addr($mem_ptr$$Register, 0); ++ Label L; ++ ++ __ cmpxchg(newval, addr, oldval); ++ __ move(res, AT); ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval, t10_RegN tmpT10) %{ ++ match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); ++ effect(TEMP tmpT10, USE_KILL oldval); ++ //size(64); ++ format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t" ++ "MOV $res, AT @ compareAndSwapN\n\t" ++ "L:" %} ++ ins_encode %{ ++ Register newval = $newval$$Register; ++ Register oldval = $oldval$$Register; ++ Register res = $res$$Register; ++ Address addr($mem_ptr$$Register, 0); ++ Label L; ++ ++ // cmpxchg32 is implemented with lldw, which will do sign extension. ++ // so we should extend oldval's sign for correct comparision. 
++ __ addw(oldval, oldval, 0); ++ ++ __ cmpxchg32(newval, addr, oldval); ++ __ move(res, AT); ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct getAndAddI(memory mem, mRegI add, mRegI val) %{ ++ // predicate( n->get_int() == 1 && n->get_int() == -1); ++ // val = *mem & *mem = *mem + add ++ match(Set val (GetAndAddI mem add)); ++ format %{ "getAndAddI [$mem],$add" %} ++ ins_encode %{ ++ Register base = as_Register($mem$$base); ++ int disp = $mem$$disp; ++ Register value = $val$$Register; ++ Register add = $add$$Register; ++ Label again; ++ SizedScope sc(&_masm, 40); ++ guarantee(Assembler::is_simm(disp,12), "getAndAddI"); ++ if (UseSW8A) { ++ __ BIND(again); ++ __ lldw(AT, base, disp); ++ __ addw(GP, AT, add); ++ __ lstw(GP, base, disp); ++ __ beq(GP, again); ++ __ move(value, AT); ++ } else { ++ __ BIND(again); ++ __ lldw(AT, base, disp); ++ __ ldi(GP, R0, 1); ++ __ wr_f(GP); ++ __ addw(GP, AT, add); ++ __ align(8); // must align ++ __ lstw(GP, base, disp); ++ __ rd_f(GP); ++ __ beq(GP, again); ++ __ move(value, AT); ++ } ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++instruct getAndAddL( memory mem, mRegL add, mRegL val) %{ ++ // val = *mem & *mem = *mem + add ++ match(Set val (GetAndAddL mem add)); ++ format %{ "getAndAddL [$mem],$add" %} ++ ins_encode %{ ++ Register base = as_Register($mem$$base); ++ int disp = $mem$$disp; ++ Register value = $val$$Register; ++ Register add = $add$$Register; ++ Label again; ++ SizedScope sc(&_masm, 40); ++ guarantee(Assembler::is_simm(disp,12), "getAndAddL"); ++ if (UseSW8A) { ++ __ BIND(again); ++ __ lldl(AT, base, disp); ++ __ addl(GP, AT, add); ++ __ lstl(GP, base, disp); ++ __ beq(GP, again); ++ __ move(value, AT); ++ } else { ++ __ BIND(again); ++ __ lldl(AT, base, disp); ++ __ ldi(GP, R0, 1); ++ __ wr_f(GP); ++ __ addl(GP, AT, add); ++ __ align(8); // must align ++ __ lstl(GP, base, disp); ++ __ rd_f(GP); ++ __ beq(GP, again); ++ __ move(value, AT); ++ } ++ %} ++ ins_pipe( long_memory_op ); ++%} ++ ++//----------Max 
and Min-------------------------------------------------------- ++// Min Instructions ++// *** Min and Max using the conditional move are slower than the ++// *** branch version on a Pentium III. ++// // Conditional move for min ++ ++// Min Register with Register (generic version) ++instruct minI_Reg_Reg(mRegI dst, mRegI src) %{ ++ match(Set dst (MinI dst src)); ++ //effect(KILL flags); ++ ins_cost(80); ++ ++ format %{ "MIN $dst, $src @minI_Reg_Reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ cmplt(AT, src, dst); ++ __ selne(AT, src, dst, dst); ++ ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++ ++// Max Register with Register (generic version) ++instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{ ++ match(Set dst (MaxI dst src)); ++ ins_cost(80); ++ ++ format %{ "MAX $dst, $src @maxI_Reg_Reg" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ cmplt(AT, dst, src); ++ __ selne(AT, src, dst, dst); ++ ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{ ++ match(Set dst (MaxI dst zero)); ++ ins_cost(50); ++ ++ format %{ "MAX $dst, 0 @maxI_Reg_zero" %} ++ ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ ++ __ cmplt(AT, dst, R0); ++ __ selne(AT, R0, dst, dst); ++ ++ %} ++ ++ ins_pipe( pipe_slow ); ++%} ++ ++instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask) ++%{ ++ match(Set dst (AndL src mask)); ++ ++ format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ zapnot(dst, src, 0xf); // __ dext(dst, src, 0, 32); ++ ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++// Zero-extend convert int to long ++instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask) ++%{ ++ match(Set dst (AndL (ConvI2L src) mask)); ++ ++ format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %} ++ ins_encode %{ ++ 
Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ zapnot(dst, src, 0xf); // __ dext(dst, src, 0, 32); ++ ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask) ++%{ ++ match(Set dst (AndL (ConvI2L (ConvL2I src)) mask)); ++ ++ format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ zapnot(dst, src, 0xf); // __ dext(dst, src, 0, 32); ++ ++ %} ++ ins_pipe(ialu_regI_regI); ++%} ++ ++// Match loading integer and casting it to unsigned int in long register. ++// LoadI + ConvI2L + AndL 0xffffffff. ++instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{ ++ match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); ++ ++ format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %} ++ ins_encode (load_N_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{ ++ match(Set dst (AndL mask (ConvI2L (LoadI mem)))); ++ ++ format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %} ++ ins_encode (load_N_enc(dst, mem)); ++ ins_pipe(ialu_loadI); ++%} ++ ++ ++// ============================================================================ ++// Safepoint Instruction ++instruct safePoint_poll_reg(mRegP poll) %{ ++ match(SafePoint poll); ++ effect(USE poll); ++ ++ ins_cost(125); ++ format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %} ++ ++ ins_encode %{ ++ Register poll_reg = $poll$$Register; ++ ++ __ block_comment("Safepoint:"); ++ __ relocate(relocInfo::poll_type); ++ __ ldw(AT, poll_reg, 0); ++ %} ++ ++ ins_pipe( ialu_storeI ); ++%} ++ ++/* instruct safePoint_poll() %{ ++ match(SafePoint); ++ ++ ins_cost(105); ++ format %{ "poll for GC @ safePoint_poll" %} ++ ++ ins_encode %{ ++ __ block_comment("Safepoint:"); ++ __ set64(T12, (long)os::get_polling_page()); ++ __ 
relocate(relocInfo::poll_type); ++ __ ldw(AT, T12, 0); ++ %} ++ ++ ins_pipe( ialu_storeI ); ++%} */ ++ ++//----------Arithmetic Conversion Instructions--------------------------------- ++ ++instruct roundFloat_nop(regF dst) ++%{ ++ match(Set dst (RoundFloat dst)); ++ ++ ins_cost(0); ++ ins_encode(); ++ ins_pipe(empty); ++%} ++ ++instruct roundDouble_nop(regD dst) ++%{ ++ match(Set dst (RoundDouble dst)); ++ ++ ins_cost(0); ++ ins_encode(); ++ ins_pipe(empty); ++%} ++ ++//---------- Zeros Count Instructions ------------------------------------------ ++// CountLeadingZerosINode CountTrailingZerosINode ++instruct countLeadingZerosI(mRegI dst, mRegI src) %{ ++ predicate(UseCountLeadingZerosInstruction); ++ match(Set dst (CountLeadingZerosI src)); ++ ++ format %{ "CTLZ $dst, $dst #@countLeadingZerosI" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ zapnot(dst, src, 0xf); ++ __ ctlz(dst, dst); ++ __ subw(dst, dst, 32); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct countLeadingZerosL(mRegI dst, mRegL src) %{ ++ predicate(UseCountLeadingZerosInstruction); ++ match(Set dst (CountLeadingZerosL src)); ++ ++ format %{ "CTLZ $src,$dst #@countLeadingZerosL" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ ctlz(dst, src); ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++instruct countTrailingZerosI(mRegI dst, mRegI src) %{ ++ predicate(UseCountTrailingZerosInstruction); ++ match(Set dst (CountTrailingZerosI src)); ++ ++ format %{ "CTTZ $src, $dst\n\t #@countTrailingZerosI"%} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ slll(dst, src, 32); ++ __ cttz(dst, dst); ++ __ subw(dst, dst, 32); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++instruct countTrailingZerosL(mRegI dst, mRegL src) %{ ++ predicate(UseCountTrailingZerosInstruction); ++ match(Set dst (CountTrailingZerosL src)); ++ ++ format %{ "CTTZ $src,$dst #@countTrailingZerosL" 
%} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ cttz(dst, src); ++ %} ++ ins_pipe( ialu_regL_regL ); ++%} ++ ++//---------- Population Count Instructions ------------------------------------- ++instruct popCountI(mRegI dst, mRegI src) %{ ++ predicate(UsePopCountInstruction); ++ match(Set dst (PopCountI src)); ++ ++ format %{ "CTPOP $src, $dst #@popCountI" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ zapnot(dst, src, 0xf); ++ __ ctpop(dst, dst); ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++// Note: Long.bitCount(long) returns an int. ++instruct popCountL(mRegI dst, mRegL src) %{ ++ predicate(UsePopCountInstruction); ++ match(Set dst (PopCountL src)); ++ ++ format %{ "CTPOP $src, $dst #@popCountL" %} ++ ins_encode %{ ++ Register dst = $dst$$Register; ++ Register src = $src$$Register; ++ ++ __ ctpop(dst, src); ++ %} ++ ins_pipe(ialu_regL_regL); ++%} ++ ++// ====================VECTOR INSTRUCTIONS===================================== ++ ++ ++// ====================VECTOR ARITHMETIC======================================= ++ ++// --------------------------------- ADD -------------------------------------- ++ ++// Floats vector add ++ ++// --------------------------------- SUB -------------------------------------- ++ ++// Floats vector sub ++ ++// --------------------------------- MUL -------------------------------------- ++ ++// Floats vector mul ++ ++// --------------------------------- DIV -------------------------------------- ++ ++// --------------------------------- MADD -------------------------------------- ++// Floats vector madd ++ ++ ++//----------PEEPHOLE RULES----------------------------------------------------- ++// These must follow all instruction definitions as they use the names ++// defined in the instructions definitions. 
++// ++// peepmatch ( root_instr_name [preceeding_instruction]* ); ++// ++// peepconstraint %{ ++// (instruction_number.operand_name relational_op instruction_number.operand_name ++// [, ...] ); ++// // instruction numbers are zero-based using left to right order in peepmatch ++// ++// peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); ++// // provide an instruction_number.operand_name for each operand that appears ++// // in the replacement instruction's match rule ++// ++// ---------VM FLAGS--------------------------------------------------------- ++// ++// All peephole optimizations can be turned off using -XX:-OptoPeephole ++// ++// Each peephole rule is given an identifying number starting with zero and ++// increasing by one in the order seen by the parser. An individual peephole ++// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# ++// on the command-line. ++// ++// ---------CURRENT LIMITATIONS---------------------------------------------- ++// ++// Only match adjacent instructions in same basic block ++// Only equality constraints ++// Only constraints between operands, not (0.dest_reg == EAX_enc) ++// Only one replacement instruction ++// ++// ---------EXAMPLE---------------------------------------------------------- ++// ++// // pertinent parts of existing instructions in architecture description ++// instruct movI(eRegI dst, eRegI src) %{ ++// match(Set dst (CopyI src)); ++// %} ++// ++// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{ ++// match(Set dst (AddI dst src)); ++// effect(KILL cr); ++// %} ++// ++// // Change (inc mov) to lea ++// peephole %{ ++// // increment preceeded by register-register move ++// peepmatch ( incI_eReg movI ); ++// // require that the destination register of the increment ++// // match the destination register of the move ++// peepconstraint ( 0.dst == 1.dst ); ++// // construct a replacement instruction that sets ++// // the destination to ( move's source register + one ) 
++// peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); ++// %} ++// ++// Implementation no longer uses movX instructions since ++// machine-independent system no longer uses CopyX nodes. ++// ++// peephole %{ ++// peepmatch ( incI_eReg movI ); ++// peepconstraint ( 0.dst == 1.dst ); ++// peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); ++// %} ++// ++// peephole %{ ++// peepmatch ( decI_eReg movI ); ++// peepconstraint ( 0.dst == 1.dst ); ++// peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); ++// %} ++// ++// peephole %{ ++// peepmatch ( addI_eReg_imm movI ); ++// peepconstraint ( 0.dst == 1.dst ); ++// peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) ); ++// %} ++// ++// peephole %{ ++// peepmatch ( addP_eReg_imm movP ); ++// peepconstraint ( 0.dst == 1.dst ); ++// peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) ); ++// %} ++ ++// // Change load of spilled value to only a spill ++// instruct storeI(memory mem, eRegI src) %{ ++// match(Set mem (StoreI mem src)); ++// %} ++// ++// instruct loadI(eRegI dst, memory mem) %{ ++// match(Set dst (LoadI mem)); ++// %} ++// ++//peephole %{ ++// peepmatch ( loadI storeI ); ++// peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); ++// peepreplace ( storeI( 1.mem 1.mem 1.src ) ); ++//%} ++ ++//----------SMARTSPILL RULES--------------------------------------------------- ++// These must follow all instruction definitions as they use the names ++// defined in the instructions definitions. ++ +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/templateInterpreterGenerator_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/templateInterpreterGenerator_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/templateInterpreterGenerator_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/templateInterpreterGenerator_sw64.hpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,34 @@ ++/* ++ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_TEMPLATEINTERPRETERGENERATOR_SW64_HPP ++#define CPU_SW64_VM_TEMPLATEINTERPRETERGENERATOR_SW64_HPP ++ ++ protected: ++ ++ void generate_fixed_frame(bool native_call); ++ ++ // address generate_asm_interpreter_entry(bool synchronized); ++ ++#endif // CPU_SW64_VM_TEMPLATEINTERPRETERGENERATOR_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.cpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,2244 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "interpreter/bytecodeHistogram.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterGenerator.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "interpreter/templateTable.hpp" ++#include "oops/arrayOop.hpp" ++#include "oops/methodData.hpp" ++#include "oops/method.hpp" ++#include "oops/oop.inline.hpp" ++#include "prims/jvmtiExport.hpp" ++#include "prims/jvmtiThreadState.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/deoptimization.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/synchronizer.hpp" ++#include "runtime/timer.hpp" ++#include "runtime/vframeArray.hpp" ++#include "utilities/debug.hpp" ++ ++#define __ _masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024]; sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} 
++#endif ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++#ifndef CC_INTERP ++ ++// asm based interpreter deoptimization helpers ++int AbstractInterpreter::size_activation(int max_stack, ++ int temps, ++ int extra_args, ++ int monitors, ++ int callee_params, ++ int callee_locals, ++ bool is_top_frame) { ++ // Note: This calculation must exactly parallel the frame setup ++ // in AbstractInterpreterGenerator::generate_method_entry. ++ ++ // fixed size of an interpreter frame: ++ int overhead = frame::sender_sp_offset - ++ frame::interpreter_frame_initial_sp_offset; ++ // Our locals were accounted for by the caller (or last_frame_adjust ++ // on the transistion) Since the callee parameters already account ++ // for the callee's params we only need to account for the extra ++ // locals. ++ int size = overhead + ++ (callee_locals - callee_params)*Interpreter::stackElementWords + ++ monitors * frame::interpreter_frame_monitor_size() + ++ temps* Interpreter::stackElementWords + extra_args; ++ ++ return size; ++} ++ ++ ++const int Interpreter::return_sentinel = 0xfeedbeed; ++const int method_offset = frame::interpreter_frame_method_offset * wordSize; ++const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize; ++const int locals_offset = frame::interpreter_frame_locals_offset * wordSize; ++ ++//----------------------------------------------------------------------------- ++ ++address TemplateInterpreterGenerator::generate_StackOverflowError_handler() { ++ address entry = __ pc(); ++ ++#ifdef ASSERT ++ { ++ Label L; ++ __ add_simm16(T1, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ __ subl(T1, T1, SP); // T1 = maximal sp for current fp ++ __ bge(T1, L); // check if frame is complete ++ __ stop("interpreter frame not set up"); ++ __ BIND(L); ++ } ++#endif // ASSERT ++ ++ // Restore bcp under the assumption that the current frame is still ++ // interpreted ++ // S0 is the conventional register for bcp ++ __ restore_bcp(); 
++ ++ // expression stack must be empty before entering the VM if an ++ // exception happened ++ __ empty_expression_stack(); ++ // throw exception ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); ++ return entry; ++} ++ ++address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler( ++ const char* name) { ++ address entry = __ pc(); ++ // expression stack must be empty before entering the VM if an exception happened ++ __ empty_expression_stack(); ++ __ li(A1, (long)name); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), A1, A2); ++ return entry; ++} ++ ++address TemplateInterpreterGenerator::generate_ClassCastException_handler() { ++ address entry = __ pc(); ++ ++ // expression stack must be empty before entering the VM if an exception happened ++ __ empty_expression_stack(); ++ __ empty_FPU_stack(); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), FSR); ++ return entry; ++} ++ ++address TemplateInterpreterGenerator::generate_exception_handler_common( ++ const char* name, const char* message, bool pass_oop) { ++ assert(!pass_oop || message == NULL, "either oop or message but not both"); ++ address entry = __ pc(); ++ ++ // expression stack must be empty before entering the VM if an exception happened ++ __ empty_expression_stack(); ++ // setup parameters ++ __ li(A1, (long)name); ++ if (pass_oop) { ++ __ call_VM(V0, ++ CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), A1, FSR); ++ } else { ++ __ li(A2, (long)message); ++ __ call_VM(V0, ++ CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), A1, A2); ++ } ++ // throw exception ++ __ jmp(Interpreter::throw_exception_entry(), relocInfo::none); ++ return entry; ++} ++ ++ ++address TemplateInterpreterGenerator::generate_continuation_for(TosState state) { ++ address entry = __ pc(); ++ // NULL last_sp until next java call 
++ __ stl(R0,Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); ++ __ dispatch_next(state); ++ return entry; ++} ++ ++ ++address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { ++ ++ address entry = __ pc(); ++ ++ // Restore stack bottom in case i2c adjusted stack ++ __ ldl(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize)); ++ // and NULL it as marker that sp is now tos until next java call ++ __ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ ++ __ restore_bcp(); ++ __ restore_locals(); ++ ++ // mdp: T11 ++ // ret: FSR ++ // tmp: T12 ++ if (state == atos) { ++ Register mdp = T11; ++ Register tmp = T12; ++ __ profile_return_type(mdp, FSR, tmp); ++ } ++ ++ ++ const Register cache = T12; ++ const Register index = T3; ++ __ get_cache_and_index_at_bcp(cache, index, 1, index_size); ++ ++ const Register flags = cache; ++ __ slll(AT, index, Address::times_ptr); ++ __ addl(AT, cache, AT); ++ __ ldw(flags, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); ++ __ and_imm8(flags, flags, ConstantPoolCacheEntry::parameter_size_mask); ++ __ slll(AT, flags, Interpreter::stackElementScale()); ++ __ addl(SP, SP, AT); ++ ++ __ dispatch_next(state, step); ++ ++ return entry; ++} ++ ++ ++address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, ++ int step) { ++ address entry = __ pc(); ++ // NULL last_sp until next java call ++ __ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ __ restore_bcp(); ++ __ restore_locals(); ++ // handle exceptions ++ { ++ Label L; ++ const Register thread = S2thread; ++ __ ldw(AT, thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(AT, L); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception)); ++ __ should_not_reach_here(); ++ __ BIND(L); ++ } ++ __ dispatch_next(state, step); ++ return entry; ++} ++ ++int 
AbstractInterpreter::BasicType_as_index(BasicType type) { ++ int i = 0; ++ switch (type) { ++ case T_BOOLEAN: i = 0; break; ++ case T_CHAR : i = 1; break; ++ case T_BYTE : i = 2; break; ++ case T_SHORT : i = 3; break; ++ case T_INT : // fall through ++ case T_LONG : // fall through ++ case T_VOID : i = 4; break; ++ case T_FLOAT : i = 5; break; ++ case T_DOUBLE : i = 6; break; ++ case T_OBJECT : // fall through ++ case T_ARRAY : i = 7; break; ++ default : ShouldNotReachHere(); ++ } ++ assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, ++ "index out of bounds"); ++ return i; ++} ++ ++ ++address TemplateInterpreterGenerator::generate_result_handler_for( ++ BasicType type) { ++ address entry = __ pc(); ++ switch (type) { ++ case T_BOOLEAN: __ c2bool(V0); break; ++ case T_CHAR : __ zapnot(V0, V0, 0x3); break; ++ case T_BYTE : __ sign_extend_byte (V0); break; ++ case T_SHORT : __ sign_extend_short(V0); break; ++ case T_INT : /* nothing to do */ break; ++ case T_FLOAT : /* nothing to do */ break; ++ case T_DOUBLE : /* nothing to do */ break; ++ case T_OBJECT : ++ { ++ __ ldl(V0, FP, frame::interpreter_frame_oop_temp_offset * wordSize); ++ __ verify_oop(V0); // and verify it ++ } ++ break; ++ default : ShouldNotReachHere(); ++ } ++ __ ret(); // return from result handler ++ return entry; ++} ++ ++address TemplateInterpreterGenerator::generate_safept_entry_for( ++ TosState state, ++ address runtime_entry) { ++ address entry = __ pc(); ++ __ push(state); ++ __ call_VM(noreg, runtime_entry); ++ __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos)); ++ return entry; ++} ++ ++ ++ ++// Helpers for commoning out cases in the various type of method entries. 
++// ++ ++ ++// increment invocation count & check for overflow ++// ++// Note: checking for negative value instead of overflow ++// so we have a 'sticky' overflow test ++// ++// Rmethod: method ++// T3 : invocation counter ++// ++void InterpreterGenerator::generate_counter_incr( ++ Label* overflow, ++ Label* profile_method, ++ Label* profile_method_continue) { ++ Label done; ++ if (TieredCompilation) { ++ int increment = InvocationCounter::count_increment; ++ int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; ++ Label no_mdo; ++ if (ProfileInterpreter) { ++ // Are we profiling? ++ __ ldl(FSR, Address(Rmethod, Method::method_data_offset())); ++ __ beq(FSR, no_mdo); ++ // Increment counter in the MDO ++ const Address mdo_invocation_counter(FSR, in_bytes(MethodData::invocation_counter_offset()) + ++ in_bytes(InvocationCounter::counter_offset())); ++ __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, T3, false, Assembler::zero, overflow); ++ __ beq(R0, done); ++ } ++ __ BIND(no_mdo); ++ // Increment counter in MethodCounters ++ const Address invocation_counter(FSR, ++ MethodCounters::invocation_counter_offset() + ++ InvocationCounter::counter_offset()); ++ __ get_method_counters(Rmethod, FSR, done); ++ __ increment_mask_and_jump(invocation_counter, increment, mask, T3, false, Assembler::zero, overflow); ++ __ BIND(done); ++ } else { ++ const Address invocation_counter(FSR, in_bytes(MethodCounters::invocation_counter_offset()) ++ + in_bytes(InvocationCounter::counter_offset())); ++ const Address backedge_counter (FSR, in_bytes(MethodCounters::backedge_counter_offset()) ++ + in_bytes(InvocationCounter::counter_offset())); ++ ++ __ get_method_counters(Rmethod, FSR, done); ++ ++ if (ProfileInterpreter) { // %%% Merge this into methodDataOop ++ __ ldw(T12, FSR, in_bytes(MethodCounters::interpreter_invocation_counter_offset())); ++ __ incrementl(T12, 1); ++ __ stw(T12, FSR, 
in_bytes(MethodCounters::interpreter_invocation_counter_offset())); ++ } ++ // Update standard invocation counters ++ __ ldw(T3, invocation_counter.base(), invocation_counter.disp()); ++ __ increment(T3, InvocationCounter::count_increment); ++ __ stw(T3, invocation_counter); // save invocation count ++ ++ __ ldw_signed(FSR, backedge_counter); // load backedge counter ++ if (Assembler::is_simm16(InvocationCounter::count_mask_value)) { ++ __ ldi(AT, R0, InvocationCounter::count_mask_value); ++ } else { ++ __ li(AT, InvocationCounter::count_mask_value); // mask out the status bits ++ } ++ __ and_reg(FSR, FSR, AT); ++ ++ __ addl(T3, T3, FSR); // add both counters ++ ++ if (ProfileInterpreter && profile_method != NULL) { ++ // Test to see if we should create a method data oop ++ __ li32(AT, (long)InvocationCounter::InterpreterProfileLimit); ++ __ cmplt(AT, T3, AT); ++ __ bne(AT, *profile_method_continue); ++ ++ // if no method data exists, go to profile_method ++ __ test_method_data_pointer(FSR, *profile_method); ++ } ++ ++ __ li32(AT, (long)InvocationCounter::InterpreterInvocationLimit); ++ __ cmplt(AT, T3, AT); ++ __ beq(AT, *overflow); ++ ++ __ BIND(done); ++ } ++} ++ ++void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { ++ ++ // Asm interpreter on entry ++ // S1 - locals ++ // S0 - bcp ++ // Rmethod - method ++ // FP - interpreter frame ++ ++ // On return (i.e. jump to entry_point) ++ // Rmethod - method ++ // RA - return address of interpreter caller ++ // tos - the last parameter to Java method ++ // SP - sender_sp ++ ++ ++ // the bcp is valid if and only if it's not null ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::frequency_counter_overflow), R0); ++ __ ldl(Rmethod, FP, method_offset); ++ // Preserve invariant that S0/S1 contain bcp/locals of sender frame ++ __ b_far(*do_continue); ++} ++ ++// See if we've got enough room on the stack for locals plus overhead. 
++// The expression stack grows down incrementally, so the normal guard ++// page mechanism will work for that. ++// ++// NOTE: Since the additional locals are also always pushed (wasn't ++// obvious in generate_method_entry) so the guard should work for them ++// too. ++// ++// Args: ++// T2: number of additional locals this frame needs (what we must check) ++// T0: Method* ++// ++void InterpreterGenerator::generate_stack_overflow_check(void) { ++ // see if we've got enough room on the stack for locals plus overhead. ++ // the expression stack grows down incrementally, so the normal guard ++ // page mechanism will work for that. ++ // ++ // Registers live on entry: ++ // ++ // T0: Method* ++ // T2: number of additional locals this frame needs (what we must check) ++ ++ // NOTE: since the additional locals are also always pushed (wasn't obvious in ++ // generate_method_entry) so the guard should work for them too. ++ // ++ ++ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; ++ ++ // total overhead size: entry_size + (saved fp thru expr stack bottom). ++ // be sure to change this if you add/subtract anything to/from the overhead area ++ const int overhead_size = -(frame::interpreter_frame_initial_sp_offset*wordSize) ++ + entry_size; ++ ++ const int page_size = os::vm_page_size(); ++ ++ Label after_frame_check; ++ ++ // see if the frame is greater than one page in size. If so, ++ // then we need to verify there is enough stack space remaining ++ // for the additional locals. 
++ __ move(AT, (page_size - overhead_size) / Interpreter::stackElementSize); ++ __ cmplt(AT, AT, T2); ++ __ beq(AT, after_frame_check); ++ ++ // compute sp as if this were going to be the last frame on ++ // the stack before the red zone ++ Register thread = S2thread; ++ ++ // locals + overhead, in bytes ++ __ slll(T3, T2, Interpreter::stackElementScale()); ++ __ add_simm16(T3, T3, overhead_size); // locals * 4 + overhead_size --> T3 ++ ++#ifdef ASSERT ++ Label stack_base_okay, stack_size_okay; ++ // verify that thread stack base is non-zero ++ __ ldl(AT, thread, in_bytes(Thread::stack_base_offset())); ++ __ bne(AT, stack_base_okay); ++ __ stop("stack base is zero"); ++ __ BIND(stack_base_okay); ++ // verify that thread stack size is non-zero ++ __ ldl(AT, thread, in_bytes(Thread::stack_size_offset())); ++ __ bne(AT, stack_size_okay); ++ __ stop("stack size is zero"); ++ __ BIND(stack_size_okay); ++#endif ++ ++ // Add stack base to locals and subtract stack size ++ __ ldl(AT, thread, in_bytes(Thread::stack_base_offset())); // stack_base --> AT ++ __ addl(T3, T3, AT); // locals * 4 + overhead_size + stack_base--> T3 ++ __ ldl(AT, thread, in_bytes(Thread::stack_size_offset())); // stack_size --> AT ++ __ subl(T3, T3, AT); // locals * 4 + overhead_size + stack_base - stack_size --> T3 ++ ++ ++ // add in the redzone and yellow size ++ __ move(AT, (StackRedPages+StackYellowPages) * page_size); ++ __ addl(T3, T3, AT); ++ ++ // check against the current stack bottom ++ __ cmplt(AT, T3, SP); ++ __ bne(AT, after_frame_check); ++ ++ // Note: the restored frame is not necessarily interpreted. ++ // Use the shared runtime version of the StackOverflowError. 
++ __ move(SP, Rsender); ++ assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated"); ++ __ jmp(StubRoutines::throw_StackOverflowError_entry(), relocInfo::runtime_call_type); ++ ++ // all done with frame size check ++ __ BIND(after_frame_check); ++} ++ ++// Allocate monitor and lock method (asm interpreter) ++// Rmethod - Method* ++void InterpreterGenerator::lock_method(void) { ++ // synchronize method ++ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; ++ ++#ifdef ASSERT ++ { Label L; ++ __ ldw(T0, Rmethod, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(T0, T0, JVM_ACC_SYNCHRONIZED); ++ __ bne(T0, L); ++ __ stop("method doesn't need synchronization"); ++ __ BIND(L); ++ } ++#endif // ASSERT ++ // get synchronization object ++ { ++ Label done; ++ const int mirror_offset = in_bytes(Klass::java_mirror_offset()); ++ __ ldw(T0, Rmethod, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(T2, T0, JVM_ACC_STATIC); ++ __ ldl(T0, LVP, Interpreter::local_offset_in_bytes(0)); ++ __ beq(T2, done); ++ __ ldl(T0, Rmethod, in_bytes(Method::const_offset())); ++ __ ldl(T0, T0, in_bytes(ConstMethod::constants_offset())); ++ __ ldl(T0, T0, ConstantPool::pool_holder_offset_in_bytes()); ++ __ ldl(T0, T0, mirror_offset); ++ __ BIND(done); ++ } ++ // add space for monitor & lock ++ __ add_simm16(SP, SP, (-1) * entry_size); // add space for a monitor entry ++ __ stl(SP, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ // set new monitor block top ++ __ stl(T0, SP, BasicObjectLock::obj_offset_in_bytes()); // store object ++ __ move(c_rarg0, SP); // object address ++ __ lock_object(c_rarg0); ++} ++ ++// Generate a fixed interpreter frame. This is identical setup for ++// interpreted methods and for native methods hence the shared code. ++void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) { ++ ++ // [ local var m-1 ] <--- sp ++ // ... 
++ // [ local var 0 ] ++ // [ argumnet word n-1 ] <--- T0(sender's sp) ++ // ... ++ // [ argument word 0 ] <--- S1 ++ ++ // initialize fixed part of activation frame ++ // sender's sp in Rsender ++ int i = 0; ++ const int frame_size = 8; ++ const int total_size = 2 + frame_size; ++ __ add_simm16(SP, SP, -total_size * wordSize); ++ __ stl(RA, SP, (total_size - 1) * wordSize); // save return address ++ __ stl(FP, SP, (total_size - 2) * wordSize); // save sender's fp ++ __ add_simm16(FP, SP, (total_size - 2) * wordSize); ++ __ stl(Rsender, FP, (-++i) * wordSize); // save sender's sp ++ __ stl(R0, FP,(-++i)*wordSize); //save last_sp as null ++ __ stl(LVP, FP, (-++i) * wordSize); // save locals offset ++ __ ldl(BCP, Rmethod, in_bytes(Method::const_offset())); // get constMethodOop ++ __ add_simm16(BCP, BCP, in_bytes(ConstMethod::codes_offset())); // get codebase ++ __ stl(Rmethod, FP, (-++i) * wordSize); // save Method* ++#ifndef CORE ++ if (ProfileInterpreter) { ++ Label method_data_continue; ++ __ ldl(AT, Rmethod, in_bytes(Method::method_data_offset())); ++ __ beq(AT, method_data_continue); ++ __ add_simm16(AT, AT, in_bytes(MethodData::data_offset())); ++ __ BIND(method_data_continue); ++ __ stl(AT, FP, (-++i) * wordSize); ++ } else { ++ __ stl(R0, FP, (-++i) * wordSize); ++ } ++#endif // !CORE ++ ++ __ ldl(T2, Rmethod, in_bytes(Method::const_offset())); ++ __ ldl(T2, T2, in_bytes(ConstMethod::constants_offset())); ++ __ ldl(T2, T2, ConstantPool::cache_offset_in_bytes()); ++ __ stl(T2, FP, (-++i) * wordSize); // set constant pool cache ++ if (native_call) { ++ __ stl(R0, FP, (-++i) * wordSize); // no bcp ++ } else { ++ __ stl(BCP, FP, (-++i) * wordSize); // set bcp ++ } ++ __ stl(SP, FP, (-++i) * wordSize); // reserve word for pointer to expression stack bottom ++ assert(frame_size == i, "stack frame size error."); ++} ++ ++// End of helpers ++ ++// Various method entries 
++//------------------------------------------------------------------------------------------------------------------------ ++// ++// ++ ++// Call an accessor method (assuming it is resolved, otherwise drop ++// into vanilla (slow path) entry ++address InterpreterGenerator::generate_accessor_entry(void) { ++ ++ // Rmethod: Method* ++ // V0: receiver (preserve for slow entry into asm interpreter) ++ // Rsender: senderSP must preserved for slow path, set SP to it on fast path ++ ++ address entry_point = __ pc(); ++ Label xreturn_path; ++ // do fastpath for resolved accessor methods ++ if (UseFastAccessorMethods) { ++ Label slow_path; ++ __ li(T2, SafepointSynchronize::address_of_state()); ++ __ ldw(AT, T2, 0); ++ __ add_simm16(AT, AT, -(SafepointSynchronize::_not_synchronized)); ++ __ bne(AT, slow_path); ++ // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; ++ // parameter size = 1 ++ // Note: We can only use this code if the getfield has been resolved ++ // and if we don't have a null-pointer exception => check for ++ // these conditions first and use slow path if necessary. ++ // Rmethod: method ++ // V0: receiver ++ ++ // [ receiver ] <-- sp ++ __ ldl(T0, SP, 0); ++ ++ // check if local 0 != NULL and read field ++ __ beq(T0, slow_path); ++ __ ldl(T2, Rmethod, in_bytes(Method::const_offset())); ++ __ ldl(T2, T2, in_bytes(ConstMethod::constants_offset())); ++ // read first instruction word and extract bytecode @ 1 and index @ 2 ++ __ ldl(T3, Rmethod, in_bytes(Method::const_offset())); ++ __ ldw(T3, T3, in_bytes(ConstMethod::codes_offset())); ++ // Shift codes right to get the index on the right. ++ // The bytecode fetched looks like <0xb4><0x2a> ++ __ srll(T3, T3, 2 * BitsPerByte); ++ __ slll(T3, T3, exact_log2(in_words(ConstantPoolCacheEntry::size()))); ++ __ ldl(T2, T2, ConstantPool::cache_offset_in_bytes()); ++ ++ // T0: local 0 ++ // Rmethod: method ++ // V0: receiver - do not destroy since it is needed for slow path! 
++ // T1: scratch use which register instead ? ++ // T3: constant pool cache index ++ // T2: constant pool cache ++ // Rsender: send's sp ++ // check if getfield has been resolved and read constant pool cache entry ++ // check the validity of the cache entry by testing whether _indices field ++ // contains Bytecode::_getfield in b1 byte. ++ assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below"); ++ ++ __ slll(T11, T3, Address::times_8); ++ __ addl(T11, T2, T11); ++ __ ldw(T1, T11, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())); ++ ++ __ srll(T1, T1, 2 * BitsPerByte); ++ __ and_imm8(T1, T1, 0xFF); ++ __ add_simm16(T1, T1, (-1) * Bytecodes::_getfield); ++ __ bne(T1, slow_path); ++ __ memb();// Order succeeding loads wrt. load of _indices field from cpool_cache. ++ ++ // Note: constant pool entry is not valid before bytecode is resolved ++ ++ __ ldw(AT, T11, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); ++ __ ldw(T3, T11, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); ++ __ move(T11, 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ __ and_reg(T11, T11, T3); ++ ++ { ++ Label notVolatile; ++ __ beq(T11, notVolatile); ++ if(os::is_MP()) __ memb(); ++ __ BIND(notVolatile); ++ } ++ Label notByte, notBool, notShort, notChar, notObj; ++ ++ // Need to differentiate between igetfield, agetfield, bgetfield etc. ++ // because they are different sizes. 
++ // Use the type from the constant pool cache ++ __ zapnot(T3, T3, 0xf); ++ __ srll(T3, T3, ConstantPoolCacheEntry::tos_state_shift); ++ // Make sure we don't need to mask T3 for tosBits after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); ++ // btos = 0 ++ __ addl(T0, T0, AT); ++ __ bne(T3, notByte); ++ ++ __ ldb_signed(V0, T0, 0); ++ __ beq(R0, xreturn_path); ++ ++ //ztos ++ __ BIND(notByte); ++ __ add_simm16(T1, T3, (-1) * ztos); ++ __ bne(T1, notBool); ++ __ ldb_signed(V0, T0, 0); ++ __ beq(R0, xreturn_path); ++ ++ //stos ++ __ BIND(notBool); ++ __ add_simm16(T1, T3, (-1) * stos); ++ __ bne(T1, notShort); ++ __ ldh_signed(V0, T0, 0); ++ __ beq(R0, xreturn_path); ++ ++ //ctos ++ __ BIND(notShort); ++ __ add_simm16(T1, T3, (-1) * ctos); ++ __ bne(T1, notChar); ++ __ ldhu(V0, T0, 0); ++ __ beq(R0, xreturn_path); ++ ++ //atos ++ __ BIND(notChar); ++ __ add_simm16(T1, T3, (-1) * atos); ++ __ bne(T1, notObj); ++ //add for compressedoops ++ __ load_heap_oop(V0, Address(T0, 0)); ++ __ beq(R0, xreturn_path); ++ ++ //itos ++ __ BIND(notObj); ++#ifdef ASSERT ++ Label okay; ++ __ add_simm16(T1, T3, (-1) * itos); ++ __ beq(T1, okay); ++ __ stop("what type is this?"); ++ __ BIND(okay); ++#endif // ASSERT ++ __ ldw(V0, T0, 0); ++ ++ __ BIND(xreturn_path); ++ { ++ Label notVolatile; ++ __ beq(T11, notVolatile); ++ if(os::is_MP()) __ memb(); ++ __ BIND(notVolatile); ++ } ++ ++ // _ireturn/_areturn ++ __ move(SP, Rsender);// set sender's fp to SP ++ __ ret(); ++ ++ // generate a vanilla interpreter entry as the slow path ++ __ BIND(slow_path); ++ (void) generate_normal_entry(false); ++ } else { ++ (void) generate_normal_entry(false); ++ } ++ ++ return entry_point; ++} ++ ++// Method entry for java.lang.ref.Reference.get. 
++address InterpreterGenerator::generate_Reference_get_entry(void) { ++#if INCLUDE_ALL_GCS ++ // Code: _aload_0, _getfield, _areturn ++ // parameter size = 1 ++ // ++ // The code that gets generated by this routine is split into 2 parts: ++ // 1. The "intrinsified" code for G1 (or any SATB based GC), ++ // 2. The slow path - which is an expansion of the regular method entry. ++ // ++ // Notes:- ++ // * In the G1 code we do not check whether we need to block for ++ // a safepoint. If G1 is enabled then we must execute the specialized ++ // code for Reference.get (except when the Reference object is null) ++ // so that we can log the value in the referent field with an SATB ++ // update buffer. ++ // If the code for the getfield template is modified so that the ++ // G1 pre-barrier code is executed when the current method is ++ // Reference.get() then going through the normal method entry ++ // will be fine. ++ // * The G1 code can, however, check the receiver object (the instance ++ // of java.lang.Reference) and jump to the slow path if null. If the ++ // Reference object is null then we obviously cannot fetch the referent ++ // and so we don't need to call the G1 pre-barrier. Thus we can use the ++ // regular method entry code to generate the NPE. ++ // ++ // This code is based on generate_accessor_enty. ++ // ++ // Rmethod: Method* ++ ++ // Rsender: senderSP must preserve for slow path, set SP to it on fast path (Rsender) ++ ++ address entry = __ pc(); ++ ++ const int referent_offset = java_lang_ref_Reference::referent_offset; ++ guarantee(referent_offset > 0, "referent offset not initialized"); ++ ++ if (UseG1GC) { ++ Label slow_path; ++ ++ // Check if local 0 != NULL ++ // If the receiver is null then it is OK to jump to the slow path. ++ __ ldl(V0, SP, 0); ++ ++ __ beq(V0, slow_path); ++ ++ // Generate the G1 pre-barrier code to log the value of ++ // the referent field in an SATB buffer. ++ ++ // Load the value of the referent field. 
++ const Address field_address(V0, referent_offset); ++ __ load_heap_oop(V0, field_address); ++ ++ __ push(RA); ++ // Generate the G1 pre-barrier code to log the value of ++ // the referent field in an SATB buffer. ++ __ g1_write_barrier_pre(noreg /* obj */, ++ V0 /* pre_val */, ++ S2thread /* thread */, ++ Rmethod /* tmp */, ++ true /* tosca_live */, ++ true /* expand_call */); ++ __ pop(RA); ++ ++ __ addl(SP, Rsender, R0); // set sp to sender sp ++ __ ret(); ++ ++ // generate a vanilla interpreter entry as the slow path ++ __ BIND(slow_path); ++ (void) generate_normal_entry(false); ++ ++ return entry; ++ } ++#endif // INCLUDE_ALL_GCS ++ ++ // If G1 is not enabled then attempt to go through the accessor entry point ++ // Reference.get is an accessor ++ return generate_accessor_entry(); ++} ++ ++/** ++ * Method entry for static native methods: ++ * int java.util.zip.CRC32.update(int crc, int b) ++ */ ++address InterpreterGenerator::generate_CRC32_update_entry() { ++ if (UseCRC32Intrinsics) { ++ address entry = __ pc(); ++ ++ Label slow_path; ++ // If we need a safepoint check, generate full interpreter entry. ++ __ li(GP, SafepointSynchronize::address_of_state()); ++ __ ldw(AT, GP, 0); ++ __ move(GP, (SafepointSynchronize::_not_synchronized)); ++ __ bne(AT, GP, slow_path); ++ ++ // We don't generate local frame and don't align stack because ++ // we call stub code and there is no safepoint on this path. 
++ // Load parameters ++ const Register crc = V0; // crc ++ const Register val = A0; // source java byte value ++ const Register tbl = A1; // scratch ++ ++ // Arguments are reversed on java expression stack ++ __ ldw(val, SP, 0); // byte value ++ __ ldw(crc, SP, wordSize); // Initial CRC ++ address stubAddr = StubRoutines::crc_table_addr(); ++ __ li(tbl, stubAddr); ++ __ ornot(crc, R0, crc); // ~crc ++ __ zapnot(crc, crc, 0xF); ++ __ update_byte_crc32(crc, val, tbl); ++ __ ornot(crc, R0, crc); // ~crc ++ __ zapnot(crc, crc, 0xF); ++ // result in V0 ++ // _areturn ++ __ addl(SP, Rsender, R0); // set sp to sender sp ++ __ ret(); ++ ++ // generate a vanilla native entry as the slow path ++ __ bind(slow_path); ++ ++ (void) generate_native_entry(false); ++ ++ return entry; ++ } ++ return generate_native_entry(false); ++} ++ ++/** ++ * Method entry for static native methods: ++ * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) ++ * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) ++ */ ++address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { ++ if (UseCRC32Intrinsics) { ++ address entry = __ pc(); ++ ++ // // rbx,: Method* ++ // // r13: senderSP must preserved for slow path, set SP to it on fast path ++ // // If we need a safepoint check, generate full interpreter entry. ++ // We don't generate local frame and don't align stack because ++ // // we call stub code and there is no safepoint on this path. 
++ Label slow_path; ++ __ li(GP, SafepointSynchronize::address_of_state()); ++ __ ldw(AT, GP, 0); ++ __ move(GP, (SafepointSynchronize::_not_synchronized)); ++ __ bne(AT, GP, slow_path); ++ ++ // Load parameters ++ const Register crc = A0; // crc ++ const Register buf = A1; // source java byte array address ++ const Register len = A2; // length ++ const Register off = len; // offset (never overlaps with 'len') ++ const Register tmp = A3; ++ const Register tmp3 = A4; ++ ++ // // Arguments are reversed on java expression stack ++ // // Calculate address of start element ++ if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { ++ __ ldl(buf, SP, 2 * wordSize); // long buf ++ __ ldw(off, SP, 1 * wordSize); // offset ++ __ zapnot(off, off, 0xF); ++ __ addl(buf, buf, off); // + offset ++ __ ldw(crc, SP, 4 * wordSize); // Initial CRC ++ } else { ++ __ ldw(off, SP, 1 * wordSize); ++ __ zapnot(off, off, 0xF); ++ __ ldl(buf, SP, 2 * wordSize); // byte[] array ++ __ addl(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size ++ __ addl(buf, buf, off); // offset ++ __ ldw(crc, SP, 3 * wordSize); // Initial CRC ++ } ++ // Can now load 'len' since we're finished with 'off' ++ __ ldw(len, SP, 0 * wordSize); ++ __ zapnot(len, len, 0xF); ++ __ enter(); ++ if (UseCRC32) { ++ __ kernel_crc32(crc, buf, len, tmp, tmp3); ++ } else { ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::updateBytesCRC32), 3); ++ } ++ // _areturn ++ __ leave(); ++ __ addl(SP, Rsender, R0); // set sp to sender sp ++ __ ret(); ++ // generate a vanilla native entry as the slow path ++ __ bind(slow_path); ++ (void) generate_native_entry(false); ++ ++ return entry; ++ } ++ return generate_native_entry(false); ++} ++ ++// Interpreter stub for calling a native method. (asm interpreter) ++// This sets up a somewhat different looking stack for calling the ++// native method than the typical interpreter frame setup. 
++address InterpreterGenerator::generate_native_entry(bool synchronized) { ++ // determine code generation flags ++ bool inc_counter = UseCompiler || CountCompiledCalls; ++ // Rsender: sender's sp ++ // Rmethod: Method* ++ address entry_point = __ pc(); ++ ++#ifndef CORE ++ const Address invocation_counter(Rmethod,in_bytes(MethodCounters::invocation_counter_offset() + ++ InvocationCounter::counter_offset())); ++#endif ++ ++ // get parameter size (always needed) ++ // the size in the java stack ++ __ ldl(V0, Rmethod, in_bytes(Method::const_offset())); ++ __ ldhu(V0, V0, in_bytes(ConstMethod::size_of_parameters_offset())); ++ ++ // native calls don't need the stack size check since they have no expression stack ++ // and the arguments are already on the stack and we only add a handful of words ++ // to the stack ++ ++ // Rmethod: Method* ++ // V0: size of parameters ++ // Layout of frame at this point ++ // ++ // [ argument word n-1 ] <--- sp ++ // ... ++ // [ argument word 0 ] ++ ++ // for natives the size of locals is zero ++ ++ // compute beginning of parameters (S1) ++ __ slll(LVP, V0, Address::times_8); ++ __ add_simm16(LVP, LVP, (-1) * wordSize); ++ __ addl(LVP, LVP, SP); ++ ++ ++ // add 2 zero-initialized slots for native calls ++ __ add_simm16(SP, SP, (-2) * wordSize); ++ __ stl(R0, SP, 1 * wordSize); // slot for native oop temp offset (setup via runtime) ++ __ stl(R0, SP, 0 * wordSize); // slot for static native result handler3 (setup via runtime) ++ ++ // Layout of frame at this point ++ // [ method holder mirror ] <--- sp ++ // [ result type info ] ++ // [ argument word n-1 ] <--- T0 ++ // ... 
++ // [ argument word 0 ] <--- LVP ++ ++ ++#ifndef CORE ++ if (inc_counter) __ ldw_signed(T3, invocation_counter); // (pre-)fetch invocation count ++#endif ++ ++ // initialize fixed part of activation frame ++ generate_fixed_frame(true); ++ // after this function, the layout of frame is as following ++ // ++ // [ monitor block top ] <--- sp ( the top monitor entry ) ++ // [ byte code pointer (0) ] (if native, bcp = 0) ++ // [ constant pool cache ] ++ // [ Method* ] ++ // [ locals offset ] ++ // [ sender's sp ] ++ // [ sender's fp ] ++ // [ return address ] <--- fp ++ // [ method holder mirror ] ++ // [ result type info ] ++ // [ argumnet word n-1 ] <--- sender's sp ++ // ... ++ // [ argument word 0 ] <--- S1 ++ ++ ++ // make sure method is native & not abstract ++#ifdef ASSERT ++ __ ldw(T0, Rmethod, in_bytes(Method::access_flags_offset())); ++ { ++ Label L; ++// __ and_uimm8(AT, T0, JVM_ACC_NATIVE); ++ __ ldi(GP, R0, JVM_ACC_NATIVE); ++ __ and_reg(AT, T0, GP); ++ __ bne(AT, L); ++ __ stop("tried to execute native method as non-native"); ++ __ BIND(L); ++ } ++ { ++ Label L; ++// __ and_uimm8(AT, T0, JVM_ACC_ABSTRACT); ++ __ ldi(GP, R0, JVM_ACC_ABSTRACT); ++ __ and_reg(AT, T0, GP); ++ __ beq(AT, L); ++ __ stop("tried to execute abstract method in interpreter"); ++ __ BIND(L); ++ } ++#endif ++ ++ // Since at this point in the method invocation the exception handler ++ // would try to exit the monitor of synchronized methods which hasn't ++ // been entered yet, we set the thread local variable ++ // _do_not_unlock_if_synchronized to true. The remove_activation will ++ // check this flag. 
++ Register thread = S2thread; ++ __ move(AT, (int)true); ++ __ stb(AT, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); ++ ++#ifndef CORE ++ // increment invocation count & check for overflow ++ Label invocation_counter_overflow; ++ if (inc_counter) { ++ generate_counter_incr(&invocation_counter_overflow, NULL, NULL); ++ } ++ ++ Label continue_after_compile; ++ __ BIND(continue_after_compile); ++#endif // CORE ++ ++ bang_stack_shadow_pages(true); ++ ++ // reset the _do_not_unlock_if_synchronized flag ++ __ stb(R0, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); ++ ++ // check for synchronized methods ++ // Must happen AFTER invocation_counter check and stack overflow check, ++ // so method is not locked if overflows. ++ if (synchronized) { ++ lock_method(); ++ } else { ++ // no synchronization necessary ++#ifdef ASSERT ++ { ++ Label L; ++ __ ldw(T0, Rmethod, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(AT, T0, JVM_ACC_SYNCHRONIZED); ++ __ beq(AT, L); ++ __ stop("method needs synchronization"); ++ __ BIND(L); ++ } ++#endif ++ } ++ ++ // after method_lock, the layout of frame is as following ++ // ++ // [ monitor entry ] <--- sp ++ // ... ++ // [ monitor entry ] ++ // [ monitor block top ] ( the top monitor entry ) ++ // [ byte code pointer (0) ] (if native, bcp = 0) ++ // [ constant pool cache ] ++ // [ Method* ] ++ // [ locals offset ] ++ // [ sender's sp ] ++ // [ sender's fp ] ++ // [ return address ] <--- fp ++ // [ method holder mirror ] ++ // [ result type info ] ++ // [ argumnet word n-1 ] <--- ( sender's sp ) ++ // ... 
++ // [ argument word 0 ] <--- S1 ++ ++ // start execution ++#ifdef ASSERT ++ { ++ Label L; ++ __ ldl(AT, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ __ beq(AT, SP, L); ++ __ stop("broken stack frame setup in interpreter in asm"); ++ __ BIND(L); ++ } ++#endif ++ ++ // jvmti/jvmpi support ++ __ notify_method_entry(); ++ ++ // work registers ++ const Register method = Rmethod; ++ //const Register thread = T2; ++ const Register t = RT4; ++ ++ __ get_method(method); ++ //__ verify_oop(method); ++ { ++ Label L, Lstatic; ++ __ ldl(t,method,in_bytes(Method::const_offset())); ++ __ ldhu(t, t, in_bytes(ConstMethod::size_of_parameters_offset())); ++ // SW64 ABI: caller does not reserve space for the register auguments. ++ // A0 and A1(if needed) ++ __ ldw(AT, Rmethod, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(AT, AT, JVM_ACC_STATIC); ++ __ beq(AT, Lstatic); ++ __ addl(t, t, 1); ++ __ BIND(Lstatic); ++ __ add_simm16(t, t, -Argument::n_register_parameters + 1); ++ __ ble(t, L); ++ __ slll(t, t, Address::times_8); ++ __ subl(SP, SP, t); ++ __ BIND(L); ++ } ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); ++ __ move(AT, SP); ++ // [ ] <--- sp ++ // ... (size of parameters - 8 ) ++ // [ monitor entry ] ++ // ... ++ // [ monitor entry ] ++ // [ monitor block top ] ( the top monitor entry ) ++ // [ byte code pointer (0) ] (if native, bcp = 0) ++ // [ constant pool cache ] ++ // [ Method* ] ++ // [ locals offset ] ++ // [ sender's sp ] ++ // [ sender's fp ] ++ // [ return address ] <--- fp ++ // [ method holder mirror ] ++ // [ result type info ] ++ // [ argumnet word n-1 ] <--- ( sender's sp ) ++ // ... 
++ // [ argument word 0 ] <--- LVP ++ ++ // get signature handler ++ { ++ Label L; ++ __ ldl(T12, method, in_bytes(Method::signature_handler_offset())); ++ __ bne(T12, L); ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::prepare_native_call), method); ++ __ get_method(method); ++ __ ldl(T12, method, in_bytes(Method::signature_handler_offset())); ++ __ BIND(L); ++ } ++ ++ // call signature handler ++ // from: begin of parameters ++ assert(InterpreterRuntime::SignatureHandlerGenerator::from() == LVP, "adjust this code"); ++ // to: current sp ++ assert(InterpreterRuntime::SignatureHandlerGenerator::to () == SP, "adjust this code"); ++ // temp: T3 ++ assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t , "adjust this code"); ++ ++ __ call(T12); ++ __ get_method(method); ++ ++ // ++ // if native function is static, and its second parameter has type length of double word, ++ // and first parameter has type length of word, we have to reserve one word ++ // if native function is not static, and its third parameter has type length of double word, ++ // and second parameter has type length of word, we have to reserve one word for the second ++ // parameter. ++ // ++ ++ ++ // result handler is in V0 ++ // set result handler ++ __ stl(V0, FP, (frame::interpreter_frame_result_handler_offset)*wordSize); ++ __ memb();// Acquire signature handler before trying to fetch the native entry point and klass mirror. ?? ++ ++ // pass mirror handle if static call ++ { ++ Label L; ++ const int mirror_offset = in_bytes(Klass::java_mirror_offset()); ++ __ ldw(t, method, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(AT, t, JVM_ACC_STATIC); ++ __ beq(AT, L); ++ ++ // get mirror ++ __ ldl(t, method, in_bytes(Method:: const_offset())); ++ __ ldl(t, t, in_bytes(ConstMethod::constants_offset())); //?? 
++ __ ldl(t, t, ConstantPool::pool_holder_offset_in_bytes()); ++ __ ldl(t, t, mirror_offset); ++ // copy mirror into activation frame ++ //__ sw(t, FP, frame::interpreter_frame_oop_temp_offset * wordSize); ++ // pass handle to mirror ++ __ stl(t, FP, frame::interpreter_frame_oop_temp_offset * wordSize); ++ __ add_simm16(t, FP, frame::interpreter_frame_oop_temp_offset * wordSize); ++ __ move(A1, t); ++ __ BIND(L); ++ } ++ ++ // [ mthd holder mirror ptr ] <--- sp --------------------| (only for static method) ++ // [ ] | ++ // ... size of parameters(or +1) | ++ // [ monitor entry ] | ++ // ... | ++ // [ monitor entry ] | ++ // [ monitor block top ] ( the top monitor entry ) | ++ // [ byte code pointer (0) ] (if native, bcp = 0) | ++ // [ constant pool cache ] | ++ // [ Method* ] | ++ // [ locals offset ] | ++ // [ sender's sp ] | ++ // [ sender's fp ] | ++ // [ return address ] <--- fp | ++ // [ method holder mirror ] <----------------------------| ++ // [ result type info ] ++ // [ argumnet word n-1 ] <--- ( sender's sp ) ++ // ... ++ // [ argument word 0 ] <--- S1 ++ ++ // get native function entry point ++ { Label L; ++ __ ldl(T12, method, in_bytes(Method::native_function_offset())); ++ __ li(T4, SharedRuntime::native_method_throw_unsatisfied_link_error_entry()); ++ __ bne(T4, T12, L); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method); ++ __ get_method(method); ++ // __ verify_oop(method); ++ __ ldl(T12, method, in_bytes(Method::native_function_offset())); ++ __ BIND(L); ++ } ++ ++ // pass JNIEnv ++ // native function in T12 ++ __ add_simm16(t, thread, in_bytes(JavaThread::jni_environment_offset())); ++ __ move(A0, t); ++ // [ jni environment ] <--- sp ++ // [ mthd holder mirror ptr ] ---------------------------->| (only for static method) ++ // [ ] | ++ // ... size of parameters | ++ // [ monitor entry ] | ++ // ... 
| ++ // [ monitor entry ] | ++ // [ monitor block top ] ( the top monitor entry ) | ++ // [ byte code pointer (0) ] (if native, bcp = 0) | ++ // [ constant pool cache ] | ++ // [ Method* ] | ++ // [ locals offset ] | ++ // [ sender's sp ] | ++ // [ sender's fp ] | ++ // [ return address ] <--- fp | ++ // [ method holder mirror ] <----------------------------| ++ // [ result type info ] ++ // [ argumnet word n-1 ] <--- ( sender's sp ) ++ // ... ++ // [ argument word 0 ] <--- S1 ++ ++ // set_last_Java_frame_before_call ++ __ stl(FP, thread, in_bytes(JavaThread::last_Java_fp_offset())); ++ // Change state to native (we save the return address in the thread, since it might not ++ // be pushed on the stack when we do a a stack traversal). It is enough that the pc() ++ // points into the right code segment. It does not have to be the correct return pc. ++ __ li(t, __ pc()); ++ __ stl(t, thread, in_bytes(JavaThread::last_Java_pc_offset())); ++ __ stl(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); ++ ++ // change thread state ++#ifdef ASSERT ++ { ++ Label L; ++ __ ldw(t, thread, in_bytes(JavaThread::thread_state_offset())); ++ __ add_simm16(t, t, (-1) * _thread_in_Java); ++ __ beq(t, L); ++ __ stop("Wrong thread state in native stub"); ++ __ BIND(L); ++ } ++#endif ++ ++ __ move(t, _thread_in_native); ++ __ stw(t, thread, in_bytes(JavaThread::thread_state_offset())); ++ ++ // call native method ++ __ call(T12); ++ // result potentially in V0 or F0 ++ ++ ++ // via _last_native_pc and not via _last_jave_sp ++ // NOTE: the order of theses push(es) is known to frame::interpreter_frame_result. ++ // If the order changes or anything else is added to the stack the code in ++ // interpreter_frame_result will have to be changed. 
++ // save return value to keep the value from being destroyed by other calls ++ __ push_d(F0); ++ __ push(V0); ++ ++ // change thread state ++ __ get_thread(thread); ++ __ move(t, _thread_in_native_trans); ++ __ stw(t, thread, in_bytes(JavaThread::thread_state_offset())); ++ ++ if( os::is_MP() ) __ memb(); // Force this write out before the read below ++ ++ // check for safepoint operation in progress and/or pending suspend requests ++ { Label Continue; ++ ++ // Don't use call_VM as it will see a possible pending exception and forward it ++ // and never return here preventing us from clearing _last_native_pc down below. ++ // Also can't use call_VM_leaf either as it will check to see if BCP & LVP are ++ // preserved and correspond to the bcp/locals pointers. So we do a runtime call ++ // by hand. ++ // ++ Label L; ++ __ li(AT, SafepointSynchronize::address_of_state()); ++ __ ldw(AT, AT, 0); ++ __ bne(AT, L); ++ __ ldw(AT, thread, in_bytes(JavaThread::suspend_flags_offset())); ++ __ beq(AT, Continue); ++ __ BIND(L); ++ __ memb(); ++ __ move(A0, thread); ++ __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), ++ relocInfo::runtime_call_type); ++ ++ //add for compressedoops ++ __ reinit_heapbase(); ++ __ BIND(Continue); ++ } ++ ++ // change thread state ++ __ move(t, _thread_in_Java); ++ __ stw(t, thread, in_bytes(JavaThread::thread_state_offset())); ++ __ reset_last_Java_frame(thread, true); ++ ++ // reset handle block ++ __ ldl(t, thread, in_bytes(JavaThread::active_handles_offset())); ++ __ stw(R0, t, JNIHandleBlock::top_offset_in_bytes()); ++ ++ // If result was an oop then unbox and save it in the frame ++ { ++ Label no_oop; ++ __ ldl(AT, FP, frame::interpreter_frame_result_handler_offset*wordSize); ++ __ li(T0, AbstractInterpreter::result_handler(T_OBJECT)); ++ __ bne(AT, T0, no_oop); ++ __ pop(V0); ++ // Unbox oop result, e.g. JNIHandles::resolve value. 
++ __ resolve_jobject(V0, thread, AT); ++ __ stl(V0, FP, (frame::interpreter_frame_oop_temp_offset)*wordSize); ++ // keep stack depth as expected by pushing oop which will eventually be discarded ++ __ push(V0); ++ __ BIND(no_oop); ++ } ++ { ++ Label no_reguard; ++ __ ldw(t, thread, in_bytes(JavaThread::stack_guard_state_offset())); ++ __ move(AT,(int) JavaThread::stack_guard_yellow_disabled); ++ __ bne(t, AT, no_reguard); ++ __ pushad(); ++ __ move(S5_heapbase, SP); ++ __ move(AT, -StackAlignmentInBytes); ++ __ and_reg(SP, SP, AT); ++ __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::runtime_call_type); ++ __ move(SP, S5_heapbase); ++ __ popad(); ++ //add for compressedoops ++ __ reinit_heapbase(); ++ __ BIND(no_reguard); ++ } ++ // restore BCP to have legal interpreter frame, ++ // i.e., bci == 0 <=> BCP == code_base() ++ // Can't call_VM until bcp is within reasonable. ++ __ get_method(method); // method is junk from thread_in_native to now. ++ //__ verify_oop(method); ++ __ ldl(BCP, method, in_bytes(Method::const_offset())); ++ __ lea(BCP, Address(BCP, in_bytes(ConstMethod::codes_offset()))); ++ // handle exceptions (exception handling will handle unlocking!) ++ { ++ Label L; ++ __ ldw(t, thread, in_bytes(Thread::pending_exception_offset())); ++ __ beq(t, L); ++ // Note: At some point we may want to unify this with the code used in ++ // call_VM_base(); ++ // i.e., we should use the StubRoutines::forward_exception code. For now this ++ // doesn't work here because the sp is not correctly set at this point. 
++ __ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_pending_exception)); ++ __ should_not_reach_here(); ++ __ BIND(L); ++ } ++ ++ // do unlocking if necessary ++ { ++ Label L; ++ __ ldw(t, method, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(t, t, JVM_ACC_SYNCHRONIZED); ++ __ beq(t, L); ++ // the code below should be shared with interpreter macro assembler implementation ++ { ++ Label unlock; ++ // BasicObjectLock will be first in list, ++ // since this is a synchronized method. However, need ++ // to check that the object has not been unlocked by ++ // an explicit monitorexit bytecode. ++ __ add_simm16(c_rarg0, FP, frame::interpreter_frame_initial_sp_offset ++ * wordSize - (int)sizeof(BasicObjectLock)); ++ // address of first monitor ++ ++ __ ldl(t, c_rarg0, BasicObjectLock::obj_offset_in_bytes()); ++ __ bne(t, unlock); ++ ++ // Entry already unlocked, need to throw exception ++ __ MacroAssembler::call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_illegal_monitor_state_exception)); ++ __ should_not_reach_here(); ++ ++ __ BIND(unlock); ++ __ unlock_object(c_rarg0); ++ } ++ __ BIND(L); ++ } ++ ++ // jvmti/jvmpi support ++ // Note: This must happen _after_ handling/throwing any exceptions since ++ // the exception handler code notifies the runtime of method exits ++ // too. If this happens before, method entry/exit notifications are ++ // not properly paired (was bug - gri 11/22/99). 
++ __ notify_method_exit(false, vtos, InterpreterMacroAssembler::NotifyJVMTI ); ++ ++ // restore potential result in V0, ++ // call result handler to restore potential result in ST0 & handle result ++ __ pop(V0); ++ __ pop_d(); ++ __ ldl(t, FP, (frame::interpreter_frame_result_handler_offset) * wordSize); ++ __ call(t); ++ ++ ++ // remove activation ++ __ ldl(SP, FP, frame::interpreter_frame_sender_sp_offset * wordSize); // get sender sp ++ __ ldl(RA, FP, frame::interpreter_frame_return_addr_offset * wordSize); // get return address ++ __ ldl(FP, FP, frame::interpreter_frame_sender_fp_offset * wordSize); // restore sender's fp ++ __ ret(); ++ ++#ifndef CORE ++ if (inc_counter) { ++ // Handle overflow of counter and compile method ++ __ BIND(invocation_counter_overflow); ++ generate_counter_overflow(&continue_after_compile); ++ // entry_point is the beginning of this ++ // function and checks again for compiled code ++ } ++#endif ++ return entry_point; ++} ++ ++// ++// Generic interpreted method entry to (asm) interpreter ++// ++// Layout of frame just at the entry ++// ++// [ argument word n-1 ] <--- sp ++// ... ++// [ argument word 0 ] ++// assume Method* in Rmethod before call this method. 
++// prerequisites to the generated stub : the callee Method* in Rmethod ++// note you must save the caller bcp before call the generated stub ++// ++address InterpreterGenerator::generate_normal_entry(bool synchronized) { ++ // determine code generation flags ++ bool inc_counter = UseCompiler || CountCompiledCalls; ++ ++ // Rmethod: Method* ++ // Rsender: sender 's sp ++ address entry_point = __ pc(); ++ ++ const Address invocation_counter(Rmethod, ++ in_bytes(MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset())); ++ ++ // get parameter size (always needed) ++ __ ldl(T3, Rmethod, in_bytes(Method::const_offset())); //T3 --> Rmethod._constMethod ++ __ ldhu(V0, T3, in_bytes(ConstMethod::size_of_parameters_offset())); ++ ++ // Rmethod: Method* ++ // V0: size of parameters ++ // Rsender: sender 's sp ,could be different frome sp+ wordSize if we call via c2i ++ // get size of locals in words to T2 ++ __ ldhu(T2, T3, in_bytes(ConstMethod::size_of_locals_offset())); ++ __ subl(T2, T2, V0); ++ ++ // see if we've got enough room on the stack for locals plus overhead. ++ // Layout of frame at this point ++ // ++ // [ argument word n-1 ] <--- sp ++ // ... ++ // [ argument word 0 ] ++ generate_stack_overflow_check(); ++ // after this function, the layout of frame does not change ++ ++ // compute beginning of parameters (LVP) ++ __ slll(LVP, V0, LogBytesPerWord); ++ __ add_simm16(LVP, LVP, (-1) * wordSize); ++ __ addl(LVP, LVP, SP); ++ ++ // T2 - # of additional locals ++ // allocate space for locals ++ // explicitly initialize locals ++ { ++ Label exit, loop; ++ __ beq(T2, exit); ++ ++ __ BIND(loop); ++ __ stl(R0, SP, -1 * wordSize); // initialize local variables ++ __ subl(T2, T2, 1); // until everything initialized ++ ++ __ add_simm16(SP, SP, (-1) * wordSize); ++ __ bne(T2, loop); ++ __ BIND(exit); ++ } ++ ++ // ++ // [ local var m-1 ] <--- sp ++ // ... ++ // [ local var 0 ] ++ // [ argument word n-1 ] <--- T0? ++ // ... 
++ // [ argument word 0 ] <--- LVP ++ ++ // initialize fixed part of activation frame ++ ++ generate_fixed_frame(false); ++ ++ ++ // after this function, the layout of frame is as following ++ // ++ // [ monitor block top ] <--- sp ( the top monitor entry ) ++ // [ byte code pointer ] (if native, bcp = 0) ++ // [ constant pool cache ] ++ // [ Method* ] ++ // [ locals offset ] ++ // [ sender's sp ] ++ // [ sender's fp ] <--- fp ++ // [ return address ] ++ // [ local var m-1 ] ++ // ... ++ // [ local var 0 ] ++ // [ argumnet word n-1 ] <--- ( sender's sp ) ++ // ... ++ // [ argument word 0 ] <--- LVP ++ ++ ++ // make sure method is not native & not abstract ++#ifdef ASSERT ++ __ ldl(AT, Rmethod, in_bytes(Method::access_flags_offset())); ++ { ++ Label L; ++ __ ldi(GP, R0, JVM_ACC_NATIVE); ++ __ and_reg(T2, AT, GP); ++ __ beq(T2, L); ++ __ stop("tried to execute native method as non-native"); ++ __ BIND(L); ++ } ++ { ++ Label L; ++ __ ldi(GP, R0, JVM_ACC_ABSTRACT); ++ __ and_reg(T2, AT, GP); ++ __ beq(T2, L); ++ __ stop("tried to execute abstract method in interpreter"); ++ __ BIND(L); ++ } ++#endif ++ ++ // Since at this point in the method invocation the exception handler ++ // would try to exit the monitor of synchronized methods which hasn't ++ // been entered yet, we set the thread local variable ++ // _do_not_unlock_if_synchronized to true. The remove_activation will ++ // check this flag. 
++ ++ Register thread = S2thread; ++ __ move(AT, (int)true); ++ __ stb(AT, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); ++ ++#ifndef CORE ++ ++ // mdp : T11 ++ // tmp1: T12 ++ // tmp2: T2 ++ __ profile_parameters_type(T11, T12, T2); ++ ++ // increment invocation count & check for overflow ++ Label invocation_counter_overflow; ++ Label profile_method; ++ Label profile_method_continue; ++ if (inc_counter) { ++ generate_counter_incr(&invocation_counter_overflow, ++ &profile_method, ++ &profile_method_continue); ++ if (ProfileInterpreter) { ++ __ BIND(profile_method_continue); ++ } ++ } ++ ++ Label continue_after_compile; ++ __ BIND(continue_after_compile); ++ ++#endif // CORE ++ ++ bang_stack_shadow_pages(false); ++ ++ // reset the _do_not_unlock_if_synchronized flag ++ __ stb(R0, thread, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset())); ++ ++ // check for synchronized methods ++ // Must happen AFTER invocation_counter check and stack overflow check, ++ // so method is not locked if overflows. ++ // ++ if (synchronized) { ++ // Allocate monitor and lock method ++ lock_method(); ++ } else { ++ // no synchronization necessary ++#ifdef ASSERT ++ { Label L; ++ __ ldw(AT, Rmethod, in_bytes(Method::access_flags_offset())); ++ __ and_imm8(T2, AT, JVM_ACC_SYNCHRONIZED); ++ __ beq(T2, L); ++ __ stop("method needs synchronization"); ++ __ BIND(L); ++ } ++#endif ++ } ++ ++ // layout of frame after lock_method ++ // [ monitor entry ] <--- sp ++ // ... ++ // [ monitor entry ] ++ // [ monitor block top ] ( the top monitor entry ) ++ // [ byte code pointer ] (if native, bcp = 0) ++ // [ constant pool cache ] ++ // [ Method* ] ++ // [ locals offset ] ++ // [ sender's sp ] ++ // [ sender's fp ] ++ // [ return address ] <--- fp ++ // [ local var m-1 ] ++ // ... ++ // [ local var 0 ] ++ // [ argumnet word n-1 ] <--- ( sender's sp ) ++ // ... 
++ // [ argument word 0 ] <--- LVP ++ ++ ++ // start execution ++#ifdef ASSERT ++ { ++ Label L; ++ __ ldl(AT, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ __ beq(AT, SP, L); ++ __ stop("broken stack frame setup in interpreter in native"); ++ __ BIND(L); ++ } ++#endif ++ ++ // jvmti/jvmpi support ++ __ notify_method_entry(); ++ ++ __ dispatch_next(vtos); ++ ++ // invocation counter overflow ++ if (inc_counter) { ++ if (ProfileInterpreter) { ++ // We have decided to profile this method in the interpreter ++ __ BIND(profile_method); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::profile_method)); ++ __ set_method_data_pointer_for_bcp(); ++ __ get_method(Rmethod); ++ __ beq(R0, profile_method_continue); ++ } ++ // Handle overflow of counter and compile method ++ __ BIND(invocation_counter_overflow); ++ generate_counter_overflow(&continue_after_compile); ++ } ++ ++ return entry_point; ++} ++ ++// Entry points ++// ++// Here we generate the various kind of entries into the interpreter. ++// The two main entry type are generic bytecode methods and native ++// call method. These both come in synchronized and non-synchronized ++// versions but the frame layout they create is very similar. The ++// other method entry types are really just special purpose entries ++// that are really entry and interpretation all in one. These are for ++// trivial methods like accessor, empty, or special math methods. ++// ++// When control flow reaches any of the entry types for the interpreter ++// the following holds -> ++// ++// Arguments: ++// ++// Rmethod: Method* ++// V0: receiver ++// ++// ++// Stack layout immediately at entry ++// ++// [ parameter n-1 ] <--- sp ++// ... 
++// [ parameter 0 ] ++// [ expression stack ] (caller's java expression stack) ++ ++// Assuming that we don't go to one of the trivial specialized entries ++// the stack will look like below when we are ready to execute the ++// first bytecode (or call the native routine). The register usage ++// will be as the template based interpreter expects (see ++// interpreter_SW64.hpp). ++// ++// local variables follow incoming parameters immediately; i.e. ++// the return address is moved to the end of the locals). ++// ++// [ monitor entry ] <--- sp ++// ... ++// [ monitor entry ] ++// [ monitor block top ] ( the top monitor entry ) ++// [ byte code pointer ] (if native, bcp = 0) ++// [ constant pool cache ] ++// [ Method* ] ++// [ locals offset ] ++// [ sender's sp ] ++// [ sender's fp ] ++// [ return address ] <--- fp ++// [ local var m-1 ] ++// ... ++// [ local var 0 ] ++// [ argumnet word n-1 ] <--- ( sender's sp ) ++// ... ++// [ argument word 0 ] <--- S1 ++ ++address AbstractInterpreterGenerator::generate_method_entry( ++ AbstractInterpreter::MethodKind kind) { ++ // determine code generation flags ++ bool synchronized = false; ++ address entry_point = NULL; ++ switch (kind) { ++ case Interpreter::zerolocals : ++ break; ++ case Interpreter::zerolocals_synchronized: ++ synchronized = true; ++ break; ++ case Interpreter::native : ++ entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); ++ break; ++ case Interpreter::native_synchronized : ++ entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); ++ break; ++ case Interpreter::empty : ++ entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); ++ break; ++ case Interpreter::accessor : ++ entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); ++ break; ++ case Interpreter::abstract : ++ entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); ++ break; ++ ++ case Interpreter::java_lang_math_sin : // fall thru ++ case 
Interpreter::java_lang_math_cos : // fall thru ++ case Interpreter::java_lang_math_tan : // fall thru ++ case Interpreter::java_lang_math_log : // fall thru ++ case Interpreter::java_lang_math_log10 : // fall thru ++ case Interpreter::java_lang_math_pow : // fall thru ++ case Interpreter::java_lang_math_exp : break; ++ case Interpreter::java_lang_math_abs : // fall thru ++ case Interpreter::java_lang_math_sqrt : ++ entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; ++ case Interpreter::java_lang_ref_reference_get: ++ entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; ++ case Interpreter::java_util_zip_CRC32_update ++ : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_update_entry(); break; ++ case Interpreter::java_util_zip_CRC32_updateBytes ++ : // fall thru ++ case Interpreter::java_util_zip_CRC32_updateByteBuffer ++ : entry_point = ((InterpreterGenerator*)this)->generate_CRC32_updateBytes_entry(kind); break; ++ default: ++ fatal(err_msg("unexpected method kind: %d", kind)); ++ break; ++ } ++ if (entry_point) return entry_point; ++ ++ return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized); ++} ++ ++// These should never be compiled since the interpreter will prefer ++// the compiled version to the intrinsic version. 
++bool AbstractInterpreter::can_be_compiled(methodHandle m) { ++ switch (method_kind(m)) { ++ case Interpreter::java_lang_math_sin : // fall thru ++ case Interpreter::java_lang_math_cos : // fall thru ++ case Interpreter::java_lang_math_tan : // fall thru ++ case Interpreter::java_lang_math_abs : // fall thru ++ case Interpreter::java_lang_math_log : // fall thru ++ case Interpreter::java_lang_math_log10 : // fall thru ++ case Interpreter::java_lang_math_sqrt : // fall thru ++ case Interpreter::java_lang_math_pow : // fall thru ++ case Interpreter::java_lang_math_exp : ++ return false; ++ default: ++ return true; ++ } ++} ++ ++// How much stack a method activation needs in words. ++int AbstractInterpreter::size_top_interpreter_activation(Method* method) { ++ ++ const int entry_size = frame::interpreter_frame_monitor_size(); ++ ++ // total overhead size: entry_size + (saved fp thru expr stack bottom). ++ // be sure to change this if you add/subtract anything to/from the overhead area ++ const int overhead_size = -(frame::interpreter_frame_initial_sp_offset) + entry_size; ++ ++ const int stub_code = 6; // see generate_call_stub ++ // return overhead_size + method->max_locals() + method->max_stack() + stub_code; ++ const int method_stack = (method->max_locals() + method->max_stack()) * ++ Interpreter::stackElementWords; ++ return overhead_size + method_stack + stub_code; ++} ++ ++void AbstractInterpreter::layout_activation(Method* method, ++ int tempcount, ++ int popframe_extra_args, ++ int moncount, ++ int caller_actual_parameters, ++ int callee_param_count, ++ int callee_locals, ++ frame* caller, ++ frame* interpreter_frame, ++ bool is_top_frame, ++ bool is_bottom_frame) { ++ // Note: This calculation must exactly parallel the frame setup ++ // in AbstractInterpreterGenerator::generate_method_entry. ++ // If interpreter_frame!=NULL, set up the method, locals, and monitors. 
++ // The frame interpreter_frame, if not NULL, is guaranteed to be the ++ // right size, as determined by a previous call to this method. ++ // It is also guaranteed to be walkable even though it is in a skeletal state ++ ++ // fixed size of an interpreter frame: ++ ++ int max_locals = method->max_locals() * Interpreter::stackElementWords; ++ int extra_locals = (method->max_locals() - method->size_of_parameters()) * Interpreter::stackElementWords; ++ ++#ifdef ASSERT ++ if (!EnableInvokeDynamic) { ++ // Probably, since deoptimization doesn't work yet. ++ assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable"); ++ } ++ assert(caller->sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable(2)"); ++#endif ++ ++ interpreter_frame->interpreter_frame_set_method(method); ++ // NOTE the difference in using sender_sp and interpreter_frame_sender_sp ++ // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp) ++ // and sender_sp is fp+8 ++ intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1; ++ ++#ifdef ASSERT ++ if (caller->is_interpreted_frame()) { ++ assert(locals < caller->fp() + frame::interpreter_frame_initial_sp_offset, "bad placement"); ++ } ++#endif ++ ++ interpreter_frame->interpreter_frame_set_locals(locals); ++ BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin(); ++ BasicObjectLock* monbot = montop - moncount; ++ interpreter_frame->interpreter_frame_set_monitor_end(montop - moncount); ++ ++ //set last sp; ++ intptr_t* sp = (intptr_t*) monbot - tempcount*Interpreter::stackElementWords - ++ popframe_extra_args; ++ interpreter_frame->interpreter_frame_set_last_sp(sp); ++ // All frames but the initial interpreter frame we fill in have a ++ // value for sender_sp that allows walking the stack but isn't ++ // truly correct. Correct the value here. 
++ // ++ // int extra_locals = method->max_locals() - method->size_of_parameters(); ++ if (extra_locals != 0 && ++ interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) { ++ interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals); ++ } ++ *interpreter_frame->interpreter_frame_cache_addr() = method->constants()->cache(); ++} ++ ++//----------------------------------------------------------------------------- ++// Exceptions ++ ++void TemplateInterpreterGenerator::generate_throw_exception() { ++ // Entry point in previous activation (i.e., if the caller was ++ // interpreted) ++ Interpreter::_rethrow_exception_entry = __ pc(); ++ ++ // Restore sp to interpreter_frame_last_sp even though we are going ++ // to empty the expression stack for the exception processing. ++ __ stl(R0,FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ ++ // V0: exception ++ // T4: return address/pc that threw exception ++ __ restore_bcp(); // BCP points to call/send ++ __ restore_locals(); ++ ++ //add for compressedoops ++ __ reinit_heapbase(); ++ // Entry point for exceptions thrown within interpreter code ++ Interpreter::_throw_exception_entry = __ pc(); ++ // expression stack is undefined here ++ // V0: exception ++ // BCP: exception bcp ++ __ verify_oop(V0); ++ ++ // expression stack must be empty before entering the VM in case of an exception ++ __ empty_expression_stack(); ++ // find exception handler address and preserve exception oop ++ __ move(A1, V0); ++ __ call_VM(T4, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), A1); ++ // V0: exception handler entry point ++ // T4: preserved exception oop ++ // S0: bcp for exception handler ++ __ add_simm16(SP, SP, (-1) * wordSize); ++ __ stl(T4, SP, 0); // push exception which is now the only value on the stack ++ __ jmp(V0); // jump to exception handler (may be _remove_activation_entry!) 
++ ++ // If the exception is not handled in the current frame the frame is removed and ++ // the exception is rethrown (i.e. exception continuation is _rethrow_exception). ++ // ++ // Note: At this point the bci is still the bxi for the instruction which caused ++ // the exception and the expression stack is empty. Thus, for any VM calls ++ // at this point, GC will find a legal oop map (with empty expression stack). ++ ++ // In current activation ++ // V0: exception ++ // BCP: exception bcp ++ ++ // ++ // JVMTI PopFrame support ++ // ++ ++ Interpreter::_remove_activation_preserving_args_entry = __ pc(); ++ __ empty_expression_stack(); ++ // Set the popframe_processing bit in pending_popframe_condition indicating that we are ++ // currently handling popframe, so that call_VMs that may happen later do not trigger new ++ // popframe handling cycles. ++ Register thread = S2thread; ++ __ ldw(T3, thread, in_bytes(JavaThread::popframe_condition_offset())); ++ __ or_ins(T3, T3, JavaThread::popframe_processing_bit); ++ __ stw(T3, thread, in_bytes(JavaThread::popframe_condition_offset())); ++ ++#ifndef CORE ++ { ++ // Check to see whether we are returning to a deoptimized frame. ++ // (The PopFrame call ensures that the caller of the popped frame is ++ // either interpreted or compiled and deoptimizes it if compiled.) ++ // In this case, we can't call dispatch_next() after the frame is ++ // popped, but instead must save the incoming arguments and restore ++ // them after deoptimization has occurred. ++ // ++ // Note that we don't compare the return PC against the ++ // deoptimization blob's unpack entry because of the presence of ++ // adapter frames in C2. 
++ Label caller_not_deoptimized; ++ __ ldl(A0, FP, frame::return_addr_offset * wordSize); ++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), A0); ++ __ bne(V0, caller_not_deoptimized); ++ ++ // Compute size of arguments for saving when returning to deoptimized caller ++ __ get_method(A1); ++ // __ verify_oop(A1); ++ __ ldl(A1,A1,in_bytes(Method::const_offset())); ++ __ ldhu(A1, A1, in_bytes(ConstMethod::size_of_parameters_offset())); ++ __ shl(A1, Interpreter::logStackElementSize); ++ __ restore_locals(); ++ __ subl(A2, LVP, A1); ++ __ add_simm16(A2, A2, wordSize); ++ // Save these arguments ++ __ move(A0, S2thread); ++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), A0, A1, A2); ++ ++ __ remove_activation(vtos, T12, false, false, false); ++ ++ // Inform deoptimization that it is responsible for restoring these arguments ++ __ move(AT, JavaThread::popframe_force_deopt_reexecution_bit); ++ __ stw(AT, thread, in_bytes(JavaThread::popframe_condition_offset())); ++ // Continue in deoptimization handler ++ __ jmp(T12); ++ ++ __ BIND(caller_not_deoptimized); ++ } ++#endif /* !CORE */ ++ ++ __ remove_activation(vtos, T3, ++ /* throw_monitor_exception */ false, ++ /* install_monitor_exception */ false, ++ /* notify_jvmdi */ false); ++ ++ // Clear the popframe condition flag ++ // Finish with popframe handling ++ // A previous I2C followed by a deoptimization might have moved the ++ // outgoing arguments further up the stack. PopFrame expects the ++ // mutations to those outgoing arguments to be preserved and other ++ // constraints basically require this frame to look exactly as ++ // though it had previously invoked an interpreted activation with ++ // no space between the top of the expression stack (current ++ // last_sp) and the top of stack. 
Rather than force deopt to ++ // maintain this kind of invariant all the time we call a small ++ // fixup routine to move the mutated arguments onto the top of our ++ // expression stack if necessary. ++ __ move(T11, SP); ++ __ ldl(A2, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ // PC must point into interpreter here ++ __ set_last_Java_frame(thread, noreg, FP, __ pc()); ++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, T11, A2); ++ __ reset_last_Java_frame(thread, true); ++ // Restore the last_sp and null it out ++ __ ldl(SP, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ __ stl(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize); ++ ++ ++ ++ __ move(AT, JavaThread::popframe_inactive); ++ __ stw(AT, thread, in_bytes(JavaThread::popframe_condition_offset())); ++ ++ // Finish with popframe handling ++ __ restore_bcp(); ++ __ restore_locals(); ++#ifndef CORE ++ // The method data pointer was incremented already during ++ // call profiling. We have to restore the mdp for the current bcp. 
++ if (ProfileInterpreter) { ++ __ set_method_data_pointer_for_bcp(); ++ } ++#endif // !CORE ++ // Clear the popframe condition flag ++ __ move(AT, JavaThread::popframe_inactive); ++ __ stw(AT, thread, in_bytes(JavaThread::popframe_condition_offset())); ++ __ dispatch_next(vtos); ++ // end of PopFrame support ++ ++ Interpreter::_remove_activation_entry = __ pc(); ++ ++ // preserve exception over this code sequence ++ __ ldl(T0, SP, 0); ++ __ add_simm16(SP, SP, wordSize); ++ __ stl(T0, thread, in_bytes(JavaThread::vm_result_offset())); ++ // remove the activation (without doing throws on illegalMonitorExceptions) ++ __ remove_activation(vtos, T3, false, true, false); ++ // restore exception ++ __ get_vm_result(T0, thread); ++ //__ verify_oop(T0); ++ ++ // Inbetween activations - previous activation type unknown yet ++ // compute continuation point - the continuation point expects ++ // the following registers set up: ++ // ++ // T0: exception ++ // T1: return address/pc that threw exception ++ // SP: expression stack of caller ++ // FP: fp of caller ++ __ add_simm16(SP, SP, (-2) * wordSize); ++ __ stl(T0, SP, wordSize); // save exception ++ __ stl(T3, SP, 0); // save return address ++ __ move(A1, T3); ++ __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, A1); ++ __ move(T12, V0); // save exception handler ++ __ ldl(V0, SP, wordSize); // restore exception ++ __ ldl(T4, SP, 0); // restore return address ++ __ add_simm16(SP, SP, 2 * wordSize); ++ ++ // Note that an "issuing PC" is actually the next PC after the call ++ __ jmp(T12); // jump to exception handler of caller ++} ++ ++ ++// ++// JVMTI ForceEarlyReturn support ++// ++address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) { ++ address entry = __ pc(); ++ __ restore_bcp(); ++ __ restore_locals(); ++ __ empty_expression_stack(); ++ __ empty_FPU_stack(); ++ __ load_earlyret_value(state); ++ ++ __ ld_ptr(T12, S2thread, 
in_bytes(JavaThread::jvmti_thread_state_offset())); ++ ++ const Address cond_addr(T12, in_bytes(JvmtiThreadState::earlyret_state_offset())); ++ // Clear the earlyret state ++ __ move(AT,JvmtiThreadState::earlyret_inactive); ++ __ stw(AT,cond_addr); ++ __ memb(); ++ ++ __ remove_activation(state, T0, ++ false, /* throw_monitor_exception */ ++ false, /* install_monitor_exception */ ++ true); /* notify_jvmdi */ ++ __ memb(); ++ __ jmp(T0); ++ return entry; ++} // end of ForceEarlyReturn support ++ ++ ++//----------------------------------------------------------------------------- ++// Helper for vtos entry point generation ++ ++void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, ++ address& bep, ++ address& cep, ++ address& sep, ++ address& aep, ++ address& iep, ++ address& lep, ++ address& fep, ++ address& dep, ++ address& vep) { ++ assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); ++ Label L; ++ fep = __ pc(); __ push(ftos); __ beq(R0, L); ++ dep = __ pc(); __ push(dtos); __ beq(R0, L); ++ lep = __ pc(); __ push(ltos); __ beq(R0, L); ++ aep =__ pc(); __ push(atos); __ beq(R0, L); ++ bep = cep = sep = iep = __ pc(); __ push(itos); ++ vep = __ pc(); ++ __ BIND(L); // fall through ++ generate_and_dispatch(t); ++} ++ ++ ++//----------------------------------------------------------------------------- ++// Generation of individual instructions ++ ++// helpers for generate_and_dispatch ++ ++ ++InterpreterGenerator::InterpreterGenerator(StubQueue* code) ++ : TemplateInterpreterGenerator(code) { ++ generate_all(); // down here so it can be "virtual" ++} ++ ++//----------------------------------------------------------------------------- ++ ++// Non-product code ++#ifndef PRODUCT ++address TemplateInterpreterGenerator::generate_trace_code(TosState state) { ++ address entry = __ pc(); ++ ++ // prepare expression stack ++ __ push(state); // save tosca ++ ++ // tos & tos2 ++ // trace_bytecode need actually 4 args, the last two is tos&tos2 ++ 
// to the stack position it think is the tos&tos2 ++ // when the expression stack have no more than 2 data, error occur. ++ __ ldl(A2, SP, 0); ++ __ ldl(A3, SP, 1 * wordSize); ++ ++ // pass arguments & call tracer ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), RA, A2, A3); ++ __ move(RA, V0); // make sure return address is not destroyed by pop(state) ++ ++ // restore expression stack ++ __ pop(state); // restore tosca ++ ++ // return ++ __ ret(); ++ ++ return entry; ++} ++ ++void TemplateInterpreterGenerator::count_bytecode() { ++ __ li(T11, (long)&BytecodeCounter::_counter_value); ++ __ ldw(AT, T11, 0); ++ __ addl(AT, AT, 1); ++ __ stw(AT, T11, 0); ++} ++ ++void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ++ __ li(T11, (long)&BytecodeHistogram::_counters[t->bytecode()]); ++ __ ldw(AT, T11, 0); ++ __ addl(AT, AT, 1); ++ __ stw(AT, T11, 0); ++} ++ ++void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ++ __ li(T11, (long)&BytecodePairHistogram::_index); ++ __ ldw(T12, T11, 0); ++ __ srll(T12, T12, BytecodePairHistogram::log2_number_of_codes); ++ __ li(T11, ((long)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes); ++ __ bis(T12, T12, T11); ++ __ li(T11, (long)&BytecodePairHistogram::_index); ++ __ stw(T12, T11, 0); ++ __ slll(T12, T12, 2); ++ __ li(T11, (long)BytecodePairHistogram::_counters); ++ __ addl(T11, T11, T12); ++ __ ldw(AT, T11, 0); ++ __ addl(AT, AT, 1); ++ __ stw(AT, T11, 0); ++} ++ ++ ++void TemplateInterpreterGenerator::trace_bytecode(Template* t) { ++ // Call a little run-time stub to avoid blow-up for each bytecode. ++ // The run-time runtime saves the right registers, depending on ++ // the tosca in-state for the given template. 
++ ++ address entry = Interpreter::trace_code(t->tos_in()); ++ assert(entry != NULL, "entry must have been generated"); ++ __ call(entry, relocInfo::none); ++ //add for compressedoops ++ __ reinit_heapbase(); ++} ++ ++ ++void TemplateInterpreterGenerator::stop_interpreter_at() { ++ Label L; ++ __ li(T11, long(&BytecodeCounter::_counter_value)); ++ __ ldw(T11, T11, 0); ++ __ move(AT, StopInterpreterAt); ++ __ bne(T11, AT, L); ++ __ move(GP, 1); ++ __ bne(GP, -1); ++ __ call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type); ++ __ BIND(L); ++} ++#endif // !PRODUCT ++#endif // ! CC_INTERP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/templateInterpreter_sw64.hpp 2025-05-06 10:53:44.911633666 +0800 +@@ -0,0 +1,40 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_TEMPLATEINTERPRETER_SW64_HPP ++#define CPU_SW64_VM_TEMPLATEINTERPRETER_SW64_HPP ++ ++ ++ protected: ++ ++ // Size of interpreter code. Increase if too small. Interpreter will ++ // fail with a guarantee ("not enough space for interpreter generation"); ++ // if too small. ++ // Run with +PrintInterpreter to get the VM to print out the size. ++ // Max size with JVMTI ++ // The sethi() instruction generates lots more instructions when shell ++ // stack limit is unlimited, so that's why this is much bigger. ++ const static int InterpreterCodeSize = 500 * K; ++ ++#endif // CPU_SW64_VM_TEMPLATEINTERPRETER_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/templateTable_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/templateTable_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/templateTable_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/templateTable_sw64.cpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,4258 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "interpreter/interpreter.hpp" ++#include "interpreter/interpreterRuntime.hpp" ++#include "interpreter/templateTable.hpp" ++#include "memory/universe.inline.hpp" ++#include "oops/methodData.hpp" ++#include "oops/objArrayKlass.hpp" ++#include "oops/oop.inline.hpp" ++#include "prims/methodHandles.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/synchronizer.hpp" ++#include "utilities/macros.hpp" ++ ++ ++#ifndef CC_INTERP ++ ++#define __ _masm-> ++ ++#ifdef PRODUCT ++ #define BLOCK_COMMENT(str) /* nothing */ ++#else ++ #define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str, __FILE__, __LINE__); __ block_comment(line);} ++#endif ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++ ++// Platform-dependent initialization ++ ++void TemplateTable::pd_initialize() { ++ // No sw64 specific initialization ++} ++ ++// Address computation: local variables ++ ++static inline Address iaddress(int n) { ++ return Address(LVP, Interpreter::local_offset_in_bytes(n)); ++} ++ ++static inline Address laddress(int n) { ++ return iaddress(n + 1); ++} ++ ++static inline Address faddress(int n) { ++ return iaddress(n); ++} ++ ++static inline Address daddress(int n) { ++ return laddress(n); ++} ++ ++static inline Address aaddress(int n) { ++ return iaddress(n); ++} ++ ++// never be used, why is here? 
jx ++static inline Address haddress(int n) { ++ return iaddress(n + 0); ++} ++ ++ ++static inline Address at_sp() { return Address(SP, 0); } ++static inline Address at_sp_p1() { return Address(SP, 1 * wordSize); } ++static inline Address at_sp_p2() { return Address(SP, 2 * wordSize); } ++ ++// At top of Java expression stack which may be different than sp(). It ++// isn't for category 1 objects. ++static inline Address at_tos () { ++ Address tos = Address(SP, Interpreter::expr_offset_in_bytes(0)); ++ return tos; ++} ++ ++static inline Address at_tos_p1() { ++ return Address(SP, Interpreter::expr_offset_in_bytes(1)); ++} ++ ++static inline Address at_tos_p2() { ++ return Address(SP, Interpreter::expr_offset_in_bytes(2)); ++} ++ ++static inline Address at_tos_p3() { ++ return Address(SP, Interpreter::expr_offset_in_bytes(3)); ++} ++ ++// we use S0 as bcp, be sure you have bcp in S0 before you call any of the Template generator ++Address TemplateTable::at_bcp(int offset) { ++ assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); ++ return Address(BCP, offset); ++} ++ ++// Miscelaneous helper routines ++// Store an oop (or NULL) at the address described by obj. 
++// If val == noreg this means store a NULL ++ ++static void do_oop_store(InterpreterMacroAssembler* _masm, ++ Address obj, ++ Register val, ++ BarrierSet::Name barrier, ++ bool precise) { ++ assert(val == noreg || val == V0, "parameter is just for looks"); ++ switch (barrier) { ++#if INCLUDE_ALL_GCS ++ case BarrierSet::G1SATBCT: ++ case BarrierSet::G1SATBCTLogging: ++ { ++ // flatten object address if needed ++ if (obj.index() == noreg && obj.disp() == 0) { ++ if (obj.base() != T3) { ++ __ move(T3, obj.base()); ++ } ++ } else { ++ __ lea(T3, obj); ++ } ++ __ g1_write_barrier_pre(T3 /* obj */, ++ T1 /* pre_val */, ++ S2thread /* thread */, ++ T12 /* tmp */, ++ val != noreg /* tosca_live */, ++ false /* expand_call */); ++ if (val == noreg) { ++ __ store_heap_oop_null(Address(T3, 0)); ++ } else { ++ // G1 barrier needs uncompressed oop for region cross check. ++ Register new_val = val; ++ if (UseCompressedOops) { ++ new_val = T1; ++ __ move(new_val, val); ++ } ++ __ store_heap_oop(Address(T3, 0), val); ++ __ g1_write_barrier_post(T3 /* store_adr */, ++ new_val /* new_val */, ++ S2thread /* thread */, ++ T12 /* tmp */, ++ T1 /* tmp2 */); ++ } ++ } ++ break; ++#endif // INCLUDE_ALL_GCS ++ case BarrierSet::CardTableModRef: ++ case BarrierSet::CardTableExtension: ++ { ++ if (val == noreg) { ++ __ store_heap_oop_null(obj); ++ } else { ++ __ store_heap_oop(obj, val); ++ // flatten object address if needed ++ if (!precise || (obj.index() == noreg && obj.disp() == 0)) { ++ __ store_check(obj.base()); ++ } else { ++ __ lea(T12, obj); ++ __ store_check(T12); ++ } ++ } ++ } ++ break; ++ case BarrierSet::ModRef: ++ case BarrierSet::Other: ++ if (val == noreg) { ++ __ store_heap_oop_null(obj); ++ } else { ++ __ store_heap_oop(obj, val); ++ } ++ break; ++ default : ++ ShouldNotReachHere(); ++ ++ } ++} ++ ++// bytecode folding ++void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, ++ Register tmp_reg, bool load_bc_into_bc_reg/*=true*/, ++ int byte_no) { ++ if 
(!RewriteBytecodes) return; ++ Label L_patch_done; ++ ++ switch (bc) { ++ case Bytecodes::_fast_aputfield: ++ case Bytecodes::_fast_bputfield: ++ case Bytecodes::_fast_zputfield: ++ case Bytecodes::_fast_cputfield: ++ case Bytecodes::_fast_dputfield: ++ case Bytecodes::_fast_fputfield: ++ case Bytecodes::_fast_iputfield: ++ case Bytecodes::_fast_lputfield: ++ case Bytecodes::_fast_sputfield: ++ { ++ // We skip bytecode quickening for putfield instructions when ++ // the put_code written to the constant pool cache is zero. ++ // This is required so that every execution of this instruction ++ // calls out to InterpreterRuntime::resolve_get_put to do ++ // additional, required work. ++ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); ++ assert(load_bc_into_bc_reg, "we use bc_reg as temp"); ++ __ get_cache_and_index_and_bytecode_at_bcp(tmp_reg, bc_reg, tmp_reg, byte_no, 1); ++ __ add_simm16(bc_reg, R0, bc); ++ __ beq(tmp_reg, L_patch_done); ++ } ++ break; ++ default: ++ assert(byte_no == -1, "sanity"); ++ // the pair bytecodes have already done the load. 
++ if (load_bc_into_bc_reg) { ++ __ move(bc_reg, bc); ++ } ++ } ++ ++ if (JvmtiExport::can_post_breakpoint()) { ++ Label L_fast_patch; ++ // if a breakpoint is present we can't rewrite the stream directly ++ __ ldbu(tmp_reg, at_bcp(0)); ++ __ move(AT, Bytecodes::_breakpoint); ++ __ bne(tmp_reg, AT, L_fast_patch); ++ ++ __ get_method(tmp_reg); ++ // Let breakpoint table handling rewrite to quicker bytecode ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::set_original_bytecode_at), tmp_reg, BCP, bc_reg); ++ ++ __ beq(R0, L_patch_done); ++ __ BIND(L_fast_patch); ++ } ++ ++#ifdef ASSERT ++ Label L_okay; ++ __ ldbu(tmp_reg, at_bcp(0)); ++ __ move(AT, (int)Bytecodes::java_code(bc)); ++ __ beq(tmp_reg, AT, L_okay); ++ __ beq(tmp_reg, bc_reg, L_patch_done); ++ __ stop("patching the wrong bytecode"); ++ __ BIND(L_okay); ++#endif ++ ++ // patch bytecode ++ __ stb(bc_reg, at_bcp(0)); ++ __ BIND(L_patch_done); ++} ++ ++ ++// Individual instructions ++ ++void TemplateTable::nop() { ++ transition(vtos, vtos); ++ // nothing to do ++} ++ ++void TemplateTable::shouldnotreachhere() { ++ transition(vtos, vtos); ++ __ stop("shouldnotreachhere bytecode"); ++} ++ ++void TemplateTable::aconst_null() { ++ transition(vtos, atos); ++ __ move(FSR, R0); ++} ++ ++void TemplateTable::iconst(int value) { ++ transition(vtos, itos); ++ if (value == 0) { ++ __ move(FSR, R0); ++ } else { ++ __ move(FSR, value); ++ } ++} ++ ++void TemplateTable::lconst(int value) { ++ transition(vtos, ltos); ++ if (value == 0) { ++ __ move(FSR, R0); ++ } else { ++ __ move(FSR, value); ++ } ++} ++ ++void TemplateTable::fconst(int value) { ++ static float _f1 = 1.0, _f2 = 2.0; ++ transition(vtos, ftos); ++ float* p; ++ switch( value ) { ++ default: ShouldNotReachHere(); ++ case 0: __ fmovs(FSF, F31); return; ++ case 1: p = &_f1; break; ++ case 2: p = &_f2; break; ++ } ++ __ li(AT, (address)p); ++ __ flds(FSF, AT, 0); ++} ++ ++void TemplateTable::dconst(int value) { ++ static double _d1 = 1.0; ++ 
transition(vtos, dtos); ++ double* p; ++ switch( value ) { ++ default: ShouldNotReachHere(); ++ case 0: __ fmovd(FSF, F31); return; ++ case 1: p = &_d1; break; ++ } ++ __ li(AT, (address)p); ++ __ fldd(FSF, AT, 0); ++} ++ ++void TemplateTable::bipush() { ++ transition(vtos, itos); ++ __ ldb_signed(FSR, at_bcp(1)); ++} ++ ++void TemplateTable::sipush() { ++ transition(vtos, itos); ++ __ ldb_signed(FSR, BCP, 1); ++ __ ldbu(AT, BCP, 2); ++ __ slll(FSR, FSR, 8); ++ __ or_ins(FSR, FSR, AT); ++} ++ ++// T1 : tags ++// T2 : index ++// T3 : cpool ++// T11 : tag ++void TemplateTable::ldc(bool wide) { ++ transition(vtos, vtos); ++ Label call_ldc, notFloat, notClass, Done; ++ // get index in cpool ++ if (wide) { ++ __ get_unsigned_2_byte_index_at_bcp(T2, 1); ++ } else { ++ __ ldbu(T2, at_bcp(1)); ++ } ++ ++ __ get_cpool_and_tags(T3, T1); ++ ++ const int base_offset = ConstantPool::header_size() * wordSize; ++ const int tags_offset = Array::base_offset_in_bytes(); ++ ++ // get type ++ __ addl(AT, T1, T2); ++ __ ldbu(T1, AT, tags_offset); ++ //now T1 is the tag ++ ++ // unresolved class - get the resolved class ++ __ add_simm16(AT, T1, - JVM_CONSTANT_UnresolvedClass); ++ __ beq(AT, call_ldc); ++ ++ // unresolved class in error (resolution failed) - call into runtime ++ // so that the same error from first resolution attempt is thrown. 
++ __ add_simm16(AT, T1, -JVM_CONSTANT_UnresolvedClassInError); ++ __ beq(AT, call_ldc); ++ ++ // resolved class - need to call vm to get java mirror of the class ++ __ add_simm16(AT, T1, - JVM_CONSTANT_Class); ++ __ slll(T2, T2, Address::times_8); ++ __ bne(AT, notClass); ++ ++ __ BIND(call_ldc); ++ __ move(A1, wide); ++ call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), A1); ++ //__ push(atos); ++ __ stl(FSR, SP, - Interpreter::stackElementSize); ++ __ add_simm16(SP, SP, - Interpreter::stackElementSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notClass); ++ __ add_simm16(AT, T1, -JVM_CONSTANT_Float); ++ __ bne(AT, notFloat); ++ // ftos ++ __ addl(AT, T3, T2); ++ __ flds(FSF, AT, base_offset); ++ //__ push_f(); ++ __ fsts(FSF, SP, - Interpreter::stackElementSize); ++ __ add_simm16(SP, SP, - Interpreter::stackElementSize); ++ __ beq(R0, Done); ++ __ BIND(notFloat); ++#ifdef ASSERT ++ { ++ Label L; ++ __ add_simm16(AT, T1, -JVM_CONSTANT_Integer); ++ __ beq(AT, L); ++ __ stop("unexpected tag type in ldc"); ++ __ BIND(L); ++ } ++#endif ++ // itos JVM_CONSTANT_Integer only ++ __ addl(T0, T3, T2); ++ __ ldw(FSR, T0, base_offset); ++ __ push(itos); ++ __ BIND(Done); ++} ++ ++// Fast path for caching oop constants. ++void TemplateTable::fast_aldc(bool wide) { ++ transition(vtos, atos); ++ ++ Register result = FSR; ++ Register tmp = SSR; ++ int index_size = wide ? sizeof(u2) : sizeof(u1); ++ ++ Label resolved; ++ ++ // We are resolved if the resolved reference cache entry contains a ++ // non-null object (String, MethodType, etc.) 
++ assert_different_registers(result, tmp); ++ __ get_cache_index_at_bcp(tmp, 1, index_size); ++ __ load_resolved_reference_at_index(result, tmp); ++ __ bne(result, resolved); ++ ++ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); ++ // first time invocation - must resolve first ++ int i = (int)bytecode(); ++ __ move(tmp, i); ++ __ call_VM(result, entry, tmp); ++ ++ __ BIND(resolved); ++ ++ if (VerifyOops) { ++ __ verify_oop(result); ++ } ++} ++ ++ ++// used register: T2, T3, T1 ++// T2 : index ++// T3 : cpool ++// T1 : tag ++void TemplateTable::ldc2_w() { ++ transition(vtos, vtos); ++ Label Long, Done; ++ ++ // get index in cpool ++ __ get_unsigned_2_byte_index_at_bcp(T2, 1); ++ ++ __ get_cpool_and_tags(T3, T1); ++ ++ const int base_offset = ConstantPool::header_size() * wordSize; ++ const int tags_offset = Array::base_offset_in_bytes(); ++ ++ // get type in T1 ++ __ addl(AT, T1, T2); ++ __ ldbu(T1, AT, tags_offset); ++ ++ __ add_simm16(AT, T1, - JVM_CONSTANT_Double); ++ __ slll(T2, T2, Address::times_8); ++ __ bne(AT, Long); ++ ++ // dtos ++ __ addl(AT, T3, T2); ++ __ fldd(FSF, AT, base_offset); ++ __ fstd(FSF, SP, - 2 * wordSize); ++ __ add_simm16(SP, SP, - 2 * wordSize); ++ __ beq(R0, Done); ++ ++ // ltos ++ __ BIND(Long); ++ __ addl(AT, T3, T2); ++ __ ldl(FSR, AT, base_offset); ++ __ push(ltos); ++ ++ __ BIND(Done); ++} ++ ++// we compute the actual local variable address here ++// the x86 dont do so for it has scaled index memory access model, we dont have, so do here ++void TemplateTable::locals_index(Register reg, int offset) { ++ __ ldbu(reg, at_bcp(offset)); ++ __ slll(reg, reg, Address::times_8); ++ __ subl(reg, LVP, reg); ++} ++ ++// this method will do bytecode folding of the two form: ++// iload iload iload caload ++// used register : T2, T3 ++// T2 : bytecode ++// T3 : folded code ++void TemplateTable::iload() { ++ transition(vtos, itos); ++ if (RewriteFrequentPairs) { ++ Label rewrite, done; ++ // get the next bytecode in 
T2 ++ __ ldbu(T2, at_bcp(Bytecodes::length_for(Bytecodes::_iload))); ++ // if _iload, wait to rewrite to iload2. We only want to rewrite the ++ // last two iloads in a pair. Comparing against fast_iload means that ++ // the next bytecode is neither an iload or a caload, and therefore ++ // an iload pair. ++ __ move(AT, Bytecodes::_iload); ++ __ beq(AT, T2, done); ++ ++ __ move(T3, Bytecodes::_fast_iload2); ++ __ move(AT, Bytecodes::_fast_iload); ++ __ beq(AT, T2, rewrite); ++ ++ // if _caload, rewrite to fast_icaload ++ __ move(T3, Bytecodes::_fast_icaload); ++ __ move(AT, Bytecodes::_caload); ++ __ beq(AT, T2, rewrite); ++ ++ // rewrite so iload doesn't check again. ++ __ move(T3, Bytecodes::_fast_iload); ++ ++ // rewrite ++ // T3 : fast bytecode ++ __ BIND(rewrite); ++ patch_bytecode(Bytecodes::_iload, T3, T2, false); ++ __ BIND(done); ++ } ++ ++ // Get the local value into tos ++ locals_index(T2); ++ __ ldw(FSR, T2, 0); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::fast_iload2() { ++ transition(vtos, itos); ++ locals_index(T2); ++ __ ldw(FSR, T2, 0); ++ __ push(itos); ++ locals_index(T2, 3); ++ __ ldw(FSR, T2, 0); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::fast_iload() { ++ transition(vtos, itos); ++ locals_index(T2); ++ __ ldw(FSR, T2, 0); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::lload() { ++ transition(vtos, ltos); ++ locals_index(T2); ++ __ ldl(FSR, T2, -wordSize); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::fload() { ++ transition(vtos, ftos); ++ locals_index(T2); ++ __ flds(FSF, T2, 0); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::dload() { ++ transition(vtos, dtos); ++ locals_index(T2); ++ __ fldd(FSF, T2, -wordSize); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::aload() { ++ transition(vtos, atos); ++ locals_index(T2); ++ __ ldl(FSR, T2, 0); ++} ++ ++void TemplateTable::locals_index_wide(Register reg) { ++ __ 
get_unsigned_2_byte_index_at_bcp(reg, 2); ++ __ slll(reg, reg, Address::times_8); ++ __ subl(reg, LVP, reg); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::wide_iload() { ++ transition(vtos, itos); ++ locals_index_wide(T2); ++ __ ldl(FSR, T2, 0); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::wide_lload() { ++ transition(vtos, ltos); ++ locals_index_wide(T2); ++ __ ldl(FSR, T2, -wordSize); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::wide_fload() { ++ transition(vtos, ftos); ++ locals_index_wide(T2); ++ __ flds(FSF, T2, 0); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::wide_dload() { ++ transition(vtos, dtos); ++ locals_index_wide(T2); ++ __ fldd(FSF, T2, -wordSize); ++} ++ ++// used register T2 ++// T2 : index ++void TemplateTable::wide_aload() { ++ transition(vtos, atos); ++ locals_index_wide(T2); ++ __ ldl(FSR, T2, 0); ++} ++ ++// we use A2 as the regiser for index, BE CAREFUL! ++// we dont use our tge 29 now, for later optimization ++void TemplateTable::index_check(Register array, Register index) { ++ // Pop ptr into array ++ __ pop_ptr(array); ++ index_check_without_pop(array, index); ++} ++ ++void TemplateTable::index_check_without_pop(Register array, Register index) { ++ // destroys A2 ++ // check array ++ __ null_check(array, arrayOopDesc::length_offset_in_bytes()); ++ ++ // sign extend since tos (index) might contain garbage in upper bits ++ __ addw(index, index, 0); ++ ++ // check index ++ Label ok; ++ __ ldw(AT, array, arrayOopDesc::length_offset_in_bytes()); ++ __ blt(index, AT, ok); ++ //throw_ArrayIndexOutOfBoundsException assume abberrant index in A2 ++ if (A2 != index) __ move(A2, index); ++ __ jmp(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry); ++ __ BIND(ok); ++} ++ ++void TemplateTable::iaload() { ++ transition(itos, itos); ++ index_check(SSR, FSR); ++ __ slll(FSR, FSR, 2); ++ __ addl(FSR, SSR, FSR); ++ __ ldw(FSR, FSR, 
arrayOopDesc::base_offset_in_bytes(T_INT)); ++} ++ ++void TemplateTable::laload() { ++ transition(itos, ltos); ++ index_check(SSR, FSR); ++ __ slll(AT, FSR, Address::times_8); ++ __ addl(AT, SSR, AT); ++ __ ldl(FSR, AT, arrayOopDesc::base_offset_in_bytes(T_LONG)); ++} ++ ++void TemplateTable::faload() { ++ transition(itos, ftos); ++ index_check(SSR, FSR); ++ __ slll(FSR, FSR, 2); ++ __ addl(FSR, SSR, FSR); ++ __ flds(FSF, FSR, arrayOopDesc::base_offset_in_bytes(T_FLOAT)); ++} ++ ++void TemplateTable::daload() { ++ transition(itos, dtos); ++ //__ pop(SSR); ++ index_check(SSR, FSR); ++ __ slll(AT, FSR, 3); ++ __ addl(AT, SSR, AT); ++ __ fldd(FSF, AT, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)); ++} ++ ++void TemplateTable::aaload() { ++ transition(itos, atos); ++ index_check(SSR, FSR); ++ __ slll(FSR, FSR, UseCompressedOops ? Address::times_4 : Address::times_8); ++ __ addl(FSR, SSR, FSR); ++ //add for compressedoops ++ __ load_heap_oop(FSR, Address(FSR, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); ++} ++ ++void TemplateTable::baload() { ++ transition(itos, itos); ++ index_check(SSR, FSR); ++ __ addl(FSR, SSR, FSR); ++ __ ldb_signed(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_BYTE)); ++} ++ ++void TemplateTable::caload() { ++ transition(itos, itos); ++ index_check(SSR, FSR); ++ __ slll(FSR, FSR, Address::times_2); ++ __ addl(FSR, SSR, FSR); ++ __ ldhu(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_CHAR)); ++} ++ ++// iload followed by caload frequent pair ++// used register : T2 ++// T2 : index ++void TemplateTable::fast_icaload() { ++ ++ transition(vtos, itos); ++ // load index out of locals ++ locals_index(T2); ++ __ ldw(FSR, T2, 0); ++ index_check(SSR, FSR); ++ __ slll(FSR, FSR, 1); ++ __ addl(FSR, SSR, FSR); ++ __ ldhu(FSR, FSR, arrayOopDesc::base_offset_in_bytes(T_CHAR)); ++} ++ ++void TemplateTable::saload() { ++ transition(itos, itos); ++ index_check(SSR, FSR); ++ __ slll(FSR, FSR, Address::times_2); ++ __ addl(FSR, SSR, FSR); ++ __ ldh_signed(FSR, 
FSR, arrayOopDesc::base_offset_in_bytes(T_SHORT)); ++} ++ ++void TemplateTable::iload(int n) { ++ transition(vtos, itos); ++ __ ldw_signed(FSR, iaddress(n)); ++} ++ ++void TemplateTable::lload(int n) { ++ transition(vtos, ltos); ++ __ ldl(FSR, laddress(n)); ++} ++ ++void TemplateTable::fload(int n) { ++ transition(vtos, ftos); ++ __ flds(FSF, faddress(n)); ++} ++ ++void TemplateTable::dload(int n) { ++ transition(vtos, dtos); ++ __ fldd(FSF, laddress(n)); ++} ++ ++void TemplateTable::aload(int n) { ++ transition(vtos, atos); ++ __ ldl(FSR, aaddress(n)); ++} ++ ++// used register : T2, T3 ++// T2 : bytecode ++// T3 : folded code ++void TemplateTable::aload_0() { ++ transition(vtos, atos); ++ // According to bytecode histograms, the pairs: ++ // ++ // _aload_0, _fast_igetfield ++ // _aload_0, _fast_agetfield ++ // _aload_0, _fast_fgetfield ++ // ++ // occur frequently. If RewriteFrequentPairs is set, the (slow) ++ // _aload_0 bytecode checks if the next bytecode is either ++ // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then ++ // rewrites the current bytecode into a pair bytecode; otherwise it ++ // rewrites the current bytecode into _fast_aload_0 that doesn't do ++ // the pair check anymore. ++ // ++ // Note: If the next bytecode is _getfield, the rewrite must be ++ // delayed, otherwise we may miss an opportunity for a pair. 
++ // ++ // Also rewrite frequent pairs ++ // aload_0, aload_1 ++ // aload_0, iload_1 ++ // These bytecodes with a small amount of code are most profitable ++ // to rewrite ++ if (RewriteFrequentPairs) { ++ Label rewrite, done; ++ // get the next bytecode in T2 ++ __ ldbu(T2, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); ++ ++ // do actual aload_0 ++ aload(0); ++ ++ // if _getfield then wait with rewrite ++ __ move(AT, Bytecodes::_getfield); ++ __ beq(AT, T2, done); ++ ++ // if _igetfield then reqrite to _fast_iaccess_0 ++ assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == ++ Bytecodes::_aload_0, ++ "fix bytecode definition"); ++ __ move(T3, Bytecodes::_fast_iaccess_0); ++ __ move(AT, Bytecodes::_fast_igetfield); ++ __ beq(AT, T2, rewrite); ++ ++ // if _agetfield then reqrite to _fast_aaccess_0 ++ assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == ++ Bytecodes::_aload_0, ++ "fix bytecode definition"); ++ __ move(T3, Bytecodes::_fast_aaccess_0); ++ __ move(AT, Bytecodes::_fast_agetfield); ++ __ beq(AT, T2, rewrite); ++ ++ // if _fgetfield then reqrite to _fast_faccess_0 ++ assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == ++ Bytecodes::_aload_0, ++ "fix bytecode definition"); ++ __ move(T3, Bytecodes::_fast_faccess_0); ++ __ move(AT, Bytecodes::_fast_fgetfield); ++ __ beq(AT, T2, rewrite); ++ ++ // else rewrite to _fast_aload0 ++ assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == ++ Bytecodes::_aload_0, ++ "fix bytecode definition"); ++ __ move(T3, Bytecodes::_fast_aload_0); ++ ++ // rewrite ++ __ BIND(rewrite); ++ patch_bytecode(Bytecodes::_aload_0, T3, T2, false); ++ ++ __ BIND(done); ++ } else { ++ aload(0); ++ } ++} ++ ++void TemplateTable::istore() { ++ transition(itos, vtos); ++ locals_index(T2); ++ __ stw(FSR, T2, 0); ++} ++ ++void TemplateTable::lstore() { ++ transition(ltos, vtos); ++ locals_index(T2); ++ __ stl(FSR, T2, -wordSize); ++} ++ ++void TemplateTable::fstore() { ++ transition(ftos, vtos); ++ locals_index(T2); 
++ __ fsts(FSF, T2, 0); ++} ++ ++void TemplateTable::dstore() { ++ transition(dtos, vtos); ++ locals_index(T2); ++ __ fstd(FSF, T2, -wordSize); ++} ++ ++void TemplateTable::astore() { ++ transition(vtos, vtos); ++ __ pop_ptr(FSR); ++ locals_index(T2); ++ __ stl(FSR, T2, 0); ++} ++ ++void TemplateTable::wide_istore() { ++ transition(vtos, vtos); ++ __ pop_i(FSR); ++ locals_index_wide(T2); ++ __ stl(FSR, T2, 0); ++} ++ ++void TemplateTable::wide_lstore() { ++ transition(vtos, vtos); ++ __ pop_l(FSR); ++ locals_index_wide(T2); ++ __ stl(FSR, T2, -wordSize); ++} ++ ++void TemplateTable::wide_fstore() { ++ wide_istore(); ++} ++ ++void TemplateTable::wide_dstore() { ++ wide_lstore(); ++} ++ ++void TemplateTable::wide_astore() { ++ transition(vtos, vtos); ++ __ pop_ptr(FSR); ++ locals_index_wide(T2); ++ __ stl(FSR, T2, 0); ++} ++ ++// used register : T2 ++void TemplateTable::iastore() { ++ transition(itos, vtos); ++ __ pop_i(SSR); // T2: array SSR: index ++ index_check(T2, SSR); // prefer index in SSR ++ __ slll(SSR, SSR, Address::times_4); ++ __ addl(T2, T2, SSR); ++ __ stw(FSR, T2, arrayOopDesc::base_offset_in_bytes(T_INT)); ++} ++ ++ ++ ++// used register T2, T3 ++void TemplateTable::lastore() { ++ transition(ltos, vtos); ++ __ pop_i (T2); ++ index_check(T3, T2); ++ __ slll(T2, T2, Address::times_8); ++ __ addl(T3, T3, T2); ++ __ stl(FSR, T3, arrayOopDesc::base_offset_in_bytes(T_LONG)); ++} ++ ++// used register T2 ++void TemplateTable::fastore() { ++ transition(ftos, vtos); ++ __ pop_i(SSR); ++ index_check(T2, SSR); ++ __ slll(SSR, SSR, Address::times_4); ++ __ addl(T2, T2, SSR); ++ __ fsts(FSF, T2, arrayOopDesc::base_offset_in_bytes(T_FLOAT)); ++} ++ ++// used register T2, T3 ++void TemplateTable::dastore() { ++ transition(dtos, vtos); ++ __ pop_i (T2); ++ index_check(T3, T2); ++ __ slll(T2, T2, Address::times_8); ++ __ addl(T3, T3, T2); ++ __ fstd(FSF, T3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)); ++ ++} ++ ++// used register : T2, T3, T11 ++// T2 : array ++// 
T3 : subklass ++// T11 : supklass ++void TemplateTable::aastore() { ++ Label is_null, ok_is_subtype, done; ++ transition(vtos, vtos); ++ // stack: ..., array, index, value ++ __ ldl(FSR, at_tos()); // Value ++ __ ldw_signed(SSR, at_tos_p1()); // Index ++ __ ldl(T2, at_tos_p2()); // Array ++ ++ // index_check(T2, SSR); ++ index_check_without_pop(T2, SSR); ++ // do array store check - check for NULL value first ++ __ beq(FSR, is_null); ++ ++ // Move subklass into T3 ++ //add for compressedoops ++ __ load_klass(T3, FSR); ++ // Move superklass into T11 ++ //add for compressedoops ++ __ load_klass(T11, T2); ++ __ ldl(T11, Address(T11, ObjArrayKlass::element_klass_offset())); ++ // Compress array+index*4+12 into a single register. T2 ++ __ slll(AT, SSR, UseCompressedOops? Address::times_4 : Address::times_8); ++ __ addl(T2, T2, AT); ++ __ add_simm16(T2, T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); ++ ++ // Generate subtype check. ++ // Superklass in T11. Subklass in T3. ++ __ gen_subtype_check(T11, T3, ok_is_subtype); ++ // Come here on failure ++ // object is at FSR ++ __ jmp(Interpreter::_throw_ArrayStoreException_entry); ++ // Come here on success ++ __ BIND(ok_is_subtype); ++ do_oop_store(_masm, Address(T2, 0), FSR, _bs->kind(), true); ++ __ beq(R0, done); ++ ++ // Have a NULL in FSR, T2=array, SSR=index. Store NULL at ary[idx] ++ __ BIND(is_null); ++ __ profile_null_seen(T12); ++ __ slll(AT, SSR, UseCompressedOops? Address::times_4 : Address::times_8); ++ __ addl(T2, T2, AT); ++ do_oop_store(_masm, Address(T2, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), noreg, _bs->kind(), true); ++ ++ __ BIND(done); ++ __ add_simm16(SP, SP, 3 * Interpreter::stackElementSize); ++} ++ ++void TemplateTable::bastore() { ++ transition(itos, vtos); ++ __ pop_i(SSR); ++ index_check(T2, SSR); ++ ++ // Need to check whether array is boolean or byte ++ // since both types share the bastore bytecode. 
++ __ load_klass(T12, T2); ++ __ ldw(T12, T12, in_bytes(Klass::layout_helper_offset())); ++ ++ int diffbit = Klass::layout_helper_boolean_diffbit(); ++ __ move(AT, diffbit); ++ ++ Label L_skip; ++ __ and_reg(AT, T12, AT); ++ __ beq(AT, L_skip); ++ __ and_imm8(FSR, FSR, 0x1); ++ __ BIND(L_skip); ++ ++ __ addl(SSR, T2, SSR); ++ __ stb(FSR, SSR, arrayOopDesc::base_offset_in_bytes(T_BYTE)); ++} ++ ++void TemplateTable::castore() { ++ transition(itos, vtos); ++ __ pop_i(SSR); ++ index_check(T2, SSR); ++ __ slll(SSR, SSR, Address::times_2); ++ __ addl(SSR, T2, SSR); ++ __ sth(FSR, SSR, arrayOopDesc::base_offset_in_bytes(T_CHAR)); ++} ++ ++void TemplateTable::sastore() { ++ castore(); ++} ++ ++void TemplateTable::istore(int n) { ++ transition(itos, vtos); ++ __ stw(FSR, iaddress(n)); ++} ++ ++void TemplateTable::lstore(int n) { ++ transition(ltos, vtos); ++ __ stl(FSR, laddress(n)); ++} ++ ++void TemplateTable::fstore(int n) { ++ transition(ftos, vtos); ++ __ fsts(FSF, faddress(n)); ++} ++ ++void TemplateTable::dstore(int n) { ++ transition(dtos, vtos); ++ __ fstd(FSF, laddress(n)); ++} ++ ++void TemplateTable::astore(int n) { ++ transition(vtos, vtos); ++ __ pop_ptr(FSR); ++ __ stl(FSR, aaddress(n)); ++} ++ ++void TemplateTable::pop() { ++ transition(vtos, vtos); ++ __ addl(SP, SP, Interpreter::stackElementSize); ++} ++ ++void TemplateTable::pop2() { ++ transition(vtos, vtos); ++ __ addl(SP, SP, 2 * Interpreter::stackElementSize); ++} ++ ++void TemplateTable::dup() { ++ transition(vtos, vtos); ++ // stack: ..., a ++ __ load_ptr(0, FSR); ++ __ push_ptr(FSR); ++ // stack: ..., a, a ++} ++ ++// blows FSR ++void TemplateTable::dup_x1() { ++ transition(vtos, vtos); ++ // stack: ..., a, b ++ __ load_ptr(0, FSR); // load b ++ __ load_ptr(1, A5); // load a ++ __ store_ptr(1, FSR); // store b ++ __ store_ptr(0, A5); // store a ++ __ push_ptr(FSR); // push b ++ // stack: ..., b, a, b ++} ++ ++// blows FSR ++void TemplateTable::dup_x2() { ++ transition(vtos, vtos); ++ // stack: 
..., a, b, c ++ __ load_ptr(0, FSR); // load c ++ __ load_ptr(2, A5); // load a ++ __ store_ptr(2, FSR); // store c in a ++ __ push_ptr(FSR); // push c ++ // stack: ..., c, b, c, c ++ __ load_ptr(2, FSR); // load b ++ __ store_ptr(2, A5); // store a in b ++ // stack: ..., c, a, c, c ++ __ store_ptr(1, FSR); // store b in c ++ // stack: ..., c, a, b, c ++} ++ ++// blows FSR ++void TemplateTable::dup2() { ++ transition(vtos, vtos); ++ // stack: ..., a, b ++ __ load_ptr(1, FSR); // load a ++ __ push_ptr(FSR); // push a ++ __ load_ptr(1, FSR); // load b ++ __ push_ptr(FSR); // push b ++ // stack: ..., a, b, a, b ++} ++ ++// blows FSR ++void TemplateTable::dup2_x1() { ++ transition(vtos, vtos); ++ // stack: ..., a, b, c ++ __ load_ptr(0, T2); // load c ++ __ load_ptr(1, FSR); // load b ++ __ push_ptr(FSR); // push b ++ __ push_ptr(T2); // push c ++ // stack: ..., a, b, c, b, c ++ __ store_ptr(3, T2); // store c in b ++ // stack: ..., a, c, c, b, c ++ __ load_ptr(4, T2); // load a ++ __ store_ptr(2, T2); // store a in 2nd c ++ // stack: ..., a, c, a, b, c ++ __ store_ptr(4, FSR); // store b in a ++ // stack: ..., b, c, a, b, c ++ ++ // stack: ..., b, c, a, b, c ++} ++ ++// blows FSR, SSR ++void TemplateTable::dup2_x2() { ++ transition(vtos, vtos); ++ // stack: ..., a, b, c, d ++ // stack: ..., a, b, c, d ++ __ load_ptr(0, T2); // load d ++ __ load_ptr(1, FSR); // load c ++ __ push_ptr(FSR); // push c ++ __ push_ptr(T2); // push d ++ // stack: ..., a, b, c, d, c, d ++ __ load_ptr(4, FSR); // load b ++ __ store_ptr(2, FSR); // store b in d ++ __ store_ptr(4, T2); // store d in b ++ // stack: ..., a, d, c, b, c, d ++ __ load_ptr(5, T2); // load a ++ __ load_ptr(3, FSR); // load c ++ __ store_ptr(3, T2); // store a in c ++ __ store_ptr(5, FSR); // store c in a ++ // stack: ..., c, d, a, b, c, d ++ ++ // stack: ..., c, d, a, b, c, d ++} ++ ++// blows FSR ++void TemplateTable::swap() { ++ transition(vtos, vtos); ++ // stack: ..., a, b ++ ++ __ load_ptr(1, A5); // load a ++ __ 
load_ptr(0, FSR); // load b ++ __ store_ptr(0, A5); // store a in b ++ __ store_ptr(1, FSR); // store b in a ++ ++ // stack: ..., b, a ++} ++ ++void TemplateTable::iop2(Operation op) { ++ transition(itos, itos); ++ ++ __ pop_i(SSR); ++ if (UseSW8A) { ++ switch (op) { ++ case add : __ addw(FSR, SSR, FSR); break; ++ case sub : __ subw(FSR, SSR, FSR); break; ++ case mul : __ mulw(FSR, SSR, FSR); break; ++ case _and : __ and_reg(FSR, SSR, FSR); break; ++ case _or : __ or_ins(FSR, SSR, FSR); break; ++ case _xor : __ xor_ins(FSR, SSR, FSR); break; ++ case shl : __ sllw(SSR, FSR, FSR); break; ++ case shr : __ sraw(SSR, FSR, FSR); break; ++ case ushr : __ srlw(SSR, FSR, FSR); break; ++ default : ShouldNotReachHere(); ++ } ++ } else { ++ switch (op) { ++ case add : __ addw(FSR, SSR, FSR); break; ++ case sub : __ subw(FSR, SSR, FSR); break; ++ case mul : __ mulw(FSR, SSR, FSR); break; ++ case _and : __ and_reg(FSR, SSR, FSR); break; ++ case _or : __ or_ins(FSR, SSR, FSR); break; ++ case _xor : __ xor_ins(FSR, SSR, FSR); break; ++ case shl : __ and_imm8(GP, FSR, 0x1f); __ slll(FSR, SSR, GP); __ addw(FSR, FSR, 0x0); break; ++ case shr : __ and_imm8(GP, FSR, 0x1f); __ addw(FSR, SSR, 0x0); __ sral(FSR, FSR, GP); break; ++ case ushr : __ and_imm8(GP, FSR, 0x1f); __ zapnot(FSR, SSR, 0xf); __ srll(FSR, FSR, GP); __ addw(FSR, FSR, 0x0); break; ++ default : ShouldNotReachHere(); ++ } ++ } ++} ++ ++// the result stored in FSR, SSR, ++// used registers : T2, T3 ++void TemplateTable::lop2(Operation op) { ++ transition(ltos, ltos); ++ __ pop_l(T2); ++ ++ switch (op) { ++ case add : __ addl(FSR, T2, FSR); break; ++ case sub : __ subl(FSR, T2, FSR); break; ++ case _and: __ and_reg(FSR, T2, FSR); break; ++ case _or : __ or_ins(FSR, T2, FSR); break; ++ case _xor: __ xor_ins(FSR, T2, FSR); break; ++ default : ShouldNotReachHere(); ++ } ++} ++ ++// java require this bytecode could handle 0x80000000/-1, dont cause a overflow exception, ++void TemplateTable::idiv() { ++ transition(itos, itos); 
++ Label not_zero; ++ ++ __ bne(FSR, not_zero); ++ __ jmp(Interpreter::_throw_ArithmeticException_entry); ++ __ BIND(not_zero); ++ ++ __ pop_i(SSR); ++ if (UseSW8A) { ++ __ corrected_idivw(SSR, FSR, FSR); ++ } else if (FastIntDiv) { ++ __ idiv_sw(SSR, FSR, FSR); ++ } else { ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::sdiv), FSR, SSR); ++ } ++} ++ ++void TemplateTable::irem() { ++ transition(itos, itos); ++ Label not_zero; ++ __ pop_i(SSR); ++ ++ __ bne(FSR, not_zero); ++ __ jmp(Interpreter::_throw_ArithmeticException_entry); ++ ++ __ BIND(not_zero); ++ if (UseSW8A) { ++ __ remw(SSR, FSR, FSR); ++ } else if (FastIntRem) { ++ __ irem_sw(SSR, FSR, FSR); ++ } else { ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::srem), FSR, SSR); ++ } ++} ++ ++void TemplateTable::lmul() { ++ transition(ltos, ltos); ++ __ pop_l(T2); ++ __ mull(FSR, T2, FSR); ++} ++ ++// NOTE: i DONT use the Interpreter::_throw_ArithmeticException_entry ++void TemplateTable::ldiv() { ++ transition(ltos, ltos); ++ Label normal; ++ ++ __ bne(FSR, normal); ++ ++ //__ brk(7); //generate FPE ++ __ jmp(Interpreter::_throw_ArithmeticException_entry); ++ ++ __ BIND(normal); ++ __ pop_l(A2); ++ if (UseSW8A) { ++ __ corrected_idivl(A2, FSR, FSR); ++ } else if (FastLongDiv) { ++ Label ldiv, exit; ++ __ slll(T7, A2, 0xb); ++ __ sral(T7, T7, 0xb); ++ __ cmpeq(T7, A2, T7); ++ __ bne(T7, ldiv); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), FSR, A2); ++ __ beq(R0, exit); ++ ++ __ BIND(ldiv); ++ __ ldiv_sw(A2, FSR, FSR); ++ ++ __ BIND(exit); ++ } else { ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv), FSR, A2); ++ } ++} ++ ++// NOTE: i DONT use the Interpreter::_throw_ArithmeticException_entry ++void TemplateTable::lrem() { ++ transition(ltos, ltos); ++ Label normal; ++ ++ __ bne(FSR, normal); ++ ++ __ jmp(Interpreter::_throw_ArithmeticException_entry); ++ ++ __ BIND(normal); ++ __ pop_l (A2); ++ if (UseSW8A) { ++ __ reml(A2, FSR, FSR); ++ } else if 
(FastLongRem) { ++ Label lrem, exit; ++ __ slll(T7, A2, 0xb); ++ __ sral(T7, T7, 0xb); ++ __ cmpeq(T7, A2, T7); ++ __ bne(T7, lrem); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), FSR, A2); ++ __ beq(R0, exit); ++ ++ __ BIND(lrem); ++ __ lrem_sw(A2, FSR, FSR); ++ ++ __ BIND(exit); ++ } else { ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem), FSR, A2); ++ } ++} ++ ++// result in FSR ++// used registers : T0 ++void TemplateTable::lshl() { ++ transition(itos, ltos); ++ __ pop_l(T0); ++ __ slll(FSR, T0, FSR); ++} ++ ++// used registers : T0 ++void TemplateTable::lshr() { ++ transition(itos, ltos); ++ __ pop_l(T0); ++ __ sral(FSR, T0, FSR); ++} ++ ++// used registers : T0 ++void TemplateTable::lushr() { ++ transition(itos, ltos); ++ __ pop_l(T0); ++ __ srll(FSR, T0, FSR); ++} ++ ++// result in FSF ++void TemplateTable::fop2(Operation op) { ++ transition(ftos, ftos); ++ switch (op) { ++ case add: ++ __ flds(FTF, at_sp()); ++ __ add_s(FSF, FTF, FSF); ++ break; ++ case sub: ++ __ flds(FTF, at_sp()); ++ __ sub_s(FSF, FTF, FSF); ++ break; ++ case mul: ++ __ flds(FTF, at_sp()); ++ __ mul_s(FSF, FTF, FSF); ++ break; ++ case div: ++ __ flds(FTF, at_sp()); ++ __ div_s(FSF, FTF, FSF); ++ break; ++ case rem: ++ { ++ __ flds(F16, at_sp()); //x ++ __ fmovd(F17, FSF); ++ Label nan, cont, end; ++ ++ // y = 0.0f ++ __ fbeq(F17, nan); ++ // x = NaN infinity ++ __ boundary_test(F16, GP); ++ __ beq(GP, nan); ++ // y = NaN ++ __ boundary_test(F17, GP); ++ __ bne(GP, cont); ++ __ fimovd(AT, F17); ++ __ slll(GP, AT, 12); ++ __ bne(GP, nan); ++ ++ __ BIND(cont); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2); ++ __ beq(R0, end); ++ ++ __ BIND(nan); ++ __ fdivd(FSF, F31, F31); ++ __ BIND(end); ++ } ++ ++ break; ++ default : ShouldNotReachHere(); ++ } ++ ++ __ add_simm16(SP, SP, 1 * wordSize); ++} ++ ++// result in SSF||FSF ++// i dont handle the strict flags ++void TemplateTable::dop2(Operation op) { ++ transition(dtos, dtos); ++ 
switch (op) { ++ case add: ++ __ fldd(FTF, at_sp()); ++ __ add_d(FSF, FTF, FSF); ++ break; ++ case sub: ++ __ fldd(FTF, at_sp()); ++ __ sub_d(FSF, FTF, FSF); ++ break; ++ case mul: ++ __ fldd(FTF, at_sp()); ++ __ mul_d(FSF, FTF, FSF); ++ break; ++ case div: ++ __ fldd(FTF, at_sp()); ++ __ div_d(FSF, FTF, FSF); ++ break; ++ case rem: ++ { ++ __ fldd(F16, at_sp()); //x ++ __ fmovd(F17, FSF); ++ Label nan, cont, end; ++ // y = 0.0f ++ __ fbeq(F17, nan); ++ // x = NaN infinity ++ __ boundary_test(F16, GP); ++ __ beq(GP, nan); ++ // y = NaN ++ __ boundary_test(F17, GP); ++ __ bne(GP, cont); ++ __ fimovd(AT, F17); ++ __ slll(GP, AT, 12); ++ __ bne(GP, nan); ++ ++ __ BIND(cont); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2); ++ __ beq(R0, end); ++ ++ __ BIND(nan); ++ __ fdivd(FSF, F31, F31); ++ __ BIND(end); ++ } ++ break; ++ default : ShouldNotReachHere(); ++ } ++ ++ __ add_simm16(SP, SP, 2 * wordSize); ++} ++ ++void TemplateTable::ineg() { ++ transition(itos, itos); ++ __ subw(FSR, R0, FSR); ++} ++ ++void TemplateTable::lneg() { ++ transition(ltos, ltos); ++ __ subl(FSR, R0, FSR); ++} ++ ++void TemplateTable::fneg() { ++ transition(ftos, ftos); ++ __ fneg(FSF, FSF); ++} ++ ++void TemplateTable::dneg() { ++ transition(dtos, dtos); ++ __ fneg(FSF, FSF); ++} ++ ++// used registers : T2 ++void TemplateTable::iinc() { ++ transition(vtos, vtos); ++ locals_index(T2); ++ __ ldw(FSR, T2, 0); ++ __ ldb_signed(AT, at_bcp(2)); // get constant ++ __ addl(FSR, FSR, AT); ++ __ stw(FSR, T2, 0); ++} ++ ++// used register : T2 ++void TemplateTable::wide_iinc() { ++ transition(vtos, vtos); ++ locals_index_wide(T2); ++ __ get_2_byte_integer_at_bcp(FSR, AT, 4); ++ __ hswap(FSR); ++ __ ldw(AT, T2, 0); ++ __ addl(FSR, AT, FSR); ++ __ stw(FSR, T2, 0); ++} ++ ++void TemplateTable::convert() { ++ // Checking ++#ifdef ASSERT ++ { ++ TosState tos_in = ilgl; ++ TosState tos_out = ilgl; ++ switch (bytecode()) { ++ case Bytecodes::_i2l: // fall through ++ case Bytecodes::_i2f: 
// fall through ++ case Bytecodes::_i2d: // fall through ++ case Bytecodes::_i2b: // fall through ++ case Bytecodes::_i2c: // fall through ++ case Bytecodes::_i2s: tos_in = itos; break; ++ case Bytecodes::_l2i: // fall through ++ case Bytecodes::_l2f: // fall through ++ case Bytecodes::_l2d: tos_in = ltos; break; ++ case Bytecodes::_f2i: // fall through ++ case Bytecodes::_f2l: // fall through ++ case Bytecodes::_f2d: tos_in = ftos; break; ++ case Bytecodes::_d2i: // fall through ++ case Bytecodes::_d2l: // fall through ++ case Bytecodes::_d2f: tos_in = dtos; break; ++ default : ShouldNotReachHere(); ++ } ++ switch (bytecode()) { ++ case Bytecodes::_l2i: // fall through ++ case Bytecodes::_f2i: // fall through ++ case Bytecodes::_d2i: // fall through ++ case Bytecodes::_i2b: // fall through ++ case Bytecodes::_i2c: // fall through ++ case Bytecodes::_i2s: tos_out = itos; break; ++ case Bytecodes::_i2l: // fall through ++ case Bytecodes::_f2l: // fall through ++ case Bytecodes::_d2l: tos_out = ltos; break; ++ case Bytecodes::_i2f: // fall through ++ case Bytecodes::_l2f: // fall through ++ case Bytecodes::_d2f: tos_out = ftos; break; ++ case Bytecodes::_i2d: // fall through ++ case Bytecodes::_l2d: // fall through ++ case Bytecodes::_f2d: tos_out = dtos; break; ++ default : ShouldNotReachHere(); ++ } ++ transition(tos_in, tos_out); ++ } ++#endif // ASSERT ++ ++ // Conversion ++ switch (bytecode()) { ++ case Bytecodes::_i2l: ++ __ addw(FSR, FSR, 0); //ZHJ __ sll(FSR, FSR, 0); ++ break; ++ case Bytecodes::_i2f: ++ if (UseSW8A) { ++ __ cmovws(FSF, FSR); ++ }else { ++ __ ifmovd(F30, FSR); ++ __ fcvtls(FSF, F30); ++ } ++ break; ++ case Bytecodes::_i2d: ++ if (UseSW8A) { ++ __ cmovwd(FSF, FSR); ++ }else { ++ __ ifmovd(F30, FSR); ++ __ fcvtld(FSF, F30); ++ } ++ break; ++ case Bytecodes::_i2b: ++ __ sextb(FSR, FSR); ++ break; ++ case Bytecodes::_i2c: ++ __ zapnot(FSR, FSR, 0x3); ++ break; ++ case Bytecodes::_i2s: ++ __ sexth(FSR, FSR); ++ break; ++ case Bytecodes::_l2i: ++ 
__ addw(FSR, FSR, 0); ++ break; ++ case Bytecodes::_l2f: ++ if (UseSW8A) { ++ __ cmovls(FSF, FSR); ++ }else { ++ __ ifmovd(FSF, FSR); ++ __ fcvtS2L(FSF, FSF); ++ } ++ break; ++ case Bytecodes::_l2d: ++ if (UseSW8A) { ++ __ cmovld(FSF, FSR); ++ }else { ++ __ ifmovd(FSF, FSR); ++ __ fcvtD2L(FSF, FSF); ++ } ++ break; ++ case Bytecodes::_f2i: ++ { ++ if (UseSW8A) { ++ __ cmovdw_z(FSR, FSF); ++ }else { ++ Label L; ++ ++ __ fmovs(F16, FSF); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); ++ __ BIND(L); ++ } ++ } ++ break; ++ case Bytecodes::_f2l: ++ { ++ if (UseSW8A) { ++ __ cmovdl_z(FSR, FSF); ++ }else { ++ Label L; ++ ++ __ fmovs(F16, FSF); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); ++ __ BIND(L); ++ } ++ } ++ break; ++ case Bytecodes::_f2d: ++ __ fcvtD2S(FSF, FSF); ++ break; ++ case Bytecodes::_d2i: ++ { ++ if (UseSW8A) { ++ __ cmovdw_z(FSR, FSF); ++ }else { ++ Label L; ++ ++ __ fmovd(F16, FSF); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); ++ __ BIND(L); ++ } ++ } ++ break; ++ case Bytecodes::_d2l: ++ { ++ if (UseSW8A) { ++ __ cmovdl_z(FSR, FSF); ++ }else { ++ Label L; ++ ++ __ fmovd(F16, FSF); ++ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); ++ __ BIND(L); ++ } ++ } ++ break; ++ case Bytecodes::_d2f: ++ __ fcvtS2D(FSF, FSF); ++ break; ++ default : ++ ShouldNotReachHere(); ++ } ++} ++ ++void TemplateTable::lcmp() { ++ transition(ltos, itos); ++ ++ Label low, high, done; ++ __ pop_l(T0); ++ __ cmplt(AT, T0, FSR); ++ __ bne(AT, low); ++ ++ __ bne(T0, FSR, high); ++ ++ __ ldi(FSR, R0, (long)0); ++ __ beq(R0, done); ++ ++ __ BIND(low); ++ __ ldi(FSR, R0, (long)-1); ++ __ beq(R0, done); ++ ++ __ BIND(high); ++ __ ldi(FSR, R0, (long)1); ++ __ beq(R0, done); ++ ++ __ BIND(done); ++} ++ ++void TemplateTable::float_cmp(bool is_float, int unordered_result) { ++ Label less, done; ++ ++ __ move(FSR, R0); ++ ++ if (is_float) { ++ __ flds(FTF, at_sp()); ++ __ fcmpeq(FcmpRES, FTF, FSF); ++ 
__ add_simm16(SP, SP, 1 * wordSize); ++ __ fbne(FcmpRES, done); ++ ++ if (unordered_result<0) ++ __ fcmplt_un(FcmpRES, FTF, FSF); ++ else ++ __ fcmplt(FcmpRES, FTF, FSF); ++ } else { ++ __ fldd(FTF, at_sp()); ++ __ fcmpeq(FcmpRES, FTF, FSF); ++ __ add_simm16(SP, SP, 2 * wordSize); ++ __ fbne(FcmpRES, done); ++ ++ if (unordered_result<0) ++ __ fcmplt_un(FcmpRES, FTF, FSF); ++ else ++ __ fcmplt(FcmpRES, FTF, FSF); ++ } ++ __ fbne(FcmpRES, less); ++ __ move(FSR, 1); ++ __ beq(R0, done); ++ __ BIND(less); ++ __ move(FSR, -1); ++ __ BIND(done); ++} ++ ++ ++// used registers : Rmethod, T7, Rnext ++// FSR : return bci, this is defined by the vm specification ++// T2 : MDO taken count ++// Rmethod : method ++// T7 : offset ++// Rnext : next bytecode, this is required by dispatch_base ++void TemplateTable::branch(bool is_jsr, bool is_wide) { ++ __ get_method(Rmethod); ++ __ profile_taken_branch(T7, T2); // only C2 meaningful ++ ++ const ByteSize be_offset = MethodCounters::backedge_counter_offset() + ++ InvocationCounter::counter_offset(); ++ const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + ++ InvocationCounter::counter_offset(); ++ ++ // Load up T4 with the branch displacement ++ if (!is_wide) { ++ __ ldb_signed(T7, BCP, 1); ++ __ ldbu(AT, BCP, 2); ++ __ slll(T7, T7, 8); ++ __ or_ins(T7, T7, AT); ++ } else { ++ __ get_4_byte_integer_at_bcp(T7, AT, 1); ++ __ swap(T7); ++ } ++ ++ // Handle all the JSR stuff here, then exit. ++ // It's much shorter and cleaner than intermingling with the ++ // non-JSR normal-branch stuff occuring below. 
++ if (is_jsr) { ++ // Pre-load the next target bytecode into Rnext ++ __ addl(AT, BCP, T7); ++ __ ldbu(Rnext, AT, 0); ++ ++ // compute return address as bci in FSR ++ __ add_simm16(FSR, BCP, (is_wide?5:3) - in_bytes(ConstMethod::codes_offset())); ++ __ ldl(AT, Rmethod, in_bytes(Method::const_offset())); ++ __ subl(FSR, FSR, AT); ++ // Adjust the bcp in BCP by the displacement in T7 ++ __ addl(BCP, BCP, T7); ++ // jsr returns atos that is not an oop ++ // __ dispatch_only_noverify(atos); ++ // Push return address ++ __ push_i(FSR); ++ // jsr returns vtos ++ __ dispatch_only_noverify(vtos); ++ ++ return; ++ } ++ ++ // Normal (non-jsr) branch handling ++ ++ // Adjust the bcp in S0 by the displacement in T4 ++ __ addl(BCP, BCP, T7); ++ ++ assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters"); ++ Label backedge_counter_overflow; ++ Label profile_method; ++ Label dispatch; ++ if (UseLoopCounter) { ++ // increment backedge counter for backward branches ++ // Rmethod: method ++ // T4: target offset ++ // BCP: target bcp ++ // LVP: locals pointer ++ __ bgt(T7, dispatch); // check if forward or backward branch ++ ++ // check if MethodCounters exists ++ Label has_counters; ++ __ ldl(AT, Rmethod, in_bytes(Method::method_counters_offset())); // use AT as MDO, TEMP ++ __ bne(AT, has_counters); ++ //__ push(T3); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), ++ Rmethod); ++ //__ pop(T3); ++ __ ldl(AT, Rmethod, in_bytes(Method::method_counters_offset())); // use AT as MDO, TEMP ++ __ beq(AT, dispatch); ++ __ BIND(has_counters); ++ ++ if (TieredCompilation) { ++ Label no_mdo; ++ int increment = InvocationCounter::count_increment; ++ int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift; ++ if (ProfileInterpreter) { ++ // Are we profiling? 
++ __ ldl(T0, Address(Rmethod, in_bytes(Method::method_data_offset()))); ++ __ beq(T0, no_mdo); ++ // Increment the MDO backedge counter ++ const Address mdo_backedge_counter(T0, in_bytes(MethodData::backedge_counter_offset()) + ++ in_bytes(InvocationCounter::counter_offset())); ++ __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, ++ T1, false, Assembler::zero, &backedge_counter_overflow); ++ __ beq(R0, dispatch); ++ } ++ __ BIND(no_mdo); ++ // Increment backedge counter in MethodCounters* ++ __ ldl(T0, Address(Rmethod, Method::method_counters_offset())); ++ __ increment_mask_and_jump(Address(T0, be_offset), increment, mask, ++ T1, false, Assembler::zero, &backedge_counter_overflow); ++ if (!UseOnStackReplacement) { ++ __ BIND(backedge_counter_overflow); ++ } ++ } else { ++ // increment back edge counter ++ __ ldl(T1, Rmethod, in_bytes(Method::method_counters_offset())); ++ __ ldw(T0, T1, in_bytes(be_offset)); ++ __ increment(T0, InvocationCounter::count_increment); ++ __ stw(T0, T1, in_bytes(be_offset)); ++ ++ // load invocation counter ++ __ ldw(T1, T1, in_bytes(inv_offset)); ++ // buffer bit added, mask no needed ++ ++ // dadd backedge counter & invocation counter ++ __ addl(T1, T1, T0); ++ ++ if (ProfileInterpreter) { ++ // Test to see if we should create a method data oop ++ // T1 : backedge counter & invocation counter ++ __ li32(AT, (long)InvocationCounter::InterpreterProfileLimit); ++ __ cmplt(AT, T1, AT); ++ ++ __ bne(AT, dispatch); ++ ++ // if no method data exists, go to profile method ++ __ test_method_data_pointer(T1, profile_method); ++ ++ if (UseOnStackReplacement) { ++ __ li32(AT, (long)InvocationCounter::InterpreterBackwardBranchLimit); ++ __ cmplt(AT, T2, AT); ++ ++ __ bne(AT, dispatch); ++ ++ // When ProfileInterpreter is on, the backedge_count comes ++ // from the methodDataOop, which value does not get reset on ++ // the call to frequency_counter_overflow(). 
++ // To avoid excessive calls to the overflow routine while ++ // the method is being compiled, dadd a second test to make ++ // sure the overflow function is called only once every ++ // overflow_frequency. ++ const int overflow_frequency = 1024; ++ __ ldi(GP, R0, overflow_frequency-1); ++ __ and_reg(AT, T2, GP); ++ __ beq(AT, backedge_counter_overflow); ++ } ++ } else { ++ if (UseOnStackReplacement) { ++ // check for overflow against AT, which is the sum of the counters ++ __ li32(AT, (long)InvocationCounter::InterpreterBackwardBranchLimit); ++ __ cmplt(AT, T1, AT); ++ __ beq(AT, backedge_counter_overflow); ++ } ++ } ++ } ++ __ BIND(dispatch); ++ } ++ ++ // Pre-load the next target bytecode into Rnext ++ __ ldbu(Rnext, BCP, 0); ++ ++ // continue with the bytecode @ target ++ // FSR: return bci for jsr's, unused otherwise ++ // Rnext: target bytecode ++ // BCP: target bcp ++ __ dispatch_only(vtos); ++ ++ if (UseLoopCounter) { ++ if (ProfileInterpreter) { ++ // Out-of-line code to allocate method data oop. ++ __ BIND(profile_method); ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); ++ __ ldbu(Rnext, BCP, 0); ++ __ set_method_data_pointer_for_bcp(); ++ __ beq(R0, dispatch); ++ } ++ ++ if (UseOnStackReplacement) { ++ // invocation counter overflow ++ __ BIND(backedge_counter_overflow); ++ __ subl(T7, BCP, T7); // branch bcp ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::frequency_counter_overflow), T7); ++ __ ldbu(Rnext, BCP, 0); ++ ++ // V0: osr nmethod (osr ok) or NULL (osr not possible) ++ // T4: osr adapter frame return address ++ // Rnext: target bytecode ++ // LVP: locals pointer ++ // BCP: bcp ++ __ beq(V0, dispatch); ++ // nmethod may have been invalidated (VM may block upon call_VM return) ++ __ ldw(T3, V0, nmethod::entry_bci_offset()); ++ __ move(AT, InvalidOSREntryBci); ++ __ beq(AT, T3, dispatch); ++ // We need to prepare to execute the OSR method. 
First we must ++ // migrate the locals and monitors off of the stack. ++ //V0: osr nmethod (osr ok) or NULL (osr not possible) ++ //T4: osr adapter frame return address ++ //Rnext: target bytecode ++ //LVP: locals pointer ++ //BCP: bcp ++ __ move(BCP, V0); ++ const Register thread = S2thread; ++ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); ++ ++ // V0 is OSR buffer, move it to expected parameter location ++ __ move(A1, V0); ++ ++ // pop the interpreter frame ++ __ ldl(T7, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize)); ++ __ leave(); // remove frame anchor ++ __ move(LVP, RA); ++ __ move(SP, T7); ++ ++ __ move(AT, -(StackAlignmentInBytes)); ++ __ and_reg(SP, SP, AT); ++ ++ // push the (possibly adjusted) return address ++ __ ldl(AT, BCP, nmethod::osr_entry_point_offset()); ++ __ jmp(AT); ++ } ++ } ++} ++ ++ ++void TemplateTable::if_0cmp(Condition cc) { ++ transition(itos, vtos); ++ // assume branch is more often taken than not (loops use backward branches) ++ Label not_taken; ++ switch(cc) { ++ case not_equal: ++ __ beq(FSR, not_taken); ++ break; ++ case equal: ++ __ bne(FSR, not_taken); ++ break; ++ case less: ++ __ bge(FSR, not_taken); ++ break; ++ case less_equal: ++ __ bgt(FSR, not_taken); ++ break; ++ case greater: ++ __ ble(FSR, not_taken); ++ break; ++ case greater_equal: ++ __ blt(FSR, not_taken); ++ break; ++ } ++ ++ branch(false, false); ++ ++ __ BIND(not_taken); ++ __ profile_not_taken_branch(FSR); ++} ++ ++void TemplateTable::if_icmp(Condition cc) { ++ transition(itos, vtos); ++ // assume branch is more often taken than not (loops use backward branches) ++ Label not_taken; ++ ++ __ pop_i(SSR); ++ switch(cc) { ++ case not_equal: ++ __ beq(SSR, FSR, not_taken); ++ break; ++ case equal: ++ __ bne(SSR, FSR, not_taken); ++ break; ++ case less: ++ __ cmplt(AT, SSR, FSR); ++ __ beq(AT, not_taken); ++ break; ++ case less_equal: ++ __ cmplt(AT, FSR, SSR); ++ __ bne(AT, not_taken); ++ break; ++ case greater: 
++ __ cmplt(AT, FSR, SSR); ++ __ beq(AT, not_taken); ++ break; ++ case greater_equal: ++ __ cmplt(AT, SSR, FSR); ++ __ bne(AT, not_taken); ++ break; ++ } ++ ++ branch(false, false); ++ ++ __ BIND(not_taken); ++ __ profile_not_taken_branch(FSR); ++} ++ ++void TemplateTable::if_nullcmp(Condition cc) { ++ transition(atos, vtos); ++ // assume branch is more often taken than not (loops use backward branches) ++ Label not_taken; ++ switch(cc) { ++ case not_equal: ++ __ beq(FSR, not_taken); ++ break; ++ case equal: ++ __ bne(FSR, not_taken); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ branch(false, false); ++ ++ __ BIND(not_taken); ++ __ profile_not_taken_branch(FSR); ++} ++ ++ ++void TemplateTable::if_acmp(Condition cc) { ++ transition(atos, vtos); ++ // assume branch is more often taken than not (loops use backward branches) ++ Label not_taken; ++ // __ lw(SSR, SP, 0); ++ __ pop_ptr(SSR); ++ switch(cc) { ++ case not_equal: ++ __ beq(SSR, FSR, not_taken); ++ break; ++ case equal: ++ __ bne(SSR, FSR, not_taken); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ branch(false, false); ++ ++ __ BIND(not_taken); ++ __ profile_not_taken_branch(FSR); ++} ++ ++// used registers : T1, T2, T3 ++// T1 : method ++// T2 : returb bci ++void TemplateTable::ret() { ++ transition(vtos, vtos); ++ ++ locals_index(T2); ++ __ ldl(T2, T2, 0); ++ __ profile_ret(T2, T3); ++ ++ __ get_method(T1); ++ __ ldl(BCP, T1, in_bytes(Method::const_offset())); ++ __ addl(BCP, BCP, T2); ++ __ add_simm16(BCP, BCP, in_bytes(ConstMethod::codes_offset())); ++ ++ __ dispatch_next(vtos); ++} ++ ++// used registers : T1, T2, T3 ++// T1 : method ++// T2 : returb bci ++void TemplateTable::wide_ret() { ++ transition(vtos, vtos); ++ ++ locals_index_wide(T2); ++ __ ldl(T2, T2, 0); // get return bci, compute return bcp ++ __ profile_ret(T2, T3); ++ ++ __ get_method(T1); ++ __ ldl(BCP, T1, in_bytes(Method::const_offset())); ++ __ addl(BCP, BCP, T2); ++ __ add_simm16(BCP, BCP, 
in_bytes(ConstMethod::codes_offset())); ++ ++ __ dispatch_next(vtos); ++} ++ ++// used register T2, T3, T7, Rnext ++// T2 : bytecode pointer ++// T3 : low ++// T7 : high ++// Rnext : dest bytecode, required by dispatch_base ++void TemplateTable::tableswitch() { ++ Label default_case, continue_execution; ++ transition(itos, vtos); ++ ++ // align BCP ++ __ add_simm16(T2, BCP, BytesPerInt); ++ __ ldi(AT, R0, -BytesPerInt); ++ __ and_reg(T2, T2, AT); ++ ++ // load lo & hi ++ __ ldw(T3, T2, 1 * BytesPerInt); ++ __ swap(T3); ++ __ ldw(T7, T2, 2 * BytesPerInt); ++ __ swap(T7); ++ ++ // check against lo & hi ++ __ cmplt(AT, FSR, T3); ++ __ bne(AT, default_case); ++ ++ __ cmplt(AT, T7, FSR); ++ __ bne(AT, default_case); ++ ++ // lookup dispatch offset, in T7 big endian ++ __ subl(FSR, FSR, T3); ++ __ slll(AT, FSR, Address::times_4); ++ __ addl(AT, T2, AT); ++ __ ldw(T7, AT, 3 * BytesPerInt); ++ __ profile_switch_case(FSR, T12, T3); ++ ++ __ BIND(continue_execution); ++ __ swap(T7); ++ __ addl(BCP, BCP, T7); ++ __ ldbu(Rnext, BCP, 0); ++ __ dispatch_only(vtos); ++ ++ // handle default ++ __ BIND(default_case); ++ __ profile_switch_default(FSR); ++ __ ldw(T7, T2, 0); ++ __ beq(R0, continue_execution); ++} ++ ++void TemplateTable::lookupswitch() { ++ transition(itos, itos); ++ __ stop("lookupswitch bytecode should have been rewritten"); ++} ++ ++// used registers : T2, T3, T7, Rnext ++// T2 : bytecode pointer ++// T3 : pair index ++// T7 : offset ++// Rnext : dest bytecode ++// the data after the opcode is the same as lookupswitch ++// see Rewriter::rewrite_method for more information ++void TemplateTable::fast_linearswitch() { ++ transition(itos, vtos); ++ Label loop_entry, loop, found, continue_execution; ++ ++ // swap FSR so we can avoid swapping the table entries ++ __ swap(FSR); ++ ++ // align BCP ++ __ add_simm16(T2, BCP, BytesPerInt); ++ __ ldi(AT, R0, -BytesPerInt); ++ __ and_reg(T2, T2, AT); ++ ++ // set counter ++ __ ldw(T3, T2, BytesPerInt); ++ __ swap(T3); ++ __ 
beq(R0, loop_entry); ++ ++ // table search ++ __ BIND(loop); ++ // get the entry value ++ __ slll(AT, T3, Address::times_8); ++ __ addl(AT, T2, AT); ++ __ ldw(AT, AT, 2 * BytesPerInt); ++ ++ // found? ++ __ beq(FSR, AT, found); ++ ++ __ BIND(loop_entry); ++ __ subl(T3, T3, 1); ++ __ bge(T3, loop); ++ ++ // default case ++ __ profile_switch_default(FSR); ++ __ ldw(T7, T2, 0); ++ __ beq(R0, continue_execution); ++ ++ // entry found -> get offset ++ __ BIND(found); ++ __ slll(AT, T3, Address::times_8); ++ __ addl(AT, T2, AT); ++ __ ldw(T7, AT, 3 * BytesPerInt); ++ __ profile_switch_case(T3, FSR, T2); ++ ++ // continue execution ++ __ BIND(continue_execution); ++ __ swap(T7); ++ __ addl(BCP, BCP, T7); ++ __ ldbu(Rnext, BCP, 0); ++ __ dispatch_only(vtos); ++} ++ ++// used registers : T0, T1, T2, T3, T7, Rnext ++// T2 : pairs address(array) ++// Rnext : dest bytecode ++// the data after the opcode is the same as lookupswitch ++// see Rewriter::rewrite_method for more information ++void TemplateTable::fast_binaryswitch() { ++ transition(itos, vtos); ++ // Implementation using the following core algorithm: ++ // ++ // int binary_search(int key, LookupswitchPair* array, int n) { ++ // // Binary search according to "Methodik des Programmierens" by ++ // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. ++ // int i = 0; ++ // int j = n; ++ // while (i+1 < j) { ++ // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) ++ // // with Q: for all i: 0 <= i < n: key < a[i] ++ // // where a stands for the array and assuming that the (inexisting) ++ // // element a[n] is infinitely big. 
++ // int h = (i + j) >> 1; ++ // // i < h < j ++ // if (key < array[h].fast_match()) { ++ // j = h; ++ // } else { ++ // i = h; ++ // } ++ // } ++ // // R: a[i] <= key < a[i+1] or Q ++ // // (i.e., if key is within array, i is the correct index) ++ // return i; ++ // } ++ ++ // register allocation ++ const Register array = T2; ++ const Register i = T3, j = T7; ++ const Register h = T1; ++ const Register temp = T0; ++ const Register key = FSR; ++ ++ // setup array ++ __ add_simm16(array, BCP, 3*BytesPerInt); ++ __ ldi(AT, R0, -BytesPerInt); ++ __ and_reg(array, array, AT); ++ ++ // initialize i & j ++ __ move(i, R0); ++ __ ldw(j, array, - 1 * BytesPerInt); ++ // Convert j into native byteordering ++ __ swap(j); ++ ++ // and start ++ Label entry; ++ __ beq(R0, entry); ++ ++ // binary search loop ++ { ++ Label loop; ++ __ BIND(loop); ++ // int h = (i + j) >> 1; ++ __ addl(h, i, j); ++ __ srll(h, h, 1); ++ // if (key < array[h].fast_match()) { ++ // j = h; ++ // } else { ++ // i = h; ++ // } ++ // Convert array[h].match to native byte-ordering before compare ++ __ slll(AT, h, Address::times_8); ++ __ addl(AT, array, AT); ++ __ ldw(temp, AT, 0 * BytesPerInt); ++ __ swap(temp); ++ ++ { ++ Label set_i, end_of_if; ++ __ cmplt(AT, key, temp); ++ __ beq(AT, set_i); ++ ++ __ move(j, h); ++ __ beq(R0, end_of_if); ++ ++ __ BIND(set_i); ++ __ move(i, h); ++ ++ __ BIND(end_of_if); ++ } ++ // while (i+1 < j) ++ __ BIND(entry); ++ __ addl(h, i, 1); ++ __ cmplt(AT, h, j); ++ __ bne(AT, loop); ++ } ++ ++ // end of binary search, result index is i (must check again!) 
++ Label default_case; ++ // Convert array[i].match to native byte-ordering before compare ++ __ slll(AT, i, Address::times_8); ++ __ addl(AT, array, AT); ++ __ ldw(temp, AT, 0 * BytesPerInt); ++ __ swap(temp); ++ __ bne(key, temp, default_case); ++ ++ // entry found -> j = offset ++ __ slll(AT, i, Address::times_8); ++ __ addl(AT, array, AT); ++ __ ldw(j, AT, 1 * BytesPerInt); ++ __ profile_switch_case(i, key, array); ++ __ swap(j); ++ ++ __ addl(BCP, j, BCP); ++ __ ldbu(Rnext, BCP, 0); ++ __ dispatch_only(vtos); ++ ++ // default case -> j = default offset ++ __ BIND(default_case); ++ __ profile_switch_default(i); ++ __ ldw(j, array, - 2 * BytesPerInt); ++ __ swap(j); ++ __ addl(BCP, BCP, j); ++ __ ldbu(Rnext, BCP, 0); ++ __ dispatch_only(vtos); ++} ++ ++void TemplateTable::_return(TosState state) { ++ transition(state, state); ++ assert(_desc->calls_vm(), ++ "inconsistent calls_vm information"); // call in remove_activation ++ ++ if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { ++ assert(state == vtos, "only valid state"); ++ __ ldl(T1, aaddress(0)); ++ __ load_klass(LVP, T1); ++ __ ldw(LVP, LVP, in_bytes(Klass::access_flags_offset())); ++ __ move(AT, JVM_ACC_HAS_FINALIZER); ++ __ and_reg(AT, AT, LVP); ++ Label skip_register_finalizer; ++ __ beq(AT, skip_register_finalizer); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::register_finalizer), T1); ++ __ BIND(skip_register_finalizer); ++ } ++ ++ // Narrow result if state is itos but result type is smaller. ++ // Need to narrow in the return bytecode rather than in generate_return_entry ++ // since compiled code callers expect the result to already be narrowed. 
++ if (state == itos) { ++ __ narrow(FSR); ++ } ++ ++ __ remove_activation(state, T12); ++ if(UseWmemb) ++ __ wmemb(); ++ else ++ __ memb(); ++ ++ __ jmp(T12); ++} ++ ++// ---------------------------------------------------------------------------- ++// Volatile variables demand their effects be made known to all CPU's ++// in order. Store buffers on most chips allow reads & writes to ++// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode ++// without some kind of memory barrier (i.e., it's not sufficient that ++// the interpreter does not reorder volatile references, the hardware ++// also must not reorder them). ++// ++// According to the new Java Memory Model (JMM): ++// (1) All volatiles are serialized wrt to each other. ALSO reads & ++// writes act as aquire & release, so: ++// (2) A read cannot let unrelated NON-volatile memory refs that ++// happen after the read float up to before the read. It's OK for ++// non-volatile memory refs that happen before the volatile read to ++// float down below it. ++// (3) Similar a volatile write cannot let unrelated NON-volatile ++// memory refs that happen BEFORE the write float down to after the ++// write. It's OK for non-volatile memory refs that happen after the ++// volatile write to float up before it. ++// ++// We only put in barriers around volatile refs (they are expensive), ++// not _between_ memory refs (that would require us to track the ++// flavor of the previous memory refs). Requirements (2) and (3) ++// require some barriers before volatile stores and after volatile ++// loads. These nearly cover requirement (1) but miss the ++// volatile-store-volatile-load case. This final case is placed after ++// volatile-stores although it could just as well go before ++// volatile-loads. ++void TemplateTable::volatile_barrier() { ++ if(os::is_MP()) __ memb(); ++} ++ ++// we dont shift left 2 bits in get_cache_and_index_at_bcp ++// for we always need shift the index we use it. 
the ConstantPoolCacheEntry ++// is 16-byte long, index is the index in ++// ConstantPoolCache, so cache + base_offset() + index * 16 is ++// the corresponding ConstantPoolCacheEntry ++// used registers : T2 ++// NOTE : the returned index need also shift left 4 to get the address! ++void TemplateTable::resolve_cache_and_index(int byte_no, ++ Register Rcache, ++ Register index, ++ size_t index_size) { ++ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); ++ const Register temp = A1; ++ assert_different_registers(Rcache, index); ++ ++ Label resolved, Ldone; ++ __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size); ++ // is resolved? ++ int i = (int)bytecode(); ++ __ add_simm16(temp, temp, -i); ++ __ beq(temp, resolved); ++ // resolve first time through ++ address entry; ++ switch (bytecode()) { ++ case Bytecodes::_getstatic : // fall through ++ case Bytecodes::_putstatic : // fall through ++ case Bytecodes::_getfield : // fall through ++ case Bytecodes::_putfield : ++ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); ++ break; ++ case Bytecodes::_invokevirtual : // fall through ++ case Bytecodes::_invokespecial : // fall through ++ case Bytecodes::_invokestatic : // fall through ++ case Bytecodes::_invokeinterface: ++ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); ++ break; ++ case Bytecodes::_invokehandle: ++ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); ++ break; ++ case Bytecodes::_invokedynamic: ++ entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); ++ break; ++ default : ++ fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode()))); ++ break; ++ } ++ ++ __ move(temp, i); ++ __ call_VM(NOREG, entry, temp); ++ ++ // Update registers with resolved info ++ __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); ++ __ beq(R0, Ldone); ++ ++ __ BIND(resolved); ++ __ memb(); // Order load wrt. 
succeeding loads. ++ __ BIND(Ldone); ++} ++ ++// The Rcache and index registers must be set before call ++void TemplateTable::load_field_cp_cache_entry(Register obj, ++ Register cache, ++ Register index, ++ Register off, ++ Register flags, ++ bool is_static = false) { ++ assert_different_registers(cache, index, flags, off); ++ ++ ByteSize cp_base_offset = ConstantPoolCache::base_offset(); ++ // Field offset ++ __ slll(AT, index, Address::times_ptr); ++ __ addl(AT, cache, AT); ++ __ ldl(off, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())); ++ // Flags ++ __ ldl(flags, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())); ++ ++ // klass overwrite register ++ if (is_static) { ++ __ ldl(obj, AT, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())); ++ const int mirror_offset = in_bytes(Klass::java_mirror_offset()); ++ __ ldl(obj, Address(obj, mirror_offset)); ++ ++ //__ verify_oop(obj); ++ } ++} ++ ++// get the method, itable_index and flags of the current invoke ++void TemplateTable::load_invoke_cp_cache_entry(int byte_no, ++ Register method, ++ Register itable_index, ++ Register flags, ++ bool is_invokevirtual, ++ bool is_invokevfinal, /*unused*/ ++ bool is_invokedynamic) { ++ // setup registers ++ const Register cache = T3; ++ const Register index = T1; ++ assert_different_registers(method, flags); ++ assert_different_registers(method, cache, index); ++ assert_different_registers(itable_index, flags); ++ assert_different_registers(itable_index, cache, index); ++ assert(is_invokevirtual == (byte_no == f2_byte), "is invokevirtual flag redundant"); ++ // determine constant pool cache field offsets ++ const int method_offset = in_bytes( ++ ConstantPoolCache::base_offset() + ++ ((byte_no == f2_byte) ++ ? 
ConstantPoolCacheEntry::f2_offset() ++ : ConstantPoolCacheEntry::f1_offset())); ++ const int flags_offset = in_bytes(ConstantPoolCache::base_offset() + ++ ConstantPoolCacheEntry::flags_offset()); ++ // access constant pool cache fields ++ const int index_offset = in_bytes(ConstantPoolCache::base_offset() + ++ ConstantPoolCacheEntry::f2_offset()); ++ ++ size_t index_size = (is_invokedynamic ? sizeof(u4): sizeof(u2)); ++ resolve_cache_and_index(byte_no, cache, index, index_size); ++ ++ //assert(wordSize == 8, "adjust code below"); ++ // note we shift 4 not 2, for we get is the true inde ++ // of ConstantPoolCacheEntry, not the shifted 2-bit index as x86 version ++ __ slll(AT, index, Address::times_ptr); ++ __ addl(AT, cache, AT); ++ __ ldl(method, AT, method_offset); ++ ++ if (itable_index != NOREG) { ++ __ ldl(itable_index, AT, index_offset); ++ } ++ __ ldl(flags, AT, flags_offset); ++} ++ ++// The registers cache and index expected to be set before call. ++// Correct values of the cache and index registers are preserved. ++void TemplateTable::jvmti_post_field_access(Register cache, Register index, ++ bool is_static, bool has_tos) { ++ // do the JVMTI work here to avoid disturbing the register state below ++ // We use c_rarg registers here because we want to use the register used in ++ // the call to the VM ++ if (JvmtiExport::can_post_field_access()) { ++ // Check to see if a field access watch has been set before we ++ // take the time to call into the VM. 
++ Label L1; ++ // kill FSR ++ Register tmp1 = T2; ++ Register tmp2 = T1; ++ Register tmp3 = T3; ++ assert_different_registers(cache, index, AT); ++ __ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr()); ++ __ ldw(AT, AT, 0); ++ __ beq(AT, L1); ++ ++ __ get_cache_and_index_at_bcp(tmp2, tmp3, 1); ++ ++ // cache entry pointer ++ __ add_simm16(tmp2, tmp2, in_bytes(ConstantPoolCache::base_offset())); ++ __ shl(tmp3, LogBytesPerWord); ++ __ addl(tmp2, tmp2, tmp3); ++ if (is_static) { ++ __ move(tmp1, R0); ++ } else { ++ __ ldl(tmp1, SP, 0); ++ __ verify_oop(tmp1); ++ } ++ // tmp1: object pointer or NULL ++ // tmp2: cache entry pointer ++ // tmp3: jvalue object on the stack ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::post_field_access), ++ tmp1, tmp2, tmp3); ++ __ get_cache_and_index_at_bcp(cache, index, 1); ++ __ BIND(L1); ++ } ++} ++ ++void TemplateTable::pop_and_check_object(Register r) { ++ __ pop_ptr(r); ++ __ null_check(r); // for field access must check obj. 
++ __ verify_oop(r); ++} ++ ++// used registers : T1, T2, T3, T1 ++// T1 : flags ++// T2 : off ++// T3 : obj ++// T1 : field address ++// The flags 31, 30, 29, 28 together build a 4 bit number 0 to 8 with the ++// following mapping to the TosState states: ++// btos: 0 ++// ctos: 1 ++// stos: 2 ++// itos: 3 ++// ltos: 4 ++// ftos: 5 ++// dtos: 6 ++// atos: 7 ++// vtos: 8 ++// see ConstantPoolCacheEntry::set_field for more info ++void TemplateTable::getfield_or_static(int byte_no, bool is_static) { ++ transition(vtos, vtos); ++ ++ const Register cache = T3; ++ const Register index = T0; ++ ++ const Register obj = T3; ++ const Register off = T2; ++ const Register flags = T1; ++ ++ const Register scratch = T11; ++ ++ resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); ++ jvmti_post_field_access(cache, index, is_static, false); ++ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); ++ ++ { ++ __ move(scratch, 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ __ and_reg(scratch, scratch, flags); ++ ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++ ++ if (!is_static) pop_and_check_object(obj); ++ __ addl(index, obj, off); ++ ++ ++ Label Done, notByte, notBool, notInt, notShort, notChar, ++ notLong, notFloat, notObj, notDouble; ++ ++ assert(btos == 0, "change code, btos != 0"); ++ __ srll(flags, flags, ConstantPoolCacheEntry::tos_state_shift); ++ __ and_imm8(flags, flags, ConstantPoolCacheEntry::tos_state_mask); ++ __ bne(flags, notByte); ++ ++ // btos ++ __ ldb_signed(FSR, index, 0); ++ __ stl(FSR, SP, - wordSize); ++ ++ // Rewrite bytecode to be faster ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_bgetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notByte); ++ __ move(AT, ztos); ++ __ bne(flags, AT, notBool); ++ ++ // ztos ++ __ ldb_signed(FSR, index, 0); ++ __ stl(FSR, SP, - wordSize); ++ ++ // Rewrite bytecode to be faster ++ 
if (!is_static) { ++ // patch_bytecode(Bytecodes::_fast_igetfield, T3, T2); ++ patch_bytecode(Bytecodes::_fast_bgetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notBool); ++ __ move(AT, itos); ++ __ bne(flags, AT, notInt); ++ ++ // itos ++ __ ldw(FSR, index, 0); ++ __ stl(FSR, SP, - wordSize); ++ ++ // Rewrite bytecode to be faster ++ if (!is_static) { ++ // patch_bytecode(Bytecodes::_fast_igetfield, T3, T2); ++ patch_bytecode(Bytecodes::_fast_igetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notInt); ++ __ move(AT, atos); ++ __ bne(flags, AT, notObj); ++ ++ // atos ++ //add for compressedoops ++ __ load_heap_oop(FSR, Address(index, 0)); ++ __ stl(FSR, SP, - wordSize); ++ ++ if (!is_static) { ++ //patch_bytecode(Bytecodes::_fast_agetfield, T3, T2); ++ patch_bytecode(Bytecodes::_fast_agetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notObj); ++ __ move(AT, ctos); ++ __ bne(flags, AT, notChar); ++ ++ // ctos ++ __ ldhu(FSR, index, 0); ++ __ stl(FSR, SP, - wordSize); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_cgetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notChar); ++ __ move(AT, stos); ++ __ bne(flags, AT, notShort); ++ ++ // stos ++ __ ldh_signed(FSR, index, 0); ++ __ stl(FSR, SP, - wordSize); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_sgetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notShort); ++ __ move(AT, ltos); ++ __ bne(flags, AT, notLong); ++ ++ // ltos ++ __ ldl(FSR, index, 0 * wordSize); ++ __ stl(FSR, SP, -2 * wordSize); ++ __ stl(R0, SP, -1 * wordSize); ++ ++ // Don't rewrite to _fast_lgetfield for potential volatile case. 
++ __ add_simm16(SP, SP, - 2 * wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notLong); ++ __ move(AT, ftos); ++ __ bne(flags, AT, notFloat); ++ ++ // ftos ++ __ flds(FSF, index, 0); ++ __ fsts(FSF, SP, - wordSize); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_fgetfield, T3, T2); ++ } ++ __ add_simm16(SP, SP, - wordSize); ++ __ beq(R0, Done); ++ ++ __ BIND(notFloat); ++ __ move(AT, dtos); ++#ifdef ASSERT ++ __ bne(flags, AT, notDouble); ++#endif ++ ++ // dtos ++ __ fldd(FSF, index, 0 * wordSize); ++ __ fstd(FSF, SP, - 2 * wordSize); ++ __ stl(R0, SP, - 1 * wordSize); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_dgetfield, T3, T2); ++ } ++ ++ __ add_simm16(SP, SP, - 2 * wordSize); ++ ++#ifdef ASSERT ++ __ beq(R0, Done); ++ __ BIND(notDouble); ++ __ stop("Bad state"); ++#endif ++ ++ __ BIND(Done); ++ ++ { ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++} ++ ++ ++void TemplateTable::getfield(int byte_no) { ++ getfield_or_static(byte_no, false); ++} ++ ++void TemplateTable::getstatic(int byte_no) { ++ getfield_or_static(byte_no, true); ++} ++ ++// The registers cache and index expected to be set before call. ++// The function may destroy various registers, just not the cache and index registers. ++void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { ++ transition(vtos, vtos); ++ ++ ByteSize cp_base_offset = ConstantPoolCache::base_offset(); ++ ++ if (JvmtiExport::can_post_field_modification()) { ++ // Check to see if a field modification watch has been set before ++ // we take the time to call into the VM. 
++ Label L1; ++ //kill AT, T1, T2, T3, T9 ++ Register tmp1 = T2; ++ Register tmp2 = T1; ++ Register tmp3 = T3; ++ Register tmp4 = T12; ++ assert_different_registers(cache, index, tmp4); ++ ++ __ li(AT, JvmtiExport::get_field_modification_count_addr()); ++ __ ldw(AT, AT, 0); ++ __ beq(AT, L1); ++ ++ __ get_cache_and_index_at_bcp(tmp2, tmp4, 1); ++ ++ if (is_static) { ++ __ move(tmp1, R0); ++ } else { ++ // Life is harder. The stack holds the value on top, followed by ++ // the object. We don't know the size of the value, though; it ++ // could be one or two words depending on its type. As a result, ++ // we must find the type to determine where the object is. ++ Label two_word, valsize_known; ++ __ slll(AT, tmp4, Address::times_8); ++ __ addl(AT, tmp2, AT); ++ __ ldl(tmp3, AT, in_bytes(cp_base_offset + ++ ConstantPoolCacheEntry::flags_offset())); ++ __ shr(tmp3, ConstantPoolCacheEntry::tos_state_shift); ++ ++ ConstantPoolCacheEntry::verify_tos_state_shift(); ++ __ move(tmp1, SP); ++ __ move(AT, ltos); ++ __ beq(tmp3, AT, two_word); ++ __ move(AT, dtos); ++ __ beq(tmp3, AT, two_word); ++ __ add_simm16(tmp1, tmp1, Interpreter::expr_offset_in_bytes(1) ); ++ __ beq(R0, valsize_known); ++ ++ __ BIND(two_word); ++ __ add_simm16(tmp1, tmp1, Interpreter::expr_offset_in_bytes(2)); ++ ++ __ BIND(valsize_known); ++ // setup object pointer ++ __ ldl(tmp1, tmp1, 0*wordSize); ++ } ++ // cache entry pointer ++ __ add_simm16(tmp2, tmp2, in_bytes(cp_base_offset)); ++ __ shl(tmp4, LogBytesPerWord); ++ __ addl(tmp2, tmp2, tmp4); ++ // object (tos) ++ __ move(tmp3, SP); ++ // tmp1: object pointer set up above (NULL if static) ++ // tmp2: cache entry pointer ++ // tmp3: jvalue object on the stack ++ __ call_VM(NOREG, ++ CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::post_field_modification), ++ tmp1, tmp2, tmp3); ++ __ get_cache_and_index_at_bcp(cache, index, 1); ++ __ BIND(L1); ++ } ++} ++ ++// used registers : T0, T1, T2, T3, T11 ++// T1 : flags ++// T2 : off ++// T3 : obj ++// T11 
: volatile bit ++// see ConstantPoolCacheEntry::set_field for more info ++void TemplateTable::putfield_or_static(int byte_no, bool is_static) { ++ transition(vtos, vtos); ++ ++ const Register cache = T3; ++ const Register index = T0; ++ const Register obj = T3; ++ const Register off = T2; ++ const Register flags = T1; ++ const Register bc = T3; ++ ++ const Register scratch = T11; ++ ++ resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); ++ jvmti_post_field_mod(cache, index, is_static); ++ load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); ++ ++ Label Done; ++ { ++ __ move(scratch, 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ __ and_reg(scratch, scratch, flags); ++ ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++ ++ ++ Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; ++ ++ assert(btos == 0, "change code, btos != 0"); ++ ++ // btos ++ __ srll(flags, flags, ConstantPoolCacheEntry::tos_state_shift); ++ __ and_imm8(flags, flags, ConstantPoolCacheEntry::tos_state_mask); ++ __ bne(flags, notByte); ++ ++ __ pop(btos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ stb(FSR, AT, 0); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_bputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // ztos ++ __ BIND(notByte); ++ __ move(AT, ztos); ++ __ bne(flags, AT, notBool); ++ ++ __ pop(ztos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ and_imm8(FSR, FSR, 0x1); ++ __ stb(FSR, AT, 0); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_zputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // itos ++ __ BIND(notBool); ++ __ move(AT, itos); ++ __ bne(flags, AT, notInt); ++ ++ __ pop(itos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ stw(FSR, AT, 0); ++ ++ if (!is_static) { ++ 
patch_bytecode(Bytecodes::_fast_iputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // atos ++ __ BIND(notInt); ++ __ move(AT, atos); ++ __ bne(flags, AT, notObj); ++ ++ __ pop(atos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ ++ do_oop_store(_masm, Address(obj, off, Address::times_1, 0), FSR, _bs->kind(), false); ++ ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_aputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // ctos ++ __ BIND(notObj); ++ __ move(AT, ctos); ++ __ bne(flags, AT, notChar); ++ ++ __ pop(ctos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ sth(FSR, AT, 0); ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_cputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // stos ++ __ BIND(notChar); ++ __ move(AT, stos); ++ __ bne(flags, AT, notShort); ++ ++ __ pop(stos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ sth(FSR, AT, 0); ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_sputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // ltos ++ __ BIND(notShort); ++ __ move(AT, ltos); ++ __ bne(flags, AT, notLong); ++ ++ __ pop(ltos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ stl(FSR, AT, 0); ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_lputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // ftos ++ __ BIND(notLong); ++ __ move(AT, ftos); ++ __ bne(flags, AT, notFloat); ++ ++ __ pop(ftos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ addl(AT, obj, off); ++ __ fsts(FSF, AT, 0); ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_fputfield, bc, off, true, byte_no); ++ } ++ __ beq(R0, Done); ++ ++ // dtos ++ __ BIND(notFloat); ++ __ move(AT, dtos); ++#ifdef ASSERT ++ __ bne(flags, AT, notDouble); ++#endif ++ ++ __ pop(dtos); ++ if (!is_static) { ++ pop_and_check_object(obj); ++ } ++ __ 
addl(AT, obj, off); ++ __ fstd(FSF, AT, 0); ++ if (!is_static) { ++ patch_bytecode(Bytecodes::_fast_dputfield, bc, off, true, byte_no); ++ } ++ ++#ifdef ASSERT ++ __ beq(R0, Done); ++ ++ __ BIND(notDouble); ++ __ stop("Bad state"); ++#endif ++ ++ __ BIND(Done); ++ ++ { ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++} ++ ++void TemplateTable::putfield(int byte_no) { ++ putfield_or_static(byte_no, false); ++} ++ ++void TemplateTable::putstatic(int byte_no) { ++ putfield_or_static(byte_no, true); ++} ++ ++// used registers : T1, T2, T3 ++// T1 : cp_entry ++// T2 : obj ++// T3 : value pointer ++void TemplateTable::jvmti_post_fast_field_mod() { ++ if (JvmtiExport::can_post_field_modification()) { ++ // Check to see if a field modification watch has been set before ++ // we take the time to call into the VM. ++ Label L2; ++ //kill AT, T1, T2, T3, T11 ++ Register tmp1 = T2; ++ Register tmp2 = T1; ++ Register tmp3 = T3; ++ Register tmp4 = T9; ++ __ li(AT, JvmtiExport::get_field_modification_count_addr()); ++ __ ldw(tmp3, AT, 0); ++ __ beq(tmp3, L2); ++ __ pop_ptr(tmp1); ++ __ verify_oop(tmp1); ++ __ push_ptr(tmp1); ++ switch (bytecode()) { // load values into the jvalue object ++ case Bytecodes::_fast_aputfield: __ push_ptr(FSR); break; ++ case Bytecodes::_fast_bputfield: // fall through ++ case Bytecodes::_fast_zputfield: // fall through ++ case Bytecodes::_fast_sputfield: // fall through ++ case Bytecodes::_fast_cputfield: // fall through ++ case Bytecodes::_fast_iputfield: __ push_i(FSR); break; ++ case Bytecodes::_fast_dputfield: __ push_d(FSF); break; ++ case Bytecodes::_fast_fputfield: __ push_f(); break; ++ case Bytecodes::_fast_lputfield: __ push_l(FSR); break; ++ default: ShouldNotReachHere(); ++ } ++ __ move(tmp3, SP); ++ // access constant pool cache entry ++ __ get_cache_entry_pointer_at_bcp(tmp2, FSR, 1); ++ __ verify_oop(tmp1); ++ // tmp1: object pointer copied above ++ // tmp2: cache entry 
pointer ++ // tmp3: jvalue object on the stack ++ __ call_VM(NOREG, ++ CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::post_field_modification), ++ tmp1, tmp2, tmp3); ++ ++ switch (bytecode()) { // restore tos values ++ case Bytecodes::_fast_aputfield: __ pop_ptr(FSR); break; ++ case Bytecodes::_fast_bputfield: // fall through ++ case Bytecodes::_fast_zputfield: // fall through ++ case Bytecodes::_fast_sputfield: // fall through ++ case Bytecodes::_fast_cputfield: // fall through ++ case Bytecodes::_fast_iputfield: __ pop_i(FSR); break; ++ case Bytecodes::_fast_dputfield: __ pop_d(); break; ++ case Bytecodes::_fast_fputfield: __ pop_f(); break; ++ case Bytecodes::_fast_lputfield: __ pop_l(FSR); break; ++ default: ShouldNotReachHere(); ++ } ++ __ BIND(L2); ++ } ++} ++ ++// used registers : T2, T3, T1 ++// T2 : index & off & field address ++// T3 : cache & obj ++// T1 : flags ++void TemplateTable::fast_storefield(TosState state) { ++ transition(state, vtos); ++ ++ const Register scratch = T11; ++ ++ ByteSize base = ConstantPoolCache::base_offset(); ++ ++ jvmti_post_fast_field_mod(); ++ ++ // access constant pool cache ++ __ get_cache_and_index_at_bcp(T3, T2, 1); ++ ++ // test for volatile with T1 ++ __ slll(AT, T2, Address::times_8); ++ __ addl(AT, T3, AT); ++ __ ldl(T1, AT, in_bytes(base + ConstantPoolCacheEntry::flags_offset())); ++ ++ // replace index with field offset from cache entry ++ __ ldl(T2, AT, in_bytes(base + ConstantPoolCacheEntry::f2_offset())); ++ ++ Label Done; ++ { ++ __ move(scratch, 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ __ and_reg(scratch, scratch, T1); ++ ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++ ++ // Get object from stack ++ pop_and_check_object(T3); ++ ++ if (bytecode() != Bytecodes::_fast_aputfield) { ++ // field address ++ __ addl(T2, T3, T2); ++ } ++ ++ // access field ++ switch (bytecode()) { ++ case Bytecodes::_fast_zputfield: ++ __ and_imm8(FSR, FSR, 
0x1); // boolean is true if LSB is 1 ++ // fall through to bputfield ++ case Bytecodes::_fast_bputfield: ++ __ stb(FSR, T2, 0); ++ break; ++ case Bytecodes::_fast_sputfield: // fall through ++ case Bytecodes::_fast_cputfield: ++ __ sth(FSR, T2, 0); ++ break; ++ case Bytecodes::_fast_iputfield: ++ __ stw(FSR, T2, 0); ++ break; ++ case Bytecodes::_fast_lputfield: ++ __ stl(FSR, T2, 0 * wordSize); ++ break; ++ case Bytecodes::_fast_fputfield: ++ __ fsts(FSF, T2, 0); ++ break; ++ case Bytecodes::_fast_dputfield: ++ __ fstd(FSF, T2, 0 * wordSize); ++ break; ++ case Bytecodes::_fast_aputfield: ++ do_oop_store(_masm, Address(T3, T2, Address::times_1, 0), FSR, _bs->kind(), false); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ { ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++} ++ ++// used registers : T2, T3, T1 ++// T3 : cp_entry & cache ++// T2 : index & offset ++void TemplateTable::fast_accessfield(TosState state) { ++ transition(atos, state); ++ ++ const Register scratch = T11; ++ ++ // do the JVMTI work here to avoid disturbing the register state below ++ if (JvmtiExport::can_post_field_access()) { ++ // Check to see if a field access watch has been set before we take ++ // the time to call into the VM. 
++ Label L1; ++ __ li(AT, (intptr_t)JvmtiExport::get_field_access_count_addr()); ++ __ ldw(T3, AT, 0); ++ __ beq(T3, L1); ++ // access constant pool cache entry ++ __ get_cache_entry_pointer_at_bcp(T3, T1, 1); ++ __ push(FSR); ++ __ verify_oop(FSR); ++ // FSR: object pointer copied above ++ // T3: cache entry pointer ++ __ call_VM(NOREG, ++ CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), ++ FSR, T3); ++ __ pop(FSR); ++ __ BIND(L1); ++ } ++ ++ // access constant pool cache ++ __ get_cache_and_index_at_bcp(T3, T2, 1); ++ // replace index with field offset from cache entry ++ __ slll(AT, T2, Address::times_8); ++ __ addl(AT, T3, AT); ++ __ ldl(T2, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); ++ ++ { ++ __ ldl(AT, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); ++ __ move(scratch, 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ __ and_reg(scratch, scratch, AT); ++ ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++ ++ // FSR: object ++ __ verify_oop(FSR); ++ __ null_check(FSR); ++ // field addresses ++ __ addl(FSR, FSR, T2); ++ ++ // access field ++ switch (bytecode()) { ++ case Bytecodes::_fast_bgetfield: ++ __ ldb_signed(FSR, FSR, 0); ++ break; ++ case Bytecodes::_fast_sgetfield: ++ __ ldh_signed(FSR, FSR, 0); ++ break; ++ case Bytecodes::_fast_cgetfield: ++ __ ldhu(FSR, FSR, 0); ++ break; ++ case Bytecodes::_fast_igetfield: ++ __ ldw(FSR, FSR, 0); ++ break; ++ case Bytecodes::_fast_lgetfield: ++ __ stop("should not be rewritten"); ++ break; ++ case Bytecodes::_fast_fgetfield: ++ __ flds(FSF, FSR, 0); ++ break; ++ case Bytecodes::_fast_dgetfield: ++ __ fldd(FSF, FSR, 0); ++ break; ++ case Bytecodes::_fast_agetfield: ++ //add for compressedoops ++ __ load_heap_oop(FSR, Address(FSR, 0)); ++ __ verify_oop(FSR); ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ ++ { ++ Label notVolatile; ++ __ 
beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++} ++ ++// generator for _fast_iaccess_0, _fast_aaccess_0, _fast_faccess_0 ++// used registers : T1, T2, T3, T1 ++// T1 : obj & field address ++// T2 : off ++// T3 : cache ++// T1 : index ++void TemplateTable::fast_xaccess(TosState state) { ++ transition(vtos, state); ++ ++ const Register scratch = T11; ++ ++ // get receiver ++ __ ldl(T1, aaddress(0)); ++ // access constant pool cache ++ __ get_cache_and_index_at_bcp(T3, T2, 2); ++ __ slll(AT, T2, Address::times_8); ++ __ addl(AT, T3, AT); ++ __ ldl(T2, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())); ++ ++ { ++ __ ldl(AT, AT, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); ++ __ move(scratch, 1 << ConstantPoolCacheEntry::is_volatile_shift); ++ __ and_reg(scratch, scratch, AT); ++ ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++ ++ // make sure exception is reported in correct bcp range (getfield is ++ // next instruction) ++ __ addl(BCP, BCP, 1); ++ __ null_check(T1); ++ __ addl(T1, T1, T2); ++ ++ if (state == itos) { ++ __ ldw(FSR, T1, 0); ++ } else if (state == atos) { ++ __ load_heap_oop(FSR, Address(T1, 0)); ++ __ verify_oop(FSR); ++ } else if (state == ftos) { ++ __ flds(FSF, T1, 0); ++ } else { ++ ShouldNotReachHere(); ++ } ++ __ subl(BCP, BCP, 1); ++ ++ { ++ Label notVolatile; ++ __ beq(scratch, notVolatile); ++ volatile_barrier(); ++ __ BIND(notVolatile); ++ } ++} ++ ++ ++ ++//----------------------------------------------------------------------------- ++// Calls ++ ++void TemplateTable::count_calls(Register method, Register temp) { ++ // implemented elsewhere ++ ShouldNotReachHere(); ++} ++ ++// method, index, recv, flags: T1, T2, T3, T1 ++// byte_no = 2 for _invokevirtual, 1 else ++// T0 : return address ++// get the method & index of the invoke, and push the return address of ++// the 
invoke(first word in the frame) ++// this address is where the return code jmp to. ++// NOTE : this method will set T3&T1 as recv&flags ++void TemplateTable::prepare_invoke(int byte_no, ++ Register method, // linked method (or i-klass) ++ Register index, // itable index, MethodType, etc. ++ Register recv, // if caller wants to see it ++ Register flags // if caller wants to test it ++ ) { ++ // determine flags ++ const Bytecodes::Code code = bytecode(); ++ const bool is_invokeinterface = code == Bytecodes::_invokeinterface; ++ const bool is_invokedynamic = code == Bytecodes::_invokedynamic; ++ const bool is_invokehandle = code == Bytecodes::_invokehandle; ++ const bool is_invokevirtual = code == Bytecodes::_invokevirtual; ++ const bool is_invokespecial = code == Bytecodes::_invokespecial; ++ const bool load_receiver = (recv != noreg); ++ const bool save_flags = (flags != noreg); ++ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic),""); ++ assert(save_flags == (is_invokeinterface || is_invokevirtual), "need flags for vfinal"); ++ assert(flags == noreg || flags == T1, "error flags reg."); ++ assert(recv == noreg || recv == T3, "error recv reg."); ++ ++ // setup registers & access constant pool cache ++ if(recv == noreg) recv = T3; ++ if(flags == noreg) flags = T1; ++ assert_different_registers(method, index, recv, flags); ++ ++ // save 'interpreter return address' ++ __ save_bcp(); ++ ++ load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); ++ ++ if (is_invokedynamic || is_invokehandle) { ++ Label L_no_push; ++ __ move(AT, (1 << ConstantPoolCacheEntry::has_appendix_shift)); ++ __ and_reg(AT, AT, flags); ++ __ beq(AT, L_no_push); ++ // Push the appendix as a trailing parameter. ++ // This must be done before we get the receiver, ++ // since the parameter_size includes it. 
++ Register tmp = SSR; ++ __ push(tmp); ++ __ move(tmp, index); ++ assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0"); ++ __ load_resolved_reference_at_index(index, tmp); ++ __ pop(tmp); ++ __ push(index); // push appendix (MethodType, CallSite, etc.) ++ __ BIND(L_no_push); ++ } ++ ++ // load receiver if needed (after appendix is pushed so parameter size is correct) ++ // Note: no return address pushed yet ++ if (load_receiver) { ++ __ move(AT, ConstantPoolCacheEntry::parameter_size_mask); ++ __ and_reg(recv, flags, AT); ++ // Since we won't push RA on stack, no_return_pc_pushed_yet should be 0. ++ const int no_return_pc_pushed_yet = 0; // argument slot correction before we push return address ++ const int receiver_is_at_end = -1; // back off one slot to get receiver ++ Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end); ++ __ ldl(recv, recv_addr); ++ __ verify_oop(recv); ++ } ++ if(save_flags) { ++ __ move(BCP, flags); ++ } ++ ++ // compute return type ++ __ srll(flags, flags, ConstantPoolCacheEntry::tos_state_shift); ++ __ and_imm8(flags, flags, 0xf); ++ ++ // Make sure we don't need to mask flags for tos_state_shift after the above shift ++ ConstantPoolCacheEntry::verify_tos_state_shift(); ++ // load return address ++ { ++ const address table = (address) Interpreter::invoke_return_entry_table_for(code); ++ __ li(AT, (long)table); ++ __ slll(flags, flags, LogBytesPerWord); ++ __ addl(AT, AT, flags); ++ __ ldl(RA, AT, 0); ++ } ++ ++ if (save_flags) { ++ __ move(flags, BCP); ++ __ restore_bcp(); ++ } ++} ++ ++// used registers : T0, T3, T1, T2 ++// T3 : recv, this two register using convention is by prepare_invoke ++// T1 : flags, klass ++// Rmethod : method, index must be Rmethod ++void TemplateTable::invokevirtual_helper(Register index, ++ Register recv, ++ Register flags) { ++ ++ assert_different_registers(index, recv, flags, T2); ++ ++ // Test for an invoke of a 
final method ++ Label notFinal; ++ __ move(AT, (1 << ConstantPoolCacheEntry::is_vfinal_shift)); ++ __ and_reg(AT, flags, AT); ++ __ beq(AT, notFinal); ++ Register method = index; // method must be Rmethod ++ assert(method == Rmethod, "methodOop must be Rmethod for interpreter calling convention"); ++ ++ // do the call - the index is actually the method to call ++ // the index is indeed methodOop, for this is vfinal, ++ // see ConstantPoolCacheEntry::set_method for more info ++ ++ //__ verify_oop(method); ++ ++ // It's final, need a null check here! ++ __ null_check(recv); ++ ++ // profile this call ++ __ profile_final_call(T2); ++ ++ // T2: tmp, used for mdp ++ // method: callee ++ // T12: tmp ++ // is_virtual: true ++ __ profile_arguments_type(T2, method, T12, true); ++ ++ __ jump_from_interpreted(method, T2); ++ ++ __ BIND(notFinal); ++ ++ // get receiver klass ++ __ null_check(recv, oopDesc::klass_offset_in_bytes()); ++ __ load_klass(T2, recv); ++ //__ verify_oop(T2); ++ ++ // profile this call ++ __ profile_virtual_call(T2, T0, T1); ++ ++ // get target methodOop & entry point ++ const int base = InstanceKlass::vtable_start_offset() * wordSize; ++ assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); ++ __ slll(AT, index, Address::times_ptr); ++ // T2: receiver ++ __ addl(AT, T2, AT); ++ //this is a ualign read ++ __ ldl(method, AT, base + vtableEntry::method_offset_in_bytes()); ++ __ profile_arguments_type(T2, method, T12, true); ++ __ jump_from_interpreted(method, T2); ++ ++} ++ ++void TemplateTable::invokevirtual(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f2_byte, "use this argument"); ++ prepare_invoke(byte_no, Rmethod, NOREG, T3, T1); ++ // now recv & flags in T3, T1 ++ invokevirtual_helper(Rmethod, T3, T1); ++} ++ ++// T12 : entry ++// Rmethod : method ++void TemplateTable::invokespecial(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f1_byte, "use this argument"); ++ 
prepare_invoke(byte_no, Rmethod, NOREG, T3); ++ // now recv & flags in T3, T1 ++ __ verify_oop(T3); ++ __ null_check(T3); ++ __ profile_call(T12); ++ ++ // T11: tmp, used for mdp ++ // Rmethod: callee ++ // T12: tmp ++ // is_virtual: false ++ __ profile_arguments_type(T11, Rmethod, T12, false); ++ ++ __ jump_from_interpreted(Rmethod, T12); ++ __ move(T0, T3); ++} ++ ++void TemplateTable::invokestatic(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f1_byte, "use this argument"); ++ prepare_invoke(byte_no, Rmethod, NOREG); ++ //__ verify_oop(Rmethod); ++ ++ __ profile_call(T12); ++ ++ // T11: tmp, used for mdp ++ // Rmethod: callee ++ // T12: tmp ++ // is_virtual: false ++ __ profile_arguments_type(T11, Rmethod, T12, false); ++ ++ __ jump_from_interpreted(Rmethod, T12); ++} ++ ++void TemplateTable::fast_invokevfinal(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f2_byte, "use this argument"); ++ __ stop("fast_invokevfinal not used on sw64"); ++} ++ ++// used registers : T0, T1, T2, T3, T1, T7 ++// T0 : itable, vtable, entry ++// T1 : interface ++// T3 : receiver ++// T1 : flags, klass ++// Rmethod : index, method, this is required by interpreter_entry ++void TemplateTable::invokeinterface(int byte_no) { ++ transition(vtos, vtos); ++ //this method will use T1-T4 and T0 ++ assert(byte_no == f1_byte, "use this argument"); ++ prepare_invoke(byte_no, T2, Rmethod, T3, T1); ++ // T2: reference klass ++ // Rmethod: method ++ // T3: receiver ++ // T1: flags ++ ++ // Special case of invokeinterface called for virtual method of ++ // java.lang.Object. See cpCacheOop.cpp for details. ++ // This code isn't produced by javac, but could be produced by ++ // another compliant java compiler. 
++ Label notMethod; ++ __ move(AT, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift)); ++ __ and_reg(AT, T1, AT); ++ __ beq(AT, notMethod); ++ ++ invokevirtual_helper(Rmethod, T3, T1); ++ __ BIND(notMethod); ++ // Get receiver klass into T1 - also a null check ++ //add for compressedoops ++ __ load_klass(T1, T3); ++ // __ verify_oop(T1); ++ ++ Label no_such_interface, no_such_method; ++ ++ // Receiver subtype check against REFC. ++ // Superklass in T2. Subklass in T1. ++ __ lookup_interface_method(// inputs: rec. class, interface, itable index ++ T1, T2, noreg, ++ // outputs: scan temp. reg, scan temp. reg ++ T0, FSR, ++ no_such_interface, ++ /*return_method=*/false); ++ ++ ++ // profile this call ++ __ profile_virtual_call(T1, T0, FSR); ++ ++ // Get declaring interface class from method, and itable index ++ __ ld_ptr(T2, Rmethod, in_bytes(Method::const_offset())); ++ __ ld_ptr(T2, T2, in_bytes(ConstMethod::constants_offset())); ++ __ ld_ptr(T2, T2, ConstantPool::pool_holder_offset_in_bytes()); ++ __ ldw(Rmethod, Rmethod, in_bytes(Method::itable_index_offset())); ++ __ add_simm16(Rmethod, Rmethod, (-1) * Method::itable_index_max); ++ __ subw(Rmethod, R0, Rmethod); ++ ++ __ lookup_interface_method(// inputs: rec. class, interface, itable index ++ T1, T2, Rmethod, ++ // outputs: method, scan temp. reg ++ Rmethod, T0, ++ no_such_interface); ++ ++ // Rmethod: Method* to call ++ // T3: receiver ++ // Check for abstract method error ++ // Note: This should be done more efficiently via a throw_abstract_method_error ++ // interpreter entry point and a conditional jump to it in case of a null ++ // method. ++ __ beq(Rmethod, no_such_method); ++ ++ __ profile_arguments_type(T1, Rmethod, T0, true); ++ ++ // do the call ++ // T3: receiver ++ // Rmethod: Method* ++ __ jump_from_interpreted(Rmethod, T1); ++ __ should_not_reach_here(); ++ ++ // exception handling code follows... 
++ // note: must restore interpreter registers to canonical ++ // state for exception handling to work correctly! ++ ++ __ BIND(no_such_method); ++ // throw exception ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); ++ // the call_VM checks for exception, so we should never return here. ++ __ should_not_reach_here(); ++ ++ __ BIND(no_such_interface); ++ // throw exception ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_IncompatibleClassChangeError)); ++ // the call_VM checks for exception, so we should never return here. ++ __ should_not_reach_here(); ++ ++} ++ ++ ++void TemplateTable::invokehandle(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f1_byte, "use this argument"); ++ const Register T2_method = Rmethod; ++ const Register FSR_mtype = FSR; ++ const Register T3_recv = T3; ++ ++ if (!EnableInvokeDynamic) { ++ // rewriter does not generate this bytecode ++ __ should_not_reach_here(); ++ return; ++ } ++ ++ prepare_invoke(byte_no, T2_method, FSR_mtype, T3_recv); ++ __ verify_oop(T3_recv); ++ __ null_check(T3_recv); ++ ++ // T12: MethodType object (from cpool->resolved_references[f1], if necessary) ++ // T2_method: MH.invokeExact_MT method (from f2) ++ ++ // Note: T12 is already pushed (if necessary) by prepare_invoke ++ ++ __ profile_final_call(T12); ++ ++ // T11: tmp, used for mdp ++ // T2_method: callee ++ // T12: tmp ++ // is_virtual: true ++ __ profile_arguments_type(T11, T2_method, T12, true); ++ ++ __ jump_from_interpreted(T2_method, T12); ++} ++ ++ void TemplateTable::invokedynamic(int byte_no) { ++ transition(vtos, vtos); ++ assert(byte_no == f1_byte, "use this argument"); ++ ++ if (!EnableInvokeDynamic) { ++ // We should not encounter this bytecode if !EnableInvokeDynamic. ++ // The verifier will stop it. However, if we get past the verifier, ++ // this will stop the thread in a reasonable way, without crashing the JVM. 
++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_IncompatibleClassChangeError)); ++ // the call_VM checks for exception, so we should never return here. ++ __ should_not_reach_here(); ++ return; ++ } ++ ++ //const Register Rmethod = T2; ++ const Register T2_callsite = T2; ++ ++ prepare_invoke(byte_no, Rmethod, T2_callsite); ++ ++ // T2: CallSite object (from cpool->resolved_references[f1]) ++ // Rmethod: MH.linkToCallSite method (from f2) ++ ++ // Note: T2_callsite is already pushed by prepare_invoke ++ // profile this call ++ __ profile_call(T12); ++ ++ // T11: tmp, used for mdp ++ // Rmethod: callee ++ // T12: tmp ++ // is_virtual: false ++ __ profile_arguments_type(T11, Rmethod, T12, false); ++ ++ __ verify_oop(T2_callsite); ++ ++ __ jump_from_interpreted(Rmethod, T12); ++ } ++ ++//---------------------------------------------------------------------------------------------------- ++// Allocation ++// T1 : tags & buffer end & thread ++// T2 : object end ++// T3 : klass ++// T1 : object size ++// A1 : cpool ++// A2 : cp index ++// return object in FSR ++void TemplateTable::_new() { ++ transition(vtos, atos); ++ __ get_unsigned_2_byte_index_at_bcp(A2, 1); ++ ++ Label slow_case; ++ Label done; ++ Label initialize_header; ++ Label initialize_object; // including clearing the fields ++ Label allocate_shared; ++ ++ // get InstanceKlass in T3 ++ __ get_cpool_and_tags(A1, T1); ++ ++ __ slll(AT, A2, Address::times_8); ++ __ addl(AT, A1, AT); ++ __ memb(); ++ __ ldl(T3, AT, sizeof(ConstantPool)); ++ ++ // make sure the class we're about to instantiate has been resolved. 
++ // Note: slow_case does a pop of stack, which is why we loaded class/pushed above ++ const int tags_offset = Array::base_offset_in_bytes(); ++ __ addl(T1, T1, A2); ++ __ ldbu(AT, T1, tags_offset); ++ __ add_simm16(AT, AT, - (int)JVM_CONSTANT_Class); ++ __ bne(AT, slow_case); ++ ++ ++ // make sure klass is initialized & doesn't have finalizer ++ // make sure klass is fully initialized ++ __ ldhu(T1, T3, in_bytes(InstanceKlass::init_state_offset())); ++ __ add_simm16(AT, T1, - (int)InstanceKlass::fully_initialized); ++ __ bne(AT, slow_case); ++ ++ // has_finalizer ++ __ ldw(T0, T3, in_bytes(Klass::layout_helper_offset())); ++ __ and_imm8(AT, T0, Klass::_lh_instance_slow_path_bit); ++ __ bne(AT, slow_case); ++ ++ // Allocate the instance ++ // 1) Try to allocate in the TLAB ++ // 2) if fail and the object is large allocate in the shared Eden ++ // 3) if the above fails (or is not applicable), go to a slow case ++ // (creates a new TLAB, etc.) ++ ++ const bool allow_shared_alloc = ++ Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; ++ ++ const Register thread = S2thread; ++ ++ if (UseTLAB) { ++ // get tlab_top ++ __ ldl(FSR, thread, in_bytes(JavaThread::tlab_top_offset())); ++ // get tlab_end ++ __ ldl(AT, thread, in_bytes(JavaThread::tlab_end_offset())); ++ __ addl(T2, FSR, T0); ++ __ cmplt(AT, AT, T2); ++ __ bne(AT, allow_shared_alloc ? 
allocate_shared : slow_case); ++ __ stl(T2, thread, in_bytes(JavaThread::tlab_top_offset())); ++ ++ if (ZeroTLAB) { ++ // the fields have been already cleared ++ __ beq(R0, initialize_header); ++ } else { ++ // initialize both the header and fields ++ __ beq(R0, initialize_object); ++ } ++ } ++ ++ // Allocation in the shared Eden , if allowed ++ // T0 : instance size in words ++ if(allow_shared_alloc){ ++ __ BIND(allocate_shared); ++ ++ Label retry; ++ Address heap_top(T1); ++ __ set64(T1, (long)Universe::heap()->top_addr()); ++ __ ldl(FSR, heap_top); ++ ++ __ BIND(retry); ++ __ set64(AT, (long)Universe::heap()->end_addr()); ++ __ ldl(AT, AT, 0); ++ __ addl(T2, FSR, T0); ++ __ cmplt(AT, AT, T2); ++ __ bne(AT, slow_case); ++ ++ // Compare FSR with the top addr, and if still equal, store the new ++ // top addr in T2 at the address of the top addr pointer. Sets AT if was ++ // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. ++ // ++ // FSR: object begin ++ // T2: object end ++ // T0: instance size in words ++ ++ // if someone beat us on the allocation, try again, otherwise continue ++ __ cmpxchg(T2, heap_top, FSR); ++ __ beq(AT, retry); ++ ++ __ incr_allocated_bytes(thread, T0, 0); ++ } ++ ++ if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { ++ // The object is initialized before the header. If the object size is ++ // zero, go directly to the header initialization. ++ __ BIND(initialize_object); ++ __ set64(AT, - sizeof(oopDesc)); ++ __ addl(T0, T0, AT); ++ __ beq(T0, initialize_header); ++ ++ ++ // initialize remaining object fields: T0 is a multiple of 2 ++ { ++ Label loop; ++ __ addl(T1, FSR, T0); ++ ++ __ BIND(loop); ++ __ add_simm16(T1, T1, -oopSize); //ZHJ ++ __ stl(R0, T1, sizeof(oopDesc) + 0 * oopSize); ++ __ bne(T1, FSR, loop); //dont clear header ++ } ++ ++ //klass in T3, ++ // initialize object header only. 
++ __ BIND(initialize_header); ++ if (UseBiasedLocking) { ++ __ ldl(AT, T3, in_bytes(Klass::prototype_header_offset())); ++ __ stl(AT, FSR, oopDesc::mark_offset_in_bytes()); ++ } else { ++ __ set64(AT, (long)markOopDesc::prototype()); ++ __ stl(AT, FSR, oopDesc::mark_offset_in_bytes()); ++ } ++ ++ __ store_klass_gap(FSR, R0); ++ __ store_klass(FSR, T3); ++ ++ { ++ SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0); ++ // Trigger dtrace event for fastpath ++ __ push(atos); ++ __ call_VM_leaf( ++ CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), FSR); ++ __ pop(atos); ++ ++ } ++ __ beq(R0, done); ++ } ++ ++ // slow case ++ __ BIND(slow_case); ++ call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), A1, A2); ++ ++ // continue ++ __ BIND(done); ++ if(UseWmemb) ++ __ wmemb(); ++ else ++ __ memb(); ++} ++ ++void TemplateTable::newarray() { ++ transition(itos, atos); ++ __ ldbu(A1, at_bcp(1)); ++ //type, count ++ call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), A1, FSR); ++ if(UseWmemb) ++ __ wmemb(); ++ else ++ __ memb(); ++} ++ ++void TemplateTable::anewarray() { ++ transition(itos, atos); ++ __ get_2_byte_integer_at_bcp(A2, AT, 1); ++ __ huswap(A2); ++ __ get_constant_pool(A1); ++ // cp, index, count ++ call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), A1, A2, FSR); ++ if(UseWmemb) ++ __ wmemb(); ++ else ++ __ memb(); ++} ++ ++void TemplateTable::arraylength() { ++ transition(atos, itos); ++ __ null_check(FSR, arrayOopDesc::length_offset_in_bytes()); ++ __ ldw(FSR, FSR, arrayOopDesc::length_offset_in_bytes()); ++} ++ ++// when invoke gen_subtype_check, super in T3, sub in T2, object in FSR(it's always) ++// T2 : sub klass ++// T3 : cpool ++// T3 : super klass ++void TemplateTable::checkcast() { ++ transition(atos, atos); ++ Label done, is_null, ok_is_subtype, quicked, resolved; ++ __ beq(FSR, is_null); ++ ++ // Get cpool & tags index ++ __ get_cpool_and_tags(T3, T1); ++ __ 
get_unsigned_2_byte_index_at_bcp(T2, 1); ++ ++ // See if bytecode has already been quicked ++ __ addl(AT, T1, T2); ++ __ ldbu(AT, AT, Array::base_offset_in_bytes()); ++ __ add_simm16(AT, AT, - (int)JVM_CONSTANT_Class); ++ __ beq(AT, quicked); ++ ++ // In InterpreterRuntime::quicken_io_cc, lots of new classes may be loaded. ++ // Then, GC will move the object in V0 to another places in heap. ++ // Therefore, We should never save such an object in register. ++ // Instead, we should save it in the stack. It can be modified automatically by the GC thread. ++ // After GC, the object address in FSR is changed to a new place. ++ // ++ __ push(atos); ++ const Register thread = S2thread; ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); ++ __ get_vm_result_2(T3, thread); ++ __ pop_ptr(FSR); ++ __ beq(R0, resolved); ++ ++ // klass already in cp, get superklass in T3 ++ __ BIND(quicked); ++ __ slll(AT, T2, Address::times_8); ++ __ addl(AT, T3, AT); ++ __ memb(); ++ __ ldl(T3, AT, sizeof(ConstantPool)); ++ ++ __ BIND(resolved); ++ ++ // get subklass in T2 ++ //add for compressedoops ++ __ load_klass(T2, FSR); ++ // Superklass in T3. Subklass in T2. ++ __ gen_subtype_check(T3, T2, ok_is_subtype); ++ ++ // Come here on failure ++ // object is at FSR ++ __ jmp(Interpreter::_throw_ClassCastException_entry); ++ ++ // Come here on success ++ __ BIND(ok_is_subtype); ++ ++ // Collect counts on whether this check-cast sees NULLs a lot or not. 
++ if (ProfileInterpreter) { ++ __ beq(R0, done); ++ __ BIND(is_null); ++ __ profile_null_seen(T3); ++ } else { ++ __ BIND(is_null); ++ } ++ __ BIND(done); ++} ++ ++// i use T3 as cpool, T1 as tags, T2 as index ++// object always in FSR, superklass in T3, subklass in T2 ++void TemplateTable::instanceof() { ++ transition(atos, itos); ++ Label done, is_null, ok_is_subtype, quicked, resolved; ++ ++ __ beq(FSR, is_null); ++ ++ // Get cpool & tags index ++ __ get_cpool_and_tags(T3, T1); ++ // get index ++ __ get_unsigned_2_byte_index_at_bcp(T2, 1); ++ ++ // See if bytecode has already been quicked ++ // quicked ++ __ addl(AT, T1, T2); ++ __ ldbu(AT, AT, Array::base_offset_in_bytes()); ++ __ add_simm16(AT, AT, - (int)JVM_CONSTANT_Class); ++ __ beq(AT, quicked); ++ ++ __ push(atos); ++ const Register thread = S2thread; ++ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); ++ __ get_vm_result_2(T3, thread); ++ __ pop_ptr(FSR); ++ __ beq(R0, resolved); ++ ++ // get superklass in T3, subklass in T2 ++ __ BIND(quicked); ++ __ slll(AT, T2, Address::times_8); ++ __ addl(AT, T3, AT); ++ __ memb(); ++ __ ldl(T3, AT, sizeof(ConstantPool)); ++ ++ __ BIND(resolved); ++ // get subklass in T2 ++ //add for compressedoops ++ __ load_klass(T2, FSR); ++ ++ // Superklass in T3. Subklass in T2. ++ __ gen_subtype_check(T3, T2, ok_is_subtype); ++ // Come here on failure ++ __ move(FSR, R0); ++ __ beq(R0, done); ++ ++ // Come here on success ++ __ BIND(ok_is_subtype); ++ __ move(FSR, 1); ++ ++ // Collect counts on whether this test sees NULLs a lot or not. 
++ if (ProfileInterpreter) { ++ __ beq(R0, done); ++ __ BIND(is_null); ++ __ profile_null_seen(T3); ++ } else { ++ __ BIND(is_null); // same as 'done' ++ } ++ __ BIND(done); ++ // FSR = 0: obj == NULL or obj is not an instanceof the specified klass ++ // FSR = 1: obj != NULL and obj is an instanceof the specified klass ++} ++ ++//-------------------------------------------------------- ++//-------------------------------------------- ++// Breakpoints ++void TemplateTable::_breakpoint() { ++ // Note: We get here even if we are single stepping.. ++ // jbug inists on setting breakpoints at every bytecode ++ // even if we are in single step mode. ++ ++ transition(vtos, vtos); ++ ++ // get the unpatched byte code ++ __ get_method(A1); ++ __ call_VM(NOREG, ++ CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::get_original_bytecode_at), ++ A1, BCP); ++ __ move(Rnext, V0); ++ //save the Rnext in the stack, otherwise, it will be destroied by InterpreterRuntime::_breakpoint below. jx ++ __ stl(Rnext, SP, -8); ++ __ subl(SP, SP, 8); ++ // post the breakpoint event ++ __ get_method(A1); ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), A1, BCP); ++ //pop the value of Rnext previously stored in stack. jx ++ __ ldl(Rnext, SP, 0); ++ __ addl(SP, SP, 8); ++ // complete the execution of original bytecode ++ ++ __ dispatch_only_normal(vtos); ++} ++ ++//----------------------------------------------------------------------------- ++// Exceptions ++ ++void TemplateTable::athrow() { ++ transition(atos, vtos); ++ __ null_check(FSR); ++ __ jmp(Interpreter::throw_exception_entry()); ++} ++ ++//----------------------------------------------------------------------------- ++// Synchronization ++// ++// Note: monitorenter & exit are symmetric routines; which is reflected ++// in the assembly code structure as well ++// ++// Stack layout: ++// ++// [expressions ] <--- SP = expression stack top ++// .. 
++// [expressions ] ++// [monitor entry] <--- monitor block top = expression stack bot ++// .. ++// [monitor entry] ++// [frame data ] <--- monitor block bot ++// ... ++// [return addr ] <--- FP ++ ++// we use T2 as monitor entry pointer, T3 as monitor top pointer, c_rarg0 as free slot pointer ++// object always in FSR ++void TemplateTable::monitorenter() { ++ transition(atos, vtos); ++ ++ // check for NULL object ++ __ null_check(FSR); ++ ++ const Address monitor_block_top(FP, frame::interpreter_frame_monitor_block_top_offset ++ * wordSize); ++ const int entry_size = (frame::interpreter_frame_monitor_size()* wordSize); ++ Label allocated; ++ ++ // initialize entry pointer ++ __ move(c_rarg0, R0); ++ ++ // find a free slot in the monitor block (result in c_rarg0) ++ { ++ Label entry, loop, exit, next; ++ __ ldl(T2, monitor_block_top); ++ __ add_simm16(T3, FP, frame::interpreter_frame_initial_sp_offset * wordSize); ++ __ beq(R0, entry); ++ ++ // free slot? ++ __ BIND(loop); ++ __ ldl(AT, T2, BasicObjectLock::obj_offset_in_bytes()); ++ __ bne(AT, next); ++ __ move(c_rarg0, T2); ++ ++ __ BIND(next); ++ __ beq(FSR, AT, exit); ++ __ add_simm16(T2, T2, entry_size); ++ ++ __ BIND(entry); ++ __ bne(T3, T2, loop); ++ __ BIND(exit); ++ } ++ ++ __ bne(c_rarg0, allocated); ++ ++ // allocate one if there's no free slot ++ { ++ Label entry, loop; ++ // 1. compute new pointers // SP: old expression stack top ++ __ ldl(c_rarg0, monitor_block_top); ++ __ add_simm16(SP, SP, - entry_size); ++ __ add_simm16(c_rarg0, c_rarg0, - entry_size); ++ __ stl(c_rarg0, monitor_block_top); ++ __ move(T3, SP); ++ __ beq(R0, entry); ++ ++ // 2. move expression stack contents ++ __ BIND(loop); ++ __ ldl(AT, T3, entry_size); ++ __ stl(AT, T3, 0); ++ __ add_simm16(T3, T3, wordSize); ++ __ BIND(entry); ++ __ bne(T3, c_rarg0, loop); ++ } ++ ++ __ BIND(allocated); ++ // Increment bcp to point to the next bytecode, ++ // so exception handling for async. exceptions work correctly. 
++ // The object has already been poped from the stack, so the ++ // expression stack looks correct. ++ __ addl(BCP, BCP, 1); ++ __ stl(FSR, c_rarg0, BasicObjectLock::obj_offset_in_bytes()); ++ __ lock_object(c_rarg0); ++ // check to make sure this monitor doesn't cause stack overflow after locking ++ __ save_bcp(); // in case of exception ++ __ generate_stack_overflow_check(0); ++ // The bcp has already been incremented. Just need to dispatch to next instruction. ++ ++ __ dispatch_next(vtos); ++} ++ ++// T2 : top ++// c_rarg0 : entry ++void TemplateTable::monitorexit() { ++ transition(atos, vtos); ++ ++ __ null_check(FSR); ++ ++ const int entry_size =(frame::interpreter_frame_monitor_size()* wordSize); ++ Label found; ++ ++ // find matching slot ++ { ++ Label entry, loop; ++ __ ldl(c_rarg0, FP, frame::interpreter_frame_monitor_block_top_offset * wordSize); ++ __ add_simm16(T2, FP, frame::interpreter_frame_initial_sp_offset * wordSize); ++ __ beq(R0, entry); ++ ++ __ BIND(loop); ++ __ ldl(AT, c_rarg0, BasicObjectLock::obj_offset_in_bytes()); ++ __ beq(FSR, AT, found); ++ __ add_simm16(c_rarg0, c_rarg0, entry_size); ++ __ BIND(entry); ++ __ bne(T2, c_rarg0, loop); ++ } ++ ++ // error handling. 
Unlocking was not block-structured ++ Label end; ++ __ call_VM(NOREG, CAST_FROM_FN_PTR(address, ++ InterpreterRuntime::throw_illegal_monitor_state_exception)); ++ __ should_not_reach_here(); ++ ++ // call run-time routine ++ // c_rarg0: points to monitor entry ++ __ BIND(found); ++ __ push(FSR); ++ __ unlock_object(c_rarg0); ++ __ pop(FSR); ++ __ BIND(end); ++} ++ ++ ++// Wide instructions ++void TemplateTable::wide() { ++ transition(vtos, vtos); ++ __ ldbu(Rnext, at_bcp(1)); ++ __ slll(T12, Rnext, Address::times_8); ++ __ li(AT, (long)Interpreter::_wentry_point); ++ __ addl(AT, T12, AT); ++ __ ldl(T12, AT, 0); ++ __ jmp(T12); ++} ++ ++ ++void TemplateTable::multianewarray() { ++ transition(vtos, atos); ++ // last dim is on top of stack; we want address of first one: ++ // first_addr = last_addr + (ndims - 1) * wordSize ++ __ ldbu(A1, at_bcp(3)); // dimension ++ __ subl(A1, A1, 1); ++ __ slll(A1, A1, Address::times_8); ++ __ addl(A1, SP, A1); // now A1 pointer to the count array on the stack ++ call_VM(FSR, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), A1); ++ __ ldbu(AT, at_bcp(3)); ++ __ slll(AT, AT, Address::times_8); ++ __ addl(SP, SP, AT); ++ __ memb(); ++} ++#endif // !CC_INTERP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/templateTable_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/templateTable_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/templateTable_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/templateTable_sw64.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,42 @@ ++/* ++ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_TEMPLATETABLE_SW64_HPP ++#define CPU_SW64_VM_TEMPLATETABLE_SW64_HPP ++ ++ static void prepare_invoke(int byte_no, ++ Register method, ++ Register index = noreg, ++ Register recv = noreg, ++ Register flags = noreg ++ ); ++ static void invokevirtual_helper(Register index, Register recv, ++ Register flags); ++ static void volatile_barrier(); ++ ++ // Helpers ++ static void index_check(Register array, Register index); ++ static void index_check_without_pop(Register array, Register index); ++ ++#endif // CPU_SW64_VM_TEMPLATETABLE_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vmreg_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/vmreg_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/vmreg_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vmreg_sw64.cpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,50 @@ ++/* ++ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/assembler.hpp" ++#include "code/vmreg.hpp" ++ ++ ++ ++void VMRegImpl::set_regName() { ++ Register reg = ::as_Register(0); ++ int i; ++ for (i = 0; i < ConcreteRegisterImpl::max_gpr ; ) { ++ regName[i++] = reg->name(); ++ regName[i++] = reg->name(); ++ reg = reg->successor(); ++ } ++ ++ FloatRegister freg = ::as_FloatRegister(0); ++ for ( ; i < ConcreteRegisterImpl::max_fpr ; ) { ++ regName[i++] = freg->name(); ++ regName[i++] = freg->name(); ++ freg = freg->successor(); ++ } ++ ++ for ( ; i < ConcreteRegisterImpl::number_of_registers ; i ++ ) { ++ regName[i] = "NON-GPR-FPR"; ++ } ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vmreg_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/vmreg_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/vmreg_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vmreg_sw64.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,34 @@ ++/* ++ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++ #ifndef CPU_SW64_VM_VMREG_SW64_HPP ++ #define CPU_SW64_VM_VMREG_SW64_HPP ++ ++ bool is_Register(); ++ Register as_Register(); ++ ++ bool is_FloatRegister(); ++ FloatRegister as_FloatRegister(); ++ ++ #endif // CPU_SW64_VM_VMREG_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vmreg_sw64.inline.hpp afu8u/hotspot/src/cpu/sw64/vm/vmreg_sw64.inline.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/vmreg_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vmreg_sw64.inline.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,66 @@ ++/* ++ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_VMREG_SW64_INLINE_HPP ++#define CPU_SW64_VM_VMREG_SW64_INLINE_HPP ++ ++inline VMReg RegisterImpl::as_VMReg() { ++ if( this==noreg ) return VMRegImpl::Bad(); ++ //FIXME why encoding << 1? what is the meaning of the VMReg's value ++ return VMRegImpl::as_VMReg(encoding() << 1 ); ++} ++ ++inline VMReg FloatRegisterImpl::as_VMReg() { ++ return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr); ++} ++ ++inline bool VMRegImpl::is_Register() { ++ return (unsigned int) value() < (unsigned int) ConcreteRegisterImpl::max_gpr; ++} ++ ++inline bool VMRegImpl::is_FloatRegister() { ++ return value() >= ConcreteRegisterImpl::max_gpr && value() < ConcreteRegisterImpl::max_fpr; ++} ++ ++inline Register VMRegImpl::as_Register() { ++ ++ assert( is_Register(), "must be"); ++ return ::as_Register(value() >> 1); ++} ++ ++inline FloatRegister VMRegImpl::as_FloatRegister() { ++ assert( is_FloatRegister(), "must be" ); ++ assert( is_even(value()), "must be" ); ++ return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1); ++} ++ ++inline bool VMRegImpl::is_concrete() { ++ assert(is_reg(), "must be"); ++ if(is_Register()) return true; ++ if(is_FloatRegister()) return true; ++ assert(false, "what 
register?"); ++ return false; ++} ++ ++#endif // CPU_SW64_VM_VMREG_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vmStructs_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/vmStructs_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/vmStructs_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vmStructs_sw64.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,67 @@ ++/* ++ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef CPU_SW64_VM_VMSTRUCTS_SW64_HPP ++#define CPU_SW64_VM_VMSTRUCTS_SW64_HPP ++ ++// These are the CPU-specific fields, types and integer ++// constants required by the Serviceability Agent. This file is ++// referenced by vmStructs.cpp. 
++ ++#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ ++ \ ++ /******************************/ \ ++ /* JavaCallWrapper */ \ ++ /******************************/ \ ++ /******************************/ \ ++ /* JavaFrameAnchor */ \ ++ /******************************/ \ ++ volatile_nonstatic_field(JavaFrameAnchor, _last_Java_fp, intptr_t*) \ ++ \ ++ ++ /* NOTE that we do not use the last_entry() macro here; it is used */ ++ /* in vmStructs__.hpp's VM_STRUCTS_OS_CPU macro (and must */ ++ /* be present there) */ ++ ++ ++#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \ ++ ++ /* NOTE that we do not use the last_entry() macro here; it is used */ ++ /* in vmStructs__.hpp's VM_TYPES_OS_CPU macro (and must */ ++ /* be present there) */ ++ ++ ++#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \ ++ ++ /* NOTE that we do not use the last_entry() macro here; it is used */ ++ /* in vmStructs__.hpp's VM_INT_CONSTANTS_OS_CPU macro (and must */ ++ /* be present there) */ ++ ++#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) \ ++ ++ /* NOTE that we do not use the last_entry() macro here; it is used */ ++ /* in vmStructs__.hpp's VM_LONG_CONSTANTS_OS_CPU macro (and must */ ++ /* be present there) */ ++ ++#endif // CPU_SW64_VM_VMSTRUCTS_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ 
afu8u/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.cpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,88 @@ ++/* ++ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "memory/allocation.inline.hpp" ++#include "vm_version_ext_sw64.hpp" ++ ++// VM_Version_Ext statics ++int VM_Version_Ext::_no_of_threads = 0; ++int VM_Version_Ext::_no_of_cores = 0; ++int VM_Version_Ext::_no_of_sockets = 0; ++bool VM_Version_Ext::_initialized = false; ++char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0}; ++char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0}; ++ ++void VM_Version_Ext::initialize_cpu_information(void) { ++ // do nothing if cpu info has been initialized ++ if (_initialized) { ++ return; ++ } ++ ++ int core_id = -1; ++ int chip_id = -1; ++ int len = 0; ++ char* src_string = NULL; ++ ++ _no_of_cores = os::processor_count(); ++ _no_of_threads = _no_of_cores; ++ _no_of_sockets = _no_of_cores; ++ snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "sw_64"); ++ snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "%s", cpu_features()); ++ _initialized = true; ++} ++ ++int VM_Version_Ext::number_of_threads(void) { ++ initialize_cpu_information(); ++ return _no_of_threads; ++} ++ ++int VM_Version_Ext::number_of_cores(void) { ++ initialize_cpu_information(); ++ return _no_of_cores; ++} ++ ++int VM_Version_Ext::number_of_sockets(void) { ++ initialize_cpu_information(); ++ return _no_of_sockets; ++} ++ ++const char* VM_Version_Ext::cpu_name(void) { ++ initialize_cpu_information(); ++ char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing); ++ if (NULL == tmp) { ++ return NULL; ++ } ++ strncpy(tmp, _cpu_name, CPU_TYPE_DESC_BUF_SIZE); ++ return tmp; ++} ++ ++const char* VM_Version_Ext::cpu_description(void) { ++ initialize_cpu_information(); ++ char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing); ++ if (NULL == tmp) { ++ return NULL; ++ } ++ strncpy(tmp, _cpu_desc, CPU_DETAILED_DESC_BUF_SIZE); ++ return tmp; ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.hpp afu8u/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.hpp +--- 
openjdk/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vm_version_ext_sw64.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,53 @@ ++/* ++ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_VM_VERSION_EXT_SW64_HPP ++#define CPU_SW64_VM_VM_VERSION_EXT_SW64_HPP ++ ++#include "utilities/macros.hpp" ++#include "runtime/vm_version.hpp" ++ ++class VM_Version_Ext : public VM_Version { ++ private: ++ static const size_t CPU_TYPE_DESC_BUF_SIZE = 256; ++ static const size_t CPU_DETAILED_DESC_BUF_SIZE = 4096; ++ ++ static int _no_of_threads; ++ static int _no_of_cores; ++ static int _no_of_sockets; ++ static bool _initialized; ++ static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE]; ++ static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE]; ++ ++ public: ++ static int number_of_threads(void); ++ static int number_of_cores(void); ++ static int number_of_sockets(void); ++ ++ static const char* cpu_name(void); ++ static const char* cpu_description(void); ++ static void initialize_cpu_information(void); ++}; ++ ++#endif // CPU_SW64_VM_VM_VERSION_EXT_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vm_version_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/vm_version_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/vm_version_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vm_version_sw64.cpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,249 @@ ++/* ++ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "memory/resourceArea.hpp" ++#include "runtime/java.hpp" ++#include "runtime/stubCodeGenerator.hpp" ++#include "vm_version_sw64.hpp" ++#ifdef TARGET_OS_FAMILY_linux ++# include "os_linux.inline.hpp" ++#endif ++#ifdef TARGET_OS_FAMILY_solaris ++# include "os_solaris.inline.hpp" ++#endif ++#ifdef TARGET_OS_FAMILY_windows ++# include "os_windows.inline.hpp" ++#endif ++#ifdef TARGET_OS_FAMILY_bsd ++# include "os_bsd.inline.hpp" ++#endif ++ ++int VM_Version::_features = VM_Version::unknown_m; ++const char* VM_Version::_features_str = ""; ++ ++void VM_Version::initialize() { ++ ++ // When using CMS or G1, we cannot use memset() in BOT updates ++ // because the sw64 version in libc_psr uses BIS which ++ // exposes "phantom zeros" to concurrent readers. See 6948537. ++ if (FLAG_IS_DEFAULT(UseMemSetInBOT) && (UseConcMarkSweepGC || UseG1GC)) { ++ FLAG_SET_DEFAULT(UseMemSetInBOT, false); ++ } ++ ++ // Issue a stern warning if the user has explicitly set ++ // UseMemSetInBOT (it is known to cause issues), but allow ++ // use for experimentation and debugging. 
++ if (UseConcMarkSweepGC || UseG1GC) { ++ if (UseMemSetInBOT) { ++ assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error"); ++ warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability" ++ " on sw64; please understand that you are using at your own risk!"); ++ } ++ } ++ ++ _features = determine_features(); ++ //no need, Abstract_VM_Version already define it as false ++ _supports_cx8 = true; ++ ++ //////////////////////add some other feature here////////////////// ++ ++ if (UseG1GC && FLAG_IS_DEFAULT(MaxGCPauseMillis)) { ++ FLAG_SET_CMDLINE(uintx, MaxGCPauseMillis, 2000); ++ } ++ ++#ifdef COMPILER2 ++ if (MaxVectorSize > 0) { ++ if (!is_power_of_2(MaxVectorSize)) { ++ warning("MaxVectorSize must be a power of 2"); ++ MaxVectorSize = 8; ++ } ++ } ++ // Vector optimization was closed by default. ++ if (FLAG_IS_DEFAULT(MaxVectorSize)) { ++ MaxVectorSize = 0; ++ } ++ ++ if (is_shenwei()) { ++ if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) { ++ FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, 1); ++ } ++ if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) { ++ FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, 1); ++ } ++ if (FLAG_IS_DEFAULT(UsePopCountInstruction)) { ++ FLAG_SET_DEFAULT(UsePopCountInstruction, 1); ++ } ++ if (FLAG_IS_DEFAULT(UseSW8A)) { ++ FLAG_SET_DEFAULT(UseSW8A, 1); ++ } ++ if (FLAG_IS_DEFAULT(UseWmemb)) { ++ FLAG_SET_DEFAULT(UseWmemb, 1); ++ } ++ if (FLAG_IS_DEFAULT(UseAddpi)) { ++ FLAG_SET_DEFAULT(UseAddpi, 0); ++ } ++ if (FLAG_IS_DEFAULT(UseCRC32)) { ++ FLAG_SET_DEFAULT(UseCRC32, 1); ++ } ++ if (is_sw8a()) { ++ if (FLAG_IS_DEFAULT(FRegisterConflict)) { ++ FLAG_SET_DEFAULT(FRegisterConflict, 0); ++ } ++ } else { ++ if (FLAG_IS_DEFAULT(FRegisterConflict)) { ++ FLAG_SET_DEFAULT(FRegisterConflict, 1); ++ } ++ if (FLAG_IS_DEFAULT(SolveAlignment)) { ++ FLAG_SET_DEFAULT(SolveAlignment, 1); ++ } ++ if (UseSW8A) { ++ if (!FLAG_IS_DEFAULT(UseSW8A)) ++ warning("UseSW8A specified, but not supported on this CPU"); ++ 
FLAG_SET_DEFAULT(UseSW8A, 0); ++ } ++ if (UseWmemb) { ++ if (!FLAG_IS_DEFAULT(UseWmemb)) ++ warning("UseWmemb specified, but not supported on this CPU"); ++ FLAG_SET_DEFAULT(UseWmemb, 0); ++ } ++ if (UseAddpi) { ++ if (!FLAG_IS_DEFAULT(UseAddpi)) ++ warning("UseAddpi specified, but not supported on this CPU"); ++ FLAG_SET_DEFAULT(UseAddpi, 0); ++ } ++ if (UseCRC32) { ++ if (!FLAG_IS_DEFAULT(UseCRC32)) ++ warning("UseCRC32 specified, but not supported on this CPU"); ++ FLAG_SET_DEFAULT(UseCRC32, 0); ++ } ++ } ++ } else if (UseCountLeadingZerosInstruction || UseCountTrailingZerosInstruction ++ || UsePopCountInstruction) { ++ if (!FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) ++ warning("Only SW CPUs support UseCountTrailingZerosInstruction"); ++ FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, 0); ++ FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, 0); ++ FLAG_SET_DEFAULT(UsePopCountInstruction, 0); ++ } ++#endif ++ ++ if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) { ++ UseMontgomeryMultiplyIntrinsic = true; ++ } ++ if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) { ++ UseMontgomerySquareIntrinsic = true; ++ } ++ ++ UseSSE = 0; // Only on x86 and x64 ++ ++ if (TieredCompilation) { ++ if (!FLAG_IS_DEFAULT(TieredCompilation)) ++ warning("TieredCompilation not supported"); ++ FLAG_SET_DEFAULT(TieredCompilation, false); ++ } ++ if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { ++ FLAG_SET_DEFAULT(UseCRC32Intrinsics, 1); ++ } ++ char buf[256]; ++ jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s", ++ (has_l2_cache() ? ", has_l2_cache" : ""), ++ (has_16k_page() ? ", has_16k_page" : ""), ++ (is_shenwei() ? ", on_shenwei_platform" : ""), ++ (is_sw2f() ? ", SW410(2F)" : ""), ++ (is_sw4a() ? ", SW411(4A)" : "" ), ++ (is_sw6a() ? ", SW421(6A)" : ""), ++ (is_sw6b() ? ", SW422(6B)" : ""), ++ (is_sw1621() ? ", SW1621" : ""), ++ (UseCountTrailingZerosInstruction ? 
", UseCountTrailingZerosInstruction" : "")); ++ ++ // buf is started with ", " or is empty ++ _features_str = strdup(strlen(buf) > 2 ? buf + 2 : buf); ++ ++ if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) { ++ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 2); ++ } ++ ++ if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) { ++ FLAG_SET_DEFAULT(AllocatePrefetchLines, 3); ++ } ++ ++ if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize)) { ++ FLAG_SET_DEFAULT(AllocatePrefetchStepSize, 128); ++ } ++ ++ if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) { ++ FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256); ++ } ++ ++ if (FLAG_IS_DEFAULT(AllocateInstancePrefetchLines)) { ++ FLAG_SET_DEFAULT(AllocateInstancePrefetchLines, 1); ++ } ++ ++ if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) { ++ FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 256); ++ } ++ ++ if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) { ++ FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 256); ++ } ++ ++ if (UseSHA) { ++ warning("SHA instructions are not available on this CPU"); ++ FLAG_SET_DEFAULT(UseSHA, false); ++ } ++ ++ if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) { ++ warning("SHA intrinsics are not available on this CPU"); ++ FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); ++ FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); ++ FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); ++ } ++ ++ NOT_PRODUCT( if (PrintMiscellaneous && Verbose) print_features(); ); ++} ++ ++void VM_Version::print_features() { ++ tty->print_cr("Version:%s", cpu_features()); ++} ++ ++int VM_Version::determine_features() { ++ //////////////////////add some other feature here////////////////// ++ int features = platform_features(unknown_m); ++ //spt_16k_page_m; ++ return features; ++} ++ ++static int saved_features = 0; ++ ++void VM_Version::allow_all() { ++ saved_features = _features; ++ _features = all_features_m; ++} ++ ++void VM_Version::revert() { ++ _features = saved_features; ++} +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vm_version_sw64.hpp 
afu8u/hotspot/src/cpu/sw64/vm/vm_version_sw64.hpp +--- openjdk/hotspot/src/cpu/sw64/vm/vm_version_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vm_version_sw64.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,102 @@ ++/* ++ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef CPU_SW64_VM_VM_VERSION_SW64_HPP ++#define CPU_SW64_VM_VM_VERSION_SW64_HPP ++ ++#include "runtime/globals_extension.hpp" ++#include "runtime/vm_version.hpp" ++ ++ ++class VM_Version: public Abstract_VM_Version { ++protected: ++ enum Feature_Flag { ++ with_l2_cache = 0, ++ spt_16k_page = 1, ++ sw2f = 2, ++ sw4a = 3, ++ sw6a = 4, ++ sw6b = 5, ++ sw1621 = 6, ++ sw3231 = 7, ++ wx_h8000 = 8, ++ with_sw_support = 9, ++ //////////////////////add some other feature here////////////////// ++ }; ++ ++ enum Feature_Flag_Set { ++ unknown_m = 0, ++ all_features_m = -1, ++ with_l2_cache_m = 1 << with_l2_cache, ++ spt_16k_page_m = 1 << spt_16k_page, ++ sw2f_m = 1 << sw2f, ++ sw4a_m = 1 << sw4a, ++ sw6a_m = 1 << sw6a, ++ sw6b_m = 1 << sw6b, ++ sw1621_m = 1 << sw1621, ++ sw3231_m = 1 << sw3231, ++ wx_h8000_m = 1 << wx_h8000, ++ with_sw_support_m = 1 << with_sw_support, ++ ++ //////////////////////add some other feature here////////////////// ++ }; ++ ++ static int _features; ++ static const char* _features_str; ++ ++ static void print_features(); ++ static int determine_features(); ++ static int platform_features(int features); ++ ++public: ++ // Initialization ++ static void initialize(); ++ ++ // shenwei has no such instructions, use ll/sc instead ++ static bool supports_compare_and_exchange() { return false; } ++ ++ static bool has_l2_cache() { return _features & with_l2_cache_m; } ++ static bool has_16k_page() { return _features & spt_16k_page_m; } ++ static bool is_sw2f() { return _features & sw2f_m; } ++ static bool is_sw4a() { return _features & sw4a_m; } ++ static bool is_sw6a() { return _features & sw6a_m; } ++ static bool is_sw6b() { return _features & sw6b_m; } ++ static bool is_sw8a() { return _features & wx_h8000_m; } ++ static bool is_sw1621() { return _features & sw1621_m; } ++ static bool is_sw3231() { return _features & sw3231_m; } ++ static bool is_shenwei() { return _features & with_sw_support_m; } ++// static bool sw2only() { return 
is_sw2f() || is_sw4a() || is_sw6a(); } ++ static bool sw2only() { return true; } ++ static bool sw3only() { return is_sw6b(); } ++ ++ //////////////////////add some other feature here////////////////// ++ ++ static const char* cpu_features() { return _features_str; } ++ ++ // Assembler testing ++ static void allow_all(); ++ static void revert(); ++}; ++ ++#endif // CPU_SW64_VM_VM_VERSION_SW64_HPP +diff -uNr openjdk/hotspot/src/cpu/sw64/vm/vtableStubs_sw64.cpp afu8u/hotspot/src/cpu/sw64/vm/vtableStubs_sw64.cpp +--- openjdk/hotspot/src/cpu/sw64/vm/vtableStubs_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/cpu/sw64/vm/vtableStubs_sw64.cpp 2025-05-06 10:53:44.915633666 +0800 +@@ -0,0 +1,281 @@ ++/* ++ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "code/vtableStubs.hpp" ++#include "interp_masm_sw64.hpp" ++#include "memory/resourceArea.hpp" ++#include "oops/compiledICHolder.hpp" ++#include "oops/klassVtable.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "vmreg_sw64.inline.hpp" ++#ifdef COMPILER2 ++#include "opto/runtime.hpp" ++#endif ++ ++ ++// machine-dependent part of VtableStubs: create VtableStub of correct size and ++// initialize its code ++ ++#define __ masm-> ++ ++#ifdef PRODUCT ++#define BLOCK_COMMENT(str) /* nothing */ ++#else ++#define BLOCK_COMMENT(str) { char line[1024];sprintf(line,"%s:%s:%d",str,__FILE__, __LINE__); __ block_comment(line);} ++#endif ++ ++#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") ++ ++#ifndef PRODUCT ++extern "C" void bad_compiled_vtable_index(JavaThread* thread, ++ oop receiver, ++ int index); ++#endif ++ ++// used by compiler only; reciever in T0. ++// used registers : ++// Rmethod : receiver klass & method ++// when reach here, receiver in T0, klass in T11 ++VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { ++ const int sw64_code_length = VtableStub::pd_code_size_limit(true); ++ VtableStub* s = new(sw64_code_length) VtableStub(true, vtable_index); ++ ResourceMark rm; ++ CodeBuffer cb(s->entry_point(), sw64_code_length); ++ MacroAssembler* masm = new MacroAssembler(&cb); ++ Register t1 = T11, t2 = Rmethod; ++#ifndef PRODUCT ++ if (CountCompiledCalls) { ++ __ li(AT, SharedRuntime::nof_megamorphic_calls_addr()); ++ __ ldw(t1, AT , 0); ++ __ addl(t1, t1, 1); ++ __ stw(t1, AT,0); ++ } ++#endif ++ ++ // get receiver (need to skip return address on top of stack) ++ //assert(receiver_location == T0->as_VMReg(), "receiver expected in T0"); ++ ++ // get receiver klass ++ address npe_addr = __ pc(); ++ //add for compressedoops ++ __ load_klass(t1, A1); ++ // compute entry offset (in words) ++ int entry_offset = InstanceKlass::vtable_start_offset() + 
vtable_index*vtableEntry::size(); ++#ifndef PRODUCT ++ if (DebugVtables) { ++ Label L; ++ // check offset vs vtable length ++ __ ldw(t2, t1, InstanceKlass::vtable_length_offset()*wordSize); ++ assert(Assembler::is_simm16(vtable_index*vtableEntry::size()), "change this code"); ++ __ move(AT, vtable_index*vtableEntry::size()); ++ __ cmplt(AT, AT, t2); ++ __ bne(AT, L); ++ __ move(A2, vtable_index); ++ __ move(A1, A0); ++ __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), A1, A2); ++ __ BIND(L); ++ } ++#endif // PRODUCT ++ // load methodOop and target address ++ const Register method = Rmethod; ++ __ ld_ptr(method, t1, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()); ++ if (DebugVtables) { ++ Label L; ++ __ beq(method, L); ++ // __ cmpl(Address(method, Method::from_compiled_offset()), NULL_WORD); ++ __ ldw(AT, method,in_bytes(Method::from_compiled_offset())); ++ //__ jcc(Assembler::notZero, L); ++ __ bne(AT, L); ++ __ stop("Vtable entry is NULL"); ++ __ BIND(L); ++ } ++ // T11: receiver klass ++ // T0: receiver ++ // Rmethod: methodOop ++ // T12: entry ++ address ame_addr = __ pc(); ++ __ ld_ptr(T12, method,in_bytes(Method::from_compiled_offset())); ++ __ jmp(T12); ++ masm->flush(); ++ s->set_exception_points(npe_addr, ame_addr); ++ return s; ++} ++ ++ ++// used registers : ++// T1 T2 ++// when reach here, the receiver in T0, klass in T1 ++VtableStub* VtableStubs::create_itable_stub(int itable_index) { ++ // Note well: pd_code_size_limit is the absolute minimum we can get ++ // away with. If you add code here, bump the code stub size ++ // returned by pd_code_size_limit! 
++ const int sw64_code_length = VtableStub::pd_code_size_limit(false); ++ VtableStub* s = new(sw64_code_length) VtableStub(false, itable_index); ++ ResourceMark rm; ++ CodeBuffer cb(s->entry_point(), sw64_code_length); ++ MacroAssembler* masm = new MacroAssembler(&cb); ++ // we T11,T12 as temparary register, they are free from register allocator ++ Register t1 = T11, t2 = T2; ++ // Entry arguments: ++ // T1: Interface ++ // T0: Receiver ++ ++#ifndef PRODUCT ++ if (CountCompiledCalls) { ++ __ li(AT, SharedRuntime::nof_megamorphic_calls_addr()); ++ __ ldw(T11, AT, 0); ++ __ addl(T11, T11,1); ++ __ stw(T11, AT, 0); ++ } ++#endif /* PRODUCT */ ++ const Register holder_klass_reg = T1; // declaring interface klass (DECC) ++ const Register resolved_klass_reg = Rmethod; // resolved interface klass (REFC) ++ const Register icholder_reg = T1; ++ __ ld_ptr(resolved_klass_reg, icholder_reg, CompiledICHolder::holder_klass_offset()); ++ __ ld_ptr(holder_klass_reg, icholder_reg, CompiledICHolder::holder_metadata_offset()); ++ ++ // get receiver klass (also an implicit null-check) ++ address npe_addr = __ pc(); ++ __ load_klass(t1, A1); ++ { ++ const int base = InstanceKlass::vtable_start_offset() * wordSize; ++ assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below"); ++ assert(Assembler::is_simm16(base), "change this code"); ++ __ add_simm16(t2, t1, base); ++ assert(Assembler::is_simm16(InstanceKlass::vtable_length_offset() * wordSize), "change this code"); ++ __ ldw(AT, t1, InstanceKlass::vtable_length_offset() * wordSize); ++ __ slll(AT, AT, Address::times_8); ++ __ addl(t2, t2, AT); ++ if (HeapWordsPerLong > 1) { ++ __ round_to(t2, BytesPerLong); ++ } ++ ++ Label hit, entry; ++ assert(Assembler::is_simm16(itableOffsetEntry::size() * wordSize), "change this code"); ++ __ BIND(entry); ++ ++#ifdef ASSERT ++ // Check that the entry is non-null ++ if (DebugVtables) { ++ Label L; ++ 
assert(Assembler::is_simm16(itableOffsetEntry::interface_offset_in_bytes()), "change this code"); ++ __ ldw(AT, t1, itableOffsetEntry::interface_offset_in_bytes()); ++ __ bne(AT, L); ++ __ stop("null entry point found in itable's offset table"); ++ __ BIND(L); ++ } ++#endif ++ assert(Assembler::is_simm16(itableOffsetEntry::interface_offset_in_bytes()), "change this code"); ++ __ ld_ptr(AT, t2, itableOffsetEntry::interface_offset_in_bytes()); ++ __ add_simm16(t2, t2, itableOffsetEntry::size() * wordSize); ++ __ bne(AT, resolved_klass_reg, entry); ++ ++ } ++ ++ //add for compressedoops ++ __ load_klass(t1, A1); ++ // compute itable entry offset (in words) ++ const int base = InstanceKlass::vtable_start_offset() * wordSize; ++ assert(vtableEntry::size() * wordSize == 8, "adjust the scaling in the code below"); ++ assert(Assembler::is_simm16(base), "change this code"); ++ __ add_simm16(t2, t1, base); ++ assert(Assembler::is_simm16(InstanceKlass::vtable_length_offset() * wordSize), "change this code"); ++ __ ldw(AT, t1, InstanceKlass::vtable_length_offset() * wordSize); ++ __ slll(AT, AT, Address::times_8); ++ __ addl(t2, t2, AT); ++ if (HeapWordsPerLong > 1) { ++ __ round_to(t2, BytesPerLong); ++ } ++ ++ Label hit, entry; ++ assert(Assembler::is_simm16(itableOffsetEntry::size() * wordSize), "change this code"); ++ __ BIND(entry); ++ ++#ifdef ASSERT ++ // Check that the entry is non-null ++ if (DebugVtables) { ++ Label L; ++ assert(Assembler::is_simm16(itableOffsetEntry::interface_offset_in_bytes()), "change this code"); ++ __ ldw(AT, t1, itableOffsetEntry::interface_offset_in_bytes()); ++ __ bne(AT, L); ++ __ stop("null entry point found in itable's offset table"); ++ __ BIND(L); ++ } ++#endif ++ assert(Assembler::is_simm16(itableOffsetEntry::interface_offset_in_bytes()), "change this code"); ++ __ ld_ptr(AT, t2, itableOffsetEntry::interface_offset_in_bytes()); ++ __ add_simm16(t2, t2, itableOffsetEntry::size() * wordSize); ++ __ bne(AT, holder_klass_reg, entry); ++ ++ 
// We found a hit, move offset into T12 ++ __ ld_ptr(t2, t2, itableOffsetEntry::offset_offset_in_bytes() - itableOffsetEntry::size() * wordSize); ++ ++ // Compute itableMethodEntry. ++ const int method_offset = (itableMethodEntry::size() * wordSize * itable_index) + ++ itableMethodEntry::method_offset_in_bytes(); ++ ++ // Get methodOop and entrypoint for compiler ++ const Register method = Rmethod; ++ __ slll(AT, t2, Address::times_1); ++ __ addl(AT, AT, t1 ); ++ __ ld_ptr(method, AT, method_offset); ++ ++#ifdef ASSERT ++ if (DebugVtables) { ++ Label L1; ++ __ beq(method, L1); ++ __ ldw(AT, method,in_bytes(Method::from_compiled_offset())); ++ __ bne(AT, L1); ++ __ stop("methodOop is null"); ++ __ BIND(L1); ++ } ++#endif // ASSERT ++ ++ // Rmethod: methodOop ++ // T0: receiver ++ // T12: entry point ++ address ame_addr = __ pc(); ++ __ ld_ptr(T12, method,in_bytes(Method::from_compiled_offset())); ++ __ jmp(T12); ++ masm->flush(); ++ s->set_exception_points(npe_addr, ame_addr); ++ return s; ++} ++ ++// NOTE : whenever you change the code above, dont forget to change the const here ++int VtableStub::pd_code_size_limit(bool is_vtable_stub) { ++ if (is_vtable_stub) { ++ return ( DebugVtables ? 600 : 28) + (CountCompiledCalls ? 24 : 0)+ ++ (UseCompressedOops ? 16 : 0); ++ } else { ++ return ( DebugVtables ? 636 : 152) + (CountCompiledCalls ? 24 : 0)+ ++ (UseCompressedOops ? 
32 : 0); ++ } ++} ++ ++int VtableStub::pd_code_alignment() { ++ return wordSize; ++} +diff -uNr openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp afu8u/hotspot/src/cpu/x86/vm/assembler_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/assembler_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/assembler_x86.cpp 2025-05-06 10:53:44.915633666 +0800 +@@ -2936,14 +2936,6 @@ + emit_arith_b(0xF6, 0xC0, dst, imm8); + } + +-void Assembler::testb(Address dst, int imm8) { +- InstructionMark im(this); +- prefix(dst); +- emit_int8((unsigned char)0xF6); +- emit_operand(rax, dst, 1); +- emit_int8(imm8); +-} +- + void Assembler::testl(Register dst, int32_t imm32) { + // not using emit_arith because test + // doesn't support sign-extension of +diff -uNr openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp afu8u/hotspot/src/cpu/x86/vm/assembler_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/assembler_x86.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/assembler_x86.hpp 2025-05-06 10:53:44.915633666 +0800 +@@ -1644,7 +1644,6 @@ + void subss(XMMRegister dst, XMMRegister src); + + void testb(Register dst, int imm8); +- void testb(Address dst, int imm8); + + void testl(Register dst, int32_t imm32); + void testl(Register dst, Register src); +diff -uNr openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp afu8u/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp 2025-05-06 11:13:08.095672949 +0800 +@@ -39,10 +39,7 @@ + #include "oops/objArrayKlass.hpp" + #include "runtime/sharedRuntime.hpp" + #include "vmreg_x86.inline.hpp" +-#include "utilities/macros.hpp" +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif ++ + + // These masks are used to provide 128-bit aligned bitmasks to the XMM + // instructions, to allow sign-masking or sign-bit flipping. 
They allow +@@ -2001,44 +1998,21 @@ + if ( op->code() == lir_cas_obj) { + #ifdef _LP64 + if (UseCompressedOops) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCASBarrier) { +- Register tmp1 = op->tmp1()->as_register(); +- Register tmp2 = op->tmp2()->as_register(); +- Register res = op->result_opr()->as_register(); +- __ encode_heap_oop(cmpval); +- __ mov(rscratch1, newval); +- __ encode_heap_oop(rscratch1); +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, rscratch1, false, tmp1, tmp2); +- } else +-#endif +- { +- __ encode_heap_oop(cmpval); +- __ mov(rscratch1, newval); +- __ encode_heap_oop(rscratch1); +- if (os::is_MP()) { +- __ lock(); +- } +- // cmpval (rax) is implicitly used by this instruction +- __ cmpxchgl(rscratch1, Address(addr, 0)); ++ __ encode_heap_oop(cmpval); ++ __ mov(rscratch1, newval); ++ __ encode_heap_oop(rscratch1); ++ if (os::is_MP()) { ++ __ lock(); + } ++ // cmpval (rax) is implicitly used by this instruction ++ __ cmpxchgl(rscratch1, Address(addr, 0)); + } else + #endif + { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCASBarrier) { +- Register tmp1 = op->tmp1()->as_register(); +- Register tmp2 = op->tmp2()->as_register(); +- Register res = op->result_opr()->as_register(); +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, newval, false, tmp1, tmp2); +- } else +-#endif +- { +- if (os::is_MP()) { +- __ lock(); +- } +- __ cmpxchgptr(newval, Address(addr, 0)); +- } ++ if (os::is_MP()) { ++ __ lock(); ++ } ++ __ cmpxchgptr(newval, Address(addr, 0)); + } + } else { + assert(op->code() == lir_cas_int, "lir_cas_int expected"); +@@ -3920,27 +3894,11 @@ + } + + +-void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { +- assert(src->is_address(), "must be an address"); +- assert(dest->is_register(), "must be a register"); +- +- if (!UseShenandoahGC) { +- Register reg = dest->as_pointer_register(); 
+- __ lea(reg, as_Address(src->as_address_ptr())); +- } else { +- PatchingStub* patch = NULL; +- if (patch_code != lir_patch_none) { +- patch = new PatchingStub(_masm, PatchingStub::access_field_id); +- } +- +- Register reg = dest->as_pointer_register(); +- LIR_Address* addr = src->as_address_ptr(); +- __ lea(reg, as_Address(addr)); +- +- if (patch != NULL) { +- patching_epilog(patch, patch_code, addr->base()->as_register(), info); +- } +- } ++void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) { ++ assert(addr->is_address() && dest->is_register(), "check"); ++ Register reg; ++ reg = dest->as_pointer_register(); ++ __ lea(reg, as_Address(addr->as_address_ptr())); + } + + +diff -uNr openjdk/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp afu8u/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp 2025-05-06 11:13:08.095672949 +0800 +@@ -43,10 +43,6 @@ + #define __ gen()->lir()-> + #endif + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" +-#endif +- + // Item will be loaded into a byte register; Intel only + void LIRItem::load_byte_item() { + load_item(); +@@ -814,19 +810,8 @@ + } + + LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience +- if (type == objectType) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCASBarrier) { +- LIR_Opr result = rlock_result(x); +- __ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT), result); +- // Shenandoah C1 barrier would do all result management itself, shortcut here. 
+- return; +- } else +-#endif +- { ++ if (type == objectType) + __ cas_obj(addr, cmp.result(), val.result(), ill, ill); +- } +- } + else if (type == intType) + __ cas_int(addr, cmp.result(), val.result(), ill, ill); + else if (type == longType) +@@ -1524,14 +1509,6 @@ + true /* do_load */, false /* patch */, NULL); + } + __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr); +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && is_obj) { +- LIR_Opr tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, dst, LIR_OprFact::addressConst(0)); +- __ move(tmp, dst); +- } +-#endif +- + if (is_obj) { + // Seems to be a precise address + post_barrier(LIR_OprFact::address(addr), data); +diff -uNr openjdk/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp afu8u/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2025-05-06 10:53:44.919633666 +0800 +@@ -40,7 +40,6 @@ + #include "vmreg_x86.inline.hpp" + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" + #endif + + +@@ -1616,7 +1615,7 @@ + // arg0 : previous value of memory + + BarrierSet* bs = Universe::heap()->barrier_set(); +- if (bs->kind() != BarrierSet::G1SATBCTLogging && bs->kind() != BarrierSet::ShenandoahBarrierSet) { ++ if (bs->kind() != BarrierSet::G1SATBCTLogging) { + __ movptr(rax, (int)id); + __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); + __ should_not_reach_here(); +@@ -1693,12 +1692,6 @@ + Address store_addr(rbp, 2*BytesPerWord); + + BarrierSet* bs = Universe::heap()->barrier_set(); +- if (bs->kind() == BarrierSet::ShenandoahBarrierSet) { +- __ movptr(rax, (int)id); +- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); +- __ should_not_reach_here(); +- break; +- } + CardTableModRefBS* ct = 
(CardTableModRefBS*)bs; + assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); + +@@ -1779,29 +1772,6 @@ + + } + break; +- case shenandoah_lrb_slow_id: +- { +- StubFrame f(sasm, "shenandoah_load_reference_barrier", dont_gc_arguments); +- // arg0 : object to be resolved +- +- save_live_registers(sasm, 1); +-#ifdef _LP64 +- f.load_argument(0, c_rarg0); +- f.load_argument(1, c_rarg1); +- if (UseCompressedOops) { +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), c_rarg0, c_rarg1); +- } else { +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0, c_rarg1); +- } +-#else +- f.load_argument(0, rax); +- f.load_argument(1, rbx); +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx); +-#endif +- restore_live_registers_except_rax(sasm, true); +- +- } +- break; + #endif // INCLUDE_ALL_GCS + + case predicate_failed_trap_id: +diff -uNr openjdk/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp afu8u/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp 2025-05-06 10:53:44.923633667 +0800 +@@ -43,8 +43,6 @@ + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" + #include "gc_implementation/g1/heapRegion.hpp" +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" + #endif // INCLUDE_ALL_GCS + + #ifdef PRODUCT +@@ -4134,7 +4132,7 @@ + movptr(value, Address(value, -JNIHandles::weak_tag_value)); + verify_oop(value); + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + g1_write_barrier_pre(noreg /* obj */, + value /* pre_val */, + thread /* thread */, +@@ -4193,21 +4191,15 @@ + Address buffer(thread, 
in_bytes(JavaThread::satb_mark_queue_offset() + + PtrQueue::byte_offset_of_buf())); + +- if (UseShenandoahGC) { +- Address gc_state(thread, in_bytes(JavaThread::gc_state_offset())); +- testb(gc_state, ShenandoahHeap::MARKING); +- jcc(Assembler::zero, done); +- } else { +- assert(UseG1GC, "Should be"); +- // Is marking active? +- if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { +- cmpl(in_progress, 0); +- } else { +- assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); +- cmpb(in_progress, 0); +- } +- jcc(Assembler::equal, done); ++ ++ // Is marking active? ++ if (in_bytes(PtrQueue::byte_width_of_active()) == 4) { ++ cmpl(in_progress, 0); ++ } else { ++ assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption"); ++ cmpb(in_progress, 0); + } ++ jcc(Assembler::equal, done); + + // Do we need to load the previous value? + if (obj != noreg) { +@@ -4290,13 +4282,6 @@ + assert(thread == r15_thread, "must be"); + #endif // _LP64 + +- if (UseShenandoahGC) { +- // No need for this in Shenandoah. 
+- return; +- } +- +- assert(UseG1GC, "expect G1 GC"); +- + Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + + PtrQueue::byte_offset_of_index())); + Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + +@@ -4632,9 +4617,65 @@ + void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) { + pusha(); + +- save_vector_registers(); +- + // if we are coming from c1, xmm registers may be live ++ int off = 0; ++ if (UseSSE == 1) { ++ subptr(rsp, sizeof(jdouble)*8); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm0); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm1); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm2); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm3); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm4); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm5); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm6); ++ movflt(Address(rsp,off++*sizeof(jdouble)),xmm7); ++ } else if (UseSSE >= 2) { ++#ifdef COMPILER2 ++ if (MaxVectorSize > 16) { ++ assert(UseAVX > 0, "256bit vectors are supported only with AVX"); ++ // Save upper half of YMM registes ++ subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8)); ++ vextractf128h(Address(rsp, 0),xmm0); ++ vextractf128h(Address(rsp, 16),xmm1); ++ vextractf128h(Address(rsp, 32),xmm2); ++ vextractf128h(Address(rsp, 48),xmm3); ++ vextractf128h(Address(rsp, 64),xmm4); ++ vextractf128h(Address(rsp, 80),xmm5); ++ vextractf128h(Address(rsp, 96),xmm6); ++ vextractf128h(Address(rsp,112),xmm7); ++#ifdef _LP64 ++ vextractf128h(Address(rsp,128),xmm8); ++ vextractf128h(Address(rsp,144),xmm9); ++ vextractf128h(Address(rsp,160),xmm10); ++ vextractf128h(Address(rsp,176),xmm11); ++ vextractf128h(Address(rsp,192),xmm12); ++ vextractf128h(Address(rsp,208),xmm13); ++ vextractf128h(Address(rsp,224),xmm14); ++ vextractf128h(Address(rsp,240),xmm15); ++#endif ++ } ++#endif ++ // Save whole 128bit (16 bytes) XMM regiters ++ subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8)); ++ 
movdqu(Address(rsp,off++*16),xmm0); ++ movdqu(Address(rsp,off++*16),xmm1); ++ movdqu(Address(rsp,off++*16),xmm2); ++ movdqu(Address(rsp,off++*16),xmm3); ++ movdqu(Address(rsp,off++*16),xmm4); ++ movdqu(Address(rsp,off++*16),xmm5); ++ movdqu(Address(rsp,off++*16),xmm6); ++ movdqu(Address(rsp,off++*16),xmm7); ++#ifdef _LP64 ++ movdqu(Address(rsp,off++*16),xmm8); ++ movdqu(Address(rsp,off++*16),xmm9); ++ movdqu(Address(rsp,off++*16),xmm10); ++ movdqu(Address(rsp,off++*16),xmm11); ++ movdqu(Address(rsp,off++*16),xmm12); ++ movdqu(Address(rsp,off++*16),xmm13); ++ movdqu(Address(rsp,off++*16),xmm14); ++ movdqu(Address(rsp,off++*16),xmm15); ++#endif ++ } + + // Preserve registers across runtime call + int incoming_argument_and_return_value_offset = -1; +@@ -4700,73 +4741,7 @@ + addptr(rsp, sizeof(jdouble) * nb_args); + } + +- restore_vector_registers(); +- popa(); +-} +- +-void MacroAssembler::save_vector_registers() { +- int off = 0; +- if (UseSSE == 1) { +- subptr(rsp, sizeof(jdouble)*8); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm0); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm1); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm2); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm3); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm4); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm5); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm6); +- movflt(Address(rsp,off++*sizeof(jdouble)),xmm7); +- } else if (UseSSE >= 2) { +-#ifdef COMPILER2 +- if (MaxVectorSize > 16) { +- assert(UseAVX > 0, "256bit vectors are supported only with AVX"); +- // Save upper half of YMM registes +- subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8)); +- vextractf128h(Address(rsp, 0),xmm0); +- vextractf128h(Address(rsp, 16),xmm1); +- vextractf128h(Address(rsp, 32),xmm2); +- vextractf128h(Address(rsp, 48),xmm3); +- vextractf128h(Address(rsp, 64),xmm4); +- vextractf128h(Address(rsp, 80),xmm5); +- vextractf128h(Address(rsp, 96),xmm6); +- vextractf128h(Address(rsp,112),xmm7); +-#ifdef _LP64 +- 
vextractf128h(Address(rsp,128),xmm8); +- vextractf128h(Address(rsp,144),xmm9); +- vextractf128h(Address(rsp,160),xmm10); +- vextractf128h(Address(rsp,176),xmm11); +- vextractf128h(Address(rsp,192),xmm12); +- vextractf128h(Address(rsp,208),xmm13); +- vextractf128h(Address(rsp,224),xmm14); +- vextractf128h(Address(rsp,240),xmm15); +-#endif +- } +-#endif +- // Save whole 128bit (16 bytes) XMM regiters +- subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8)); +- movdqu(Address(rsp,off++*16),xmm0); +- movdqu(Address(rsp,off++*16),xmm1); +- movdqu(Address(rsp,off++*16),xmm2); +- movdqu(Address(rsp,off++*16),xmm3); +- movdqu(Address(rsp,off++*16),xmm4); +- movdqu(Address(rsp,off++*16),xmm5); +- movdqu(Address(rsp,off++*16),xmm6); +- movdqu(Address(rsp,off++*16),xmm7); +-#ifdef _LP64 +- movdqu(Address(rsp,off++*16),xmm8); +- movdqu(Address(rsp,off++*16),xmm9); +- movdqu(Address(rsp,off++*16),xmm10); +- movdqu(Address(rsp,off++*16),xmm11); +- movdqu(Address(rsp,off++*16),xmm12); +- movdqu(Address(rsp,off++*16),xmm13); +- movdqu(Address(rsp,off++*16),xmm14); +- movdqu(Address(rsp,off++*16),xmm15); +-#endif +- } +-} +- +-void MacroAssembler::restore_vector_registers() { +- int off = 0; ++ off = 0; + if (UseSSE == 1) { + movflt(xmm0, Address(rsp,off++*sizeof(jdouble))); + movflt(xmm1, Address(rsp,off++*sizeof(jdouble))); +@@ -4823,6 +4798,7 @@ + } + #endif + } ++ popa(); + } + + static const double pi_4 = 0.7853981633974483; +@@ -5258,6 +5234,7 @@ + BLOCK_COMMENT("} verify_oop"); + } + ++ + RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, + Register tmp, + int offset) { +@@ -5786,13 +5763,6 @@ + } + + void MacroAssembler::load_heap_oop(Register dst, Address src) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierSetAssembler::bsasm()->load_heap_oop(this, dst, src); +- return; +- } +-#endif +- + #ifdef _LP64 + // FIXME: Must change all places where we try to load the klass. 
+ if (UseCompressedOops) { +@@ -5805,13 +5775,6 @@ + + // Doesn't do verfication, generates fixed size code + void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierSetAssembler::bsasm()->load_heap_oop(this, dst, src); +- return; +- } +-#endif +- + #ifdef _LP64 + if (UseCompressedOops) { + movl(dst, src); +diff -uNr openjdk/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp afu8u/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp 2025-05-06 10:53:44.923633667 +0800 +@@ -1278,8 +1278,6 @@ + + #undef VIRTUAL + +- void save_vector_registers(); +- void restore_vector_registers(); + }; + + /** +diff -uNr openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp afu8u/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/nativeInst_x86.cpp 2025-05-06 10:53:44.923633667 +0800 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -317,8 +317,6 @@ + case instruction_code_reg2memb: // 0x88 + case instruction_code_mem2regb: // 0x8a + +- case instruction_code_lea: // 0x8d +- + case instruction_code_float_s: // 0xd9 fld_s a + case instruction_code_float_d: // 0xdd fld_d a + +@@ -391,9 +389,6 @@ + case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a + break; + +- case instruction_code_lea: // 0x8d lea r, a +- break; +- + default: + fatal ("not a mov [reg+offs], reg instruction"); + } +diff -uNr openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp afu8u/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp 2025-05-06 10:53:44.923633667 +0800 +@@ -287,8 +287,6 @@ + instruction_code_xmm_store = 0x11, + instruction_code_xmm_lpd = 0x12, + +- instruction_code_lea = 0x8d, +- + instruction_VEX_prefix_2bytes = Assembler::VEX_2bytes, + instruction_VEX_prefix_3bytes = Assembler::VEX_3bytes, + +diff -uNr openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp afu8u/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp 2025-05-06 10:53:44.923633667 +0800 +@@ -1298,97 +1298,6 @@ + } + } + +-// Registers need to be saved for runtime call +-static Register caller_saved_registers[] = { +- rcx, rdx, rsi, rdi +-}; +- +-// Save caller saved registers except r1 and r2 +-static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) { +- int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register)); +- for (int index = 0; index < reg_len; index ++) { +- Register this_reg = caller_saved_registers[index]; +- if (this_reg != r1 && this_reg != r2) { +- __ push(this_reg); +- } +- } +-} +- +-// Restore caller saved registers except r1 and r2 
+-static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) { +- int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register)); +- for (int index = reg_len - 1; index >= 0; index --) { +- Register this_reg = caller_saved_registers[index]; +- if (this_reg != r1 && this_reg != r2) { +- __ pop(this_reg); +- } +- } +-} +- +-// Pin object, return pinned object or null in rax +-static void gen_pin_object(MacroAssembler* masm, +- Register thread, VMRegPair reg) { +- __ block_comment("gen_pin_object {"); +- +- Label is_null; +- Register tmp_reg = rax; +- VMRegPair tmp(tmp_reg->as_VMReg()); +- if (reg.first()->is_stack()) { +- // Load the arg up from the stack +- simple_move32(masm, reg, tmp); +- reg = tmp; +- } else { +- __ movl(tmp_reg, reg.first()->as_Register()); +- } +- __ testptr(reg.first()->as_Register(), reg.first()->as_Register()); +- __ jccb(Assembler::equal, is_null); +- +- // Save registers that may be used by runtime call +- Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg; +- save_registers_except(masm, arg, thread); +- +- __ call_VM_leaf( +- CAST_FROM_FN_PTR(address, SharedRuntime::pin_object), +- thread, reg.first()->as_Register()); +- +- // Restore saved registers +- restore_registers_except(masm, arg, thread); +- +- __ bind(is_null); +- __ block_comment("} gen_pin_object"); +-} +- +-// Unpin object +-static void gen_unpin_object(MacroAssembler* masm, +- Register thread, VMRegPair reg) { +- __ block_comment("gen_unpin_object {"); +- Label is_null; +- +- // temp register +- __ push(rax); +- Register tmp_reg = rax; +- VMRegPair tmp(tmp_reg->as_VMReg()); +- +- simple_move32(masm, reg, tmp); +- +- __ testptr(rax, rax); +- __ jccb(Assembler::equal, is_null); +- +- // Save registers that may be used by runtime call +- Register arg = reg.first()->is_Register() ? 
reg.first()->as_Register() : noreg; +- save_registers_except(masm, arg, thread); +- +- __ call_VM_leaf( +- CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object), +- thread, rax); +- +- // Restore saved registers +- restore_registers_except(masm, arg, thread); +- __ bind(is_null); +- __ pop(rax); +- __ block_comment("} gen_unpin_object"); +-} +- + // Check GC_locker::needs_gc and enter the runtime if it's true. This + // keeps a new JNI critical region from starting until a GC has been + // forced. Save down any oops in registers and describe them in an +@@ -1947,7 +1856,7 @@ + + __ get_thread(thread); + +- if (is_critical_native && !Universe::heap()->supports_object_pinning()) { ++ if (is_critical_native) { + check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args, + oop_handle_offset, oop_maps, in_regs, in_sig_bt); + } +@@ -1985,11 +1894,6 @@ + // + OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); + +- // Inbound arguments that need to be pinned for critical natives +- GrowableArray pinned_args(total_in_args); +- // Current stack slot for storing register based array argument +- int pinned_slot = oop_handle_offset; +- + // Mark location of rbp, + // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg()); + +@@ -2001,28 +1905,7 @@ + switch (in_sig_bt[i]) { + case T_ARRAY: + if (is_critical_native) { +- VMRegPair in_arg = in_regs[i]; +- if (Universe::heap()->supports_object_pinning()) { +- // gen_pin_object handles save and restore +- // of any clobbered registers +- gen_pin_object(masm, thread, in_arg); +- pinned_args.append(i); +- +- // rax has pinned array +- VMRegPair result_reg(rax->as_VMReg()); +- if (!in_arg.first()->is_stack()) { +- assert(pinned_slot <= stack_slots, "overflow"); +- simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot)); +- pinned_slot += VMRegImpl::slots_per_word; +- } else { +- // Write back pinned value, it will be used to unpin this 
argument +- __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register()); +- } +- // We have the array in register, use it +- in_arg = result_reg; +- } +- +- unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); ++ unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); + c_arg++; + break; + } +@@ -2218,26 +2101,6 @@ + default : ShouldNotReachHere(); + } + +- // unpin pinned arguments +- pinned_slot = oop_handle_offset; +- if (pinned_args.length() > 0) { +- // save return value that may be overwritten otherwise. +- save_native_result(masm, ret_type, stack_slots); +- for (int index = 0; index < pinned_args.length(); index ++) { +- int i = pinned_args.at(index); +- assert(pinned_slot <= stack_slots, "overflow"); +- if (!in_regs[i].first()->is_stack()) { +- int offset = pinned_slot * VMRegImpl::stack_slot_size; +- __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset)); +- pinned_slot += VMRegImpl::slots_per_word; +- } +- // gen_pin_object handles save and restore +- // of any other clobbered registers +- gen_unpin_object(masm, thread, in_regs[i]); +- } +- restore_native_result(masm, ret_type, stack_slots); +- } +- + // Switch thread to "native transition" state before reading the synchronization state. + // This additional state is necessary because reading and testing the synchronization + // state is not atomic w.r.t. 
GC, as this scenario demonstrates: +diff -uNr openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp afu8u/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2025-05-06 10:53:44.923633667 +0800 +@@ -1351,101 +1351,6 @@ + } + } + +-// Pin incoming array argument of java critical method +-static void pin_critical_native_array(MacroAssembler* masm, +- VMRegPair reg, +- int& pinned_slot) { +- __ block_comment("pin_critical_native_array {"); +- Register tmp_reg = rax; +- +- Label is_null; +- VMRegPair tmp; +- VMRegPair in_reg = reg; +- bool on_stack = false; +- +- tmp.set_ptr(tmp_reg->as_VMReg()); +- if (reg.first()->is_stack()) { +- // Load the arg up from the stack +- move_ptr(masm, reg, tmp); +- reg = tmp; +- on_stack = true; +- } else { +- __ movptr(rax, reg.first()->as_Register()); +- } +- __ testptr(reg.first()->as_Register(), reg.first()->as_Register()); +- __ jccb(Assembler::equal, is_null); +- +- __ push(c_rarg0); +- __ push(c_rarg1); +- __ push(c_rarg2); +- __ push(c_rarg3); +-#ifdef _WIN64 +- // caller-saved registers on Windows +- __ push(r10); +- __ push(r11); +-#else +- __ push(c_rarg4); +- __ push(c_rarg5); +-#endif +- +- if (reg.first()->as_Register() != c_rarg1) { +- __ movptr(c_rarg1, reg.first()->as_Register()); +- } +- __ movptr(c_rarg0, r15_thread); +- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::pin_object))); +- +-#ifdef _WIN64 +- __ pop(r11); +- __ pop(r10); +-#else +- __ pop(c_rarg5); +- __ pop(c_rarg4); +-#endif +- __ pop(c_rarg3); +- __ pop(c_rarg2); +- __ pop(c_rarg1); +- __ pop(c_rarg0); +- +- if (on_stack) { +- __ movptr(Address(rbp, reg2offset_in(in_reg.first())), rax); +- __ bind(is_null); +- } else { +- __ movptr(reg.first()->as_Register(), rax); +- +- // save on stack for unpinning later +- __ bind(is_null); +- assert(reg.first()->is_Register(), "Must be a 
register"); +- int offset = pinned_slot * VMRegImpl::stack_slot_size; +- pinned_slot += VMRegImpl::slots_per_word; +- __ movq(Address(rsp, offset), rax); +- } +- __ block_comment("} pin_critical_native_array"); +-} +- +-// Unpin array argument of java critical method +-static void unpin_critical_native_array(MacroAssembler* masm, +- VMRegPair reg, +- int& pinned_slot) { +- __ block_comment("unpin_critical_native_array {"); +- Label is_null; +- +- if (reg.first()->is_stack()) { +- __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first()))); +- } else { +- int offset = pinned_slot * VMRegImpl::stack_slot_size; +- pinned_slot += VMRegImpl::slots_per_word; +- __ movq(c_rarg1, Address(rsp, offset)); +- } +- __ testptr(c_rarg1, c_rarg1); +- __ jccb(Assembler::equal, is_null); +- +- __ movptr(c_rarg0, r15_thread); +- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object))); +- +- __ bind(is_null); +- __ block_comment("} unpin_critical_native_array"); +-} +- + + // Check GC_locker::needs_gc and enter the runtime if it's true. This + // keeps a new JNI critical region from starting until a GC has been +@@ -2135,7 +2040,7 @@ + + const Register oop_handle_reg = r14; + +- if (is_critical_native && !Universe::heap()->supports_object_pinning()) { ++ if (is_critical_native) { + check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args, + oop_handle_offset, oop_maps, in_regs, in_sig_bt); + } +@@ -2192,11 +2097,6 @@ + // the incoming and outgoing registers are offset upwards and for + // critical natives they are offset down. 
+ GrowableArray arg_order(2 * total_in_args); +- // Inbound arguments that need to be pinned for critical natives +- GrowableArray pinned_args(total_in_args); +- // Current stack slot for storing register based array argument +- int pinned_slot = oop_handle_offset; +- + VMRegPair tmp_vmreg; + tmp_vmreg.set2(rbx->as_VMReg()); + +@@ -2244,12 +2144,6 @@ + switch (in_sig_bt[i]) { + case T_ARRAY: + if (is_critical_native) { +- // pin before unpack +- if (Universe::heap()->supports_object_pinning()) { +- assert(pinned_slot <= stack_slots, "overflow"); +- pin_critical_native_array(masm, in_regs[i], pinned_slot); +- pinned_args.append(i); +- } + unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); + c_arg++; + #ifdef ASSERT +@@ -2466,19 +2360,6 @@ + default : ShouldNotReachHere(); + } + +- // unpin pinned arguments +- pinned_slot = oop_handle_offset; +- if (pinned_args.length() > 0) { +- // save return value that may be overwritten otherwise. +- save_native_result(masm, ret_type, stack_slots); +- for (int index = 0; index < pinned_args.length(); index ++) { +- int i = pinned_args.at(index); +- assert(pinned_slot <= stack_slots, "overflow"); +- unpin_critical_native_array(masm, in_regs[i], pinned_slot); +- } +- restore_native_result(masm, ret_type, stack_slots); +- } +- + // Switch thread to "native transition" state before reading the synchronization state. + // This additional state is necessary because reading and testing the synchronization + // state is not atomic w.r.t. 
GC, as this scenario demonstrates: +diff -uNr openjdk/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.cpp afu8u/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.cpp +--- openjdk/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,552 +0,0 @@ +-/* +- * Copyright (c) 2018, 2020 Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "c1/c1_MacroAssembler.hpp" +-#include "c1/c1_LIRAssembler.hpp" +-#include "macroAssembler_x86.hpp" +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" +-#include "runtime/stubCodeGenerator.hpp" +- +-ShenandoahBarrierSetAssembler* ShenandoahBarrierSetAssembler::bsasm() { +- return ShenandoahBarrierSet::barrier_set()->bsasm(); +-} +- +-#define __ masm-> +- +-static void save_xmm_registers(MacroAssembler* masm) { +- __ subptr(rsp, 64); +- __ movdbl(Address(rsp, 0), xmm0); +- __ movdbl(Address(rsp, 8), xmm1); +- __ movdbl(Address(rsp, 16), xmm2); +- __ movdbl(Address(rsp, 24), xmm3); +- __ movdbl(Address(rsp, 32), xmm4); +- __ movdbl(Address(rsp, 40), xmm5); +- __ movdbl(Address(rsp, 48), xmm6); +- __ movdbl(Address(rsp, 56), xmm7); +-} +- +-static void restore_xmm_registers(MacroAssembler* masm) { +- __ movdbl(xmm0, Address(rsp, 0)); +- __ movdbl(xmm1, Address(rsp, 8)); +- __ movdbl(xmm2, Address(rsp, 16)); +- __ movdbl(xmm3, Address(rsp, 24)); +- __ movdbl(xmm4, Address(rsp, 32)); +- __ movdbl(xmm5, Address(rsp, 40)); +- __ movdbl(xmm6, Address(rsp, 48)); +- __ movdbl(xmm7, Address(rsp, 56)); +- __ addptr(rsp, 64); +-} +- +-void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, bool dest_uninitialized, +- Register src, Register dst, Register count) { +- +- if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahStoreValEnqueueBarrier || ShenandoahLoadRefBarrier) { +-#ifdef _LP64 +- Register thread = r15_thread; +-#else +- Register thread = rax; +- if (thread == src || thread == dst || thread == 
count) { +- thread = rbx; +- } +- if (thread == src || thread == dst || thread == count) { +- thread = rcx; +- } +- if (thread == src || thread == dst || thread == count) { +- thread = rdx; +- } +- __ push(thread); +- __ get_thread(thread); +-#endif +- assert_different_registers(src, dst, count, thread); +- +- Label done; +- // Short-circuit if count == 0. +- __ testptr(count, count); +- __ jcc(Assembler::zero, done); +- +- // Avoid runtime call when not active. +- Address gc_state(thread, in_bytes(JavaThread::gc_state_offset())); +- int flags; +- if (ShenandoahSATBBarrier && dest_uninitialized) { +- flags = ShenandoahHeap::HAS_FORWARDED; +- } else { +- flags = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING; +- } +- __ testb(gc_state, flags); +- __ jcc(Assembler::zero, done); +- +- __ pusha(); // push registers +- +-#ifdef _LP64 +- assert(src == rdi, "expected"); +- assert(dst == rsi, "expected"); +- // commented-out for generate_conjoint_long_oop_copy(), call_VM_leaf() will move +- // register into right place. 
+- // assert(count == rdx, "expected"); +- if (UseCompressedOops) { +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), +- src, dst, count); +- } else +-#endif +- { +- __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), +- src, dst, count); +- } +- +- __ popa(); +- __ bind(done); +- NOT_LP64(__ pop(thread);) +- } +-} +- +-void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address src) { +- if (!ShenandoahLoadRefBarrier) { +- return; +- } +- +- bool is_narrow = UseCompressedOops; +- +- Label heap_stable, not_cset; +- +- __ block_comment("load_reference_barrier { "); +- +- // Check if GC is active +-#ifdef _LP64 +- Register thread = r15_thread; +-#else +- Register thread = rsi; +- if (thread == dst) { +- thread = rbx; +- } +- assert_different_registers(dst, src.base(), src.index(), thread); +- __ push(thread); +- __ get_thread(thread); +-#endif +- +- Address gc_state(thread, in_bytes(JavaThread::gc_state_offset())); +- __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED); +- __ jcc(Assembler::zero, heap_stable); +- +- Register tmp1 = noreg, tmp2 = noreg; +- +- // Test for object in cset +- // Allocate temporary registers +- for (int i = 0; i < 8; i++) { +- Register r = as_Register(i); +- if (r != rsp && r != rbp && r != dst && r != src.base() && r != src.index()) { +- if (tmp1 == noreg) { +- tmp1 = r; +- } else { +- tmp2 = r; +- break; +- } +- } +- } +- assert(tmp1 != noreg, "tmp1 allocated"); +- assert(tmp2 != noreg, "tmp2 allocated"); +- assert_different_registers(tmp1, tmp2, src.base(), src.index()); +- assert_different_registers(tmp1, tmp2, dst); +- +- __ push(tmp1); +- __ push(tmp2); +- +- // Optimized cset-test +- __ movptr(tmp1, dst); +- __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint()); +- __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); +- __ movbool(tmp1, Address(tmp1, tmp2, 
Address::times_1)); +- __ testbool(tmp1); +- __ jcc(Assembler::zero, not_cset); +- +- uint num_saved_regs = 4 + (dst != rax ? 1 : 0) LP64_ONLY(+4); +- __ subptr(rsp, num_saved_regs * wordSize); +- uint slot = num_saved_regs; +- if (dst != rax) { +- __ movptr(Address(rsp, (--slot) * wordSize), rax); +- } +- __ movptr(Address(rsp, (--slot) * wordSize), rcx); +- __ movptr(Address(rsp, (--slot) * wordSize), rdx); +- __ movptr(Address(rsp, (--slot) * wordSize), rdi); +- __ movptr(Address(rsp, (--slot) * wordSize), rsi); +-#ifdef _LP64 +- __ movptr(Address(rsp, (--slot) * wordSize), r8); +- __ movptr(Address(rsp, (--slot) * wordSize), r9); +- __ movptr(Address(rsp, (--slot) * wordSize), r10); +- __ movptr(Address(rsp, (--slot) * wordSize), r11); +- // r12-r15 are callee saved in all calling conventions +-#endif +- assert(slot == 0, "must use all slots"); +- +- // Shuffle registers such that dst is in c_rarg0 and addr in c_rarg1. +-#ifdef _LP64 +- Register arg0 = c_rarg0, arg1 = c_rarg1; +-#else +- Register arg0 = rdi, arg1 = rsi; +-#endif +- if (dst == arg1) { +- __ lea(arg0, src); +- __ xchgptr(arg1, arg0); +- } else { +- __ lea(arg1, src); +- __ movptr(arg0, dst); +- } +- +- save_xmm_registers(masm); +- if (is_narrow) { +- __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), arg0, arg1); +- } else { +- __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), arg0, arg1); +- } +- restore_xmm_registers(masm); +- +-#ifdef _LP64 +- __ movptr(r11, Address(rsp, (slot++) * wordSize)); +- __ movptr(r10, Address(rsp, (slot++) * wordSize)); +- __ movptr(r9, Address(rsp, (slot++) * wordSize)); +- __ movptr(r8, Address(rsp, (slot++) * wordSize)); +-#endif +- __ movptr(rsi, Address(rsp, (slot++) * wordSize)); +- __ movptr(rdi, Address(rsp, (slot++) * wordSize)); +- __ movptr(rdx, Address(rsp, (slot++) * wordSize)); +- __ movptr(rcx, Address(rsp, (slot++) * wordSize)); +- +- if (dst != rax) { +- __ 
movptr(dst, rax); +- __ movptr(rax, Address(rsp, (slot++) * wordSize)); +- } +- +- assert(slot == num_saved_regs, "must use all slots"); +- __ addptr(rsp, num_saved_regs * wordSize); +- +- __ bind(not_cset); +- +- __ pop(tmp2); +- __ pop(tmp1); +- +- __ bind(heap_stable); +- +- __ block_comment("} load_reference_barrier"); +- +-#ifndef _LP64 +- __ pop(thread); +-#endif +-} +- +-void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { +- if (ShenandoahStoreValEnqueueBarrier) { +- storeval_barrier_impl(masm, dst, tmp); +- } +-} +- +-void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) { +- assert(ShenandoahStoreValEnqueueBarrier, "should be enabled"); +- +- if (dst == noreg) return; +- +- if (ShenandoahStoreValEnqueueBarrier) { +- // The set of registers to be saved+restored is the same as in the write-barrier above. +- // Those are the commonly used registers in the interpreter. +- __ pusha(); +- // __ push_callee_saved_registers(); +- __ subptr(rsp, 2 * Interpreter::stackElementSize); +- __ movdbl(Address(rsp, 0), xmm0); +- +-#ifdef _LP64 +- Register thread = r15_thread; +-#else +- Register thread = rcx; +- if (thread == dst || thread == tmp) { +- thread = rdi; +- } +- if (thread == dst || thread == tmp) { +- thread = rbx; +- } +- __ get_thread(thread); +-#endif +- assert_different_registers(dst, tmp, thread); +- +- __ g1_write_barrier_pre(noreg, dst, thread, tmp, true, false); +- __ movdbl(xmm0, Address(rsp, 0)); +- __ addptr(rsp, 2 * Interpreter::stackElementSize); +- //__ pop_callee_saved_registers(); +- __ popa(); +- } +-} +- +-void ShenandoahBarrierSetAssembler::load_heap_oop(MacroAssembler* masm, Register dst, Address src) { +- Register result_dst = dst; +- // Preserve src location for LRB +- if (dst == src.base() || dst == src.index()) { +- dst = rdi; +- __ push(dst); +- assert_different_registers(dst, src.base(), src.index()); +- } +- +-#ifdef _LP64 +- // 
FIXME: Must change all places where we try to load the klass. +- if (UseCompressedOops) { +- __ movl(dst, src); +- __ decode_heap_oop(dst); +- } else +-#endif +- __ movptr(dst, src); +- +- load_reference_barrier(masm, dst, src); +- +- // Move loaded oop to final destination +- if (dst != result_dst) { +- __ movptr(result_dst, dst); +- __ pop(dst); +- } +-} +- +-// Special Shenandoah CAS implementation that handles false negatives +-// due to concurrent evacuation. +-void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, +- Register res, Address addr, Register oldval, Register newval, +- bool exchange, Register tmp1, Register tmp2) { +- assert(ShenandoahCASBarrier, "Should only be used when CAS barrier is enabled"); +- assert(oldval == rax, "must be in rax for implicit use in cmpxchg"); +- assert_different_registers(oldval, newval, tmp1, tmp2); +- +- Label L_success, L_failure; +- +- // Remember oldval for retry logic below +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ movl(tmp1, oldval); +- } else +-#endif +- { +- __ movptr(tmp1, oldval); +- } +- +- // Step 1. Fast-path. +- // +- // Try to CAS with given arguments. If successful, then we are done. +- +- if (os::is_MP()) __ lock(); +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ cmpxchgl(newval, addr); +- } else +-#endif +- { +- __ cmpxchgptr(newval, addr); +- } +- __ jcc(Assembler::equal, L_success); +- +- // Step 2. CAS had failed. This may be a false negative. +- // +- // The trouble comes when we compare the to-space pointer with the from-space +- // pointer to the same object. To resolve this, it will suffice to resolve +- // the value from memory -- this will give both to-space pointers. +- // If they mismatch, then it was a legitimate failure. +- // +- // Before reaching to resolve sequence, see if we can avoid the whole shebang +- // with filters. 
+- +- // Filter: when offending in-memory value is NULL, the failure is definitely legitimate +- __ testptr(oldval, oldval); +- __ jcc(Assembler::zero, L_failure); +- +- // Filter: when heap is stable, the failure is definitely legitimate +-#ifdef _LP64 +- const Register thread = r15_thread; +-#else +- const Register thread = tmp2; +- __ get_thread(thread); +-#endif +- Address gc_state(thread, in_bytes(JavaThread::gc_state_offset())); +- __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED); +- __ jcc(Assembler::zero, L_failure); +- +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ movl(tmp2, oldval); +- __ decode_heap_oop(tmp2); +- } else +-#endif +- { +- __ movptr(tmp2, oldval); +- } +- +- // Decode offending in-memory value. +- // Test if-forwarded +- __ testb(Address(tmp2, oopDesc::mark_offset_in_bytes()), markOopDesc::marked_value); +- __ jcc(Assembler::noParity, L_failure); // When odd number of bits, then not forwarded +- __ jcc(Assembler::zero, L_failure); // When it is 00, then also not forwarded +- +- // Load and mask forwarding pointer +- __ movptr(tmp2, Address(tmp2, oopDesc::mark_offset_in_bytes())); +- __ shrptr(tmp2, 2); +- __ shlptr(tmp2, 2); +- +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ decode_heap_oop(tmp1); // decode for comparison +- } +-#endif +- +- // Now we have the forwarded offender in tmp2. +- // Compare and if they don't match, we have legitimate failure +- __ cmpptr(tmp1, tmp2); +- __ jcc(Assembler::notEqual, L_failure); +- +- // Step 3. Need to fix the memory ptr before continuing. +- // +- // At this point, we have from-space oldval in the register, and its to-space +- // address is in tmp2. Let's try to update it into memory. We don't care if it +- // succeeds or not. If it does, then the retrying CAS would see it and succeed. +- // If this fixup fails, this means somebody else beat us to it, and necessarily +- // with to-space ptr store. We still have to do the retry, because the GC might +- // have updated the reference for us. 
+- +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ encode_heap_oop(tmp2); // previously decoded at step 2. +- } +-#endif +- +- if (os::is_MP()) __ lock(); +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ cmpxchgl(tmp2, addr); +- } else +-#endif +- { +- __ cmpxchgptr(tmp2, addr); +- } +- +- // Step 4. Try to CAS again. +- // +- // This is guaranteed not to have false negatives, because oldval is definitely +- // to-space, and memory pointer is to-space as well. Nothing is able to store +- // from-space ptr into memory anymore. Make sure oldval is restored, after being +- // garbled during retries. +- // +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ movl(oldval, tmp2); +- } else +-#endif +- { +- __ movptr(oldval, tmp2); +- } +- +- if (os::is_MP()) __ lock(); +-#ifdef _LP64 +- if (UseCompressedOops) { +- __ cmpxchgl(newval, addr); +- } else +-#endif +- { +- __ cmpxchgptr(newval, addr); +- } +- if (!exchange) { +- __ jccb(Assembler::equal, L_success); // fastpath, peeking into Step 5, no need to jump +- } +- +- // Step 5. If we need a boolean result out of CAS, set the flag appropriately. +- // and promote the result. Note that we handle the flag from both the 1st and 2nd CAS. +- // Otherwise, failure witness for CAE is in oldval on all paths, and we can return. 
+- +- if (exchange) { +- __ bind(L_failure); +- __ bind(L_success); +- } else { +- assert(res != NULL, "need result register"); +- +- Label exit; +- __ bind(L_failure); +- __ xorptr(res, res); +- __ jmpb(exit); +- +- __ bind(L_success); +- __ movptr(res, 1); +- __ bind(exit); +- } +-} +- +-#undef __ +- +-#ifdef COMPILER1 +- +-#define __ ce->masm()-> +- +-void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) { +- __ bind(*stub->entry()); +- +- Label done; +- Register obj = stub->obj()->as_register(); +- Register res = stub->result()->as_register(); +- Register addr = stub->addr()->as_pointer_register(); +- Register tmp1 = stub->tmp1()->as_register(); +- Register tmp2 = stub->tmp2()->as_register(); +- assert_different_registers(obj, res, addr, tmp1, tmp2); +- +- Label slow_path; +- +- assert(res == rax, "result must arrive in rax"); +- +- if (res != obj) { +- __ mov(res, obj); +- } +- +- // Check for null. +- __ testptr(res, res); +- __ jcc(Assembler::zero, *stub->continuation()); +- +- // Check for object being in the collection set. +- __ mov(tmp1, res); +- __ shrptr(tmp1, ShenandoahHeapRegion::region_size_bytes_shift_jint()); +- __ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); +-#ifdef _LP64 +- __ movbool(tmp2, Address(tmp2, tmp1, Address::times_1)); +- __ testbool(tmp2); +-#else +- // On x86_32, C1 register allocator can give us the register without 8-bit support. +- // Do the full-register access and test to avoid compilation failures. 
+- __ movptr(tmp2, Address(tmp2, tmp1, Address::times_1)); +- __ testptr(tmp2, 0xFF); +-#endif +- __ jcc(Assembler::zero, *stub->continuation()); +- +- __ bind(slow_path); +- ce->store_parameter(res, 0); +- ce->store_parameter(addr, 1); +- __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::shenandoah_lrb_slow_id))); +- +- __ jmp(*stub->continuation()); +-} +- +-#undef __ +- +-#endif // COMPILER1 +diff -uNr openjdk/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.hpp afu8u/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.hpp +--- openjdk/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/shenandoahBarrierSetAssembler_x86.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,60 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP +-#define CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP +- +-#include "asm/macroAssembler.hpp" +-#include "memory/allocation.hpp" +-#ifdef COMPILER1 +-class LIR_Assembler; +-class ShenandoahLoadReferenceBarrierStub; +-class StubAssembler; +-class StubCodeGenerator; +-#endif +- +-class ShenandoahBarrierSetAssembler : public CHeapObj { +-private: +- +- void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp); +- +-public: +- static ShenandoahBarrierSetAssembler* bsasm(); +- +- void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp); +-#ifdef COMPILER1 +- void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); +-#endif +- +- void load_reference_barrier(MacroAssembler* masm, Register dst, Address src); +- +- void load_heap_oop(MacroAssembler* masm, Register dst, Address src); +- +- virtual void arraycopy_prologue(MacroAssembler* masm, bool dest_uninitialized, +- Register src, Register dst, Register count); +- virtual void cmpxchg_oop(MacroAssembler* masm, +- Register res, Address addr, Register oldval, Register newval, +- bool exchange, Register tmp1, Register tmp2); +-}; +- +-#endif // CPU_X86_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_X86_HPP +diff -uNr openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp afu8u/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp 2025-05-06 10:53:44.927633667 +0800 +@@ -38,14 +38,10 @@ + #include "runtime/stubCodeGenerator.hpp" + #include "runtime/stubRoutines.hpp" + #include "runtime/thread.inline.hpp" +-#include "utilities/macros.hpp" + #include "utilities/top.hpp" + #ifdef COMPILER2 + #include "opto/runtime.hpp" + #endif +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif + + 
// Declaration and definition of StubGenerator (no .hpp file). + // For a more detailed description of the stub routine structure +@@ -706,7 +702,7 @@ + // Input: + // start - starting address + // count - element count +- void gen_write_ref_array_pre_barrier(Register src, Register start, Register count, bool uninitialized_target) { ++ void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) { + assert_different_registers(start, count); + BarrierSet* bs = Universe::heap()->barrier_set(); + switch (bs->kind()) { +@@ -724,11 +720,6 @@ + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: + break; +-#if INCLUDE_ALL_GCS +- case BarrierSet::ShenandoahBarrierSet: +- ShenandoahBarrierSetAssembler::bsasm()->arraycopy_prologue(_masm, uninitialized_target, src, start, count); +- break; +-#endif + default : + ShouldNotReachHere(); + +@@ -781,7 +772,6 @@ + } + break; + case BarrierSet::ModRef: +- case BarrierSet::ShenandoahBarrierSet: + break; + default : + ShouldNotReachHere(); +@@ -947,7 +937,7 @@ + if (t == T_OBJECT) { + __ testl(count, count); + __ jcc(Assembler::zero, L_0_count); +- gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + __ mov(saved_to, to); // save 'to' + } + +@@ -1126,7 +1116,7 @@ + if (t == T_OBJECT) { + __ testl(count, count); + __ jcc(Assembler::zero, L_0_count); +- gen_write_ref_array_pre_barrier(src, dst, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized); + } + + // copy from high to low +@@ -1473,7 +1463,7 @@ + Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); + + // Copy from low to high addresses, indexed from the end of each array. 
+- gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + __ lea(end_from, end_from_addr); + __ lea(end_to, end_to_addr); + assert(length == count, ""); // else fix next line: +@@ -1494,30 +1484,12 @@ + __ BIND(L_store_element); + __ movptr(to_element_addr, elem); // store the oop + __ increment(count); // increment the count toward zero +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Shenandoah barrier is too big for 8-bit offsets to work +- __ jcc(Assembler::zero, L_do_card_marks); +- } else +-#endif + __ jccb(Assembler::zero, L_do_card_marks); + + // ======== loop entry is here ======== + __ BIND(L_load_element); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Needs GC barriers +- __ load_heap_oop(elem, from_element_addr); +- } else +-#endif + __ movptr(elem, from_element_addr); // load the oop + __ testptr(elem, elem); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Shenandoah barrier is too big for 8-bit offsets to work +- __ jcc(Assembler::zero, L_store_element); +- } else +-#endif + __ jccb(Assembler::zero, L_store_element); + + // (Could do a trick here: Remember last successful non-null +diff -uNr openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp afu8u/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp 2025-05-06 10:53:44.927633667 +0800 +@@ -38,14 +38,10 @@ + #include "runtime/stubCodeGenerator.hpp" + #include "runtime/stubRoutines.hpp" + #include "runtime/thread.inline.hpp" +-#include "utilities/macros.hpp" + #include "utilities/top.hpp" + #ifdef COMPILER2 + #include "opto/runtime.hpp" + #endif +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif + + // Declaration and definition of StubGenerator (no .hpp file). 
+ // For a more detailed description of the stub routine structure +@@ -1182,7 +1178,7 @@ + // + // Destroy no registers! + // +- void gen_write_ref_array_pre_barrier(Register src, Register addr, Register count, bool dest_uninitialized) { ++ void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { + BarrierSet* bs = Universe::heap()->barrier_set(); + switch (bs->kind()) { + case BarrierSet::G1SATBCT: +@@ -1210,11 +1206,6 @@ + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: + break; +-#if INCLUDE_ALL_GCS +- case BarrierSet::ShenandoahBarrierSet: +- ShenandoahBarrierSetAssembler::bsasm()->arraycopy_prologue(_masm, dest_uninitialized, src, addr, count); +- break; +-#endif + default: + ShouldNotReachHere(); + +@@ -1276,10 +1267,6 @@ + __ jcc(Assembler::greaterEqual, L_loop); + } + break; +-#if INCLUDE_ALL_GCS +- case BarrierSet::ShenandoahBarrierSet: +- break; +-#endif + default: + ShouldNotReachHere(); + +@@ -1897,7 +1884,7 @@ + // r9 and r10 may be used to save non-volatile registers + if (is_oop) { + __ movq(saved_to, to); +- gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + } + + // 'from', 'to' and 'count' are now valid +@@ -1985,7 +1972,7 @@ + + if (is_oop) { + // no registers are destroyed by this call +- gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + } + + assert_clean_int(count, rax); // Make sure 'count' is clean int. +@@ -2083,7 +2070,7 @@ + // Save to and count for store barrier + __ movptr(saved_count, qword_count); + // no registers are destroyed by this call +- gen_write_ref_array_pre_barrier(from, to, qword_count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); + } + + // Copy from low to high addresses. Use 'to' as scratch. 
+@@ -2170,7 +2157,7 @@ + // Save to and count for store barrier + __ movptr(saved_count, qword_count); + // No registers are destroyed by this call +- gen_write_ref_array_pre_barrier(from, to, saved_count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); + } + + __ jmp(L_copy_bytes); +@@ -2344,7 +2331,7 @@ + Address from_element_addr(end_from, count, TIMES_OOP, 0); + Address to_element_addr(end_to, count, TIMES_OOP, 0); + +- gen_write_ref_array_pre_barrier(from, to, count, dest_uninitialized); ++ gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + + // Copy from low to high addresses, indexed from the end of each array. + __ lea(end_from, end_from_addr); +diff -uNr openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp afu8u/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp +--- openjdk/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp 2025-05-06 10:53:44.927633667 +0800 +@@ -30,8 +30,8 @@ + // extend it. 
+ + enum platform_dependent_constants { +- code_size1 = 20000, // simply increase if too small (assembler will crash if too small) +- code_size2 = 35300 // simply increase if too small (assembler will crash if too small) ++ code_size1 = 9000, // simply increase if too small (assembler will crash if too small) ++ code_size2 = 22000 // simply increase if too small (assembler will crash if too small) + }; + + class x86 { +diff -uNr openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp afu8u/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp 2025-05-06 10:53:44.927633667 +0800 +@@ -746,21 +746,6 @@ + __ jmp(xreturn_path); + + __ bind(notChar); +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- Label notObj; +- +- // Needs GC barriers +- __ cmpl(rdx, atos); +- __ jcc(Assembler::notEqual, notObj); +- __ load_heap_oop(rax, field_address); +- __ jmp(xreturn_path); +- +- __ bind(notObj); +- } +-#endif +- + #ifdef ASSERT + Label okay; + __ cmpl(rdx, atos); +@@ -829,7 +814,7 @@ + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + Label slow_path; + + // Check if local 0 != NULL +@@ -853,17 +838,10 @@ + + // Load the value of the referent field. + const Address field_address(rax, referent_offset); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Needs GC barriers +- __ load_heap_oop(rax, field_address); +- } else +-#endif + __ movptr(rax, field_address); + + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. 
+- if (!UseShenandoahGC || ShenandoahSATBBarrier) { + __ get_thread(rcx); + __ g1_write_barrier_pre(noreg /* obj */, + rax /* pre_val */, +@@ -871,7 +849,6 @@ + rbx /* tmp */, + true /* tosca_save */, + true /* expand_call */); +- } + + // _areturn + __ pop(rsi); // get sender sp +diff -uNr openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp afu8u/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp 2025-05-06 10:53:44.927633667 +0800 +@@ -790,7 +790,7 @@ + const int referent_offset = java_lang_ref_Reference::referent_offset; + guarantee(referent_offset > 0, "referent offset not initialized"); + +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + Label slow_path; + // rbx: method + +@@ -815,16 +815,12 @@ + + // Generate the G1 pre-barrier code to log the value of + // the referent field in an SATB buffer. +- if (!UseShenandoahGC || ShenandoahSATBBarrier) { +- if (UseShenandoahGC) __ push_IU_state(); + __ g1_write_barrier_pre(noreg /* obj */, + rax /* pre_val */, + r15_thread /* thread */, + rbx /* tmp */, + true /* tosca_live */, + true /* expand_call */); +- if (UseShenandoahGC) __ pop_IU_state(); +- } + + // _areturn + __ pop(rdi); // get return address +diff -uNr openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp afu8u/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp 2025-05-06 10:53:44.927633667 +0800 +@@ -36,9 +36,6 @@ + #include "runtime/stubRoutines.hpp" + #include "runtime/synchronizer.hpp" + #include "utilities/macros.hpp" +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif + + #ifndef CC_INTERP + #define __ _masm-> +@@ -168,41 +165,6 @@ + + } + break; +- case 
BarrierSet::ShenandoahBarrierSet: +- { +- // flatten object address if needed +- // We do it regardless of precise because we need the registers +- if (obj.index() == noreg && obj.disp() == 0) { +- if (obj.base() != rdx) { +- __ movl(rdx, obj.base()); +- } +- } else { +- __ leal(rdx, obj); +- } +- __ get_thread(rcx); +- __ save_bcp(); +- if (ShenandoahSATBBarrier) { +- __ g1_write_barrier_pre(rdx /* obj */, +- rbx /* pre_val */, +- rcx /* thread */, +- rsi /* tmp */, +- val != noreg /* tosca_live */, +- false /* expand_call */); +- } +- +- // Do the actual store +- // noreg means NULL +- if (val == noreg) { +- __ movptr(Address(rdx, 0), NULL_WORD); +- // No post barrier for NULL +- } else { +- ShenandoahBarrierSetAssembler::bsasm()->storeval_barrier(_masm, val, rsi); +- __ movl(Address(rdx, 0), val); +- } +- __ restore_bcp(); +- +- } +- break; + #endif // INCLUDE_ALL_GCS + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: +@@ -706,14 +668,7 @@ + // rdx: array + index_check(rdx, rax); // kills rbx, + // rax,: index +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Needs GC barriers +- __ load_heap_oop(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); +- } else +-#endif + __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); +- + } + + +@@ -2348,12 +2303,6 @@ + __ cmpl(flags, atos ); + __ jcc(Assembler::notEqual, notObj); + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Needs GC barriers +- __ load_heap_oop(rax, lo); +- } else +-#endif + __ movl(rax, lo ); + __ push(atos); + if (!is_static) { +@@ -2922,16 +2871,7 @@ + case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break; + case Bytecodes::_fast_fgetfield: __ fld_s(lo); break; + case Bytecodes::_fast_dgetfield: __ fld_d(lo); break; +- case Bytecodes::_fast_agetfield: +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Needs GC barriers +- __ load_heap_oop(rax, lo); +- } else 
+-#endif +- __ movptr(rax, lo); +- __ verify_oop(rax); +- break; ++ case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break; + default: + ShouldNotReachHere(); + } +@@ -2957,12 +2897,6 @@ + if (state == itos) { + __ movl(rax, lo); + } else if (state == atos) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Needs GC barriers +- __ load_heap_oop(rax, lo); +- } else +-#endif + __ movptr(rax, lo); + __ verify_oop(rax); + } else if (state == ftos) { +@@ -3018,12 +2952,6 @@ + if (is_invokedynamic || is_invokehandle) { + Label L_no_push; + __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift)); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- // Shenandoah barrier is too large to make short jump. +- __ jcc(Assembler::zero, L_no_push); +- } else +-#endif + __ jccb(Assembler::zero, L_no_push); + // Push the appendix as a trailing parameter. + // This must be done before we get the receiver, +diff -uNr openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp afu8u/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp +--- openjdk/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp 2025-05-06 10:53:44.927633667 +0800 +@@ -36,9 +36,6 @@ + #include "runtime/stubRoutines.hpp" + #include "runtime/synchronizer.hpp" + #include "utilities/macros.hpp" +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif + + #ifndef CC_INTERP + +@@ -172,32 +169,6 @@ + } + } + break; +- case BarrierSet::ShenandoahBarrierSet: +- { +- // flatten object address if needed +- if (obj.index() == noreg && obj.disp() == 0) { +- if (obj.base() != rdx) { +- __ movq(rdx, obj.base()); +- } +- } else { +- __ leaq(rdx, obj); +- } +- if (ShenandoahSATBBarrier) { +- __ g1_write_barrier_pre(rdx /* obj */, +- rbx /* pre_val */, +- r15_thread /* thread */, +- r8 /* tmp */, +- val != noreg /* tosca_live */, +- false /* expand_call */); +- } +- if (val == noreg) { +- 
__ store_heap_oop_null(Address(rdx, 0)); +- } else { +- ShenandoahBarrierSetAssembler::bsasm()->storeval_barrier(_masm, val, r8); +- __ store_heap_oop(Address(rdx, 0), val); +- } +- } +- break; + #endif // INCLUDE_ALL_GCS + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: +diff -uNr openjdk/hotspot/src/cpu/x86/vm/x86_32.ad afu8u/hotspot/src/cpu/x86/vm/x86_32.ad +--- openjdk/hotspot/src/cpu/x86/vm/x86_32.ad 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/x86_32.ad 2025-05-06 10:53:44.935633667 +0800 +@@ -229,11 +229,6 @@ + + %} + +-source_hpp %{ +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif +-%} + + //----------SOURCE BLOCK------------------------------------------------------- + // This is a block of C++ code which provides values, functions, and +@@ -7305,7 +7300,6 @@ + %} + + instruct compareAndSwapP( rRegI res, pRegP mem_ptr, eAXRegP oldval, eCXRegP newval, eFlagsReg cr) %{ +- predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR); + match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); + effect(KILL cr, KILL oldval); + format %{ "CMPXCHG [$mem_ptr],$newval\t# If EAX==[$mem_ptr] Then store $newval into [$mem_ptr]\n\t" +@@ -7317,28 +7311,6 @@ + ins_pipe( pipe_cmpxchg ); + %} + +-instruct compareAndSwapP_shenandoah(rRegI res, +- memory mem_ptr, +- eRegP tmp1, eRegP tmp2, +- eAXRegP oldval, eCXRegP newval, +- eFlagsReg cr) +-%{ +- predicate(UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypePtr::NULL_PTR); +- match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); +- effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval); +- +- format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} +- +- ins_encode %{ +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, +- $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, +- false, // swap +- $tmp1$$Register, $tmp2$$Register 
+- ); +- %} +- ins_pipe( pipe_cmpxchg ); +-%} +- + instruct compareAndSwapI( rRegI res, pRegP mem_ptr, eAXRegI oldval, eCXRegI newval, eFlagsReg cr) %{ + match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); + effect(KILL cr, KILL oldval); +diff -uNr openjdk/hotspot/src/cpu/x86/vm/x86_64.ad afu8u/hotspot/src/cpu/x86/vm/x86_64.ad +--- openjdk/hotspot/src/cpu/x86/vm/x86_64.ad 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/cpu/x86/vm/x86_64.ad 2025-05-06 10:53:44.939633667 +0800 +@@ -526,12 +526,6 @@ + + %} + +-source_hpp %{ +-#if INCLUDE_ALL_GCS +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif +-%} +- + //----------SOURCE BLOCK------------------------------------------------------- + // This is a block of C++ code which provides values, functions, and + // definitions necessary in the rest of the architecture description +@@ -7276,7 +7270,7 @@ + rax_RegP oldval, rRegP newval, + rFlagsReg cr) + %{ +- predicate(VM_Version::supports_cx8() && (!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypePtr::NULL_PTR)); ++ predicate(VM_Version::supports_cx8()); + match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); + effect(KILL cr, KILL oldval); + +@@ -7295,27 +7289,6 @@ + ins_pipe( pipe_cmpxchg ); + %} + +-instruct compareAndSwapP_shenandoah(rRegI res, +- memory mem_ptr, +- rRegP tmp1, rRegP tmp2, +- rax_RegP oldval, rRegP newval, +- rFlagsReg cr) +-%{ +- predicate(VM_Version::supports_cx8() && UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypePtr::NULL_PTR); +- match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); +- effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval); +- +- format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} +- +- ins_encode %{ +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, +- false, // swap +- $tmp1$$Register, $tmp2$$Register +- ); +- %} +- ins_pipe( 
pipe_cmpxchg ); +-%} +- + instruct compareAndSwapL(rRegI res, + memory mem_ptr, + rax_RegL oldval, rRegL newval, +@@ -7368,7 +7341,6 @@ + memory mem_ptr, + rax_RegN oldval, rRegN newval, + rFlagsReg cr) %{ +- predicate(!UseShenandoahGC || !ShenandoahCASBarrier || n->in(3)->in(1)->bottom_type() == TypeNarrowOop::NULL_PTR); + match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); + effect(KILL cr, KILL oldval); + +@@ -7387,26 +7359,6 @@ + ins_pipe( pipe_cmpxchg ); + %} + +-instruct compareAndSwapN_shenandoah(rRegI res, +- memory mem_ptr, +- rRegP tmp1, rRegP tmp2, +- rax_RegN oldval, rRegN newval, +- rFlagsReg cr) %{ +- predicate(UseShenandoahGC && ShenandoahCASBarrier && n->in(3)->in(1)->bottom_type() != TypeNarrowOop::NULL_PTR); +- match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); +- effect(TEMP tmp1, TEMP tmp2, KILL cr, KILL oldval); +- +- format %{ "shenandoah_cas_oop $mem_ptr,$newval" %} +- +- ins_encode %{ +- ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(&_masm, $res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register, +- false, // swap +- $tmp1$$Register, $tmp2$$Register +- ); +- %} +- ins_pipe( pipe_cmpxchg ); +-%} +- + instruct xaddI_no_res( memory mem, Universe dummy, immI add, rFlagsReg cr) %{ + predicate(n->as_LoadStore()->result_not_used()); + match(Set dummy (GetAndAddI mem add)); +@@ -11215,32 +11167,6 @@ + ins_pipe(ialu_cr_reg_imm); + %} + +-instruct compB_mem_imm(rFlagsReg cr, memory mem, immI8 imm) +-%{ +- // This match is actually generic, but protect with Shenandoah flag, +- // because it is not tested upstream. 
+- predicate(UseShenandoahGC); +- match(Set cr (CmpI (LoadB mem) imm)); +- +- ins_cost(125); +- format %{ "cmpb $mem, $imm" %} +- ins_encode %{ __ cmpb($mem$$Address, $imm$$constant); %} +- ins_pipe(ialu_cr_reg_mem); +-%} +- +-instruct testB_mem_imm(rFlagsReg cr, memory mem, immI8 imm, immI0 zero) +-%{ +- // This match is actually generic, but protect with Shenandoah flag, +- // because it is not tested upstream +- predicate(UseShenandoahGC); +- match(Set cr (CmpI (AndI (LoadB mem) imm) zero)); +- +- ins_cost(125); +- format %{ "testb $mem, $imm" %} +- ins_encode %{ __ testb($mem$$Address, $imm$$constant); %} +- ins_pipe(ialu_cr_reg_mem); +-%} +- + //----------Max and Min-------------------------------------------------------- + // Min Instructions + +diff -uNr openjdk/hotspot/src/os/linux/vm/os_linux.cpp afu8u/hotspot/src/os/linux/vm/os_linux.cpp +--- openjdk/hotspot/src/os/linux/vm/os_linux.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/os/linux/vm/os_linux.cpp 2025-05-06 11:13:08.103672950 +0800 +@@ -1030,6 +1030,14 @@ + Monitor* sync_with_child = osthread->startThread_lock(); + MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag); + sync_with_child->notify(); ++ ++#if (defined SW64 && !defined ZERO) ++ // To be accessed in NativeGeneralJump::patch_verified_entry() ++ if (thread->is_Java_thread()) ++ { ++ ((JavaThread*)thread)->set_handle_wrong_method_stub(SharedRuntime::get_handle_wrong_method_stub()); ++ } ++#endif + } + + // Free Linux resources related to the OSThread +@@ -1417,6 +1425,9 @@ + #if defined(IA32) || defined(AMD64) || defined(AARCH64) + #define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229) AARCH64_ONLY(114) + #define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y) ++#elif defined(SW64) ++#define SYS_clock_getres SW64_ONLY(421) ++#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y) + #else + #warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time" + #define 
sys_clock_getres(x,y) -1 +@@ -1966,7 +1977,11 @@ + #endif + {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"}, + {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"}, ++#if defined(__sw_64) ++ {EM_SW_64, EM_SW_64, ELFCLASS64, ELFDATA2LSB, (char*)"Sw64"}, ++#elif defined(__alpha) + {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"}, ++#endif + {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"}, + {EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"}, + {EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"}, +@@ -1992,6 +2007,8 @@ + static Elf32_Half running_arch_code=EM_ARM; + #elif (defined S390) + static Elf32_Half running_arch_code=EM_S390; ++ #elif (defined __sw_64) && (defined SW64) ++ static Elf32_Half running_arch_code=EM_SW_64; + #elif (defined ALPHA) + static Elf32_Half running_arch_code=EM_ALPHA; + #elif (defined MIPSEL) +@@ -2006,7 +2023,7 @@ + static Elf32_Half running_arch_code=EM_AARCH64; + #else + #error Method os::dll_load requires that one of following is defined:\ +- IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64 ++ IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, AARCH64, SW64 + #endif + + // Identify compatability class for VM's architecture and library's architecture +@@ -2810,9 +2827,14 @@ + } + + // Define MAP_HUGETLB here so we can build HotSpot on old systems. ++#ifdef SW64 ++#define MAP_HUGETLB 0x100000 ++#else ++// Define MAP_HUGETLB here so we can build HotSpot on old systems. + #ifndef MAP_HUGETLB + #define MAP_HUGETLB 0x40000 + #endif ++#endif + + // Define MADV_HUGEPAGE here so we can build HotSpot on old systems. 
+ #ifndef MADV_HUGEPAGE +@@ -3513,7 +3535,7 @@ + + #ifndef ZERO + large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M) +- ARM_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M); ++ ARM_ONLY(2 * M) PPC_ONLY(4 * M) AARCH64_ONLY(2 * M) SW64_ONLY(8 * M); + #endif // ZERO + + FILE *fp = fopen("/proc/meminfo", "r"); +@@ -5120,7 +5142,12 @@ + Linux::fast_thread_clock_init(); + + // Allocate a single page and mark it as readable for safepoint polling ++#ifdef OPT_SAFEPOINT ++ void * p = (void *)(0x10000); ++ address polling_page = (address) ::mmap(p, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); ++#else + address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); ++#endif + guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" ); + + os::set_polling_page( polling_page ); +diff -uNr openjdk/hotspot/src/os/linux/vm/os_perf_linux.cpp afu8u/hotspot/src/os/linux/vm/os_perf_linux.cpp +--- openjdk/hotspot/src/os/linux/vm/os_perf_linux.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/os/linux/vm/os_perf_linux.cpp 2025-05-06 10:53:44.951633667 +0800 +@@ -50,6 +50,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vm_version_ext_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vm_version_ext_sw64.hpp" ++#endif + + #include + #include +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/assembler_linux_sw64.cpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/assembler_linux_sw64.cpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/assembler_linux_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/assembler_linux_sw64.cpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,65 @@ ++/* ++ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "asm/macroAssembler.hpp" ++#include "asm/macroAssembler.inline.hpp" ++#include "runtime/os.hpp" ++#include "runtime/threadLocalStorage.hpp" ++ ++void MacroAssembler::int3() { ++ li(T12, CAST_FROM_FN_PTR(address, os::breakpoint)); ++ call(T12); ++} ++ ++void MacroAssembler::get_thread(Register thread) { ++ // call pthread_getspecific ++ // void * pthread_getspecific(pthread_key_t key); ++ ++ pushad(); ++ if (Assembler::is_simm16(ThreadLocalStorage::thread_index())) { ++ ldi(A0, R0, ThreadLocalStorage::thread_index()); ++ } else { ++ li(A0, ThreadLocalStorage::thread_index()); ++ } ++ call(CAST_FROM_FN_PTR(address, pthread_getspecific), relocInfo::runtime_call_type); ++ ++ int off;//depending on the sd sequence in pushad(); ++ ++ if (thread->encoding() >= V0->encoding() && thread->encoding() <= T7->encoding()) ++ { ++ off = 26 - thread->encoding(); ++ stl(V0, SP, off * wordSize); //sd V0 to stack, thus after popad(), thread would not be pop. 
++ } ++ else if (thread->encoding() >= FP->encoding() && thread->encoding() <= GP->encoding()) ++ { ++ off = 32 - thread->encoding(); ++ stl(V0, SP, off * wordSize); //sd V0 to stack, thus after popad(), thread would not be pop. ++ } ++ else ++ { ++ move(thread, V0); //thread does not push in stack. ++ } ++ popad(); ++} +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/atomic_linux_sw64.inline.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/atomic_linux_sw64.inline.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/atomic_linux_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/atomic_linux_sw64.inline.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,230 @@ ++/* ++ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_ATOMIC_LINUX_SW64_INLINE_HPP ++#define OS_CPU_LINUX_SW64_VM_ATOMIC_LINUX_SW64_INLINE_HPP ++ ++#include "orderAccess_linux_sw64.inline.hpp" ++#include "runtime/atomic.hpp" ++#include "runtime/os.hpp" ++#include "vm_version_sw64.hpp" ++ ++// Implementation of class atomic ++ ++inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } ++inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } ++inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } ++inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } ++inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } ++inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } ++ ++inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } ++inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } ++inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } ++inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } ++inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } ++inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void**)dest = store_value; } ++ ++inline jlong Atomic::load (volatile jlong* src) { return *src; } ++ ++///////////implementation of Atomic::add*///////////////// ++inline jint Atomic::add (jint add_value, volatile jint* dest) { ++ jint __ret, __tmp; ++ jlong __addr; ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldw %[__tmp],0(%[__addr])\n\t" ++ " ldi %[__ret],1\n\t" ++ " wr_f %[__ret]\n\t" ++ " addw %[__tmp],%[__val],%[__ret]\n\t" ++ " mov %[__ret],%[__tmp]\n\t" ++ " .align 3\n\t" ++ " lstw %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq 
%[__tmp],1b\n\t" ++ " \n\t" ++ : [__ret]"=&r" (__ret), [__addr]"=&r"(__addr), [__tmp]"=&r"(__tmp) ++ : [__dest] "m" (*(volatile jint*)dest), [__val] "Ir" (add_value) ++ : "memory" ); ++ ++ return __ret; ++} ++ ++inline intptr_t Atomic::add_ptr (intptr_t add_value, volatile intptr_t* dest) { ++ jlong __ret, __tmp; ++ jlong __addr; ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldl %[__tmp],0(%[__addr])\n\t" ++ " ldi %[__ret],1\n\t" ++ " wr_f %[__ret]\n\t" ++ " addl %[__tmp],%[__val],%[__ret]\n\t" ++ " mov %[__ret],%[__tmp]\n\t" ++ " .align 3\n\t" ++ " lstl %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq %[__tmp],1b\n\t" ++ " \n\t" ++ : [__ret]"=&r" (__ret), [__addr]"=&r"(__addr), [__tmp]"=&r"(__tmp) ++ : [__dest] "m" (*(volatile jlong*)dest), [__val] "Ir"(add_value) ++ : "memory" ); ++ ++ return __ret; ++} ++ ++inline void* Atomic::add_ptr (intptr_t add_value, volatile void* dest) { ++ return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest); ++} ++ ++///////////implementation of Atomic::inc*///////////////// ++inline void Atomic::inc (volatile jint* dest) { (void)add(1, dest); } ++inline void Atomic::inc_ptr (volatile intptr_t* dest) { (void)add_ptr(1, dest); } ++inline void Atomic::inc_ptr (volatile void* dest) { (void)inc_ptr((volatile intptr_t*)dest); } ++ ++///////////implementation of Atomic::dec*///////////////// ++inline void Atomic::dec (volatile jint* dest) { (void)add(-1, dest); } ++inline void Atomic::dec_ptr (volatile intptr_t* dest) { (void)add_ptr(-1, dest); } ++inline void Atomic::dec_ptr (volatile void* dest) { (void)dec_ptr((volatile intptr_t*)dest); } ++ ++ ++///////////implementation of Atomic::xchg*///////////////// ++inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { ++ jint __ret, __tmp; ++ jlong __addr; ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldw %[__ret],0(%[__addr])\n\t" ++ " ldi %[__tmp],1\n\t" ++ " wr_f %[__tmp]\n\t" ++ " mov %[__val],%[__tmp]\n\t" 
++ " .align 3\n\t" ++ " lstw %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq %[__tmp],1b\n\t" ++ " \n\t" ++ : [__ret]"=&r" (__ret), [__addr]"=&r"(__addr), [__tmp]"=&r"(__tmp) ++ : [__dest] "m" (*(volatile jint*)dest), [__val] "Ir"(exchange_value) /* _val can not be constant in stl */ ++ : "memory" ); ++ return __ret; ++} ++ ++inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { ++ jlong __ret, __tmp; ++ jlong __addr; ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldl %[__ret],0(%[__addr])\n\t" ++ " ldi %[__tmp],1\n\t" ++ " wr_f %[__tmp]\n\t" ++ " mov %[__val],%[__tmp]\n\t" ++ " .align 3\n\t" ++ " lstl %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq %[__tmp],1b\n\t" ++ " \n\t" ++ : [__ret]"=&r" (__ret), [__addr]"=&r"(__addr), [__tmp]"=&r"(__tmp) ++ : [__dest] "m" (*(volatile jlong*)dest), [__val] "Ir"(exchange_value) /* _val can not be constant in stl */ ++ : "memory" ); ++ ++ return __ret; ++} ++ ++inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { ++ return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); ++} ++ ++///////////implementation of Atomic::cmpxchg*///////////////// ++inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { ++ jint __prev, __cmp; ++ jint __tmp; ++ jlong __addr; ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldw %[__prev],0(%[__addr])\n\t" ++ " cmpeq %[__prev],%[__old],%[__cmp]\n\t" ++ " wr_f %[__cmp]\n\t" ++ " mov %[__val],%[__tmp]\n\t" ++ " .align 3\n\t" ++ " lstw %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq %[__cmp],2f\n\t" ++ " beq %[__tmp],1b\n\t" ++ "2: \n\t" ++ : [__prev]"=&r" (__prev), [__addr]"=&r" (__addr), [__cmp] "=&r" (__cmp), [__tmp] "=&r" (__tmp) ++ : [__dest] "m" (*(volatile jint*)dest), [__old]"Ir" (compare_value), [__val]"Ir" (exchange_value) /* _val can not be constant in stl */ ++ : "memory" ); ++ return __prev; ++} ++ ++inline 
jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) { ++ jlong __prev, __cmp; ++ jlong __tmp; ++ jlong __addr; ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldl %[__prev],0(%[__addr])\n\t" ++ " cmpeq %[__prev],%[__old],%[__cmp]\n\t" ++ " wr_f %[__cmp]\n\t" ++ " mov %[__val],%[__tmp]\n\t" ++ " .align 3\n\t" ++ " lstl %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq %[__cmp],2f\n\t" ++ " beq %[__tmp],1b\n\t" ++ "2: \n\t" ++ : [__prev]"=&r" (__prev), [__addr]"=&r" (__addr), [__cmp] "=&r" (__cmp), [__tmp] "=&r" (__tmp) ++ : [__dest] "m" (*(volatile jlong*)dest), [__old]"Ir" (compare_value), [__val]"Ir" (exchange_value) /* _val can not be constant in stl */ ++ : "memory" ); ++ ++ return __prev; ++} ++ ++inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) { ++ jlong __prev, __cmp; ++ jlong __tmp, __addr; ++ ++ __asm__ __volatile__ ( ++ "1: ldi %[__addr],%[__dest]\n\t" ++ " lldl %[__prev],0(%[__addr])\n\t" ++ " cmpeq %[__prev],%[__old],%[__cmp]\n\t" ++ " wr_f %[__cmp]\n\t" ++ " mov %[__val],%[__tmp]\n\t" ++ " .align 3\n\t" ++ " lstl %[__tmp],0(%[__addr])\n\t" ++ " rd_f %[__tmp]\n\t" ++ " beq %[__cmp],2f\n\t" ++ " beq %[__tmp],1b\n\t" ++ "2: \n\t" ++ : [__prev]"=&r" (__prev), [__addr]"=&r" (__addr), [__cmp] "=&r" (__cmp), [__tmp] "=&r" (__tmp) ++ : [__dest] "m" (*(volatile intptr_t*)dest), [__old]"Ir" (compare_value), [__val]"Ir" (exchange_value) /* _val can not be constant in stl */ ++ : "memory" ); ++ ++ return __prev; ++} ++ ++inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { ++ return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value); ++} ++ ++#endif // OS_CPU_LINUX_SW64_VM_ATOMIC_LINUX_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/bytes_linux_sw64.inline.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/bytes_linux_sw64.inline.hpp +--- 
openjdk/hotspot/src/os_cpu/linux_sw64/vm/bytes_linux_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/bytes_linux_sw64.inline.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,36 @@ ++/* ++ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_BYTES_LINUX_SW64_INLINE_HPP ++#define OS_CPU_LINUX_SW64_VM_BYTES_LINUX_SW64_INLINE_HPP ++ ++#include ++ ++// Efficient swapping of data bytes from Java byte ++// ordering to native byte ordering and vice versa. 
++inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); } ++inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); } ++inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); } ++ ++#endif // OS_CPU_LINUX_SW64_VM_BYTES_LINUX_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/copy_linux_sw64.inline.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/copy_linux_sw64.inline.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/copy_linux_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/copy_linux_sw64.inline.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,124 @@ ++/* ++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_COPY_LINUX_SW64_INLINE_HPP ++#define OS_CPU_LINUX_SW64_VM_COPY_LINUX_SW64_INLINE_HPP ++ ++static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { ++ (void)memmove(to, from, count * HeapWordSize); ++} ++ ++static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { ++ switch (count) { ++ case 8: to[7] = from[7]; ++ case 7: to[6] = from[6]; ++ case 6: to[5] = from[5]; ++ case 5: to[4] = from[4]; ++ case 4: to[3] = from[3]; ++ case 3: to[2] = from[2]; ++ case 2: to[1] = from[1]; ++ case 1: to[0] = from[0]; ++ case 0: break; ++ default: ++ (void)memcpy(to, from, count * HeapWordSize); ++ break; ++ } ++} ++ ++static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { ++ switch (count) { ++ case 8: to[7] = from[7]; ++ case 7: to[6] = from[6]; ++ case 6: to[5] = from[5]; ++ case 5: to[4] = from[4]; ++ case 4: to[3] = from[3]; ++ case 3: to[2] = from[2]; ++ case 2: to[1] = from[1]; ++ case 1: to[0] = from[0]; ++ case 0: break; ++ default: ++ while (count-- > 0) { ++ *to++ = *from++; ++ } ++ break; ++ } ++} ++ ++static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { ++ pd_conjoint_words(from, to, count); ++} ++ ++static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { ++ pd_disjoint_words(from, to, count); ++} ++ ++static void pd_conjoint_bytes(void* from, void* to, size_t count) { ++ (void)memmove(to, from, count); ++} ++ ++static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { ++ pd_conjoint_bytes(from, to, count); ++} ++ ++static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { ++ copy_conjoint_atomic(from, to, count); ++} ++ ++static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { ++ copy_conjoint_atomic(from, to, count); ++} ++ ++static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { ++ copy_conjoint_atomic(from, to, 
count); ++} ++ ++static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { ++ //assert(!UseCompressedOops, "foo!"); ++ assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size"); ++ copy_conjoint_atomic(from, to, count); ++} ++ ++static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { ++ pd_conjoint_bytes_atomic(from, to, count); ++} ++ ++static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { ++ pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); ++} ++ ++static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { ++ pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); ++} ++ ++static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { ++ pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); ++} ++ ++static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { ++ //assert(!UseCompressedOops, "foo!"); ++ assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); ++ pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); ++} ++ ++#endif // OS_CPU_LINUX_SW64_VM_COPY_LINUX_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/globals_linux_sw64.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/globals_linux_sw64.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/globals_linux_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/globals_linux_sw64.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,42 @@ ++/* ++ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_GLOBALS_LINUX_SW64_HPP ++#define OS_CPU_LINUX_SW64_VM_GLOBALS_LINUX_SW64_HPP ++ ++// Sets the default values for platform dependent flags used by the runtime system. ++// (see globals.hpp) ++ ++define_pd_global(bool, DontYieldALot, false); ++define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default ++define_pd_global(intx, VMThreadStackSize, 1024); ++ ++define_pd_global(intx, CompilerThreadStackSize, 0); ++ ++define_pd_global(uintx,JVMInvokeMethodSlack, 8192); ++ ++// Used on 64 bit platforms for UseCompressedOops base address ++define_pd_global(uintx,HeapBaseMinAddress, 2*G); ++ ++#endif // OS_CPU_LINUX_SW64_VM_GLOBALS_LINUX_SW64_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.ad afu8u/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.ad +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.ad 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.ad 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,156 @@ ++// ++// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++// ++// This code is free software; you can redistribute it and/or modify it ++// under the terms of the GNU General Public License version 2 only, as ++// published by the Free Software Foundation. ++// ++// This code is distributed in the hope that it will be useful, but WITHOUT ++// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++// version 2 for more details (a copy is included in the LICENSE file that ++// accompanied this code). ++// ++// You should have received a copy of the GNU General Public License version ++// 2 along with this work; if not, write to the Free Software Foundation, ++// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++// ++// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++// or visit www.oracle.com if you need additional information or have any ++// questions. ++// ++// ++ ++// SW64 Linux Architecture Description File ++ ++//----------OS-DEPENDENT ENCODING BLOCK---------------------------------------- ++// This block specifies the encoding classes used by the compiler to ++// output byte streams. Encoding classes generate functions which are ++// called by Machine Instruction Nodes in order to generate the bit ++// encoding of the instruction. Operands specify their base encoding ++// interface with the interface keyword. There are currently ++// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, & ++// COND_INTER. REG_INTER causes an operand to generate a function ++// which returns its register number when queried. CONST_INTER causes ++// an operand to generate a function which returns the value of the ++// constant when queried. MEMORY_INTER causes an operand to generate ++// four functions which return the Base Register, the Index Register, ++// the Scale Value, and the Offset Value of the operand when queried. 
++// COND_INTER causes an operand to generate six functions which return ++// the encoding code (ie - encoding bits for the instruction) ++// associated with each basic boolean condition for a conditional ++// instruction. Instructions specify two basic values for encoding. ++// They use the ins_encode keyword to specify their encoding class ++// (which must be one of the class names specified in the encoding ++// block), and they use the opcode keyword to specify, in order, their ++// primary, secondary, and tertiary opcode. Only the opcode sections ++// which a particular instruction needs for encoding need to be ++// specified. ++ ++encode %{ ++ // Build emit functions for each basic byte or larger field in the intel ++ // encoding scheme (opcode, rm, sib, immediate), and call them from C++ ++ // code in the enc_class source block. Emit functions will live in the ++ // main source block for now. In future, we can generalize this by ++ // adding a syntax that specifies the sizes of fields in an order, ++ // so that the adlc can build the emit functions automagically ++ ++ enc_class Java_To_Runtime(method meth) ++ %{ ++ %} ++ ++ enc_class linux_breakpoint ++ %{ ++ MacroAssembler* masm = new MacroAssembler(&cbuf); ++ masm->call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type); ++ %} ++ ++ enc_class call_epilog ++ %{ ++ if (VerifyStackAtCalls) { ++ // Check that stack depth is unchanged: find majik cookie on stack ++ int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-2)); ++ if(framesize >= 128) { ++ emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood ++ emit_d8(cbuf,0xBC); ++ emit_d8(cbuf,0x24); ++ emit_d32(cbuf,framesize); // Find majik cookie from ESP ++ emit_d32(cbuf, 0xbadb100d); ++ } ++ else { ++ emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood ++ emit_d8(cbuf,0x7C); ++ emit_d8(cbuf,0x24); ++ emit_d8(cbuf,framesize); // Find majik cookie from ESP ++ emit_d32(cbuf, 0xbadb100d); ++ } ++ // jmp EQ around INT3 ++ 
// QQQ TODO ++ const int jump_around = 5; // size of call to breakpoint, 1 for CC ++ emit_opcode(cbuf, 0x74); ++ emit_d8(cbuf, jump_around); ++ // QQQ temporary ++ emit_break(cbuf); ++ // Die if stack mismatch ++ // emit_opcode(cbuf,0xCC); ++ } ++ %} ++ ++%} ++ ++// INSTRUCTIONS -- Platform dependent ++ ++//----------OS and Locking Instructions---------------------------------------- ++ ++// This name is KNOWN by the ADLC and cannot be changed. ++// The ADLC forces a 'TypeRawPtr::BOTTOM' output type ++// for this guy. ++instruct tlsLoadP(eAXRegP dst, eFlagsReg cr) %{ ++%{ ++ match(Set dst (ThreadLocal)); ++ effect(DEF dst, KILL cr); ++ ++ format %{ "MOV EAX, Thread::current()" %} ++ ins_encode( linux_tlsencode(dst) ); ++ ins_pipe( ialu_reg_fat ); ++%} ++ ++// Die now ++instruct ShouldNotReachHere() ++%{ ++ match(Halt); ++ ++ // Use the following format syntax ++ format %{ "int3\t# ShouldNotReachHere" %} ++ // QQQ TODO for now call breakpoint ++ // opcode(0xCC); ++ // ins_encode(Opc); ++ ins_encode(linux_breakpoint); ++ ins_pipe(pipe_slow); ++%} ++ ++ ++// Platform dependent source ++ ++source ++%{ ++// emit an interrupt that is caught by the debugger ++void emit_break(CodeBuffer& cbuf) { ++ // Debugger doesn't really catch this but best we can do so far QQQ ++#define __ masm. 
++ __ lui(T12, Assembler::split_high((int)os::breakpoint)); ++ __ addiu(T12, T12, Assembler::split_low((int)os::breakpoint)); ++ __ jalr(T12); ++} ++ ++void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { ++ emit_break(cbuf); ++} ++ ++uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const { ++ //return 5; ++ return 16; ++} ++ ++%} +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.s afu8u/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.s +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.s 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/linux_sw64.s 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,24 @@ ++# ++# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. ++# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++# ++# This code is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License version 2 only, as ++# published by the Free Software Foundation. ++# ++# This code is distributed in the hope that it will be useful, but WITHOUT ++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++# version 2 for more details (a copy is included in the LICENSE file that ++# accompanied this code). ++# ++# You should have received a copy of the GNU General Public License version ++# 2 along with this work; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++# ++# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++# or visit www.oracle.com if you need additional information or have any ++# questions. 
++# ++ ++ +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/orderAccess_linux_sw64.inline.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/orderAccess_linux_sw64.inline.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/orderAccess_linux_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/orderAccess_linux_sw64.inline.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,110 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_ORDERACCESS_LINUX_SW64_INLINE_HPP ++#define OS_CPU_LINUX_SW64_VM_ORDERACCESS_LINUX_SW64_INLINE_HPP ++ ++#include "runtime/atomic.hpp" ++#include "runtime/orderAccess.hpp" ++#include "vm_version_sw64.hpp" ++ ++#define inlasm_sync() __asm__ __volatile__ ("memb" : : : "memory"); ++ ++inline void OrderAccess::loadload() { inlasm_sync(); } ++inline void OrderAccess::storestore() { inlasm_sync(); } ++inline void OrderAccess::loadstore() { inlasm_sync(); } ++inline void OrderAccess::storeload() { inlasm_sync(); } ++ ++inline void OrderAccess::acquire() { inlasm_sync(); } ++inline void OrderAccess::release() { inlasm_sync(); } ++inline void OrderAccess::fence() { inlasm_sync(); } ++ ++//implementation of load_acquire ++inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte data = *p; acquire(); return data; } ++inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort data = *p; acquire(); return data; } ++inline jint OrderAccess::load_acquire(volatile jint* p) { jint data = *p; acquire(); return data; } ++inline jlong OrderAccess::load_acquire(volatile jlong* p) { jlong tmp = *p; acquire(); return tmp; } ++inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte data = *p; acquire(); return data; } ++inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort data = *p; acquire(); return data; } ++inline juint OrderAccess::load_acquire(volatile juint* p) { juint data = *p; acquire(); return data; } ++inline julong OrderAccess::load_acquire(volatile julong* p) { julong tmp = *p; acquire(); return tmp; } ++inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat data = *p; acquire(); return data; } ++inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { jdouble tmp = *p; acquire(); return tmp; } ++ ++//implementation of load_ptr_acquire ++inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { intptr_t data = *p; acquire(); return data; } ++inline 
void* OrderAccess::load_ptr_acquire(volatile void* p) { void *data = *(void* volatile *)p; acquire(); return data; } ++inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { void *data = *(void* volatile *)p; acquire(); return data; } ++ ++//implementation of release_store ++inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jshort* p, jshort v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jint* p, jint v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jlong* p, jlong v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jushort* p, jushort v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile juint* p, juint v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile julong* p, julong v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { release(); *p = v; } ++inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release(); *p = v; } ++ ++//implementation of release_store_ptr ++inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { release(); *p = v; } ++inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { release(); *(void* volatile *)p = v; } ++ ++//implementation of store_fence ++inline void OrderAccess::store_fence(jbyte* p, jbyte v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jshort* p, jshort v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jint* p, jint v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jlong* p, jlong v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jubyte* p, jubyte v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jushort* p, jushort v) { *p = v; fence(); } 
++inline void OrderAccess::store_fence(juint* p, juint v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(julong* p, julong v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jfloat* p, jfloat v) { *p = v; fence(); } ++inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); } ++ ++//implementation of store_ptr_fence ++inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; fence(); } ++inline void OrderAccess::store_ptr_fence(void** p, void* v) { *p = v; fence(); } ++ ++//implementation of release_store_fence ++inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jint* p, jint v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v) { release_store(p, v); fence(); } ++inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store(p, v); fence(); } ++ ++//implementaion of release_store_ptr_fence ++inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { release_store_ptr(p, v); fence(); } ++inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) { release_store_ptr(p, v); fence(); } ++ ++#undef inlasm_sync ++ 
++#endif // OS_CPU_LINUX_SW64_VM_ORDERACCESS_LINUX_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.cpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.cpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.cpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,847 @@ ++/* ++ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++// no precompiled headers ++#include "asm/macroAssembler.hpp" ++#include "classfile/classLoader.hpp" ++#include "classfile/systemDictionary.hpp" ++#include "classfile/vmSymbols.hpp" ++#include "code/icBuffer.hpp" ++#include "code/vtableStubs.hpp" ++#include "interpreter/interpreter.hpp" ++#include "jvm_linux.h" ++#include "memory/allocation.inline.hpp" ++#include "mutex_linux.inline.hpp" ++#include "os_share_linux.hpp" ++#include "prims/jniFastGetField.hpp" ++#include "prims/jvm.h" ++#include "prims/jvm_misc.hpp" ++#include "runtime/arguments.hpp" ++#include "runtime/extendedPC.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/interfaceSupport.hpp" ++#include "runtime/java.hpp" ++#include "runtime/javaCalls.hpp" ++#include "runtime/mutexLocker.hpp" ++#include "runtime/osThread.hpp" ++#include "runtime/sharedRuntime.hpp" ++#include "runtime/stubRoutines.hpp" ++#include "runtime/thread.inline.hpp" ++#include "runtime/timer.hpp" ++#include "utilities/events.hpp" ++#include "utilities/vmError.hpp" ++#include "utilities/debug.hpp" ++#include "compiler/disassembler.hpp" ++// put OS-includes here ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++# include ++ ++//not sure ++#define REG_SP 30 //// #define REG_SP 29 ++#define REG_FP 15 //// #define REG_FP 30 ++#define REG_RA 26 ++ ++ ++address __attribute__((always_inline)) os::current_stack_pointer() { ++ register void *ssp; ++ __asm__ (" mov $sp,%0\n":"=r"(ssp)); ++ ++ return (address) (char *)ssp; ++} ++ ++char* os::non_memory_address_word() { ++ // Must never look like an address returned by reserve_memory, ++ // even in its subfields (as defined by the CPU immediate fields, ++ // if the CPU splits constants across multiple instructions). 
++ ++ return (char*) -1; ++} ++ ++void os::initialize_thread(Thread* thr) { ++// Nothing to do. ++} ++ ++address os::Linux::ucontext_get_pc(ucontext_t * uc) { ++ //return (address)uc->uc_mcontext.gregs[REG_PC]; ++ return (address)uc->uc_mcontext.sc_pc; ++} ++ ++intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) { ++ return (intptr_t*)uc->uc_mcontext.sc_regs[REG_SP]; ++} ++ ++intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) { ++ return (intptr_t*)uc->uc_mcontext.sc_regs[REG_FP]; ++} ++ ++address os::ucontext_get_ra(const ucontext_t * uc) { ++ return (address)uc->uc_mcontext.sc_regs[REG_RA]; ++} ++ ++// For Forte Analyzer AsyncGetCallTrace profiling support - thread ++// is currently interrupted by SIGPROF. ++// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal ++// frames. Currently we don't do that on Linux, so it's the same as ++// os::fetch_frame_from_context(). ++ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread, ++ ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { ++ ++ assert(thread != NULL, "just checking"); ++ assert(ret_sp != NULL, "just checking"); ++ assert(ret_fp != NULL, "just checking"); ++ ++ return os::fetch_frame_from_context(uc, ret_sp, ret_fp); ++} ++ ++ExtendedPC os::fetch_frame_from_context(void* ucVoid, ++ intptr_t** ret_sp, intptr_t** ret_fp) { ++ ++ ExtendedPC epc; ++ ucontext_t* uc = (ucontext_t*)ucVoid; ++ ++ if (uc != NULL) { ++ epc = ExtendedPC(os::Linux::ucontext_get_pc(uc)); ++ if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc); ++ if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc); ++ } else { ++ // construct empty ExtendedPC for return value checking ++ epc = ExtendedPC(NULL); ++ if (ret_sp) *ret_sp = (intptr_t *)NULL; ++ if (ret_fp) *ret_fp = (intptr_t *)NULL; ++ } ++ ++ return epc; ++} ++ ++frame os::fetch_frame_from_context(void* ucVoid) { ++ intptr_t* sp; ++ intptr_t* fp; ++ ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); ++ frame ret_frame(sp, fp, epc.pc()); ++ 
ret_frame.fixRa(ucVoid); ++ return ret_frame; ++} ++ ++// By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get ++// turned off by -fomit-frame-pointer, ++frame os::get_sender_for_C_frame(frame* fr) { ++ return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); ++} ++ ++//intptr_t* _get_previous_fp() { ++intptr_t* __attribute__((always_inline)) os::get_previous_fp() { ++ register void *sfp; ++ __asm__ (" mov $fp,%0\n":"=r"(sfp)); ++ ++ return (intptr_t *)sfp; ++} ++ ++ ++frame os::current_frame() { ++ intptr_t* fp = (intptr_t*)get_previous_fp(); ++ frame myframe((intptr_t*)os::current_stack_pointer(), ++ (intptr_t*)fp, ++ CAST_FROM_FN_PTR(address, os::current_frame)); ++ myframe.init_sender_for_c_frame(CAST_FROM_FN_PTR(address, os::current_frame)); ++ if (os::is_first_C_frame(&myframe)) { ++ // stack is not walkable ++ return frame(); ++ } else { ++ return os::get_sender_for_C_frame(&myframe); ++ } ++} ++ ++//x86 add 2 new assemble function here! ++extern "C" int ++JVM_handle_linux_signal(int sig, ++ siginfo_t* info, ++ void* ucVoid, ++ int abort_if_unrecognized) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("Signal: signo=%d, sicode=%d, sierrno=%d, siaddr=%lx", ++ info->si_signo, ++ info->si_code, ++ info->si_errno, ++ info->si_addr); ++ if (info->si_signo == 4) // the pc for SIGILL is (info->si_addr)) for SW, but mips and aarch64 are just info->si_addr ++ tty->print_cr("SIGILL 0x%08x", *((int*)(info->si_addr)-1)); ++#endif ++ ++ ucontext_t* uc = (ucontext_t*) ucVoid; ++ ++ Thread* t = ThreadLocalStorage::get_thread_slow(); ++ // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away ++ // (no destructors can be run) ++ os::ThreadCrashProtection::check_crash_protection(sig, t); ++ SignalHandlerMark shm(t); ++ ++ // Note: it's not uncommon that JNI code uses signal/sigset to install ++ // then restore certain signal handler (e.g. 
to temporarily block SIGPIPE, ++ // or have a SIGILL handler when detecting CPU type). When that happens, ++ // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To ++ // avoid unnecessary crash when libjsig is not preloaded, try handle signals ++ // that do not require siginfo/ucontext first. ++ ++ if (sig == SIGPIPE/* || sig == SIGXFSZ*/) { ++ // allow chained handler to go first ++ if (os::Linux::chained_handler(sig, info, ucVoid)) { ++ return true; ++ } else { ++ if (PrintMiscellaneous && (WizardMode || Verbose)) { ++ warning("Ignoring SIGPIPE - see bug 4229104"); ++ } ++ return true; ++ } ++ } ++ ++ JavaThread* thread = NULL; ++ VMThread* vmthread = NULL; ++ if (os::Linux::signal_handlers_are_installed) { ++ if (t != NULL ){ ++ if(t->is_Java_thread()) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("this thread is a java thread"); ++#endif ++ thread = (JavaThread*)t; ++ } ++ else if(t->is_VM_thread()){ ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("this thread is a VM thread\n"); ++#endif ++ vmthread = (VMThread *)t; ++ } ++ } ++ } ++ ++ // decide if this trap can be handled by a stub ++ address stub = NULL; ++ address pc = NULL; ++ ++ pc = (address) os::Linux::ucontext_get_pc(uc); ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("pc=%lx", pc); ++ os::print_context(tty, uc); ++#endif ++ //%note os_trap_1 ++ if (info != NULL && uc != NULL && thread != NULL) { ++ pc = (address) os::Linux::ucontext_get_pc(uc); ++ // Handle ALL stack overflow variations here ++ if (sig == SIGSEGV) { ++ address addr = (address) info->si_addr; ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("handle all stack overflow variations: "); ++ /*tty->print("addr = %lx, stack base = %lx, stack top = %lx\n", ++ addr, ++ thread->stack_base(), ++ thread->stack_base() - thread->stack_size()); ++ */ ++#endif ++ ++ // check if fault address is within thread stack ++ if (addr < thread->stack_base() && ++ addr >= thread->stack_base() - thread->stack_size()) { ++ // stack overflow ++#ifdef 
PRINT_SIGNAL_HANDLE ++ tty->print("stack exception check \n"); ++#endif ++ if (thread->in_stack_yellow_zone(addr)) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("exception addr is in yellow zone\n"); ++#endif ++ thread->disable_stack_yellow_zone(); ++ if (thread->thread_state() == _thread_in_Java) { ++ // Throw a stack overflow exception. Guard pages will be reenabled ++ // while unwinding the stack. ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("this thread is in java\n"); ++#endif ++ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); ++ } else { ++ // Thread was in the vm or native code. Return and try to finish. ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("this thread is in vm or native codes and return\n"); ++#endif ++ return 1; ++ } ++ } else if (thread->in_stack_red_zone(addr)) { ++ // Fatal red zone violation. Disable the guard pages and fall through ++ // to handle_unexpected_exception way down below. ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("exception addr is in red zone\n"); ++#endif ++ thread->disable_stack_red_zone(); ++ tty->print_raw_cr("An irrecoverable stack overflow has occurred."); ++ ++ // This is a likely cause, but hard to verify. Let's just print ++ // it as a hint. ++ tty->print_raw_cr("Please check if any of your loaded .so files has " ++ "enabled executable stack (see man page execstack(8))"); ++ } else { ++ // Accessing stack address below sp may cause SEGV if current ++ // thread has MAP_GROWSDOWN stack. This should only happen when ++ // current thread was created by user code with MAP_GROWSDOWN flag ++ // and then attached to VM. See notes in os_linux.cpp. 
++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("exception addr is neither in yellow zone nor in the red one\n"); ++#endif ++ if (thread->osthread()->expanding_stack() == 0) { ++ thread->osthread()->set_expanding_stack(); ++ if (os::Linux::manually_expand_stack(thread, addr)) { ++ thread->osthread()->clear_expanding_stack(); ++ return 1; ++ } ++ thread->osthread()->clear_expanding_stack(); ++ } else { ++ fatal("recursive segv. expanding stack."); ++ } ++ } ++ } //addr < ++ } //sig == SIGSEGV ++ ++ if (thread->thread_state() == _thread_in_Java) { ++ // Java thread running in Java code => find exception handler if any ++ // a fault inside compiled code, the interpreter, or a stub ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("java thread running in java code\n"); ++#endif ++ ++ // Handle signal from NativeJump::patch_verified_entry(). ++// if (sig == SIGILL & nativeInstruction_at(pc - 4)->is_sigill_zombie_not_entrant()) { ++ if (sig == SIGILL && (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant() || nativeInstruction_at(pc - 4)->is_sigill_zombie_not_entrant())) { ++ ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("verified entry = %lx, sig=%d", nativeInstruction_at(pc), sig); ++#endif ++ stub = SharedRuntime::get_handle_wrong_method_stub(); ++ } else if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("polling address = %lx, sig=%d", os::get_polling_page(), sig); ++#endif ++ stub = SharedRuntime::get_poll_stub(pc); ++ } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) { ++ // BugId 4454115: A read from a MappedByteBuffer can fault ++ // here if the underlying file has been truncated. ++ // Do not crash the VM in such a case. ++ CodeBlob* cb = CodeCache::find_blob_unsafe(pc); ++ nmethod* nm = cb->is_nmethod() ? 
(nmethod*)cb : NULL; ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("cb = %lx, nm = %lx\n", cb, nm); ++#endif ++ if (nm != NULL && nm->has_unsafe_access()) { ++ stub = StubRoutines::handler_for_unsafe_access(); ++ } ++ } else if (sig == SIGFPE && ++ (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) { ++ stub = SharedRuntime::continuation_for_implicit_exception(thread, ++ pc, ++ SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); ++ } else if (sig == SIGSEGV && ++ !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("continuation for implicit exception\n"); ++#endif ++ // Determination of interpreter/vtable stub/compiled code null exception ++ stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("continuation_for_implicit_exception stub: %lx", stub); ++#endif ++ } ++ } else if (thread->thread_state() == _thread_in_vm && ++ sig == SIGBUS && /* info->si_code == BUS_OBJERR && */ ++ thread->doing_unsafe_access()) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("SIGBUS in vm thread \n"); ++#endif ++ stub = StubRoutines::handler_for_unsafe_access(); ++ } ++ ++ // jni_fast_GetField can trap at certain pc's if a GC kicks in ++ // and the heap gets shrunk before the field access. ++ if ((sig == SIGSEGV) || (sig == SIGBUS)) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("jni fast get trap: "); ++#endif ++ address addr = JNI_FastGetField::find_slowcase_pc(pc); ++ if (addr != (address)-1) { ++ stub = addr; ++ } ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("addr = %d, stub = %lx", addr, stub); ++#endif ++ } ++ ++ // Check to see if we caught the safepoint code in the ++ // process of write protecting the memory serialization page. ++ // It write enables the page immediately after protecting it ++ // so we can just return to retry the write. 
++ if ((sig == SIGSEGV) && ++ os::is_memory_serialize_page(thread, (address) info->si_addr)) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print("write protecting the memory serialiazation page\n"); ++#endif ++ // Block current thread until the memory serialize page permission restored. ++ os::block_on_serialize_page_trap(); ++ return true; ++ } ++ } ++ ++ // Execution protection violation ++ // ++ // This should be kept as the last step in the triage. We don't ++ // have a dedicated trap number for a no-execute fault, so be ++ // conservative and allow other handlers the first shot. ++ // ++ // Note: We don't test that info->si_code == SEGV_ACCERR here. ++ // this si_code is so generic that it is almost meaningless; and ++ // the si_code for this condition may change in the future. ++ // Furthermore, a false-positive should be harmless. ++ if (stub != NULL) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("resolved stub=%lx\n",stub); ++#endif ++ // save all thread context in case we need to restore it ++ if (thread != NULL) thread->set_saved_exception_pc(pc); ++ ++ uc->uc_mcontext.sc_pc = (greg_t)stub; ++ return true; ++ } ++ ++ // signal-chaining ++ if (os::Linux::chained_handler(sig, info, ucVoid)) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("signal chaining\n"); ++#endif ++ return true; ++ } ++ ++ if (!abort_if_unrecognized) { ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("abort becauce of unrecognized\n"); ++#endif ++ // caller wants another chance, so give it to him ++ return false; ++ } ++ ++ if (pc == NULL && uc != NULL) { ++ pc = os::Linux::ucontext_get_pc(uc); ++ } ++ ++ // unmask current signal ++ sigset_t newset; ++ sigemptyset(&newset); ++ sigaddset(&newset, sig); ++ sigprocmask(SIG_UNBLOCK, &newset, NULL); ++#ifdef PRINT_SIGNAL_HANDLE ++ tty->print_cr("VMError in signal handler\n"); ++#endif ++ VMError err(t, sig, pc, info, ucVoid); ++ err.report_and_die(); ++ ++ ShouldNotReachHere(); ++ return true; // Mute compiler ++} ++ ++// FCSR:...|24| 23 |22|21|... 
++// ...|FS|FCC0|FO|FN|... ++void os::Linux::init_thread_fpu_state(void) { ++ // Nothing to do ++} ++ ++int os::Linux::get_fpu_control_word(void) { ++ ShouldNotReachHere(); ++} ++ ++void os::Linux::set_fpu_control_word(int fpu_control) { ++ ShouldNotReachHere(); ++} ++ ++bool os::is_allocatable(size_t bytes) { ++ ++ if (bytes < 2 * G) { ++ return true; ++ } ++ ++ char* addr = reserve_memory(bytes, NULL); ++ ++ if (addr != NULL) { ++ release_memory(addr, bytes); ++ } ++ ++ return addr != NULL; ++} ++ ++//////////////////////////////////////////////////////////////////////////////// ++// thread stack ++ ++size_t os::Linux::min_stack_allowed = 96 * K; ++ ++ ++// Test if pthread library can support variable thread stack size. LinuxThreads ++// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads ++// in floating stack mode and NPTL support variable stack size. ++bool os::Linux::supports_variable_stack_size() { ++ if (os::Linux::is_NPTL()) { ++ // NPTL, yes ++ return true; ++ ++ } else { ++ // Note: We can't control default stack size when creating a thread. ++ // If we use non-default stack size (pthread_attr_setstacksize), both ++ // floating stack and non-floating stack LinuxThreads will return the ++ // same value. This makes it impossible to implement this function by ++ // detecting thread stack size directly. ++ // ++ // An alternative approach is to check %gs. Fixed-stack LinuxThreads ++ // do not use %gs, so its value is 0. Floating-stack LinuxThreads use ++ // %gs (either as LDT selector or GDT selector, depending on kernel) ++ // to access thread specific data. ++ // ++ // Note that %gs is a reserved glibc register since early 2001, so ++ // applications are not allowed to change its value (Ulrich Drepper from ++ // Redhat confirmed that all known offenders have been modified to use ++ // either %fs or TSD). 
In the worst case scenario, when VM is embedded in ++ // a native application that plays with %gs, we might see non-zero %gs ++ // even LinuxThreads is running in fixed stack mode. As the result, we'll ++ // return true and skip _thread_safety_check(), so we may not be able to ++ // detect stack-heap collisions. But otherwise it's harmless. ++ // ++ return false; ++ } ++} ++ ++// return default stack size for thr_type ++size_t os::Linux::default_stack_size(os::ThreadType thr_type) { ++ // default stack size (compiler thread needs larger stack) ++ size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K); ++ return s; ++} ++ ++size_t os::Linux::default_guard_size(os::ThreadType thr_type) { ++ // Creating guard page is very expensive. Java thread has HotSpot ++ // guard page, only enable glibc guard page for non-Java threads. ++ return (thr_type == java_thread ? 0 : page_size()); ++} ++ ++// Java thread: ++// ++// Low memory addresses ++// +------------------------+ ++// | |\ JavaThread created by VM does not have glibc ++// | glibc guard page | - guard, attached Java thread usually has ++// | |/ 1 page glibc guard. 
++// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() ++// | |\ ++// | HotSpot Guard Pages | - red and yellow pages ++// | |/ ++// +------------------------+ JavaThread::stack_yellow_zone_base() ++// | |\ ++// | Normal Stack | - ++// | |/ ++// P2 +------------------------+ Thread::stack_base() ++// ++// Non-Java thread: ++// ++// Low memory addresses ++// +------------------------+ ++// | |\ ++// | glibc guard page | - usually 1 page ++// | |/ ++// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() ++// | |\ ++// | Normal Stack | - ++// | |/ ++// P2 +------------------------+ Thread::stack_base() ++// ++// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from ++// pthread_attr_getstack() ++ ++static void current_stack_region(address * bottom, size_t * size) { ++ if (os::is_primordial_thread()) { ++ // primordial thread needs special handling because pthread_getattr_np() ++ // may return bogus value. ++ *bottom = os::Linux::initial_thread_stack_bottom(); ++ *size = os::Linux::initial_thread_stack_size(); ++ } else { ++ pthread_attr_t attr; ++ ++ int rslt = pthread_getattr_np(pthread_self(), &attr); ++ ++ // JVM needs to know exact stack location, abort if it fails ++ if (rslt != 0) { ++ if (rslt == ENOMEM) { ++ vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np"); ++ } else { ++ fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt)); ++ } ++ } ++ ++ if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) { ++ fatal("Can not locate current stack attributes!"); ++ } ++ ++ pthread_attr_destroy(&attr); ++ ++ } ++ assert(os::current_stack_pointer() >= *bottom && ++ os::current_stack_pointer() < *bottom + *size, "just checking"); ++} ++ ++address os::current_stack_base() { ++ address bottom; ++ size_t size; ++ current_stack_region(&bottom, &size); ++ return (bottom + size); ++} ++ ++size_t os::current_stack_size() { ++ // stack size includes normal stack and 
HotSpot guard pages ++ address bottom; ++ size_t size; ++ current_stack_region(&bottom, &size); ++ return size; ++} ++ ++///////////////////////////////////////////////////////////////////////////// ++// helper functions for fatal error handler ++void os::print_register_info(outputStream *st, void *context) { ++ if (context == NULL) return; ++ ++ ucontext_t *uc = (ucontext_t*)context; ++ ++ st->print_cr("Register to memory mapping:"); ++ st->cr(); ++ // this is horrendously verbose but the layout of the registers in the ++ // // context does not match how we defined our abstract Register set, so ++ // // we can't just iterate through the gregs area ++ // ++ // // this is only for the "general purpose" registers ++ st->print("V0=" ); print_location(st, uc->uc_mcontext.sc_regs[0]); ++ st->print("T0=" ); print_location(st, uc->uc_mcontext.sc_regs[1]); ++ st->print("T1=" ); print_location(st, uc->uc_mcontext.sc_regs[2]); ++ st->print("T2=" ); print_location(st, uc->uc_mcontext.sc_regs[3]); ++ st->cr(); ++ st->print("T3=" ); print_location(st, uc->uc_mcontext.sc_regs[4]); ++ st->print("T4=" ); print_location(st, uc->uc_mcontext.sc_regs[5]); ++ st->print("T5=" ); print_location(st, uc->uc_mcontext.sc_regs[6]); ++ st->print("T6=" ); print_location(st, uc->uc_mcontext.sc_regs[7]); ++ st->cr(); ++ st->print("T7=" ); print_location(st, uc->uc_mcontext.sc_regs[8]); ++ st->print("S0=" ); print_location(st, uc->uc_mcontext.sc_regs[9]); ++ st->print("S1=" ); print_location(st, uc->uc_mcontext.sc_regs[10]); ++ st->print("S2=" ); print_location(st, uc->uc_mcontext.sc_regs[11]); ++ st->cr(); ++ st->print("S3=" ); print_location(st, uc->uc_mcontext.sc_regs[12]); ++ st->print("S4=" ); print_location(st, uc->uc_mcontext.sc_regs[13]); ++ st->print("S5=" ); print_location(st, uc->uc_mcontext.sc_regs[14]); ++ st->print("FP=" ); print_location(st, uc->uc_mcontext.sc_regs[15]); ++ st->cr(); ++ st->print("A0=" ); print_location(st, uc->uc_mcontext.sc_regs[16]); ++ st->print("A1=" ); 
print_location(st, uc->uc_mcontext.sc_regs[17]); ++ st->print("A2=" ); print_location(st, uc->uc_mcontext.sc_regs[18]); ++ st->print("A3=" ); print_location(st, uc->uc_mcontext.sc_regs[19]); ++ st->cr(); ++ st->print("A4=" ); print_location(st, uc->uc_mcontext.sc_regs[20]); ++ st->print("A5=" ); print_location(st, uc->uc_mcontext.sc_regs[21]); ++ st->print("T8=" ); print_location(st, uc->uc_mcontext.sc_regs[22]); ++ st->print("T9=" ); print_location(st, uc->uc_mcontext.sc_regs[23]); ++ st->cr(); ++ st->print("T10=" ); print_location(st, uc->uc_mcontext.sc_regs[24]); ++ st->print("T11=" ); print_location(st, uc->uc_mcontext.sc_regs[25]); ++ st->print("RA=" ); print_location(st, uc->uc_mcontext.sc_regs[26]); ++ st->print("T12=" ); print_location(st, uc->uc_mcontext.sc_regs[27]); ++ st->cr(); ++ st->print("AT=" ); print_location(st, uc->uc_mcontext.sc_regs[28]); ++ st->print("GP=" ); print_location(st, uc->uc_mcontext.sc_regs[29]); ++ st->print("SP=" ); print_location(st, uc->uc_mcontext.sc_regs[30]); ++ st->print("R0=" ); print_location(st, uc->uc_mcontext.sc_regs[31]); ++ st->cr(); ++ ++} ++ ++jint inst_at(address addr, int offset) { ++ return *(jint*) ((long) addr + offset); ++} ++ ++jlong long_at(address addr, int offset) { ++ return *(jlong*) ((long) addr + offset); ++} ++ ++static bool is_op(int insn, Assembler::ops_mem op) { ++ return Assembler::sw2_op(insn) == (int) op; ++} ++ ++static bool is_op(int insn, Assembler::ops_bra op) { ++ return Assembler::sw2_op(insn) == (int) op; ++} ++ ++static void print_context_plus(int reg_num, void *context, outputStream *st) { ++ if (context == NULL) return; ++ ucontext_t *uc = (ucontext_t *) context; ++ int16_t msb_l = 0; ++ int16_t lsb_h = 0; ++ int16_t lsb_l = 0; ++ address addr_for_li48 = 0; ++ address addr = (address) uc->uc_mcontext.sc_regs[reg_num]; ++ const char* type = reg_num == 26 ? "call" : "jmp"; ++ const char* reg_name = reg_num == 26 ? 
"RA" : "AT"; ++ st->print_cr("Instructions: %s=" PTR_FORMAT ")", reg_name, addr); ++ if (CodeCache::find_blob(addr)) { ++ os::print_hex_dump(st, addr - 64, addr + 64, sizeof (char)); ++ Disassembler::decode(addr - 80, addr + 80, st); ++ st->cr(); ++ if (!SafePatch) { ++ if (is_op(inst_at(addr, -20), Assembler::op_ldi) && ++ is_op(inst_at(addr, -12), Assembler::op_ldih) && ++ is_op(inst_at(addr, -8), Assembler::op_ldi)) { ++ msb_l = inst_at(addr, -20) & 0xffff; ++ lsb_h = inst_at(addr, -12) & 0xffff; ++ lsb_l = inst_at(addr, -8) & 0xffff; ++ addr_for_li48 = (address) (((intptr_t) (msb_l) << 32) + ((intptr_t) (lsb_h) << 16) + (intptr_t) (lsb_l)); ++ } ++ } else { ++ if (is_op(inst_at(addr, 0), Assembler::op_ldi) && ++ is_op(inst_at(addr, 4), Assembler::op_br) && ++ is_op(inst_at(addr, 16), Assembler::op_ldl)) { ++ addr_for_li48 = (address) long_at(addr, 8); ++ } else if (is_op(inst_at(addr, 0), Assembler::op_br) && ++ is_op(inst_at(addr, 12), Assembler::op_ldl) && ++ is_op(inst_at(addr, 16), Assembler::op_ldi)) { ++ addr_for_li48 = (address) long_at(addr, 4); ++ } else { ++ st->print_cr("\nError!\n Not a call/jmp in " INTPTR_FORMAT, addr); ++ addr_for_li48 = 0; ++ } ++ } ++ st->print_cr("Instructions: (address for %s = " PTR_FORMAT ")", type, addr_for_li48); ++ st->print_cr("==============(address in T12=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[27]); ++ if (CodeCache::find_blob(addr_for_li48)) { ++ os::print_hex_dump(st, addr_for_li48 - 64, addr_for_li48 + 64, sizeof (char)); ++ Disassembler::decode(addr_for_li48 - 80, addr_for_li48 + 80, st); ++ st->cr(); ++ } ++ } else { ++ int offset = 0; ++ int buflen = 100; ++ char buf[buflen]; ++ bool found = os::dll_address_to_function_name(addr_for_li48, buf, buflen, &offset); ++ if (found) { ++ st->print_cr("=====The library name is %s =====", buf); ++ st->cr(); ++ } ++ } ++} ++ ++void os::print_context(outputStream *st, void *context) { ++ if (context == NULL) return; ++ ++ ucontext_t *uc = (ucontext_t*)context; ++ 
st->print_cr("Registers:"); ++ st->print( "V0=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[0]); ++ st->print(", T0=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[1]); ++ st->print(", T1=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[2]); ++ st->print(", T2=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[3]); ++ st->cr(); ++ st->print( "T3=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[4]); ++ st->print(", T4=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[5]); ++ st->print(", T5=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[6]); ++ st->print(", T6=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[7]); ++ st->cr(); ++ st->print( "T7=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[8]); ++ st->print(", S0=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[9]); ++ st->print(", S1=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[10]); ++ st->print(", S2=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[11]); ++ st->cr(); ++ st->print( "S3=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[12]); ++ st->print(", S4=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[13]); ++ st->print(", S5=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[14]); ++ st->print(", FP=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[15]); ++ st->cr(); ++ st->print( "A0=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[16]); ++ st->print(", A1=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[17]); ++ st->print(", A2=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[18]); ++ st->print(", A3=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[19]); ++ st->cr(); ++ st->print( "A4=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[20]); ++ st->print(", A5=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[21]); ++ st->print(", T8=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[22]); ++ st->print(", T9=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[23]); ++ st->cr(); ++ st->print( "T10=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[24]); ++ st->print(", T11=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[25]); ++ st->print(", RA=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[26]); ++ st->print(", T12=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[27]); ++ st->cr(); ++ st->print( "AT=" INTPTR_FORMAT, 
uc->uc_mcontext.sc_regs[28]); ++ st->print(", GP=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[29]); ++ st->print(", SP=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[30]); ++ st->print(", R0=" INTPTR_FORMAT, uc->uc_mcontext.sc_regs[31]); ++ st->cr(); ++ st->cr(); ++ ++ intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); ++ st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp); ++ //print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t)); ++ print_hex_dump(st, (address) sp - 32, (address) (sp + 32), sizeof(intptr_t)); ++ st->cr(); ++ ++ // Note: it may be unsafe to inspect memory near pc. For example, pc may ++ // point to garbage if entry point in an nmethod is corrupted. Leave ++ // this at the end, and hope for the best. ++ address pc = os::Linux::ucontext_get_pc(uc); ++ st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc - 4); ++ print_hex_dump(st, pc - 64, pc + 64, sizeof(char)); ++ Disassembler::decode(pc - 80, pc + 80, st); ++ ++ st->cr(); ++ print_context_plus(26, context, st); ++ print_context_plus(28, context, st); ++} ++ ++void os::setup_fpu() { ++ ++} ++ ++#ifndef PRODUCT ++void os::verify_stack_alignment() { ++ assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); ++} ++#endif +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/os_linux_sw64.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,37 @@ ++/* ++ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. 
++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_OS_LINUX_SW64_HPP ++#define OS_CPU_LINUX_SW64_VM_OS_LINUX_SW64_HPP ++ ++ static void setup_fpu(); ++ static bool is_allocatable(size_t bytes); ++ static intptr_t *get_previous_fp(); ++ static address ucontext_get_ra(const ucontext_t* uc); ++ ++ // Used to register dynamic code cache area with the OS ++ // Note: Currently only used in 64 bit Windows implementations ++ static bool register_code_area(char *low, char *high) { return true; } ++ ++#endif // OS_CPU_LINUX_SW64_VM_OS_LINUX_SW64_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/prefetch_linux_sw64.inline.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/prefetch_linux_sw64.inline.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/prefetch_linux_sw64.inline.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/prefetch_linux_sw64.inline.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,48 @@ ++/* ++ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_PREFETCH_LINUX_SW64_INLINE_HPP ++#define OS_CPU_LINUX_SW64_VM_PREFETCH_LINUX_SW64_INLINE_HPP ++#include "runtime/prefetch.hpp" ++ ++ ++inline void Prefetch::read (void *loc, intx interval) { ++ __asm__ __volatile__ ( ++ " fillcs 0(%0) \n" ++ : ++ : "r" ( ((address)loc) +((long)interval) ) ++ : "memory" ++ ); ++} ++ ++inline void Prefetch::write(void *loc, intx interval) { ++ __asm__ __volatile__ ( ++ " fillde 0(%0) \n" ++ : ++ : "r" ( ((address)loc) +((long)interval) ) ++ : "memory" ++ ); ++} ++ ++#endif // OS_CPU_LINUX_SW64_VM_PREFETCH_LINUX_SW64_INLINE_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.cpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.cpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.cpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,104 @@ ++/* ++ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "runtime/frame.inline.hpp" ++#include "runtime/thread.inline.hpp" ++#include "runtime/sharedRuntime.hpp" ++ ++void JavaThread::pd_initialize() ++{ ++ _anchor.clear(); ++ ++ // A non-existing address as error detector ++ if (CompileBroker::get_compilation_id() > 0) ++ _handle_wrong_method_stub = (address)SharedRuntime::get_handle_wrong_method_stub(); ++ else ++ _handle_wrong_method_stub = (address)0x2B2B2B; ++} ++ ++// For Forte Analyzer AsyncGetCallTrace profiling support - thread is ++// currently interrupted by SIGPROF ++bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, ++ void* ucontext, bool isInJava) { ++ ++ assert(Thread::current() == this, "caller must be current thread"); ++ return pd_get_top_frame(fr_addr, ucontext, isInJava); ++} ++ ++bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { ++ return pd_get_top_frame(fr_addr, ucontext, isInJava); ++} ++ ++bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { ++ assert(this->is_Java_thread(), "must be JavaThread"); ++ JavaThread* jt = (JavaThread *)this; ++ ++ // If we have a last_Java_frame, then we should use it even if ++ // isInJava == true. It should be more reliable than ucontext info. ++ if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { ++ *fr_addr = jt->pd_last_frame(); ++ return true; ++ } ++ ++ // At this point, we don't have a last_Java_frame, so ++ // we try to glean some information out of the ucontext ++ // if we were running Java code when SIGPROF came in. 
++ if (isInJava) { ++ ucontext_t* uc = (ucontext_t*) ucontext; ++ ++ intptr_t* ret_fp; ++ intptr_t* ret_sp; ++ ExtendedPC addr = os::Linux::fetch_frame_from_ucontext(this, uc, ++ &ret_sp, &ret_fp); ++ if (addr.pc() == NULL || ret_sp == NULL ) { ++ // ucontext wasn't useful ++ return false; ++ } ++ ++ frame ret_frame(ret_sp, ret_fp, addr.pc()); ++ if (!ret_frame.safe_for_sender(jt)) { ++#ifdef COMPILER2 ++ // C2 uses ebp as a general register see if NULL fp helps ++ frame ret_frame2(ret_sp, NULL, addr.pc()); ++ if (!ret_frame2.safe_for_sender(jt)) { ++ // nothing else to try if the frame isn't good ++ return false; ++ } ++ ret_frame = ret_frame2; ++#else ++ // nothing else to try if the frame isn't good ++ return false; ++#endif /* COMPILER2 */ ++ } ++ *fr_addr = ret_frame; ++ return true; ++ } ++ ++ // nothing else to try ++ return false; ++} ++ ++void JavaThread::cache_global_variables() { } ++ +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/thread_linux_sw64.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,70 @@ ++/* ++ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). 
++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ * ++ */ ++ ++ private: ++ void pd_initialize(); ++ ++ frame pd_last_frame() { ++ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); ++ ++ assert(_anchor.last_Java_pc() != NULL, "Ack no pc!"); ++ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); ++ } ++ ++ public: ++ // Mutators are highly dangerous.... ++ intptr_t* last_Java_fp() { return _anchor.last_Java_fp(); } ++ void set_last_Java_fp(intptr_t* fp) { _anchor.set_last_Java_fp(fp); } ++ ++ void set_base_of_stack_pointer(intptr_t* base_sp) { ++ } ++ ++ static ByteSize last_Java_fp_offset() { ++ return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); ++ } ++ ++ intptr_t* base_of_stack_pointer() { ++ return NULL; ++ } ++ void record_base_of_stack_pointer() { ++ } ++ ++ bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, ++ bool isInJava); ++ ++ bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); ++private: ++ bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); ++public: ++ ++ // These routines are only used on cpu architectures that ++ // have separate register stacks (Itanium). 
++ static bool register_stack_overflow() { return false; } ++ static void enable_register_stack_guard() {} ++ static void disable_register_stack_guard() {} ++ ++ // For convenient implementation of NativeGeneralJump::replace_mt_safe() ++ volatile address _handle_wrong_method_stub; ++ static ByteSize handle_wrong_method_stub_offset() { return byte_offset_of(JavaThread, _handle_wrong_method_stub); } ++ void set_handle_wrong_method_stub(address stub) { _handle_wrong_method_stub = stub; } +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.cpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.cpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.cpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,67 @@ ++/* ++ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "runtime/thread.inline.hpp" ++#include "runtime/threadLocalStorage.hpp" ++ ++// Map stack pointer (%esp) to thread pointer for faster TLS access ++// ++// Here we use a flat table for better performance. Getting current thread ++// is down to one memory access (read _sp_map[%esp>>12]) in generated code ++// and two in runtime code (-fPIC code needs an extra load for _sp_map). ++// ++// This code assumes stack page is not shared by different threads. It works ++// in 32-bit VM when page size is 4K (or a multiple of 4K, if that matters). ++// ++// Notice that _sp_map is allocated in the bss segment, which is ZFOD ++// (zero-fill-on-demand). While it reserves 4M address space upfront, ++// actual memory pages are committed on demand. ++// ++// If an application creates and destroys a lot of threads, usually the ++// stack space freed by a thread will soon get reused by new thread ++// (this is especially true in NPTL or LinuxThreads in fixed-stack mode). ++// No memory page in _sp_map is wasted. ++// ++// However, it's still possible that we might end up populating & ++// committing a large fraction of the 4M table over time, but the actual ++// amount of live data in the table could be quite small. The max wastage ++// is less than 4M bytes. If it becomes an issue, we could use madvise() ++// with MADV_DONTNEED to reclaim unused (i.e. all-zero) pages in _sp_map. ++// MADV_DONTNEED on Linux keeps the virtual memory mapping, but zaps the ++// physical memory page (i.e. similar to MADV_FREE on Solaris). 
++ ++ ++void ThreadLocalStorage::generate_code_for_get_thread() { ++ // nothing we can do here for user-level thread ++} ++ ++void ThreadLocalStorage::pd_init() { ++ assert(align_size_down(os::vm_page_size(), PAGE_SIZE) == os::vm_page_size(), ++ "page size must be multiple of PAGE_SIZE"); ++} ++ ++void ThreadLocalStorage::pd_set_thread(Thread* thread) { ++ os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); ++} +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/threadLS_linux_sw64.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,40 @@ ++/* ++ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_THREADLS_LINUX_SW64_HPP ++#define OS_CPU_LINUX_SW64_VM_THREADLS_LINUX_SW64_HPP ++ ++ // Processor dependent parts of ThreadLocalStorage ++ //only the low 2G space for user program in Linux ++ ++#define SP_BITLENGTH 34 //TODO: ? ++#define PAGE_SHIFT 13 ++#define PAGE_SIZE (1UL << PAGE_SHIFT) ++ ++public: ++ ++ static Thread* thread() { ++ return (Thread*) os::thread_local_storage_at(thread_index()); ++ } ++#endif // OS_CPU_LINUX_SW64_VM_THREADLS_LINUX_SW64_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/vmStructs_linux_sw64.hpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/vmStructs_linux_sw64.hpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/vmStructs_linux_sw64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/vmStructs_linux_sw64.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,54 @@ ++/* ++ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#ifndef OS_CPU_LINUX_SW64_VM_VMSTRUCTS_LINUX_SW64_HPP ++#define OS_CPU_LINUX_SW64_VM_VMSTRUCTS_LINUX_SW64_HPP ++ ++// These are the OS and CPU-specific fields, types and integer ++// constants required by the Serviceability Agent. This file is ++// referenced by vmStructs.cpp. ++ ++#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ ++ \ ++ /******************************/ \ ++ /* Threads (NOTE: incomplete) */ \ ++ /******************************/ \ ++ nonstatic_field(OSThread, _thread_id, pid_t) \ ++ nonstatic_field(OSThread, _pthread_id, pthread_t) ++ ++ ++#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \ ++ \ ++ /**********************/ \ ++ /* Posix Thread IDs */ \ ++ /**********************/ \ ++ \ ++ declare_integer_type(pid_t) \ ++ declare_unsigned_integer_type(pthread_t) ++ ++#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) ++ ++#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) ++ ++#endif // OS_CPU_LINUX_SW64_VM_VMSTRUCTS_LINUX_SW64_HPP +diff -uNr openjdk/hotspot/src/os_cpu/linux_sw64/vm/vm_version_linux_sw64.cpp afu8u/hotspot/src/os_cpu/linux_sw64/vm/vm_version_linux_sw64.cpp +--- openjdk/hotspot/src/os_cpu/linux_sw64/vm/vm_version_linux_sw64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_sw64/vm/vm_version_linux_sw64.cpp 2025-05-06 10:53:44.963633668 +0800 +@@ -0,0 +1,119 @@ ++/* ++ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 
++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#include "precompiled.hpp" ++#include "runtime/os.hpp" ++#include "vm_version_sw64.hpp" ++#include "string.h" ++ ++#define CPU_FAMILY_AMOUNT 9 ++ ++const char cpuinfo[CPU_FAMILY_AMOUNT][30] = { ++ "not-sw", // 0 ++ "sw410", // 1 ++ "sw4a", // 2 ++ "sw6a", // 3 ++ "sw6b", // 4 ++ "sw1621", // 5 ++ "sw421", // 6 ++ "sw3231", // 7 ++ "h8000", // 8 WX-H8000 for 8A ++}; ++ ++void read_cpu_info(const char *path, char *result) { ++ FILE *ptr; ++ char buf[1024]; ++ int i = 0; ++ if((ptr=fopen(path, "r")) != NULL) { ++ while(fgets(buf, 1024, ptr)!=NULL) { ++ strcat(result,buf); ++ i++; ++ if (i == 10) break; ++ } ++ fclose(ptr); ++ } else { ++ tty->print_cr("fopen %s error\n", path); ++ } ++} ++ ++void strlwr(char *str){ ++ for (; *str!='\0'; str++) ++ *str = tolower(*str); ++} ++ ++int VM_Version::platform_features(int features) { ++ char res[10240]; ++ int i; ++ features = spt_16k_page_m; //default support ++ memset(res, '\0', 10240 * sizeof(char)); ++ read_cpu_info("/proc/cpuinfo", res); ++ // res is converted to lower case ++ strlwr(res); ++ for (i = 1; i < CPU_FAMILY_AMOUNT; i++) { ++ if (strstr(res, cpuinfo[i])) { ++ break; ++ } ++ } ++ //add some other support when detected on shenwei ++ if (i != CPU_FAMILY_AMOUNT) { ++ features |= with_sw_support_m; ++ } ++ switch (i % CPU_FAMILY_AMOUNT) { ++ case 1 : ++ features |= sw2f_m; ++ //tty->print_cr("sw2f platform"); ++ break; ++ case 2 : ++ features |= sw4a_m; ++ //tty->print_cr("sw4a platform"); ++ break; ++ case 3 : ++ features |= sw6a_m; ++ //tty->print_cr("sw6a platform"); ++ break; ++ case 4 : ++ features |= sw6b_m; ++ //tty->print_cr("sw6b platform"); ++ break; ++ case 5 : ++ features |= sw1621_m; ++ //tty->print_cr("sw6b platform"); ++ break; ++ case 6 : ++ features |= sw4a_m; ++ //tty->print_cr("sw6b platform"); ++ break; ++ case 7 : ++ features |= sw3231_m; ++ break; ++ case 8 : ++ features |= wx_h8000_m; ++ break; ++ default: ++ //tty->print_cr("cpu not support, the cpuinfo is: %s", res); ++ 
//ShouldNotReachHere(); ++ ; ++ } ++ return features; ++} +diff -uNr openjdk/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp afu8u/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp +--- openjdk/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp 2025-05-06 10:53:44.963633668 +0800 +@@ -169,10 +169,6 @@ + *dest = store_value; + } + +-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { +- *dest = store_value; +-} +- + inline jint Atomic::add(jint add_value, volatile jint* dest) { + #ifdef ARM + return arm_add_and_fetch(dest, add_value); +diff -uNr openjdk/hotspot/src/share/tools/hsdis/hsdis.c afu8u/hotspot/src/share/tools/hsdis/hsdis.c +--- openjdk/hotspot/src/share/tools/hsdis/hsdis.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/tools/hsdis/hsdis.c 2025-05-06 10:53:44.987633669 +0800 +@@ -484,6 +484,9 @@ + #ifdef LIBARCH_amd64 + res = "i386:x86-64"; + #endif ++#if defined(LIBARCH_sw) || defined(LIBARCH_sw64) //SCW201170314 ++ res = "sw_64:sw2"; //SCW20170314, to support sw_64 special instruction. 
++#endif + #ifdef LIBARCH_sparc + res = "sparc:v8plusb"; + #endif +@@ -496,6 +499,9 @@ + #ifdef LIBARCH_aarch64 + res = "aarch64"; + #endif ++#ifdef LIBARCH_sw64 ++ res = "sw64"; ++#endif + if (res == NULL) + res = "architecture not set in Makefile!"; + return res; +diff -uNr openjdk/hotspot/src/share/tools/hsdis/Makefile afu8u/hotspot/src/share/tools/hsdis/Makefile +--- openjdk/hotspot/src/share/tools/hsdis/Makefile 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/tools/hsdis/Makefile 2025-05-06 10:53:44.987633669 +0800 +@@ -93,6 +93,9 @@ + CPU = $(shell uname -m) + ARCH1=$(CPU:x86_64=amd64) + ARCH=$(ARCH1:i686=i386) ++# Modify "sw_64" ARCH to "sw64" by SCW20170314 ++ARCH=$(ARCH1:sw_64=sw64) ++ + ifdef LP64 + CFLAGS/sparcv9 += -m64 + CFLAGS/amd64 += -m64 +@@ -176,6 +179,8 @@ + endif # LP64 + + JDKARCH=$(LIBARCH:i386=i586) ++# Modify "sw_64" LIBARCH to "sw64" for "make clean" by SCW20170314 ++JDKARCH=$(LIBARCH:sw_64=sw64) + + ifeq ($(BINUTILS),) + # Pop all the way out of the workspace to look for binutils. +diff -uNr openjdk/hotspot/src/share/tools/hsdis/mk afu8u/hotspot/src/share/tools/hsdis/mk +--- openjdk/hotspot/src/share/tools/hsdis/mk 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/share/tools/hsdis/mk 2025-05-06 10:53:44.987633669 +0800 +@@ -0,0 +1,10 @@ ++#!/bin/bash ++ ++UNAME=`uname -m` ++if [ "$UNAME" == "sw_64" ] ++then ++ make clean ++ make all64 BINUTILS=${HOME}/work/binutils-2.24-4A ++else ++ echo " * Sorry, it need compile in sw server! 
*" ++fi +diff -uNr openjdk/hotspot/src/share/vm/adlc/formssel.cpp afu8u/hotspot/src/share/vm/adlc/formssel.cpp +--- openjdk/hotspot/src/share/vm/adlc/formssel.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/adlc/formssel.cpp 2025-05-06 10:53:44.991633669 +0800 +@@ -640,6 +640,22 @@ + } + + ++bool InstructForm::is_wide_memory_kill(FormDict &globals) const { ++ if( _matrule == NULL ) return false; ++ if( !_matrule->_opType ) return false; ++ ++ if( strcmp(_matrule->_opType,"MemBarRelease") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"MemBarAcquire") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"MemBarStoreStore") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"MemBarVolatile") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"StoreFence") == 0 ) return true; ++ if( strcmp(_matrule->_opType,"LoadFence") == 0 ) return true; ++ ++ return false; ++} ++ + int InstructForm::memory_operand(FormDict &globals) const { + // Machine independent loads must be checked for anti-dependences + // Check if instruction has a USE of a memory operand class, or a def. 
+@@ -1144,9 +1160,6 @@ + else if (is_ideal_nop()) { + return "MachNopNode"; + } +- else if (is_ideal_membar()) { +- return "MachMemBarNode"; +- } + else if (is_mach_constant()) { + return "MachConstantNode"; + } +@@ -3474,7 +3487,6 @@ + "LoadPLocked", + "StorePConditional", "StoreIConditional", "StoreLConditional", + "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN", +- "ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", + "StoreCM", + "ClearArray", + "GetAndAddI", "GetAndSetI", "GetAndSetP", +diff -uNr openjdk/hotspot/src/share/vm/adlc/formssel.hpp afu8u/hotspot/src/share/vm/adlc/formssel.hpp +--- openjdk/hotspot/src/share/vm/adlc/formssel.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/adlc/formssel.hpp 2025-05-06 10:53:44.991633669 +0800 +@@ -191,6 +191,7 @@ + // loads from memory, so must check for anti-dependence + virtual bool needs_anti_dependence_check(FormDict &globals) const; + virtual int memory_operand(FormDict &globals) const; ++ bool is_wide_memory_kill(FormDict &globals) const; + + enum memory_operand_type { + NO_MEMORY_OPERAND = -1, +diff -uNr openjdk/hotspot/src/share/vm/adlc/main.cpp afu8u/hotspot/src/share/vm/adlc/main.cpp +--- openjdk/hotspot/src/share/vm/adlc/main.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/adlc/main.cpp 2025-05-06 10:53:44.991633669 +0800 +@@ -239,6 +239,11 @@ + AD.addInclude(AD._CPP_file, "nativeInst_aarch64.hpp"); + AD.addInclude(AD._CPP_file, "vmreg_aarch64.inline.hpp"); + #endif ++#ifdef TARGET_ARCH_sw64 ++ AD.addInclude(AD._CPP_file, "assembler_sw64.inline.hpp"); ++ AD.addInclude(AD._CPP_file, "nativeInst_sw64.hpp"); ++ AD.addInclude(AD._CPP_file, "vmreg_sw64.inline.hpp"); ++#endif + #ifdef TARGET_ARCH_sparc + AD.addInclude(AD._CPP_file, "nativeInst_sparc.hpp"); + AD.addInclude(AD._CPP_file, "vmreg_sparc.inline.hpp"); +diff -uNr openjdk/hotspot/src/share/vm/adlc/output_c.cpp afu8u/hotspot/src/share/vm/adlc/output_c.cpp +--- 
openjdk/hotspot/src/share/vm/adlc/output_c.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/adlc/output_c.cpp 2025-05-06 10:53:44.991633669 +0800 +@@ -3246,6 +3246,10 @@ + + // Analyze machine instructions that either USE or DEF memory. + int memory_operand = instr->memory_operand(_globalNames); ++ // Some guys kill all of memory ++ if ( instr->is_wide_memory_kill(_globalNames) ) { ++ memory_operand = InstructForm::MANY_MEMORY_OPERANDS; ++ } + + if ( memory_operand != InstructForm::NO_MEMORY_OPERAND ) { + if( memory_operand == InstructForm::MANY_MEMORY_OPERANDS ) { +diff -uNr openjdk/hotspot/src/share/vm/adlc/output_h.cpp afu8u/hotspot/src/share/vm/adlc/output_h.cpp +--- openjdk/hotspot/src/share/vm/adlc/output_h.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/adlc/output_h.cpp 2025-05-06 10:53:44.991633669 +0800 +@@ -2002,6 +2002,10 @@ + + // Analyze machine instructions that either USE or DEF memory. + int memory_operand = instr->memory_operand(_globalNames); ++ // Some guys kill all of memory ++ if ( instr->is_wide_memory_kill(_globalNames) ) { ++ memory_operand = InstructForm::MANY_MEMORY_OPERANDS; ++ } + if ( memory_operand != InstructForm::NO_MEMORY_OPERAND ) { + if( memory_operand == InstructForm::MANY_MEMORY_OPERANDS ) { + fprintf(fp," virtual const TypePtr *adr_type() const;\n"); +diff -uNr openjdk/hotspot/src/share/vm/asm/assembler.hpp afu8u/hotspot/src/share/vm/asm/assembler.hpp +--- openjdk/hotspot/src/share/vm/asm/assembler.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/asm/assembler.hpp 2025-05-06 10:53:44.991633669 +0800 +@@ -53,6 +53,10 @@ + # include "register_ppc.hpp" + # include "vm_version_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "register_sw64.hpp" ++# include "vm_version_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "register_aarch64.hpp" + # include "vm_version_aarch64.hpp" +@@ -307,6 +311,9 @@ + static bool is_simm9(int64_t x) { return 
is_simm(x, 9); } + static bool is_simm10(int64_t x) { return is_simm(x, 10); } + static bool is_simm16(int64_t x) { return is_simm(x, 16); } ++#ifdef SW64 ++ static bool is_simm21(int64_t x) { return is_simm(x, 21); } ++#endif + static bool is_simm32(int64_t x) { return is_simm(x, 32); } + + // Test if x is within unsigned immediate range for width. +@@ -468,6 +475,9 @@ + #ifdef TARGET_ARCH_ppc + # include "assembler_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "assembler_sw64.hpp" ++#endif + + + #endif // SHARE_VM_ASM_ASSEMBLER_HPP +diff -uNr openjdk/hotspot/src/share/vm/asm/assembler.inline.hpp afu8u/hotspot/src/share/vm/asm/assembler.inline.hpp +--- openjdk/hotspot/src/share/vm/asm/assembler.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/asm/assembler.inline.hpp 2025-05-06 10:53:44.991633669 +0800 +@@ -42,6 +42,9 @@ + #ifdef TARGET_ARCH_ppc + # include "assembler_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "assembler_sw64.inline.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "assembler_aarch64.inline.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/asm/codeBuffer.hpp afu8u/hotspot/src/share/vm/asm/codeBuffer.hpp +--- openjdk/hotspot/src/share/vm/asm/codeBuffer.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/asm/codeBuffer.hpp 2025-05-06 10:53:44.991633669 +0800 +@@ -635,6 +635,9 @@ + #ifdef TARGET_ARCH_ppc + # include "codeBuffer_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "codeBuffer_sw64.hpp" ++#endif + + }; + +diff -uNr openjdk/hotspot/src/share/vm/asm/macroAssembler.hpp afu8u/hotspot/src/share/vm/asm/macroAssembler.hpp +--- openjdk/hotspot/src/share/vm/asm/macroAssembler.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/asm/macroAssembler.hpp 2025-05-06 10:53:44.991633669 +0800 +@@ -42,6 +42,9 @@ + #ifdef TARGET_ARCH_ppc + # include "macroAssembler_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include 
"macroAssembler_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "macroAssembler_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/asm/macroAssembler.inline.hpp afu8u/hotspot/src/share/vm/asm/macroAssembler.inline.hpp +--- openjdk/hotspot/src/share/vm/asm/macroAssembler.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/asm/macroAssembler.inline.hpp 2025-05-06 10:53:44.991633669 +0800 +@@ -42,6 +42,9 @@ + #ifdef TARGET_ARCH_ppc + # include "macroAssembler_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "macroAssembler_sw64.inline.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "macroAssembler_aarch64.inline.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/asm/register.hpp afu8u/hotspot/src/share/vm/asm/register.hpp +--- openjdk/hotspot/src/share/vm/asm/register.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/asm/register.hpp 2025-05-06 10:53:44.995633669 +0800 +@@ -108,6 +108,9 @@ + #ifdef TARGET_ARCH_ppc + # include "register_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "register_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "register_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp afu8u/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp 2025-05-06 10:53:44.999633669 +0800 +@@ -305,8 +305,7 @@ + // branches since they include block and stub names. Also print + // patching moves since they generate funny looking code. 
+ if (op->code() == lir_branch || +- (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) || +- (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) { ++ (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) { + stringStream st; + op->print_on(&st); + _masm->block_comment(st.as_string()); +@@ -591,7 +590,7 @@ + break; + + case lir_leal: +- leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info()); ++ leal(op->in_opr(), op->result_opr()); + break; + + case lir_null_check: +diff -uNr openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp afu8u/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp 2025-05-06 10:53:44.999633669 +0800 +@@ -245,7 +245,7 @@ + void align_call(LIR_Code code); + + void negate(LIR_Opr left, LIR_Opr dest); +- void leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info); ++ void leal(LIR_Opr left, LIR_Opr dest); + + void rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info); + +diff -uNr openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp afu8u/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp 2025-05-06 11:13:08.111672950 +0800 +@@ -33,8 +33,6 @@ + #include "ci/ciArrayKlass.hpp" + #include "ci/ciInstance.hpp" + #include "ci/ciObjArray.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" + #include "runtime/sharedRuntime.hpp" + #include "runtime/stubRoutines.hpp" + #include "utilities/bitMap.inline.hpp" +@@ -1231,15 +1229,6 @@ + + LIR_Opr result = rlock_result(x); + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- LIR_Opr tmp = new_register(T_OBJECT); +- 
LIR_Opr addr = ShenandoahBarrierSet::barrier_set()->bsc1()->resolve_address(this, referent_field_adr, T_OBJECT, NULL); +- __ load(addr->as_address_ptr(), tmp, info); +- tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, addr); +- __ move(tmp, result); +- } else +-#endif + __ load(referent_field_adr, result, info); + + // Register the value in the referent field with the pre-barrier +@@ -1451,11 +1440,6 @@ + case BarrierSet::G1SATBCTLogging: + G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info); + break; +- case BarrierSet::ShenandoahBarrierSet: +- if (ShenandoahSATBBarrier) { +- G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info); +- } +- break; + #endif // INCLUDE_ALL_GCS + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: +@@ -1478,9 +1462,6 @@ + case BarrierSet::G1SATBCTLogging: + G1SATBCardTableModRef_post_barrier(addr, new_val); + break; +- case BarrierSet::ShenandoahBarrierSet: +- ShenandoahBarrierSetC1::bsc1()->storeval_barrier(this, new_val, NULL, false); +- break; + #endif // INCLUDE_ALL_GCS + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: +@@ -1847,33 +1828,16 @@ + address = generate_address(object.result(), x->offset(), field_type); + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (field_type == T_OBJECT || field_type == T_ARRAY)) { +- LIR_Opr tmp = new_register(T_OBJECT); +- LIR_Opr addr = ShenandoahBarrierSet::barrier_set()->bsc1()->resolve_address(this, address, field_type, needs_patching ? 
info : NULL); +- if (is_volatile) { +- volatile_field_load(addr->as_address_ptr(), tmp, info); +- } else { +- __ load(addr->as_address_ptr(), tmp, info); +- } +- if (is_volatile && os::is_MP()) { +- __ membar_acquire(); +- } +- tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, addr); +- __ move(tmp, reg); +- } else +-#endif +- { + if (is_volatile && !needs_patching) { + volatile_field_load(address, reg, info); + } else { + LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none; + __ load(address, reg, info, patch_code); + } ++ + if (is_volatile && os::is_MP()) { + __ membar_acquire(); + } +- } + } + + +@@ -1990,19 +1954,7 @@ + } + } + +- LIR_Opr result = rlock_result(x, x->elt_type()); +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (x->elt_type() == T_OBJECT || x->elt_type() == T_ARRAY)) { +- LIR_Opr tmp = new_register(T_OBJECT); +- LIR_Opr addr = ShenandoahBarrierSet::barrier_set()->bsc1()->resolve_address(this, array_addr, x->elt_type(), NULL); +- __ move(addr->as_address_ptr(), tmp, null_check_info); +- tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, addr); +- __ move(tmp, result); +- } else +-#endif +- __ move(array_addr, result, null_check_info); +- ++ __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info); + } + + +@@ -2291,14 +2243,6 @@ + + LIR_Opr value = rlock_result(x, x->basic_type()); + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) { +- LIR_Opr tmp = new_register(T_OBJECT); +- get_Object_unsafe(tmp, src.result(), off.result(), type, x->is_volatile()); +- tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, tmp, LIR_OprFact::addressConst(0)); +- __ move(tmp, value); +- } else +-#endif + get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile()); + + #if INCLUDE_ALL_GCS +@@ -2317,7 +2261,7 @@ + // } + // } + +- if ((UseShenandoahGC || UseG1GC) && type == 
T_OBJECT) { ++ if (UseG1GC && type == T_OBJECT) { + bool gen_pre_barrier = true; // Assume we need to generate pre_barrier. + bool gen_offset_check = true; // Assume we need to generate the offset guard. + bool gen_source_check = true; // Assume we need to check the src object for null. +diff -uNr openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp afu8u/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp 2025-05-06 11:13:08.111672950 +0800 +@@ -154,7 +154,7 @@ + + // only the classes below belong in the same file + class LIRGenerator: public InstructionVisitor, public BlockClosure { +- friend class ShenandoahBarrierSetC1; ++ + private: + Compilation* _compilation; + ciMethod* _method; // method that we are compiling +diff -uNr openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp afu8u/hotspot/src/share/vm/c1/c1_LIR.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_LIR.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_LIR.hpp 2025-05-06 10:53:44.999633669 +0800 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -2104,7 +2104,7 @@ + void branch_destination(Label* lbl) { append(new LIR_OpLabel(lbl)); } + + void negate(LIR_Opr from, LIR_Opr to) { append(new LIR_Op1(lir_neg, from, to)); } +- void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); } ++ void leal(LIR_Opr from, LIR_Opr result_reg) { append(new LIR_Op1(lir_leal, from, result_reg)); } + + // result is a stack location for old backend and vreg for UseLinearScan + // stack_loc_temp is an illegal register for old backend +diff -uNr openjdk/hotspot/src/share/vm/c1/c1_Runtime1.cpp afu8u/hotspot/src/share/vm/c1/c1_Runtime1.cpp +--- openjdk/hotspot/src/share/vm/c1/c1_Runtime1.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_Runtime1.cpp 2025-05-06 10:53:45.003633669 +0800 +@@ -58,10 +58,7 @@ + #include "runtime/vframeArray.hpp" + #include "utilities/copy.hpp" + #include "utilities/events.hpp" +-#include "utilities/macros.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#endif ++ + + // Implementation of StubAssembler + +@@ -202,7 +199,6 @@ + case dtrace_object_alloc_id: + case g1_pre_barrier_slow_id: + case g1_post_barrier_slow_id: +- case shenandoah_lrb_slow_id: + case slow_subtype_check_id: + case fpu2long_stub_id: + case unwind_exception_id: +@@ -1310,13 +1306,6 @@ + BarrierSet* bs = Universe::heap()->barrier_set(); + assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); + assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src_addr, dst_addr, length); +- } +-#endif +- + if (src == dst) { + // same object, no check + bs->write_ref_array_pre(dst_addr, length); +diff -uNr 
openjdk/hotspot/src/share/vm/c1/c1_Runtime1.hpp afu8u/hotspot/src/share/vm/c1/c1_Runtime1.hpp +--- openjdk/hotspot/src/share/vm/c1/c1_Runtime1.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/c1/c1_Runtime1.hpp 2025-05-06 10:53:45.003633669 +0800 +@@ -70,7 +70,6 @@ + stub(load_appendix_patching) \ + stub(g1_pre_barrier_slow) \ + stub(g1_post_barrier_slow) \ +- stub(shenandoah_lrb_slow) \ + stub(fpu2long_stub) \ + stub(counter_overflow) \ + stub(predicate_failed_trap) \ +diff -uNr openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp afu8u/hotspot/src/share/vm/ci/ciObjectFactory.cpp +--- openjdk/hotspot/src/share/vm/ci/ciObjectFactory.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/ci/ciObjectFactory.cpp 2025-05-06 10:53:45.007633669 +0800 +@@ -403,7 +403,7 @@ + ASSERT_IN_VM; // We're handling raw oops here. + + #if INCLUDE_ALL_GCS +- if (!(UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier))) { ++ if (!UseG1GC) { + return; + } + Klass* metadata_owner_klass; +diff -uNr openjdk/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp afu8u/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp +--- openjdk/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp 2025-05-06 10:53:45.007633669 +0800 +@@ -32,6 +32,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_sparc + # include "bytes_sparc.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/classfile/classFileStream.hpp afu8u/hotspot/src/share/vm/classfile/classFileStream.hpp +--- openjdk/hotspot/src/share/vm/classfile/classFileStream.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/classFileStream.hpp 2025-05-06 10:53:45.011633669 +0800 +@@ -32,6 +32,9 @@ + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif ++#ifdef 
TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_sparc + # include "bytes_sparc.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/classfile/classLoaderData.hpp afu8u/hotspot/src/share/vm/classfile/classLoaderData.hpp +--- openjdk/hotspot/src/share/vm/classfile/classLoaderData.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/classLoaderData.hpp 2025-05-06 10:53:45.011633669 +0800 +@@ -29,7 +29,6 @@ + #include "memory/memRegion.hpp" + #include "memory/metaspace.hpp" + #include "memory/metaspaceCounters.hpp" +-#include "runtime/handles.hpp" + #include "runtime/mutex.hpp" + #include "utilities/growableArray.hpp" + #include "utilities/macros.hpp" +diff -uNr openjdk/hotspot/src/share/vm/classfile/classLoaderStats.hpp afu8u/hotspot/src/share/vm/classfile/classLoaderStats.hpp +--- openjdk/hotspot/src/share/vm/classfile/classLoaderStats.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/classLoaderStats.hpp 2025-05-06 10:53:45.011633669 +0800 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -28,7 +28,6 @@ + + #include "classfile/classLoaderData.hpp" + #include "oops/klass.hpp" +-#include "oops/oop.hpp" + #include "oops/oopsHierarchy.hpp" + #include "runtime/vm_operations.hpp" + #include "services/diagnosticCommand.hpp" +diff -uNr openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp afu8u/hotspot/src/share/vm/classfile/javaClasses.cpp +--- openjdk/hotspot/src/share/vm/classfile/javaClasses.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/javaClasses.cpp 2025-05-06 10:53:45.011633669 +0800 +@@ -52,10 +52,6 @@ + #include "runtime/vframe.hpp" + #include "utilities/preserveException.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#endif +- + PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC + + #define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java) \ +@@ -1212,18 +1208,11 @@ + oop java_lang_Throwable::unassigned_stacktrace() { + InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::Throwable_klass()); + address addr = ik->static_field_addr(static_unassigned_stacktrace_offset); +- oop result; + if (UseCompressedOops) { +- result = oopDesc::load_decode_heap_oop((narrowOop *)addr); ++ return oopDesc::load_decode_heap_oop((narrowOop *)addr); + } else { +- result = oopDesc::load_decode_heap_oop((oop*)addr); +- } +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result); ++ return oopDesc::load_decode_heap_oop((oop*)addr); + } +-#endif +- return result; + } + + oop java_lang_Throwable::backtrace(oop throwable) { +@@ -2659,18 +2648,11 @@ + oop java_lang_ref_Reference::pending_list_lock() { + InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::Reference_klass()); + address addr = ik->static_field_addr(static_lock_offset); +- oop result; + if (UseCompressedOops) { +- result = oopDesc::load_decode_heap_oop((narrowOop 
*)addr); ++ return oopDesc::load_decode_heap_oop((narrowOop *)addr); + } else { +- result = oopDesc::load_decode_heap_oop((oop*)addr); +- } +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result); ++ return oopDesc::load_decode_heap_oop((oop*)addr); + } +-#endif +- return result; + } + + HeapWord *java_lang_ref_Reference::pending_list_addr() { +@@ -2682,18 +2664,11 @@ + + oop java_lang_ref_Reference::pending_list() { + char *addr = (char *)pending_list_addr(); +- oop result; + if (UseCompressedOops) { +- result = oopDesc::load_decode_heap_oop((narrowOop *)addr); ++ return oopDesc::load_decode_heap_oop((narrowOop *)addr); + } else { +- result = oopDesc::load_decode_heap_oop((oop*)addr); +- } +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result); ++ return oopDesc::load_decode_heap_oop((oop*)addr); + } +-#endif +- return result; + } + + +diff -uNr openjdk/hotspot/src/share/vm/classfile/stackMapTable.hpp afu8u/hotspot/src/share/vm/classfile/stackMapTable.hpp +--- openjdk/hotspot/src/share/vm/classfile/stackMapTable.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/stackMapTable.hpp 2025-05-06 10:53:45.015633670 +0800 +@@ -34,6 +34,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/classfile/symbolTable.cpp afu8u/hotspot/src/share/vm/classfile/symbolTable.cpp +--- openjdk/hotspot/src/share/vm/classfile/symbolTable.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/symbolTable.cpp 2025-05-06 10:53:45.015633670 +0800 +@@ -721,7 +721,7 @@ + // considered dead. 
The SATB part of G1 needs to get notified about this + // potential resurrection, otherwise the marking might not find the object. + #if INCLUDE_ALL_GCS +- if ((UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) && string != NULL) { ++ if (UseG1GC && string != NULL) { + G1SATBCardTableModRefBS::enqueue(string); + } + #endif +@@ -925,28 +925,6 @@ + buckets_oops_do(f, start_idx, end_idx); + } + } +- +-void StringTable::possibly_parallel_oops_do_shenandoah(OopClosure* f) { +- const int limit = the_table()->table_size(); +- +- // ClaimChunkSize is too small for processing a String table during the pause +- // efficiently: the atomic add costs dominate on many reasonable string tables. +- // Recast the chunk size to give each GC worker about 10 chunks. +- assert(UseShenandoahGC, "Only for Shenandoah"); +- const int chunk_size = MAX2(ClaimChunkSize, limit / (ParallelGCThreads * 10)); +- +- for (;;) { +- // Grab next set of buckets to scan +- int start_idx = Atomic::add(chunk_size, &_parallel_claimed_idx) - chunk_size; +- if (start_idx >= limit) { +- // End of table +- break; +- } +- +- int end_idx = MIN2(limit, start_idx + chunk_size); +- buckets_oops_do(f, start_idx, end_idx); +- } +-} + + // This verification is part of Universe::verify() and needs to be quick. + // See StringTable::verify_and_compare() below for exhaustive verification. 
+diff -uNr openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp afu8u/hotspot/src/share/vm/classfile/symbolTable.hpp +--- openjdk/hotspot/src/share/vm/classfile/symbolTable.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/symbolTable.hpp 2025-05-06 10:53:45.015633670 +0800 +@@ -328,7 +328,6 @@ + possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed); + } + static void possibly_parallel_oops_do(OopClosure* f); +- static void possibly_parallel_oops_do_shenandoah(OopClosure* f); + + // Hashing algorithm, used as the hash value used by the + // StringTable for bucket selection and comparison (stored in the +diff -uNr openjdk/hotspot/src/share/vm/classfile/verifier.cpp afu8u/hotspot/src/share/vm/classfile/verifier.cpp +--- openjdk/hotspot/src/share/vm/classfile/verifier.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/classfile/verifier.cpp 2025-05-06 10:53:45.015633670 +0800 +@@ -48,6 +48,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/code/codeBlob.cpp afu8u/hotspot/src/share/vm/code/codeBlob.cpp +--- openjdk/hotspot/src/share/vm/code/codeBlob.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/codeBlob.cpp 2025-05-06 10:53:45.015633670 +0800 +@@ -57,6 +57,9 @@ + #ifdef TARGET_ARCH_ppc + # include "nativeInst_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++#endif + #ifdef COMPILER1 + #include "c1/c1_Runtime1.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/code/codeCache.cpp afu8u/hotspot/src/share/vm/code/codeCache.cpp +--- openjdk/hotspot/src/share/vm/code/codeCache.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/codeCache.cpp 2025-05-06 11:13:08.119672950 +0800 +@@ -354,7 +354,7 @@ + void 
CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) { + assert_locked_or_safepoint(CodeCache_lock); + +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + return; + } + +@@ -399,7 +399,7 @@ + void CodeCache::add_scavenge_root_nmethod(nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); + +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + return; + } + +@@ -430,7 +430,7 @@ + void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); + +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + return; + } + +@@ -449,7 +449,7 @@ + void CodeCache::prune_scavenge_root_nmethods() { + assert_locked_or_safepoint(CodeCache_lock); + +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + return; + } + +@@ -481,7 +481,7 @@ + + #ifndef PRODUCT + void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) { +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + return; + } + +@@ -1013,3 +1013,4 @@ + nof_blobs(), nof_nmethods(), nof_adapters(), + unallocated_capacity()); + } ++ +diff -uNr openjdk/hotspot/src/share/vm/code/codeCache.hpp afu8u/hotspot/src/share/vm/code/codeCache.hpp +--- openjdk/hotspot/src/share/vm/code/codeCache.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/codeCache.hpp 2025-05-06 10:53:45.019633670 +0800 +@@ -42,11 +42,9 @@ + + class OopClosure; + class DepChange; +-class ShenandoahParallelCodeCacheIterator; + + class CodeCache : AllStatic { + friend class VMStructs; +- friend class ShenandoahParallelCodeCacheIterator; + private: + // CodeHeap is malloc()'ed at startup and never deleted during shutdown, + // so that the generated assembly code is always there when it's needed. 
+diff -uNr openjdk/hotspot/src/share/vm/code/compiledIC.hpp afu8u/hotspot/src/share/vm/code/compiledIC.hpp +--- openjdk/hotspot/src/share/vm/code/compiledIC.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/compiledIC.hpp 2025-05-06 10:53:45.019633670 +0800 +@@ -45,6 +45,9 @@ + #ifdef TARGET_ARCH_ppc + # include "nativeInst_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++#endif + + //----------------------------------------------------------------------------- + // The CompiledIC represents a compiled inline cache. +diff -uNr openjdk/hotspot/src/share/vm/code/nmethod.cpp afu8u/hotspot/src/share/vm/code/nmethod.cpp +--- openjdk/hotspot/src/share/vm/code/nmethod.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/nmethod.cpp 2025-05-06 11:13:08.119672950 +0800 +@@ -491,7 +491,7 @@ + _oops_do_mark_link = NULL; + _jmethod_id = NULL; + _osr_link = NULL; +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + _unloading_next = NULL; + } else { + _scavenge_root_link = NULL; +@@ -1175,7 +1175,11 @@ + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (!is_in_use()) { +- low_boundary += NativeJump::instruction_size; ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else ++ low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // This means that the low_boundary is going to be a little too high. + // This shouldn't matter, since oops of non-entrant methods are never used. +@@ -1223,7 +1227,11 @@ + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (!is_in_use()) { ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else + low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. 
+ // This means that the low_boundary is going to be a little too high. + // This shouldn't matter, since oops of non-entrant methods are never used. +@@ -1467,8 +1475,13 @@ + // The caller can be calling the method statically or through an inline + // cache call. + if (!is_osr_method() && !is_not_entrant()) { +- NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), +- SharedRuntime::get_handle_wrong_method_stub()); ++#if defined(SW64) && !defined(ZERO) ++ NativeGeneralJump::patch_verified_entry(entry_point(), verified_entry_point(), ++ SharedRuntime::get_handle_wrong_method_stub()); ++#else ++ NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), ++ SharedRuntime::get_handle_wrong_method_stub()); ++#endif + } + + if (is_in_use()) { +@@ -1797,7 +1810,11 @@ + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { +- low_boundary += NativeJump::instruction_size; ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else ++ low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } +@@ -1953,7 +1970,11 @@ + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else + low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } +@@ -2056,7 +2077,11 @@ + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else + low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) 
+ } +@@ -2157,7 +2182,11 @@ + void nmethod::metadata_do(void f(Metadata*)) { + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { +- low_boundary += NativeJump::instruction_size; ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else ++ low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } +@@ -2217,7 +2246,11 @@ + // not-entrant methods. + address low_boundary = verified_entry_point(); + if (is_not_entrant()) { +- low_boundary += NativeJump::instruction_size; ++#if defined(SW64) && !defined(ZERO) ++ low_boundary += NativeGeneralJump::instruction_size; ++#else ++ low_boundary += NativeJump::instruction_size; ++#endif + // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump. + // (See comment above.) + } +@@ -2724,7 +2757,11 @@ + return; + + // Make sure all the entry points are correctly aligned for patching. 
+- NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); ++#if defined(SW64) && !defined(ZERO) ++ NativeGeneralJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); ++#else ++ NativeJump::check_verified_entry_alignment(entry_point(), verified_entry_point()); ++#endif + + // assert(method()->is_oop(), "must be valid"); + +@@ -2841,7 +2878,7 @@ + }; + + void nmethod::verify_scavenge_root_oops() { +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + return; + } + +diff -uNr openjdk/hotspot/src/share/vm/code/relocInfo.hpp afu8u/hotspot/src/share/vm/code/relocInfo.hpp +--- openjdk/hotspot/src/share/vm/code/relocInfo.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/relocInfo.hpp 2025-05-06 10:53:45.019633670 +0800 +@@ -433,6 +433,9 @@ + #ifdef TARGET_ARCH_ppc + # include "relocInfo_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "relocInfo_sw64.hpp" ++#endif + + + protected: +diff -uNr openjdk/hotspot/src/share/vm/code/vmreg.hpp afu8u/hotspot/src/share/vm/code/vmreg.hpp +--- openjdk/hotspot/src/share/vm/code/vmreg.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/code/vmreg.hpp 2025-05-06 10:53:45.023633670 +0800 +@@ -47,6 +47,9 @@ + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/adGlobals_ppc_64.hpp" + #endif ++#if defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/adGlobals_sw64.hpp" ++#endif + #endif + + //------------------------------VMReg------------------------------------------ +@@ -158,6 +161,9 @@ + #ifdef TARGET_ARCH_x86 + # include "vmreg_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vmreg_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "vmreg_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/compiler/disassembler.cpp afu8u/hotspot/src/share/vm/compiler/disassembler.cpp +--- openjdk/hotspot/src/share/vm/compiler/disassembler.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/compiler/disassembler.cpp 2025-05-06 10:53:45.023633670 +0800 +@@ -50,6 +50,9 @@ + #ifdef TARGET_ARCH_ppc + # include "depChecker_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "depChecker_sw64.hpp" ++#endif + #ifdef SHARK + #include "shark/sharkEntry.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/compiler/disassemblerEnv.hpp afu8u/hotspot/src/share/vm/compiler/disassemblerEnv.hpp +--- openjdk/hotspot/src/share/vm/compiler/disassemblerEnv.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/compiler/disassemblerEnv.hpp 2025-05-06 10:53:45.023633670 +0800 +@@ -0,0 +1,44 @@ ++/* ++ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2015, 2016, Wuxi Institute of Advanced Technology. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ * ++ */ ++ ++#if 1 // Added for debugging information by ZHJ20180724 ++ ++#ifdef USE_PRAGMA_IDENT_HDR ++#pragma ident "@(#)disassemblerEnv.hpp 1.14 05/11/18 15:21:38 JVM" ++#endif ++ ++// Call-back interface for external disassembler ++class DisassemblerEnv { ++ public: ++ // printing ++ virtual void print_label(intptr_t value) = 0; ++ virtual void print_raw(const char* str) = 0; ++ virtual void print(const char* format, ...) = 0; ++ // helpers ++ virtual char* string_for_offset(intptr_t value) = 0; ++ virtual char* string_for_constant(unsigned char* pc, intptr_t value, int is_decimal) = 0; ++}; ++ ++#endif +diff -uNr openjdk/hotspot/src/share/vm/compiler/disassembler.hpp afu8u/hotspot/src/share/vm/compiler/disassembler.hpp +--- openjdk/hotspot/src/share/vm/compiler/disassembler.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/compiler/disassembler.hpp 2025-05-06 10:53:45.023633670 +0800 +@@ -96,6 +96,9 @@ + #ifdef TARGET_ARCH_ppc + # include "disassembler_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "disassembler_sw64.hpp" ++#endif + + + public: +diff -uNr openjdk/hotspot/src/share/vm/compiler/oopMap.hpp afu8u/hotspot/src/share/vm/compiler/oopMap.hpp +--- openjdk/hotspot/src/share/vm/compiler/oopMap.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/compiler/oopMap.hpp 2025-05-06 10:53:45.023633670 +0800 +@@ -43,7 +43,6 @@ + class frame; + class RegisterMap; + class DerivedPointerEntry; +-class OopClosure; + + class OopMapValue: public StackObj { + friend class VMStructs; +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp afu8u/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2025-05-06 10:53:45.035633670 +0800 +@@ -1525,8 +1525,6 @@ + + inline bool is_obj_ill(const oop obj) const; + 
+- inline bool requires_marking(const void* entry) const; +- + bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); + HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); + bool is_marked(oop obj, VerifyOption vo); +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp 2025-05-06 10:53:45.035633670 +0800 +@@ -376,21 +376,4 @@ + } + } + +-inline bool G1CollectedHeap::requires_marking(const void* entry) const { +- // Includes rejection of NULL pointers. +- assert(is_in_reserved(entry), +- err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry))); +- +- HeapRegion* region = heap_region_containing(entry); +- assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry))); +- if (entry >= region->next_top_at_mark_start()) { +- return false; +- } +- +- assert(((oop)entry)->is_oop(true /* ignore mark word */), +- err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry))); +- +- return ! 
isMarkedNext((oop) entry); +-} +- + #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp afu8u/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp 2025-05-06 10:53:45.047633671 +0800 +@@ -25,7 +25,6 @@ + #include "precompiled.hpp" + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/g1/satbQueue.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" + #include "memory/allocation.inline.hpp" + #include "memory/sharedHeap.hpp" + #include "oops/oop.inline.hpp" +@@ -77,19 +76,21 @@ + // processing must be somewhat circumspect and not assume entries + // in an unfiltered buffer refer to valid objects. + +-template +-inline bool requires_marking(const void* entry, HeapType* heap) { +- return heap->requires_marking(entry); +-} +- +-void ObjPtrQueue::filter() { +- if (UseG1GC) { +- filter_impl(); +- } else if (UseShenandoahGC) { +- filter_impl(); +- } else { +- ShouldNotReachHere(); ++inline bool requires_marking(const void* entry, G1CollectedHeap* heap) { ++ // Includes rejection of NULL pointers. ++ assert(heap->is_in_reserved(entry), ++ err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry))); ++ ++ HeapRegion* region = heap->heap_region_containing_raw(entry); ++ assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry))); ++ if (entry >= region->next_top_at_mark_start()) { ++ return false; + } ++ ++ assert(((oop)entry)->is_oop(true /* ignore mark word */), ++ err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry))); ++ ++ return true; + } + + // This method removes entries from a SATB buffer that will not be +@@ -97,9 +98,8 @@ + // they require marking and are not already marked. 
Retained entries + // are compacted toward the top of the buffer. + +-template +-void ObjPtrQueue::filter_impl() { +- HeapType* heap = (HeapType*) Universe::heap(); ++void ObjPtrQueue::filter() { ++ G1CollectedHeap* g1h = G1CollectedHeap::heap(); + void** buf = _buf; + size_t sz = _sz; + +@@ -126,7 +126,7 @@ + // far, we'll just end up copying it to the same place. + *p = NULL; + +- if (requires_marking(entry, heap)) { ++ if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) { + assert(new_index > 0, "we should not have already filled up the buffer"); + new_index -= oopSize; + assert(new_index >= i, +@@ -177,22 +177,6 @@ + size_t retained_entries = (sz - _index) / oopSize; + size_t perc = retained_entries * 100 / all_entries; + bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent; +- +- if (UseShenandoahGC) { +- Thread* t = Thread::current(); +- if (t->is_force_satb_flush()) { +- if (!should_enqueue && sz != _index) { +- // Non-empty buffer is compacted, and we decided not to enqueue it. +- // Shenandoah still wants to know about leftover work in that buffer eventually. +- // This avoid dealing with these leftovers during the final-mark, after the buffers +- // are drained completely. +- // TODO: This can be extended to handle G1 too +- should_enqueue = true; +- } +- t->set_force_satb_flush(false); +- } +- } +- + return should_enqueue; + } + +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp afu8u/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp 2025-05-06 10:53:45.047633671 +0800 +@@ -49,9 +49,6 @@ + // Filter out unwanted entries from the buffer. + void filter(); + +- template +- void filter_impl(); +- + public: + ObjPtrQueue(PtrQueueSet* qset, bool perm = false) : + // SATB queues are only active during marking cycles. 
We create +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp 2025-05-06 10:53:45.055633671 +0800 +@@ -71,6 +71,9 @@ + assert(should_scavenge(p, true), "revisiting object?"); + + oop o = oopDesc::load_decode_heap_oop_not_null(p); ++#if defined(SW64) ++ if (oopDesc::is_null(o)) return; ++#endif + oop new_obj = o->is_forwarded() + ? o->forwardee() + : pm->copy_to_survivor_space(o); +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shared/markBitMap.cpp afu8u/hotspot/src/share/vm/gc_implementation/shared/markBitMap.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shared/markBitMap.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shared/markBitMap.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,82 +0,0 @@ +-/* +- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-// Concurrent marking bit map wrapper +- +-#include "precompiled.hpp" +-#include "gc_implementation/shared/markBitMap.inline.hpp" +-#include "utilities/bitMap.inline.hpp" +- +-MarkBitMapRO::MarkBitMapRO(int shifter) : +- _bm(), +- _shifter(shifter) { +- _bmStartWord = 0; +- _bmWordSize = 0; +-} +- +-#ifndef PRODUCT +-bool MarkBitMapRO::covers(MemRegion heap_rs) const { +- // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); +- assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, +- "size inconsistency"); +- return _bmStartWord == (HeapWord*)(heap_rs.start()) && +- _bmWordSize == heap_rs.word_size(); +-} +-#endif +- +-void MarkBitMapRO::print_on_error(outputStream* st, const char* prefix) const { +- _bm.print_on_error(st, prefix); +-} +- +-size_t MarkBitMap::compute_size(size_t heap_size) { +- return ReservedSpace::allocation_align_size_up(heap_size / mark_distance()); +-} +- +-size_t MarkBitMap::mark_distance() { +- return MinObjAlignmentInBytes * BitsPerByte; +-} +- +-void MarkBitMap::initialize(MemRegion heap, MemRegion bitmap) { +- _bmStartWord = heap.start(); +- _bmWordSize = heap.word_size(); +- +- _bm.set_map((BitMap::bm_word_t*) bitmap.start()); +- _bm.set_size(_bmWordSize >> _shifter); +- _covered = heap; +-} +- +-void MarkBitMap::do_clear(MemRegion mr, bool large) { +- MemRegion intersection = mr.intersection(_covered); +- assert(!intersection.is_empty(), +- err_msg("Given range from " PTR_FORMAT " to " PTR_FORMAT " is completely outside the heap", +- p2i(mr.start()), p2i(mr.end()))); +- // convert address range into 
offset range +- size_t beg = heapWordToOffset(intersection.start()); +- size_t end = heapWordToOffset(intersection.end()); +- if (large) { +- _bm.clear_large_range(beg, end); +- } else { +- _bm.clear_range(beg, end); +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shared/markBitMap.hpp afu8u/hotspot/src/share/vm/gc_implementation/shared/markBitMap.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shared/markBitMap.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shared/markBitMap.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,118 +0,0 @@ +-/* +- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHARED_CMBITMAP_HPP +-#define SHARE_VM_GC_SHARED_CMBITMAP_HPP +- +-#include "memory/memRegion.hpp" +-#include "oops/oop.inline.hpp" +-#include "utilities/bitMap.hpp" +-#include "utilities/globalDefinitions.hpp" +- +-// A generic CM bit map. 
This is essentially a wrapper around the BitMap +-// class, with one bit per (1<<_shifter) HeapWords. +- +-class MarkBitMapRO VALUE_OBJ_CLASS_SPEC { +- protected: +- MemRegion _covered; // The heap area covered by this bitmap. +- HeapWord* _bmStartWord; // base address of range covered by map +- size_t _bmWordSize; // map size (in #HeapWords covered) +- const int _shifter; // map to char or bit +- BitMap _bm; // the bit map itself +- +- public: +- // constructor +- MarkBitMapRO(int shifter); +- +- // inquiries +- HeapWord* startWord() const { return _bmStartWord; } +- // the following is one past the last word in space +- HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } +- +- // read marks +- +- bool isMarked(HeapWord* addr) const { +- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), +- "outside underlying space?"); +- return _bm.at(heapWordToOffset(addr)); +- } +- +- // iteration +- inline bool iterate(BitMapClosure* cl, MemRegion mr); +- +- // Return the address corresponding to the next marked bit at or after +- // "addr", and before "limit", if "limit" is non-NULL. If there is no +- // such bit, returns "limit" if that is non-NULL, or else "endWord()". 
+- inline HeapWord* getNextMarkedWordAddress(const HeapWord* addr, +- const HeapWord* limit = NULL) const; +- +- // conversion utilities +- HeapWord* offsetToHeapWord(size_t offset) const { +- return _bmStartWord + (offset << _shifter); +- } +- size_t heapWordToOffset(const HeapWord* addr) const { +- return pointer_delta(addr, _bmStartWord) >> _shifter; +- } +- +- // The argument addr should be the start address of a valid object +- inline HeapWord* nextObject(HeapWord* addr); +- +- void print_on_error(outputStream* st, const char* prefix) const; +- +- // debugging +- NOT_PRODUCT(bool covers(MemRegion rs) const;) +-}; +- +-class MarkBitMap : public MarkBitMapRO { +- private: +- // Clear bitmap range +- void do_clear(MemRegion mr, bool large); +- +- public: +- static size_t compute_size(size_t heap_size); +- // Returns the amount of bytes on the heap between two marks in the bitmap. +- static size_t mark_distance(); +- // Returns how many bytes (or bits) of the heap a single byte (or bit) of the +- // mark bitmap corresponds to. This is the same as the mark distance above. static size_t heap_map_factor() { +- static size_t heap_map_factor() { +- return mark_distance(); +- } +- +- MarkBitMap() : MarkBitMapRO(LogMinObjAlignment) {} +- +- // Initializes the underlying BitMap to cover the given area. +- void initialize(MemRegion heap, MemRegion bitmap); +- +- // Write marks. +- inline void mark(HeapWord* addr); +- inline void clear(HeapWord* addr); +- inline bool parMark(HeapWord* addr); +- +- // Clear range. For larger regions, use *_large. 
+- void clear() { do_clear(_covered, true); } +- void clear_range(MemRegion mr) { do_clear(mr, false); } +- void clear_range_large(MemRegion mr) { do_clear(mr, true); } +-}; +- +-#endif // SHARE_VM_GC_SHARED_CMBITMAP_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shared/markBitMap.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shared/markBitMap.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shared/markBitMap.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shared/markBitMap.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,102 +0,0 @@ +-/* +- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHARED_CMBITMAP_INLINE_HPP +-#define SHARE_VM_GC_SHARED_CMBITMAP_INLINE_HPP +- +-#include "gc_implementation/shared/markBitMap.hpp" +-#include "utilities/bitMap.inline.hpp" +- +-inline HeapWord* MarkBitMapRO::getNextMarkedWordAddress(const HeapWord* addr, +- const HeapWord* limit) const { +- // First we must round addr *up* to a possible object boundary. +- addr = (HeapWord*)align_size_up((intptr_t)addr, +- HeapWordSize << _shifter); +- size_t addrOffset = heapWordToOffset(addr); +- assert(limit != NULL, "limit must not be NULL"); +- size_t limitOffset = heapWordToOffset(limit); +- size_t nextOffset = _bm.get_next_one_offset(addrOffset, limitOffset); +- HeapWord* nextAddr = offsetToHeapWord(nextOffset); +- assert(nextAddr >= addr, "get_next_one postcondition"); +- assert(nextAddr == limit || isMarked(nextAddr), +- "get_next_one postcondition"); +- return nextAddr; +-} +- +-inline bool MarkBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) { +- HeapWord* start_addr = MAX2(startWord(), mr.start()); +- HeapWord* end_addr = MIN2(endWord(), mr.end()); +- +- if (end_addr > start_addr) { +- // Right-open interval [start-offset, end-offset). 
+- BitMap::idx_t start_offset = heapWordToOffset(start_addr); +- BitMap::idx_t end_offset = heapWordToOffset(end_addr); +- +- start_offset = _bm.get_next_one_offset(start_offset, end_offset); +- while (start_offset < end_offset) { +- if (!cl->do_bit(start_offset)) { +- return false; +- } +- HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr); +- BitMap::idx_t next_offset = heapWordToOffset(next_addr); +- start_offset = _bm.get_next_one_offset(next_offset, end_offset); +- } +- } +- return true; +-} +- +-// The argument addr should be the start address of a valid object +-HeapWord* MarkBitMapRO::nextObject(HeapWord* addr) { +- oop obj = (oop) addr; +- HeapWord* res = addr + obj->size(); +- assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity"); +- return res; +-} +- +-#define check_mark(addr) \ +- assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize), \ +- "outside underlying space?"); \ +- /* assert(G1CollectedHeap::heap()->is_in_exact(addr), \ +- err_msg("Trying to access not available bitmap "PTR_FORMAT \ +- " corresponding to "PTR_FORMAT" (%u)", \ +- p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr))); */ +- +-inline void MarkBitMap::mark(HeapWord* addr) { +- check_mark(addr); +- _bm.set_bit(heapWordToOffset(addr)); +-} +- +-inline void MarkBitMap::clear(HeapWord* addr) { +- check_mark(addr); +- _bm.clear_bit(heapWordToOffset(addr)); +-} +- +-inline bool MarkBitMap::parMark(HeapWord* addr) { +- check_mark(addr); +- return _bm.par_set_bit(heapWordToOffset(addr)); +-} +- +-#undef check_mark +- +-#endif // SHARE_VM_GC_SHARED_CMBITMAP_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,136 +0,0 @@ +-/* +- * Copyright (c) 2018, 2020 Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "c1/c1_LIRGenerator.hpp" +-#include "c1/c1_IR.hpp" +-#include "gc_implementation/g1/satbQueue.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" +- +-#ifdef TARGET_ARCH_aarch64 +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#endif +-#ifdef TARGET_ARCH_x86 +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#endif +- +-#ifdef ASSERT +-#define __ gen->lir(__FILE__, __LINE__)-> +-#else +-#define __ gen->lir()-> +-#endif +- +-void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) { +- ShenandoahBarrierSetAssembler* bs = ShenandoahBarrierSetAssembler::bsasm(); +- bs->gen_load_reference_barrier_stub(ce, this); +-} +- +-ShenandoahBarrierSetC1* ShenandoahBarrierSetC1::bsc1() { +- return ShenandoahBarrierSet::barrier_set()->bsc1(); +-} +- +-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) { +- if (ShenandoahLoadRefBarrier) { +- return load_reference_barrier_impl(gen, obj, addr); +- } else { +- return obj; +- } +-} +- +-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) { +- assert(ShenandoahLoadRefBarrier, "Should be enabled"); +- obj = ensure_in_register(gen, obj, T_OBJECT); +- assert(obj->is_register(), "must be a register at this point"); +- addr = ensure_in_register(gen, addr, T_ADDRESS); +- assert(addr->is_register(), "must be a register at this point"); +- LIR_Opr result = gen->result_register_for(obj->value_type()); +- __ move(obj, result); +- LIR_Opr tmp1 = gen->new_register(T_ADDRESS); +- LIR_Opr tmp2 = gen->new_register(T_ADDRESS); +- +- LIR_Opr thrd = gen->getThreadPointer(); +- LIR_Address* active_flag_addr = +- new LIR_Address(thrd, +- 
in_bytes(JavaThread::gc_state_offset()), +- T_BYTE); +- // Read and check the gc-state-flag. +- LIR_Opr flag_val = gen->new_register(T_INT); +- __ load(active_flag_addr, flag_val); +- LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED | +- ShenandoahHeap::EVACUATION); +- LIR_Opr mask_reg = gen->new_register(T_INT); +- __ move(mask, mask_reg); +- +- if (TwoOperandLIRForm) { +- __ logical_and(flag_val, mask_reg, flag_val); +- } else { +- LIR_Opr masked_flag = gen->new_register(T_INT); +- __ logical_and(flag_val, mask_reg, masked_flag); +- flag_val = masked_flag; +- } +- __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); +- +- CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2); +- __ branch(lir_cond_notEqual, T_INT, slow); +- __ branch_destination(slow->continuation()); +- +- return result; +-} +- +-LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) { +- if (!obj->is_register()) { +- LIR_Opr obj_reg; +- if (obj->is_constant()) { +- obj_reg = gen->new_register(type); +- __ move(obj, obj_reg); +- } else { +- obj_reg = gen->new_pointer_register(); +- __ leal(obj, obj_reg); +- } +- obj = obj_reg; +- } +- return obj; +-} +- +-LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool patch) { +- if (ShenandoahStoreValEnqueueBarrier) { +- obj = ensure_in_register(gen, obj, T_OBJECT); +- gen->G1SATBCardTableModRef_pre_barrier(LIR_OprFact::illegalOpr, obj, false, false, NULL); +- } +- return obj; +-} +- +-LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRGenerator* gen, LIR_Address* addr, BasicType type, CodeEmitInfo* patch_emit_info) { +- LIR_Opr addr_opr = LIR_OprFact::address(addr); +- +- LIR_Opr resolved_addr = gen->new_pointer_register(); +- if (patch_emit_info != NULL) { +- __ leal(addr_opr, resolved_addr, lir_patch_normal, new CodeEmitInfo(patch_emit_info)); +- } else { +- __ leal(addr_opr, resolved_addr); +- } 
+- return LIR_OprFact::address(new LIR_Address(resolved_addr, type)); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,91 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP +-#define SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP +- +-#include "c1/c1_CodeStubs.hpp" +-#include "memory/allocation.hpp" +- +-class LIRGenerator; +-class LIRItem; +- +-class ShenandoahLoadReferenceBarrierStub: public CodeStub { +- friend class ShenandoahBarrierSetC1; +-private: +- LIR_Opr _obj; +- LIR_Opr _addr; +- LIR_Opr _result; +- LIR_Opr _tmp1; +- LIR_Opr _tmp2; +- +-public: +- ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2) : +- _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2) +- { +- assert(_obj->is_register(), "should be register"); +- assert(_addr->is_register(), "should be register"); +- assert(_result->is_register(), "should be register"); +- assert(_tmp1->is_register(), "should be register"); +- assert(_tmp2->is_register(), "should be register"); +- } +- +- LIR_Opr obj() const { return _obj; } +- LIR_Opr addr() const { return _addr; } +- LIR_Opr result() const { return _result; } +- LIR_Opr tmp1() const { return _tmp1; } +- LIR_Opr tmp2() const { return _tmp2; } +- +- virtual void emit_code(LIR_Assembler* e); +- virtual void visit(LIR_OpVisitState* visitor) { +- visitor->do_slow_case(); +- visitor->do_input(_obj); +- visitor->do_temp(_obj); +- visitor->do_input(_addr); +- visitor->do_temp(_addr); +- visitor->do_temp(_result); +- visitor->do_temp(_tmp1); +- visitor->do_temp(_tmp2); +- } +-#ifndef PRODUCT +- virtual void print_name(outputStream* out) const { out->print("ShenandoahLoadReferenceBarrierStub"); } +-#endif // PRODUCT +-}; +- +-class ShenandoahBarrierSetC1 : public CHeapObj{ +-private: +- CodeBlob* _pre_barrier_c1_runtime_code_blob; +-public: +- static ShenandoahBarrierSetC1* bsc1(); +- +- LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr); +- LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool patch); +- +- LIR_Opr 
resolve_address(LIRGenerator* gen, LIR_Address* addr, BasicType type, CodeEmitInfo* patch_emit_info); +- +-private: +- LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr); +- LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type); +-}; +- +-#endif // SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,82 +0,0 @@ +-/* +- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#include "opto/type.hpp" +-#include "runtime/thread.hpp" +- +-ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() { +- return ShenandoahBarrierSet::barrier_set()->bsc2(); +-} +- +-bool ShenandoahBarrierSetC2::is_shenandoah_lrb_call(Node* call) { +- if (!call->is_CallLeaf()) { +- return false; +- } +- +- address entry_point = call->as_CallLeaf()->entry_point(); +- return (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)) || +- (entry_point == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow)); +-} +- +-bool ShenandoahBarrierSetC2::is_shenandoah_state_load(Node* n) { +- if (!n->is_Load()) return false; +- const int state_offset = in_bytes(JavaThread::gc_state_offset()); +- return n->in(2)->is_AddP() && n->in(2)->in(2)->Opcode() == Op_ThreadLocal +- && n->in(2)->in(3)->is_Con() +- && n->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset; +-} +- +-const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() { +- const Type **fields = TypeTuple::fields(2); +- fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value +- fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address +- +- const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); +- +- // create result type (range) +- fields = TypeTuple::fields(1); +- fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; +- const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); +- +- return TypeFunc::make(domain, range); +-} +- +-Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) { +- if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- return 
c->in(ShenandoahLoadReferenceBarrierNode::ValueIn); +- } +- return c; +-} +- +-Node* ShenandoahBarrierSetC2::load_reference_barrier(GraphKit* kit, Node* n) const { +- if (ShenandoahLoadRefBarrier) { +- return kit->gvn().transform(new (kit->C) ShenandoahLoadReferenceBarrierNode(NULL, n)); +- } else { +- return n; +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,47 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP +-#define SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP +- +-#include "memory/allocation.hpp" +- +-class GraphKit; +-class Node; +-class TypeFunc; +- +-class ShenandoahBarrierSetC2 : public CHeapObj { +-public: +- static ShenandoahBarrierSetC2* bsc2(); +- +- static bool is_shenandoah_lrb_call(Node* call); +- static bool is_shenandoah_state_load(Node* n); +- +- static const TypeFunc* shenandoah_load_reference_barrier_Type(); +- +- Node* step_over_gc_barrier(Node* c); +- +- Node* load_reference_barrier(GraphKit* kit, Node* n) const; +-}; +- +-#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,3097 +0,0 @@ +- +-/* +- * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "opto/block.hpp" +-#include "opto/callnode.hpp" +-#include "opto/phaseX.hpp" +-#include "opto/rootnode.hpp" +-#include "opto/runtime.hpp" +-#include "opto/subnode.hpp" +- +-#ifdef _LP64 +-#define LoadXNode LoadLNode +-#else +-#define LoadXNode LoadINode +-#endif +- +-bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) { +- if (C->shenandoah_barriers_count() > 0) { +- C->clear_major_progress(); +- PhaseIdealLoop ideal_loop(igvn, false, true); +- if (C->failing()) return false; +- PhaseIdealLoop::verify(igvn); +- DEBUG_ONLY(verify_raw_mem(C->root());) +- } +- return true; +-} +- +-bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) { +- if (!UseShenandoahGC) { +- return false; +- } +- assert(iff->is_If(), "bad input"); +- if (iff->Opcode() != Op_If) { +- return false; +- } +- Node* bol = iff->in(1); +- if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) { +- return false; +- } +- Node* cmp = bol->in(1); +- if (cmp->Opcode() != Op_CmpI) { +- return false; +- } +- Node* in1 = cmp->in(1); +- Node* in2 = cmp->in(2); +- if (in2->find_int_con(-1) != 0) { +- return false; +- } +- if (in1->Opcode() != Op_AndI) { +- return false; +- } +- in2 = in1->in(2); +- if (in2->find_int_con(-1) != mask) { +- return false; +- } +- in1 = in1->in(1); +- +- return is_gc_state_load(in1); +-} +- +-bool 
ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) { +- return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED); +-} +- +-bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) { +- if (!UseShenandoahGC) { +- return false; +- } +- if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) { +- return false; +- } +- Node* addp = n->in(MemNode::Address); +- if (!addp->is_AddP()) { +- return false; +- } +- Node* base = addp->in(AddPNode::Address); +- Node* off = addp->in(AddPNode::Offset); +- if (base->Opcode() != Op_ThreadLocal) { +- return false; +- } +- if (off->find_intptr_t_con(-1) != in_bytes(JavaThread::gc_state_offset())) { +- return false; +- } +- return true; +-} +- +-bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) { +- assert(phase->is_dominator(stop, start), "bad inputs"); +- ResourceMark rm; +- Unique_Node_List wq; +- wq.push(start); +- for (uint next = 0; next < wq.size(); next++) { +- Node *m = wq.at(next); +- if (m == stop) { +- continue; +- } +- if (m->is_SafePoint() && !m->is_CallLeaf()) { +- return true; +- } +- if (m->is_Region()) { +- for (uint i = 1; i < m->req(); i++) { +- wq.push(m->in(i)); +- } +- } else { +- wq.push(m->in(0)); +- } +- } +- return false; +-} +- +-#ifdef ASSERT +-bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) { +- assert(phis.size() == 0, ""); +- +- while (true) { +- if (in->bottom_type() == TypePtr::NULL_PTR) { +- if (trace) {tty->print_cr("NULL");} +- } else if (!in->bottom_type()->make_ptr()->make_oopptr()) { +- if (trace) {tty->print_cr("Non oop");} +- } else if (in->bottom_type()->make_ptr()->make_oopptr() == TypeInstPtr::MIRROR) { +- if (trace) {tty->print_cr("Java mirror");} +- } else { +- if (in->is_ConstraintCast() || in->Opcode() == Op_CheckCastPP) { +- in = in->in(1); +- continue; +- } else if (in->is_AddP()) { +- 
assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access"); +- in = in->in(AddPNode::Address); +- continue; +- } else if (in->is_Con()) { +- if (trace) { +- tty->print("Found constant"); +- in->dump(); +- } +- } else if (in->Opcode() == Op_Parm) { +- if (trace) { +- tty->print("Found argument"); +- } +- } else if (in->Opcode() == Op_CreateEx) { +- if (trace) { +- tty->print("Found create-exception"); +- } +- } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) { +- if (trace) { +- tty->print("Found raw LoadP (OSR argument?)"); +- } +- } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- if (t == ShenandoahOopStore) { +- uint i = 0; +- for (; i < phis.size(); i++) { +- Node* n = phis.node_at(i); +- } +- if (i == phis.size()) { +- return false; +- } +- } +- barriers_used.push(in); +- if (trace) {tty->print("Found barrier"); in->dump();} +- } else if (in->is_Proj() && in->in(0)->is_Allocate()) { +- if (trace) { +- tty->print("Found alloc"); +- in->in(0)->dump(); +- } +- } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) { +- if (trace) { +- tty->print("Found Java call"); +- } +- } else if (in->is_Phi()) { +- if (!visited.test_set(in->_idx)) { +- if (trace) {tty->print("Pushed phi:"); in->dump();} +- phis.push(in, 2); +- in = in->in(1); +- continue; +- } +- if (trace) {tty->print("Already seen phi:"); in->dump();} +- } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) { +- if (!visited.test_set(in->_idx)) { +- if (trace) {tty->print("Pushed cmovep:"); in->dump();} +- phis.push(in, CMoveNode::IfTrue); +- in = in->in(CMoveNode::IfFalse); +- continue; +- } +- if (trace) {tty->print("Already seen cmovep:"); in->dump();} +- } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) { +- in = in->in(1); +- continue; +- } else { +- return false; +- } +- } +- bool cont = false; +- while (phis.is_nonempty()) { +- uint idx = phis.index(); +- 
Node* phi = phis.node(); +- if (idx >= phi->req()) { +- if (trace) {tty->print("Popped phi:"); phi->dump();} +- phis.pop(); +- continue; +- } +- if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();} +- in = phi->in(idx); +- phis.set_index(idx+1); +- cont = true; +- break; +- } +- if (!cont) { +- break; +- } +- } +- return true; +-} +- +-void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) { +- if (n1 != NULL) { +- n1->dump(+10); +- } +- if (n2 != NULL) { +- n2->dump(+10); +- } +- fatal(err_msg("%s", msg)); +-} +- +-static const char* call_name(CallNode* call) { +- if (call->is_CallRuntime()) { +- return call->as_CallRuntime()->_name; +- } +- if (call->is_CallStaticJava()) { +- return call->as_CallStaticJava()->_name; +- } +- return NULL; +-} +- +-void ShenandoahBarrierC2Support::verify(RootNode* root) { +- ResourceMark rm; +- Unique_Node_List wq; +- GrowableArray barriers; +- Unique_Node_List barriers_used; +- Node_Stack phis(0); +- VectorSet visited(Thread::current()->resource_area()); +- const bool trace = true; +- const bool verify_no_useless_barrier = false; +- +- wq.push(root); +- for (uint next = 0; next < wq.size(); next++) { +- Node *n = wq.at(next); +- if (n->is_Load()) { +- const bool trace = false; +- if (trace) {tty->print("Verifying"); n->dump();} +- if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) { +- if (trace) {tty->print_cr("Load range/klass");} +- } else { +- const TypePtr* adr_type = n->as_Load()->adr_type(); +- +- if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) { +- if (trace) {tty->print_cr("Mark load");} +- } else if (adr_type->isa_instptr() && +- adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) && +- adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) { +- if (trace) {tty->print_cr("Reference.get()");} +- } else if 
(!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: Load should have barriers", n); +- } +- } +- } else if (n->is_Store()) { +- const bool trace = false; +- +- if (trace) {tty->print("Verifying"); n->dump();} +- if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) { +- Node* adr = n->in(MemNode::Address); +- bool verify = true; +- +- if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) { +- adr = adr->in(AddPNode::Address); +- if (adr->is_AddP()) { +- assert(adr->in(AddPNode::Base)->is_top(), ""); +- adr = adr->in(AddPNode::Address); +- if (adr->Opcode() == Op_LoadP && +- adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() && +- adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal && +- adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(JavaThread::satb_mark_queue_offset()) + in_bytes(PtrQueue::byte_offset_of_buf())) { +- if (trace) {tty->print_cr("SATB prebarrier");} +- verify = false; +- } +- } +- } +- +- if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: Store should have barriers", n); +- } +- } +- if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: Store (address) should have barriers", n); +- } +- } else if (n->Opcode() == Op_CmpP) { +- const bool trace = false; +- +- Node* in1 = n->in(1); +- Node* in2 = n->in(2); +- if (in1->bottom_type()->isa_oopptr()) { +- if (trace) {tty->print("Verifying"); n->dump();} +- +- bool mark_inputs = false; +- if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR || +- (in1->is_Con() || in2->is_Con())) { +- if (trace) {tty->print_cr("Comparison against a constant");} +- mark_inputs = true; +- } else if ((in1->is_CheckCastPP() 
&& in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) || +- (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) { +- if (trace) {tty->print_cr("Comparison with newly alloc'ed object");} +- mark_inputs = true; +- } else { +- assert(in2->bottom_type()->isa_oopptr(), ""); +- +- if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) || +- !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: Cmp should have barriers", n); +- } +- } +- if (verify_no_useless_barrier && +- mark_inputs && +- (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) || +- !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) { +- phis.clear(); +- visited.Reset(); +- } +- } +- } else if (n->is_LoadStore()) { +- if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() && +- !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n); +- } +- +- if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n); +- } +- } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) { +- CallNode* call = n->as_Call(); +- +- static struct { +- const char* name; +- struct { +- int pos; +- verify_type t; +- } args[6]; +- } calls[] = { +- "aescrypt_encryptBlock", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "aescrypt_decryptBlock", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, 
+- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "multiplyToLen", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "squareToLen", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "montgomery_multiply", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, +- { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "montgomery_square", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "mulAdd", +- { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "vectorizedMismatch", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "updateBytesCRC32", +- { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "updateBytesAdler32", +- { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "updateBytesCRC32C", +- { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "counterMode_AESCrypt", +- { { TypeFunc::Parms, ShenandoahLoad }, { 
TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, +- { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } }, +- "cipherBlockChaining_encryptAESCrypt", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, +- { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "cipherBlockChaining_decryptAESCrypt", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad }, +- { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "shenandoah_clone_barrier", +- { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone}, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "ghash_processBlocks", +- { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "sha1_implCompress", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "sha256_implCompress", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "sha512_implCompress", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "sha1_implCompressMB", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "sha256_implCompressMB", +- { { TypeFunc::Parms, 
ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "sha512_implCompressMB", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- "encodeBlock", +- { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone }, +- { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} }, +- }; +- +- if (call->is_CallRuntime() && call->as_CallRuntime()->is_call_to_arraycopystub()) { +- Node* dest = NULL; +- const TypeTuple* args = n->as_Call()->_tf->domain(); +- for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { +- if (args->field_at(i)->isa_ptr()) { +- j++; +- if (j == 2) { +- dest = n->in(i); +- break; +- } +- } +- } +- if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) || +- !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n); +- } +- } else if (strlen(call_name(call)) > 5 && +- !strcmp(call_name(call) + strlen(call_name(call)) - 5, "_fill")) { +- if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: _fill should have barriers", n); +- } +- } else if (!strcmp(call_name(call), "g1_wb_pre")) { +- // skip +- } else { +- const int calls_len = sizeof(calls) / sizeof(calls[0]); +- int i = 0; +- for (; i < calls_len; i++) { +- if (!strcmp(calls[i].name, call_name(call))) { +- break; +- } +- } +- if (i != calls_len) { +- const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]); +- for (uint j = 0; j < args_len; j++) { +- int pos = calls[i].args[j].pos; +- if (pos == -1) { +- break; +- } +- if (!verify_helper(call->in(pos), phis, 
visited, calls[i].args[j].t, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); +- } +- } +- for (uint j = TypeFunc::Parms; j < call->req(); j++) { +- if (call->in(j)->bottom_type()->make_ptr() && +- call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { +- uint k = 0; +- for (; k < args_len && calls[i].args[k].pos != (int)j; k++); +- if (k == args_len) { +- fatal(err_msg("arg %d for call %s not covered", j, call_name(call))); +- } +- } +- } +- } else { +- for (uint j = TypeFunc::Parms; j < call->req(); j++) { +- if (call->in(j)->bottom_type()->make_ptr() && +- call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) { +- fatal(err_msg("%s not covered", call_name(call))); +- } +- } +- } +- } +- } else if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- // skip +- } else if (n->is_AddP() +- || n->is_Phi() +- || n->is_ConstraintCast() +- || n->Opcode() == Op_CheckCastPP +- || n->Opcode() == Op_Return +- || n->Opcode() == Op_CMoveP +- || n->Opcode() == Op_CMoveN +- || n->Opcode() == Op_Rethrow +- || n->is_MemBar() +- || n->Opcode() == Op_Conv2B +- || n->Opcode() == Op_SafePoint +- || n->is_CallJava() +- || n->Opcode() == Op_Unlock +- || n->Opcode() == Op_EncodeP +- || n->Opcode() == Op_DecodeN) { +- // nothing to do +- } else { +- static struct { +- int opcode; +- struct { +- int pos; +- verify_type t; +- } inputs[2]; +- } others[] = { +- Op_FastLock, +- { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, +- Op_Lock, +- { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} }, +- Op_AryEq, +- { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, +- Op_StrIndexOf, +- { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, +- Op_StrComp, +- { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } }, +- Op_StrEquals, +- { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } }, +- Op_EncodeISOArray, +- { { 2, ShenandoahLoad }, { 3, ShenandoahStore } }, +- Op_CastP2X, +- { { 1, ShenandoahLoad }, { -1, ShenandoahNone} }, +- 
Op_ClearArray, +- { { 3, ShenandoahStore }, { -1, ShenandoahNone} }, +- }; +- +- const int others_len = sizeof(others) / sizeof(others[0]); +- int i = 0; +- for (; i < others_len; i++) { +- if (others[i].opcode == n->Opcode()) { +- break; +- } +- } +- uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req(); +- if (i != others_len) { +- const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]); +- for (uint j = 0; j < inputs_len; j++) { +- int pos = others[i].inputs[j].pos; +- if (pos == -1) { +- break; +- } +- if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) { +- report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); +- } +- } +- for (uint j = 1; j < stop; j++) { +- if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() && +- n->in(j)->bottom_type()->make_ptr()->make_oopptr()) { +- uint k = 0; +- for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++); +- if (k == inputs_len) { +- fatal(err_msg("arg %d for node %s not covered", j, n->Name())); +- } +- } +- } +- } else { +- for (uint j = 1; j < stop; j++) { +- if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() && +- n->in(j)->bottom_type()->make_ptr()->make_oopptr()) { +- fatal(err_msg("%s not covered", n->Name())); +- } +- } +- } +- } +- +- if (n->is_SafePoint()) { +- SafePointNode* sfpt = n->as_SafePoint(); +- if (verify_no_useless_barrier && sfpt->jvms() != NULL) { +- for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) { +- if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) { +- phis.clear(); +- visited.Reset(); +- } +- } +- } +- } +- } +- +- if (verify_no_useless_barrier) { +- for (int i = 0; i < barriers.length(); i++) { +- Node* n = barriers.at(i); +- if (!barriers_used.member(n)) { +- tty->print("XXX useless barrier"); n->dump(-2); +- ShouldNotReachHere(); +- } +- } +- } +-} +-#endif +- +-bool 
ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) { +- // That both nodes have the same control is not sufficient to prove +- // domination, verify that there's no path from d to n +- ResourceMark rm; +- Unique_Node_List wq; +- wq.push(d); +- for (uint next = 0; next < wq.size(); next++) { +- Node *m = wq.at(next); +- if (m == n) { +- return false; +- } +- if (m->is_Phi() && m->in(0)->is_Loop()) { +- assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control"); +- } else { +- if (m->is_Store() || m->is_LoadStore()) { +- // Take anti-dependencies into account +- Node* mem = m->in(MemNode::Memory); +- for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { +- Node* u = mem->fast_out(i); +- if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) && +- phase->ctrl_or_self(u) == c) { +- wq.push(u); +- } +- } +- } +- for (uint i = 0; i < m->req(); i++) { +- if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) { +- wq.push(m->in(i)); +- } +- } +- } +- } +- return true; +-} +- +-bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) { +- if (d_c != n_c) { +- return phase->is_dominator(d_c, n_c); +- } +- return is_dominator_same_ctrl(d_c, d, n, phase); +-} +- +-Node* next_mem(Node* mem, int alias) { +- Node* res = NULL; +- if (mem->is_Proj()) { +- res = mem->in(0); +- } else if (mem->is_SafePoint() || mem->is_MemBar()) { +- res = mem->in(TypeFunc::Memory); +- } else if (mem->is_Phi()) { +- res = mem->in(1); +- } else if (mem->is_MergeMem()) { +- res = mem->as_MergeMem()->memory_at(alias); +- } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { +- assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier"); +- res = mem->in(MemNode::Memory); +- } else { +-#ifdef ASSERT +- mem->dump(); +-#endif +- 
ShouldNotReachHere(); +- } +- return res; +-} +- +-Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) { +- Node* iffproj = NULL; +- while (c != dom) { +- Node* next = phase->idom(c); +- assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?"); +- if (c->is_Region()) { +- ResourceMark rm; +- Unique_Node_List wq; +- wq.push(c); +- for (uint i = 0; i < wq.size(); i++) { +- Node *n = wq.at(i); +- if (n == next) { +- continue; +- } +- if (n->is_Region()) { +- for (uint j = 1; j < n->req(); j++) { +- wq.push(n->in(j)); +- } +- } else { +- wq.push(n->in(0)); +- } +- } +- for (uint i = 0; i < wq.size(); i++) { +- Node *n = wq.at(i); +- assert(n->is_CFG(), ""); +- if (n->is_Multi()) { +- for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { +- Node* u = n->fast_out(j); +- if (u->is_CFG()) { +- if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) { +- return NodeSentinel; +- } +- } +- } +- } +- } +- } else if (c->is_Proj()) { +- if (c->is_IfProj()) { +- if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) { +- // continue; +- } else { +- if (!allow_one_proj) { +- return NodeSentinel; +- } +- if (iffproj == NULL) { +- iffproj = c; +- } else { +- return NodeSentinel; +- } +- } +- } else if (c->Opcode() == Op_JumpProj) { +- return NodeSentinel; // unsupported +- } else if (c->Opcode() == Op_CatchProj) { +- return NodeSentinel; // unsupported +- } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) { +- return NodeSentinel; // unsupported +- } else { +- assert(next->unique_ctrl_out() == c, "unsupported branch pattern"); +- } +- } +- c = next; +- } +- return iffproj; +-} +- +-Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) { +- ResourceMark rm; +- VectorSet 
wq(Thread::current()->resource_area()); +- wq.set(mem->_idx); +- mem_ctrl = phase->ctrl_or_self(mem); +- while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) { +- mem = next_mem(mem, alias); +- if (wq.test_set(mem->_idx)) { +- return NULL; +- } +- mem_ctrl = phase->ctrl_or_self(mem); +- } +- if (mem->is_MergeMem()) { +- mem = mem->as_MergeMem()->memory_at(alias); +- mem_ctrl = phase->ctrl_or_self(mem); +- } +- return mem; +-} +- +-Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) { +- Node* mem = NULL; +- Node* c = ctrl; +- do { +- if (c->is_Region()) { +- Node* phi_bottom = NULL; +- for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) { +- Node* u = c->fast_out(i); +- if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { +- if (u->adr_type() == TypePtr::BOTTOM) { +- mem = u; +- } +- } +- } +- } else { +- if (c->is_Call() && c->as_Call()->adr_type() != NULL) { +- CallProjections projs; +- c->as_Call()->extract_projections(&projs, true, false); +- if (projs.fallthrough_memproj != NULL) { +- if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) { +- if (projs.catchall_memproj == NULL) { +- mem = projs.fallthrough_memproj; +- } else { +- if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) { +- mem = projs.fallthrough_memproj; +- } else { +- assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier"); +- mem = projs.catchall_memproj; +- } +- } +- } +- } else { +- Node* proj = c->as_Call()->proj_out(TypeFunc::Memory); +- if (proj != NULL && +- proj->adr_type() == TypePtr::BOTTOM) { +- mem = proj; +- } +- } +- } else { +- for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) { +- Node* u = c->fast_out(i); +- if (u->is_Proj() && +- u->bottom_type() == Type::MEMORY && +- u->adr_type() == TypePtr::BOTTOM) { +- assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), ""); +- assert(mem == NULL, "only one proj"); +- mem = u; +- } +- } 
+- assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected"); +- } +- } +- c = phase->idom(c); +- } while (mem == NULL); +- return mem; +-} +- +-void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) { +- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { +- Node* u = n->fast_out(i); +- if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) { +- uses.push(u); +- } +- } +-} +- +-void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl, +- PhaseIdealLoop* phase, int flags) { +- PhaseIterGVN& igvn = phase->igvn(); +- Node* old_ctrl = ctrl; +- +- Node* thread = new (phase->C) ThreadLocalNode(); +- Node* gc_state_offset = igvn.MakeConX(in_bytes(JavaThread::gc_state_offset())); +- Node* gc_state_addr = new (phase->C) AddPNode(phase->C->top(), thread, gc_state_offset); +- Node* gc_state = new (phase->C) LoadBNode(old_ctrl, raw_mem, gc_state_addr, +- DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL), +- TypeInt::BYTE, MemNode::unordered); +- +- Node* gc_state_and = new (phase->C) AndINode(gc_state, igvn.intcon(flags)); +- Node* gc_state_cmp = new (phase->C) CmpINode(gc_state_and, igvn.zerocon(T_INT)); +- Node* gc_state_bool = new (phase->C) BoolNode(gc_state_cmp, BoolTest::ne); +- +- IfNode* gc_state_iff = new (phase->C) IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); +- ctrl = new (phase->C) IfTrueNode(gc_state_iff); +- test_fail_ctrl = new (phase->C) IfFalseNode(gc_state_iff); +- +- IdealLoopTree* loop = phase->get_loop(old_ctrl); +- phase->register_control(gc_state_iff, loop, old_ctrl); +- phase->register_control(ctrl, loop, gc_state_iff); +- phase->register_control(test_fail_ctrl, loop, gc_state_iff); +- +- phase->register_new_node(thread, old_ctrl); +- 
phase->register_new_node(gc_state_addr, old_ctrl); +- phase->register_new_node(gc_state, old_ctrl); +- phase->register_new_node(gc_state_and, old_ctrl); +- phase->register_new_node(gc_state_cmp, old_ctrl); +- phase->register_new_node(gc_state_bool, old_ctrl); +- +- phase->set_ctrl(gc_state_offset, phase->C->root()); +- assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape"); +-} +- +-void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) { +- Node* old_ctrl = ctrl; +- PhaseIterGVN& igvn = phase->igvn(); +- +- const Type* val_t = igvn.type(val); +- if (val_t->meet(TypePtr::NULL_PTR) == val_t) { +- Node* null_cmp = new (phase->C) CmpPNode(val, igvn.zerocon(T_OBJECT)); +- Node* null_test = new (phase->C) BoolNode(null_cmp, BoolTest::ne); +- +- IfNode* null_iff = new (phase->C) IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN); +- ctrl = new (phase->C) IfTrueNode(null_iff); +- null_ctrl = new (phase->C) IfFalseNode(null_iff); +- +- IdealLoopTree* loop = phase->get_loop(old_ctrl); +- phase->register_control(null_iff, loop, old_ctrl); +- phase->register_control(ctrl, loop, null_iff); +- phase->register_control(null_ctrl, loop, null_iff); +- +- phase->register_new_node(null_cmp, old_ctrl); +- phase->register_new_node(null_test, old_ctrl); +- } +-} +- +-Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) { +- IdealLoopTree *loop = phase->get_loop(c); +- Node* iff = unc_ctrl->in(0); +- assert(iff->is_If(), "broken"); +- Node* new_iff = iff->clone(); +- new_iff->set_req(0, c); +- phase->register_control(new_iff, loop, c); +- Node* iffalse = new (phase->C) IfFalseNode(new_iff->as_If()); +- phase->register_control(iffalse, loop, new_iff); +- Node* iftrue = new (phase->C) IfTrueNode(new_iff->as_If()); +- phase->register_control(iftrue, loop, new_iff); +- c = iftrue; +- const Type *t = phase->igvn().type(val); +- assert(val->Opcode() == 
Op_CastPP, "expect cast to non null here"); +- Node* uncasted_val = val->in(1); +- val = new (phase->C) CastPPNode(uncasted_val, t); +- val->init_req(0, c); +- phase->register_new_node(val, c); +- return val; +-} +- +-void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, +- Unique_Node_List& uses, PhaseIdealLoop* phase) { +- IfNode* iff = unc_ctrl->in(0)->as_If(); +- Node* proj = iff->proj_out(0); +- assert(proj != unc_ctrl, "bad projection"); +- Node* use = proj->unique_ctrl_out(); +- +- assert(use == unc || use->is_Region(), "what else?"); +- +- uses.clear(); +- if (use == unc) { +- phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use)); +- for (uint i = 1; i < unc->req(); i++) { +- Node* n = unc->in(i); +- if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) { +- uses.push(n); +- } +- } +- } else { +- assert(use->is_Region(), "what else?"); +- uint idx = 1; +- for (; use->in(idx) != proj; idx++); +- for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) { +- Node* u = use->fast_out(i); +- if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) { +- uses.push(u->in(idx)); +- } +- } +- } +- for(uint next = 0; next < uses.size(); next++ ) { +- Node *n = uses.at(next); +- assert(phase->get_ctrl(n) == proj, "bad control"); +- phase->set_ctrl_and_loop(n, new_unc_ctrl); +- if (n->in(0) == proj) { +- phase->igvn().replace_input_of(n, 0, new_unc_ctrl); +- } +- for (uint i = 0; i < n->req(); i++) { +- Node* m = n->in(i); +- if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) { +- uses.push(m); +- } +- } +- } +- +- phase->igvn().rehash_node_delayed(use); +- int nb = use->replace_edge(proj, new_unc_ctrl); +- assert(nb == 1, "only use expected"); +-} +- +-void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) { +- Node* old_ctrl = ctrl; +- PhaseIterGVN& igvn = phase->igvn(); +- +- Node* raw_val = new (phase->C) 
CastP2XNode(old_ctrl, val); +- Node* cset_idx = new (phase->C) URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint())); +- +- // Figure out the target cset address with raw pointer math. +- // This avoids matching AddP+LoadB that would emit inefficient code. +- // See JDK-8245465. +- Node* cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr())); +- Node* cset_addr = new (phase->C) CastP2XNode(old_ctrl, cset_addr_ptr); +- Node* cset_load_addr = new (phase->C) AddXNode(cset_addr, cset_idx); +- Node* cset_load_ptr = new (phase->C) CastX2PNode(cset_load_addr); +- +- Node* cset_load = new (phase->C) LoadBNode(old_ctrl, raw_mem, cset_load_ptr, +- DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL), +- TypeInt::BYTE, MemNode::unordered); +- Node* cset_cmp = new (phase->C) CmpINode(cset_load, igvn.zerocon(T_INT)); +- Node* cset_bool = new (phase->C) BoolNode(cset_cmp, BoolTest::ne); +- +- IfNode* cset_iff = new (phase->C) IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); +- ctrl = new (phase->C) IfTrueNode(cset_iff); +- not_cset_ctrl = new (phase->C) IfFalseNode(cset_iff); +- +- IdealLoopTree* loop = phase->get_loop(old_ctrl); +- phase->register_control(cset_iff, loop, old_ctrl); +- phase->register_control(ctrl, loop, cset_iff); +- phase->register_control(not_cset_ctrl, loop, cset_iff); +- +- phase->set_ctrl(cset_addr_ptr, phase->C->root()); +- +- phase->register_new_node(raw_val, old_ctrl); +- phase->register_new_node(cset_idx, old_ctrl); +- phase->register_new_node(cset_addr, old_ctrl); +- phase->register_new_node(cset_load_addr, old_ctrl); +- phase->register_new_node(cset_load_ptr, old_ctrl); +- phase->register_new_node(cset_load, old_ctrl); +- phase->register_new_node(cset_cmp, old_ctrl); +- phase->register_new_node(cset_bool, old_ctrl); +-} +- +-void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, 
bool is_native, PhaseIdealLoop* phase) { +- IdealLoopTree*loop = phase->get_loop(ctrl); +- const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr(); +- +- // The slow path stub consumes and produces raw memory in addition +- // to the existing memory edges +- Node* base = find_bottom_mem(ctrl, phase); +- MergeMemNode* mm = MergeMemNode::make(phase->C, base); +- mm->set_memory_at(Compile::AliasIdxRaw, raw_mem); +- phase->register_new_node(mm, ctrl); +- +- address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ? +- CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow) : +- CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier); +- +- Node* call = new (phase->C) CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), +- target, +- "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM); +- call->init_req(TypeFunc::Control, ctrl); +- call->init_req(TypeFunc::I_O, phase->C->top()); +- call->init_req(TypeFunc::Memory, mm); +- call->init_req(TypeFunc::FramePtr, phase->C->top()); +- call->init_req(TypeFunc::ReturnAdr, phase->C->top()); +- call->init_req(TypeFunc::Parms, val); +- call->init_req(TypeFunc::Parms+1, load_addr); +- phase->register_control(call, loop, ctrl); +- ctrl = new (phase->C) ProjNode(call, TypeFunc::Control); +- phase->register_control(ctrl, loop, call); +- result_mem = new (phase->C) ProjNode(call, TypeFunc::Memory); +- phase->register_new_node(result_mem, call); +- val = new (phase->C) ProjNode(call, TypeFunc::Parms); +- phase->register_new_node(val, call); +- val = new (phase->C) CheckCastPPNode(ctrl, val, obj_type); +- phase->register_new_node(val, ctrl); +-} +- +-void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) { +- Node* ctrl = phase->get_ctrl(barrier); +- Node* init_raw_mem = fixer.find_mem(ctrl, barrier); +- +- // Update the control 
of all nodes that should be after the +- // barrier control flow +- uses.clear(); +- // Every node that is control dependent on the barrier's input +- // control will be after the expanded barrier. The raw memory (if +- // its memory is control dependent on the barrier's input control) +- // must stay above the barrier. +- uses_to_ignore.clear(); +- if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) { +- uses_to_ignore.push(init_raw_mem); +- } +- for (uint next = 0; next < uses_to_ignore.size(); next++) { +- Node *n = uses_to_ignore.at(next); +- for (uint i = 0; i < n->req(); i++) { +- Node* in = n->in(i); +- if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) { +- uses_to_ignore.push(in); +- } +- } +- } +- for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) { +- Node* u = ctrl->fast_out(i); +- if (u->_idx < last && +- u != barrier && +- !uses_to_ignore.member(u) && +- (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) && +- (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) { +- Node* old_c = phase->ctrl_or_self(u); +- Node* c = old_c; +- if (c != ctrl || +- is_dominator_same_ctrl(old_c, barrier, u, phase) || +- ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) { +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(ctrl, region); +- if (u->is_CFG()) { +- if (phase->idom(u) == ctrl) { +- phase->set_idom(u, region, phase->dom_depth(region)); +- } +- } else if (phase->get_ctrl(u) == ctrl) { +- assert(u != init_raw_mem, "should leave input raw mem above the barrier"); +- uses.push(u); +- } +- assert(nb == 1, "more than 1 ctrl input?"); +- --i, imax -= nb; +- } +- } +- } +-} +- +-static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) { +- Node* region = NULL; +- while (c != ctrl) { +- if (c->is_Region()) { +- region = c; +- } +- c = phase->idom(c); +- } +- 
assert(region != NULL, ""); +- if (n->is_Bool()) { +- Node* bol_clone = n->clone(); +- n = n->in(1); +- n_clone = n_clone->in(1); +- assert(n->is_Cmp() && n_clone->is_Cmp(), "should be cmp"); +- Node* cmp_clone = n->clone(); +- bol_clone->set_req(1, cmp_clone); +- if (n->in(1) != n_clone->in(1)) { +- cmp_clone->set_req(1, create_phis_on_call_return(ctrl, region, n->in(1), n_clone->in(1), projs, phase)); +- } +- if (n->in(2) != n_clone->in(2)) { +- cmp_clone->set_req(2, create_phis_on_call_return(ctrl, region, n->in(2), n_clone->in(2), projs, phase)); +- } +- phase->register_new_node(cmp_clone, region); +- phase->register_new_node(bol_clone, region); +- return bol_clone; +- } +- Node* phi = new (phase->C) PhiNode(region, n->bottom_type()); +- for (uint j = 1; j < region->req(); j++) { +- Node* in = region->in(j); +- if (phase->is_dominator(projs.fallthrough_catchproj, in)) { +- phi->init_req(j, n); +- } else if (phase->is_dominator(projs.catchall_catchproj, in)) { +- phi->init_req(j, n_clone); +- } else { +- phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase)); +- } +- } +- phase->register_new_node(phi, region); +- return phi; +-} +- +-void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) { +- +- Unique_Node_List uses; +- Node_Stack stack(0); +- Node_List clones; +- for (int i = phase->C->shenandoah_barriers_count() - 1; i >= 0; i--) { +- ShenandoahLoadReferenceBarrierNode* lrb = phase->C->shenandoah_barrier(i); +- if (lrb->is_redundant()) { +- continue; +- } +- +- Node* ctrl = phase->get_ctrl(lrb); +- Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); +- +- CallStaticJavaNode* unc = NULL; +- Node* unc_ctrl = NULL; +- Node* uncasted_val = val; +- +- for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { +- Node* u = lrb->fast_out(i); +- if (u->Opcode() == Op_CastPP && +- u->in(0) != NULL && +- phase->is_dominator(u->in(0), ctrl)) { +- const Type* u_t = phase->igvn().type(u); +- +- if 
(u_t->meet(TypePtr::NULL_PTR) != u_t && +- u->in(0)->Opcode() == Op_IfTrue && +- u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && +- u->in(0)->in(0)->is_If() && +- u->in(0)->in(0)->in(1)->Opcode() == Op_Bool && +- u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && +- u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && +- u->in(0)->in(0)->in(1)->in(1)->in(1) == val && +- u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { +- IdealLoopTree* loop = phase->get_loop(ctrl); +- IdealLoopTree* unc_loop = phase->get_loop(u->in(0)); +- +- if (!unc_loop->is_member(loop)) { +- continue; +- } +- +- Node* branch = no_branches(ctrl, u->in(0), false, phase); +- assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch"); +- if (branch == NodeSentinel) { +- continue; +- } +- +- phase->igvn().replace_input_of(u, 1, val); +- phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u); +- phase->set_ctrl(u, u->in(0)); +- phase->set_ctrl(lrb, u->in(0)); +- unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); +- unc_ctrl = u->in(0); +- val = u; +- +- for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { +- Node* u = val->fast_out(j); +- if (u == lrb) continue; +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(val, lrb); +- --j; jmax -= nb; +- } +- +- RegionNode* r = new (phase->C) RegionNode(3); +- IfNode* iff = unc_ctrl->in(0)->as_If(); +- +- Node* ctrl_use = unc_ctrl->unique_ctrl_out(); +- Node* unc_ctrl_clone = unc_ctrl->clone(); +- phase->register_control(unc_ctrl_clone, loop, iff); +- Node* c = unc_ctrl_clone; +- Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase); +- r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0)); +- +- phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0)); +- phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl)); +- phase->lazy_replace(c, 
unc_ctrl); +- c = NULL;; +- phase->igvn().replace_input_of(val, 0, unc_ctrl_clone); +- phase->set_ctrl(val, unc_ctrl_clone); +- +- IfNode* new_iff = new_cast->in(0)->in(0)->as_If(); +- fix_null_check(unc, unc_ctrl_clone, r, uses, phase); +- Node* iff_proj = iff->proj_out(0); +- r->init_req(2, iff_proj); +- phase->register_control(r, phase->ltree_root(), iff); +- +- Node* new_bol = new_iff->in(1)->clone(); +- Node* new_cmp = new_bol->in(1)->clone(); +- assert(new_cmp->Opcode() == Op_CmpP, "broken"); +- assert(new_cmp->in(1) == val->in(1), "broken"); +- new_bol->set_req(1, new_cmp); +- new_cmp->set_req(1, lrb); +- phase->register_new_node(new_bol, new_iff->in(0)); +- phase->register_new_node(new_cmp, new_iff->in(0)); +- phase->igvn().replace_input_of(new_iff, 1, new_bol); +- phase->igvn().replace_input_of(new_cast, 1, lrb); +- +- for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { +- Node* u = lrb->fast_out(i); +- if (u == new_cast || u == new_cmp) { +- continue; +- } +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(lrb, new_cast); +- assert(nb > 0, "no update?"); +- --i; imax -= nb; +- } +- +- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { +- Node* u = val->fast_out(i); +- if (u == lrb) { +- continue; +- } +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(val, new_cast); +- assert(nb > 0, "no update?"); +- --i; imax -= nb; +- } +- +- ctrl = unc_ctrl_clone; +- phase->set_ctrl_and_loop(lrb, ctrl); +- break; +- } +- } +- } +- if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) { +- CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava(); +- if (call->entry_point() == OptoRuntime::rethrow_stub()) { +- // The rethrow call may have too many projections to be +- // properly handled here. 
Given there's no reason for a +- // barrier to depend on the call, move it above the call +- if (phase->get_ctrl(val) == ctrl) { +- assert(val->Opcode() == Op_DecodeN, "unexpected node"); +- assert(phase->is_dominator(phase->get_ctrl(val->in(1)), call->in(0)), "Load is too low"); +- phase->set_ctrl(val, call->in(0)); +- } +- phase->set_ctrl(lrb, call->in(0)); +- continue; +- } +- CallProjections projs; +- call->extract_projections(&projs, false, false); +- +-#ifdef ASSERT +- VectorSet cloned(Thread::current()->resource_area()); +-#endif +- Node* lrb_clone = lrb->clone(); +- phase->register_new_node(lrb_clone, projs.catchall_catchproj); +- phase->set_ctrl(lrb, projs.fallthrough_catchproj); +- +- stack.push(lrb, 0); +- clones.push(lrb_clone); +- +- do { +- assert(stack.size() == clones.size(), ""); +- Node* n = stack.node(); +-#ifdef ASSERT +- if (n->is_Load()) { +- Node* mem = n->in(MemNode::Memory); +- for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) { +- Node* u = mem->fast_out(j); +- assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?"); +- } +- } +-#endif +- uint idx = stack.index(); +- Node* n_clone = clones.at(clones.size()-1); +- if (idx < n->outcnt()) { +- Node* u = n->raw_out(idx); +- Node* c = phase->ctrl_or_self(u); +- if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) { +- stack.set_index(idx+1); +- assert(!u->is_CFG(), ""); +- stack.push(u, 0); +- assert(!cloned.test_set(u->_idx), "only one clone"); +- Node* u_clone = u->clone(); +- int nb = u_clone->replace_edge(n, n_clone); +- assert(nb > 0, "should have replaced some uses"); +- phase->register_new_node(u_clone, projs.catchall_catchproj); +- clones.push(u_clone); +- phase->set_ctrl(u, projs.fallthrough_catchproj); +- } else { +- bool replaced = false; +- if (u->is_Phi()) { +- for (uint k = 1; k < u->req(); k++) { +- if (u->in(k) == n) { +- if (phase->is_dominator(projs.catchall_catchproj, 
u->in(0)->in(k))) { +- phase->igvn().replace_input_of(u, k, n_clone); +- replaced = true; +- } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) { +- phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase)); +- replaced = true; +- } +- } +- } +- } else { +- if (phase->is_dominator(projs.catchall_catchproj, c)) { +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(n, n_clone); +- assert(nb > 0, "should have replaced some uses"); +- replaced = true; +- } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) { +- if (u->is_If()) { +- // Can't break If/Bool/Cmp chain +- assert(n->is_Bool(), "unexpected If shape"); +- assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape"); +- assert(n_clone->is_Bool(), "unexpected clone"); +- assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone"); +- Node* bol_clone = n->clone(); +- Node* cmp_clone = stack.node_at(stack.size()-2)->clone(); +- bol_clone->set_req(1, cmp_clone); +- +- Node* nn = stack.node_at(stack.size()-3); +- Node* nn_clone = clones.at(clones.size()-3); +- assert(nn->Opcode() == nn_clone->Opcode(), "mismatch"); +- +- int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase)); +- assert(nb > 0, "should have replaced some uses"); +- +- phase->register_new_node(bol_clone, u->in(0)); +- phase->register_new_node(cmp_clone, u->in(0)); +- +- phase->igvn().replace_input_of(u, 1, bol_clone); +- +- } else { +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase)); +- assert(nb > 0, "should have replaced some uses"); +- } +- replaced = true; +- } +- } +- if (!replaced) { +- stack.set_index(idx+1); +- } +- } +- } else { +- stack.pop(); +- clones.pop(); +- } +- } while (stack.size() > 0); +- assert(stack.size() == 0 && clones.size() == 0, ""); +- } +- } +- +- // Expand 
load-reference-barriers +- MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase); +- Unique_Node_List uses_to_ignore; +- for (int i = phase->C->shenandoah_barriers_count() - 1; i >= 0; i--) { +- ShenandoahLoadReferenceBarrierNode* lrb = phase->C->shenandoah_barrier(i); +- if (lrb->is_redundant()) { +- phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); +- continue; +- } +- uint last = phase->C->unique(); +- Node* ctrl = phase->get_ctrl(lrb); +- Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); +- +- +- Node* orig_ctrl = ctrl; +- +- Node* raw_mem = fixer.find_mem(ctrl, lrb); +- Node* init_raw_mem = raw_mem; +- Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); +- +- IdealLoopTree *loop = phase->get_loop(ctrl); +- CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn()); +- Node* unc_ctrl = NULL; +- if (unc != NULL) { +- if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) { +- unc = NULL; +- } else { +- unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control); +- } +- } +- +- Node* uncasted_val = val; +- if (unc != NULL) { +- uncasted_val = val->in(1); +- } +- +- Node* heap_stable_ctrl = NULL; +- Node* null_ctrl = NULL; +- +- assert(val->bottom_type()->make_oopptr(), "need oop"); +- assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); +- +- enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT }; +- Node* region = new (phase->C) RegionNode(PATH_LIMIT); +- Node* val_phi = new (phase->C) PhiNode(region, uncasted_val->bottom_type()->is_oopptr()); +- Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); +- +- // Stable path. 
+- test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, ShenandoahHeap::HAS_FORWARDED); +- IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If(); +- +- // Heap stable case +- region->init_req(_heap_stable, heap_stable_ctrl); +- val_phi->init_req(_heap_stable, uncasted_val); +- raw_mem_phi->init_req(_heap_stable, raw_mem); +- +- Node* reg2_ctrl = NULL; +- // Null case +- test_null(ctrl, val, null_ctrl, phase); +- if (null_ctrl != NULL) { +- reg2_ctrl = null_ctrl->in(0); +- region->init_req(_null_path, null_ctrl); +- val_phi->init_req(_null_path, uncasted_val); +- raw_mem_phi->init_req(_null_path, raw_mem); +- } else { +- region->del_req(_null_path); +- val_phi->del_req(_null_path); +- raw_mem_phi->del_req(_null_path); +- } +- +- // Test for in-cset. +- // Wires !in_cset(obj) to slot 2 of region and phis +- Node* not_cset_ctrl = NULL; +- test_in_cset(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase); +- if (not_cset_ctrl != NULL) { +- if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0); +- region->init_req(_not_cset, not_cset_ctrl); +- val_phi->init_req(_not_cset, uncasted_val); +- raw_mem_phi->init_req(_not_cset, raw_mem); +- } +- +- // Resolve object when orig-value is in cset. +- // Make the unconditional resolve for fwdptr. 
+- Node* new_val = uncasted_val; +- if (unc_ctrl != NULL) { +- // Clone the null check in this branch to allow implicit null check +- new_val = clone_null_check(ctrl, val, unc_ctrl, phase); +- fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase); +- +- IfNode* iff = unc_ctrl->in(0)->as_If(); +- phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1)); +- } +- +- // Call lrb-stub and wire up that path in slots 4 +- Node* result_mem = NULL; +- +- Node* fwd = new_val; +- Node* addr; +- if (ShenandoahSelfFixing) { +- VectorSet visited(Thread::current()->resource_area()); +- addr = get_load_addr(phase, visited, lrb); +- } else { +- addr = phase->igvn().zerocon(T_OBJECT); +- } +- if (addr->Opcode() == Op_AddP) { +- Node* orig_base = addr->in(AddPNode::Base); +- Node* base = new (phase->C) CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type()); +- phase->register_new_node(base, ctrl); +- if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) { +- // Field access +- addr = addr->clone(); +- addr->set_req(AddPNode::Base, base); +- addr->set_req(AddPNode::Address, base); +- phase->register_new_node(addr, ctrl); +- } else { +- Node* addr2 = addr->in(AddPNode::Address); +- if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) && +- addr2->in(AddPNode::Base) == orig_base) { +- addr2 = addr2->clone(); +- addr2->set_req(AddPNode::Base, base); +- addr2->set_req(AddPNode::Address, base); +- phase->register_new_node(addr2, ctrl); +- addr = addr->clone(); +- addr->set_req(AddPNode::Base, base); +- addr->set_req(AddPNode::Address, addr2); +- phase->register_new_node(addr, ctrl); +- } +- } +- } +- call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, false, phase); +- region->init_req(_evac_path, ctrl); +- val_phi->init_req(_evac_path, fwd); +- raw_mem_phi->init_req(_evac_path, result_mem); +- +- phase->register_control(region, loop, heap_stable_iff); +- Node* out_val = val_phi; +- 
phase->register_new_node(val_phi, region); +- phase->register_new_node(raw_mem_phi, region); +- +- fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase); +- +- ctrl = orig_ctrl; +- +- if (unc != NULL) { +- for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { +- Node* u = val->fast_out(i); +- Node* c = phase->ctrl_or_self(u); +- if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) { +- phase->igvn().rehash_node_delayed(u); +- int nb = u->replace_edge(val, out_val); +- --i, imax -= nb; +- } +- } +- if (val->outcnt() == 0) { +- phase->igvn()._worklist.push(val); +- } +- } +- phase->igvn().replace_node(lrb, out_val); +- +- follow_barrier_uses(out_val, ctrl, uses, phase); +- +- for(uint next = 0; next < uses.size(); next++ ) { +- Node *n = uses.at(next); +- assert(phase->get_ctrl(n) == ctrl, "bad control"); +- assert(n != init_raw_mem, "should leave input raw mem above the barrier"); +- phase->set_ctrl(n, region); +- follow_barrier_uses(n, ctrl, uses, phase); +- } +- +- // The slow path call produces memory: hook the raw memory phi +- // from the expanded load reference barrier with the rest of the graph +- // which may require adding memory phis at every post dominated +- // region and at enclosing loop heads. Use the memory state +- // collected in memory_nodes to fix the memory graph. Update that +- // memory state as we go. +- fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses); +- } +- // Done expanding load-reference-barriers. 
+- assert(phase->C->shenandoah_barriers_count() == 0, "all load reference barrier nodes should have been replaced"); +- +-} +- +-Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) { +- if (visited.test_set(in->_idx)) { +- return NULL; +- } +- switch (in->Opcode()) { +- case Op_Proj: +- return get_load_addr(phase, visited, in->in(0)); +- case Op_CastPP: +- case Op_CheckCastPP: +- case Op_DecodeN: +- case Op_EncodeP: +- return get_load_addr(phase, visited, in->in(1)); +- case Op_LoadN: +- case Op_LoadP: +- return in->in(MemNode::Address); +- case Op_GetAndSetN: +- case Op_GetAndSetP: +- // Those instructions would just have stored a different +- // value into the field. No use to attempt to fix it at this point. +- return phase->igvn().zerocon(T_OBJECT); +- case Op_CMoveP: +- case Op_CMoveN: { +- Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue)); +- Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse)); +- // Handle unambiguous cases: single address reported on both branches. +- if (t != NULL && f == NULL) return t; +- if (t == NULL && f != NULL) return f; +- if (t != NULL && t == f) return t; +- // Ambiguity. 
+- return phase->igvn().zerocon(T_OBJECT); +- } +- case Op_Phi: { +- Node* addr = NULL; +- for (uint i = 1; i < in->req(); i++) { +- Node* addr1 = get_load_addr(phase, visited, in->in(i)); +- if (addr == NULL) { +- addr = addr1; +- } +- if (addr != addr1) { +- return phase->igvn().zerocon(T_OBJECT); +- } +- } +- return addr; +- } +- case Op_ShenandoahLoadReferenceBarrier: +- return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); +- case Op_CallDynamicJava: +- case Op_CallLeaf: +- case Op_CallStaticJava: +- case Op_ConN: +- case Op_ConP: +- case Op_Parm: +- case Op_CreateEx: +- return phase->igvn().zerocon(T_OBJECT); +- default: +-#ifdef ASSERT +- fatal(err_msg("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()])); +-#endif +- return phase->igvn().zerocon(T_OBJECT); +- } +- +-} +- +-void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) { +- IdealLoopTree *loop = phase->get_loop(iff); +- Node* loop_head = loop->_head; +- Node* entry_c = loop_head->in(LoopNode::EntryControl); +- +- Node* bol = iff->in(1); +- Node* cmp = bol->in(1); +- Node* andi = cmp->in(1); +- Node* load = andi->in(1); +- +- assert(is_gc_state_load(load), "broken"); +- if (!phase->is_dominator(load->in(0), entry_c)) { +- Node* mem_ctrl = NULL; +- Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase); +- load = load->clone(); +- load->set_req(MemNode::Memory, mem); +- load->set_req(0, entry_c); +- phase->register_new_node(load, entry_c); +- andi = andi->clone(); +- andi->set_req(1, load); +- phase->register_new_node(andi, entry_c); +- cmp = cmp->clone(); +- cmp->set_req(1, andi); +- phase->register_new_node(cmp, entry_c); +- bol = bol->clone(); +- bol->set_req(1, cmp); +- phase->register_new_node(bol, entry_c); +- +- Node* old_bol =iff->in(1); +- phase->igvn().replace_input_of(iff, 1, bol); +- } +-} +- +-bool 
ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) { +- if (!n->is_If() || n->is_CountedLoopEnd()) { +- return false; +- } +- Node* region = n->in(0); +- +- if (!region->is_Region()) { +- return false; +- } +- Node* dom = phase->idom(region); +- if (!dom->is_If()) { +- return false; +- } +- +- if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) { +- return false; +- } +- +- IfNode* dom_if = dom->as_If(); +- Node* proj_true = dom_if->proj_out(1); +- Node* proj_false = dom_if->proj_out(0); +- +- for (uint i = 1; i < region->req(); i++) { +- if (phase->is_dominator(proj_true, region->in(i))) { +- continue; +- } +- if (phase->is_dominator(proj_false, region->in(i))) { +- continue; +- } +- return false; +- } +- +- return true; +-} +- +-static bool merge_point_too_heavy(Compile* C, Node* region) { +- // Bail out if the region and its phis have too many users. +- int weight = 0; +- for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { +- weight += region->fast_out(i)->outcnt(); +- } +- int nodes_left = C->max_node_limit() - C->live_nodes(); +- if (weight * 8 > nodes_left) { +-#ifndef PRODUCT +- if (PrintOpto) { +- tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight); +- } +-#endif +- return true; +- } else { +- return false; +- } +-} +- +-static bool merge_point_safe(Node* region) { +- // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode +- // having a PhiNode input. This sidesteps the dangerous case where the split +- // ConvI2LNode may become TOP if the input Value() does not +- // overlap the ConvI2L range, leaving a node which may not dominate its +- // uses. +- // A better fix for this problem can be found in the BugTraq entry, but +- // expediency for Mantis demands this hack. 
+- // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop +- // split_if_with_blocks from splitting a block because we could not move around +- // the FastLockNode. +- for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { +- Node* n = region->fast_out(i); +- if (n->is_Phi()) { +- for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { +- Node* m = n->fast_out(j); +- if (m->is_FastLock()) +- return false; +-#ifdef _LP64 +- if (m->Opcode() == Op_ConvI2L) +- return false; +- if (m->is_CastII() && m->isa_CastII()->has_range_check()) { +- return false; +- } +-#endif +- } +- } +- } +- return true; +-} +- +-static bool can_split_if(PhaseIdealLoop* phase, Node* n_ctrl) { +- if (phase->C->live_nodes() > 35000) { +- return false; // Method too big +- } +- +- // Do not do 'split-if' if irreducible loops are present. +- if (phase->_has_irreducible_loops) { +- return false; +- } +- +- if (merge_point_too_heavy(phase->C, n_ctrl)) { +- return false; +- } +- +- // Do not do 'split-if' if some paths are dead. First do dead code +- // elimination and then see if its still profitable. +- for (uint i = 1; i < n_ctrl->req(); i++) { +- if (n_ctrl->in(i) == phase->C->top()) { +- return false; +- } +- } +- +- // If trying to do a 'Split-If' at the loop head, it is only +- // profitable if the cmp folds up on BOTH paths. Otherwise we +- // risk peeling a loop forever. +- +- // CNC - Disabled for now. Requires careful handling of loop +- // body selection for the cloned code. Also, make sure we check +- // for any input path not being in the same loop as n_ctrl. For +- // irreducible loops we cannot check for 'n_ctrl->is_Loop()' +- // because the alternative loop entry points won't be converted +- // into LoopNodes. +- IdealLoopTree *n_loop = phase->get_loop(n_ctrl); +- for (uint j = 1; j < n_ctrl->req(); j++) { +- if (phase->get_loop(n_ctrl->in(j)) != n_loop) { +- return false; +- } +- } +- +- // Check for safety of the merge point. 
+- if (!merge_point_safe(n_ctrl)) { +- return false; +- } +- +- return true; +-} +- +-void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) { +- assert(is_heap_stable_test(n), "no other tests"); +- if (identical_backtoback_ifs(n, phase)) { +- Node* n_ctrl = n->in(0); +- if (can_split_if(phase, n_ctrl)) { +- IfNode* dom_if = phase->idom(n_ctrl)->as_If(); +- if (is_heap_stable_test(n)) { +- Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1); +- assert(is_gc_state_load(gc_state_load), "broken"); +- Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1); +- assert(is_gc_state_load(dom_gc_state_load), "broken"); +- if (gc_state_load != dom_gc_state_load) { +- phase->igvn().replace_node(gc_state_load, dom_gc_state_load); +- } +- } +- PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1)); +- Node* proj_true = dom_if->proj_out(1); +- Node* proj_false = dom_if->proj_out(0); +- Node* con_true = phase->igvn().makecon(TypeInt::ONE); +- Node* con_false = phase->igvn().makecon(TypeInt::ZERO); +- +- for (uint i = 1; i < n_ctrl->req(); i++) { +- if (phase->is_dominator(proj_true, n_ctrl->in(i))) { +- bolphi->init_req(i, con_true); +- } else { +- assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if"); +- bolphi->init_req(i, con_false); +- } +- } +- phase->register_new_node(bolphi, n_ctrl); +- phase->igvn().replace_input_of(n, 1, bolphi); +- phase->do_split_if(n); +- } +- } +-} +- +-IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) { +- // Find first invariant test that doesn't exit the loop +- LoopNode *head = loop->_head->as_Loop(); +- IfNode* unswitch_iff = NULL; +- Node* n = head->in(LoopNode::LoopBackControl); +- int loop_has_sfpts = -1; +- while (n != head) { +- Node* n_dom = phase->idom(n); +- if (n->is_Region()) { +- if (n_dom->is_If()) { +- IfNode* iff = n_dom->as_If(); +- if (iff->in(1)->is_Bool()) { +- BoolNode* bol = iff->in(1)->as_Bool(); +- if 
(bol->in(1)->is_Cmp()) { +- // If condition is invariant and not a loop exit, +- // then found reason to unswitch. +- if (is_heap_stable_test(iff) && +- (loop_has_sfpts == -1 || loop_has_sfpts == 0)) { +- assert(!loop->is_loop_exit(iff), "both branches should be in the loop"); +- if (loop_has_sfpts == -1) { +- for(uint i = 0; i < loop->_body.size(); i++) { +- Node *m = loop->_body[i]; +- if (m->is_SafePoint() && !m->is_CallLeaf()) { +- loop_has_sfpts = 1; +- break; +- } +- } +- if (loop_has_sfpts == -1) { +- loop_has_sfpts = 0; +- } +- } +- if (!loop_has_sfpts) { +- unswitch_iff = iff; +- } +- } +- } +- } +- } +- } +- n = n_dom; +- } +- return unswitch_iff; +-} +- +- +-void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) { +- Node_List heap_stable_tests; +- stack.push(phase->C->start(), 0); +- do { +- Node* n = stack.node(); +- uint i = stack.index(); +- +- if (i < n->outcnt()) { +- Node* u = n->raw_out(i); +- stack.set_index(i+1); +- if (!visited.test_set(u->_idx)) { +- stack.push(u, 0); +- } +- } else { +- stack.pop(); +- if (n->is_If() && is_heap_stable_test(n)) { +- heap_stable_tests.push(n); +- } +- } +- } while (stack.size() > 0); +- +- for (uint i = 0; i < heap_stable_tests.size(); i++) { +- Node* n = heap_stable_tests.at(i); +- assert(is_heap_stable_test(n), "only evacuation test"); +- merge_back_to_back_tests(n, phase); +- } +- +- if (!phase->C->major_progress()) { +- VectorSet seen(Thread::current()->resource_area()); +- for (uint i = 0; i < heap_stable_tests.size(); i++) { +- Node* n = heap_stable_tests.at(i); +- IdealLoopTree* loop = phase->get_loop(n); +- if (loop != phase->ltree_root() && +- loop->_child == NULL && +- !loop->_irreducible) { +- Node* head = loop->_head; +- if (head->is_Loop() && +- (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) && +- !seen.test_set(head->_idx)) { +- IfNode* iff = 
find_unswitching_candidate(loop, phase); +- if (iff != NULL) { +- Node* bol = iff->in(1); +- move_gc_state_test_out_of_loop(iff, phase); +- if (loop->policy_unswitching(phase)) { +- phase->do_unswitching(loop, old_new); +- } else { +- // Not proceeding with unswitching. Move load back in +- // the loop. +- phase->igvn().replace_input_of(iff, 1, bol); +- } +- } +- } +- } +- } +- } +-} +- +-#ifdef ASSERT +-void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) { +- const bool trace = false; +- ResourceMark rm; +- Unique_Node_List nodes; +- Unique_Node_List controls; +- Unique_Node_List memories; +- +- nodes.push(root); +- for (uint next = 0; next < nodes.size(); next++) { +- Node *n = nodes.at(next); +- if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) { +- controls.push(n); +- if (trace) { tty->print("XXXXXX verifying"); n->dump(); } +- for (uint next2 = 0; next2 < controls.size(); next2++) { +- Node *m = controls.at(next2); +- for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) { +- Node* u = m->fast_out(i); +- if (u->is_CFG() && !u->is_Root() && +- !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) && +- !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) { +- if (trace) { tty->print("XXXXXX pushing control"); u->dump(); } +- controls.push(u); +- } +- } +- } +- memories.push(n->as_Call()->proj_out(TypeFunc::Memory)); +- for (uint next2 = 0; next2 < memories.size(); next2++) { +- Node *m = memories.at(next2); +- assert(m->bottom_type() == Type::MEMORY, ""); +- for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) { +- Node* u = m->fast_out(i); +- if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) { +- if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); } +- memories.push(u); +- } else if (u->is_LoadStore()) { +- if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); } +- 
memories.push(u->find_out_with(Op_SCMemProj)); +- } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) { +- if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); } +- memories.push(u); +- } else if (u->is_Phi()) { +- assert(u->bottom_type() == Type::MEMORY, ""); +- if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) { +- assert(controls.member(u->in(0)), ""); +- if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); } +- memories.push(u); +- } +- } else if (u->is_SafePoint() || u->is_MemBar()) { +- for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { +- Node* uu = u->fast_out(j); +- if (uu->bottom_type() == Type::MEMORY) { +- if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); } +- memories.push(uu); +- } +- } +- } +- } +- } +- for (uint next2 = 0; next2 < controls.size(); next2++) { +- Node *m = controls.at(next2); +- if (m->is_Region()) { +- bool all_in = true; +- for (uint i = 1; i < m->req(); i++) { +- if (!controls.member(m->in(i))) { +- all_in = false; +- break; +- } +- } +- if (trace) { tty->print("XXX verifying %s", all_in ? 
"all in" : ""); m->dump(); } +- bool found_phi = false; +- for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) { +- Node* u = m->fast_out(j); +- if (u->is_Phi() && memories.member(u)) { +- found_phi = true; +- for (uint i = 1; i < u->req() && found_phi; i++) { +- Node* k = u->in(i); +- if (memories.member(k) != controls.member(m->in(i))) { +- found_phi = false; +- } +- } +- } +- } +- assert(found_phi || all_in, ""); +- } +- } +- controls.clear(); +- memories.clear(); +- } +- for( uint i = 0; i < n->len(); ++i ) { +- Node *m = n->in(i); +- if (m != NULL) { +- nodes.push(m); +- } +- } +- } +-} +-#endif +- +-#ifdef ASSERT +-static bool has_never_branch(Node* root) { +- for (uint i = 1; i < root->req(); i++) { +- Node* in = root->in(i); +- if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) { +- return true; +- } +- } +- return false; +-} +-#endif +- +-void MemoryGraphFixer::collect_memory_nodes() { +- Node_Stack stack(0); +- VectorSet visited(Thread::current()->resource_area()); +- Node_List regions; +- +- // Walk the raw memory graph and create a mapping from CFG node to +- // memory node. Exclude phis for now. 
+- stack.push(_phase->C->root(), 1); +- do { +- Node* n = stack.node(); +- int opc = n->Opcode(); +- uint i = stack.index(); +- if (i < n->req()) { +- Node* mem = NULL; +- if (opc == Op_Root) { +- Node* in = n->in(i); +- int in_opc = in->Opcode(); +- if (in_opc == Op_Return || in_opc == Op_Rethrow) { +- mem = in->in(TypeFunc::Memory); +- } else if (in_opc == Op_Halt) { +- if (in->in(0)->is_Region()) { +- Node* r = in->in(0); +- for (uint j = 1; j < r->req(); j++) { +- assert(r->in(j)->Opcode() != Op_NeverBranch, ""); +- } +- } else { +- Node* proj = in->in(0); +- assert(proj->is_Proj(), ""); +- Node* in = proj->in(0); +- assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), ""); +- if (in->is_CallStaticJava()) { +- mem = in->in(TypeFunc::Memory); +- } else if (in->Opcode() == Op_Catch) { +- Node* call = in->in(0)->in(0); +- assert(call->is_Call(), ""); +- mem = call->in(TypeFunc::Memory); +- } else if (in->Opcode() == Op_NeverBranch) { +- Node* head = in->in(0); +- assert(head->is_Region(), "unexpected infinite loop graph shape"); +- +- Node* phi_mem = NULL; +- for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { +- Node* u = head->fast_out(j); +- if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { +- if (_phase->C->get_alias_index(u->adr_type()) == _alias) { +- assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, ""); +- phi_mem = u; +- } else if (u->adr_type() == TypePtr::BOTTOM) { +- assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, ""); +- if (phi_mem == NULL) { +- phi_mem = u; +- } +- } +- } +- } +- if (phi_mem == NULL) { +- for (uint j = 1; j < head->req(); j++) { +- Node* tail = head->in(j); +- if (!_phase->is_dominator(head, tail)) { +- continue; +- } +- Node* c = tail; +- while (c != head) { +- if (c->is_SafePoint() && !c->is_CallLeaf()) { +- Node* m =c->in(TypeFunc::Memory); +- if (m->is_MergeMem()) { +- m = 
m->as_MergeMem()->memory_at(_alias); +- } +- assert(mem == NULL || mem == m, "several memory states"); +- mem = m; +- } +- c = _phase->idom(c); +- } +- assert(mem != NULL, "should have found safepoint"); +- } +- assert(mem != NULL, "should have found safepoint"); +- } else { +- mem = phi_mem; +- } +- } +- } +- } else { +-#ifdef ASSERT +- n->dump(); +- in->dump(); +-#endif +- ShouldNotReachHere(); +- } +- } else { +- assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, ""); +- assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, ""); +- mem = n->in(i); +- } +- i++; +- stack.set_index(i); +- if (mem == NULL) { +- continue; +- } +- for (;;) { +- if (visited.test_set(mem->_idx) || mem->is_Start()) { +- break; +- } +- if (mem->is_Phi()) { +- stack.push(mem, 2); +- mem = mem->in(1); +- } else if (mem->is_Proj()) { +- stack.push(mem, mem->req()); +- mem = mem->in(0); +- } else if (mem->is_SafePoint() || mem->is_MemBar()) { +- mem = mem->in(TypeFunc::Memory); +- } else if (mem->is_MergeMem()) { +- MergeMemNode* mm = mem->as_MergeMem(); +- mem = mm->memory_at(_alias); +- } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { +- assert(_alias == Compile::AliasIdxRaw, ""); +- stack.push(mem, mem->req()); +- mem = mem->in(MemNode::Memory); +- } else { +-#ifdef ASSERT +- mem->dump(); +-#endif +- ShouldNotReachHere(); +- } +- } +- } else { +- if (n->is_Phi()) { +- // Nothing +- } else if (!n->is_Root()) { +- Node* c = get_ctrl(n); +- _memory_nodes.map(c->_idx, n); +- } +- stack.pop(); +- } +- } while(stack.is_nonempty()); +- +- // Iterate over CFG nodes in rpo and propagate memory state to +- // compute memory state at regions, creating new phis if needed. 
+- Node_List rpo_list; +- visited.Clear(); +- _phase->rpo(_phase->C->root(), stack, visited, rpo_list); +- Node* root = rpo_list.pop(); +- assert(root == _phase->C->root(), ""); +- +- const bool trace = false; +-#ifdef ASSERT +- if (trace) { +- for (int i = rpo_list.size() - 1; i >= 0; i--) { +- Node* c = rpo_list.at(i); +- if (_memory_nodes[c->_idx] != NULL) { +- tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); +- } +- } +- } +-#endif +- uint last = _phase->C->unique(); +- +-#ifdef ASSERT +- uint16_t max_depth = 0; +- for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) { +- IdealLoopTree* lpt = iter.current(); +- max_depth = MAX2(max_depth, lpt->_nest); +- } +-#endif +- +- bool progress = true; +- int iteration = 0; +- Node_List dead_phis; +- while (progress) { +- progress = false; +- iteration++; +- assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), ""); +- if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); } +- IdealLoopTree* last_updated_ilt = NULL; +- for (int i = rpo_list.size() - 1; i >= 0; i--) { +- Node* c = rpo_list.at(i); +- +- Node* prev_mem = _memory_nodes[c->_idx]; +- if (c->is_Region()) { +- Node* prev_region = regions[c->_idx]; +- Node* unique = NULL; +- for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) { +- Node* m = _memory_nodes[c->in(j)->_idx]; +- assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state"); +- if (m != NULL) { +- if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) { +- assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), ""); +- // continue +- } else if (unique == NULL) { +- unique = m; +- } else if (m == unique) { +- // 
continue +- } else { +- unique = NodeSentinel; +- } +- } +- } +- assert(unique != NULL, "empty phi???"); +- if (unique != NodeSentinel) { +- if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) { +- dead_phis.push(prev_region); +- } +- regions.map(c->_idx, unique); +- } else { +- Node* phi = NULL; +- if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) { +- phi = prev_region; +- for (uint k = 1; k < c->req(); k++) { +- Node* m = _memory_nodes[c->in(k)->_idx]; +- assert(m != NULL, "expect memory state"); +- phi->set_req(k, m); +- } +- } else { +- for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) { +- Node* u = c->fast_out(j); +- if (u->is_Phi() && u->bottom_type() == Type::MEMORY && +- (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) { +- phi = u; +- for (uint k = 1; k < c->req() && phi != NULL; k++) { +- Node* m = _memory_nodes[c->in(k)->_idx]; +- assert(m != NULL, "expect memory state"); +- if (u->in(k) != m) { +- phi = NULL; +- } +- } +- } +- } +- if (phi == NULL) { +- phi = new (_phase->C) PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias)); +- for (uint k = 1; k < c->req(); k++) { +- Node* m = _memory_nodes[c->in(k)->_idx]; +- assert(m != NULL, "expect memory state"); +- phi->init_req(k, m); +- } +- } +- } +- assert(phi != NULL, ""); +- regions.map(c->_idx, phi); +- } +- Node* current_region = regions[c->_idx]; +- if (current_region != prev_region) { +- progress = true; +- if (prev_region == prev_mem) { +- _memory_nodes.map(c->_idx, current_region); +- } +- } +- } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) { +- Node* m = _memory_nodes[_phase->idom(c)->_idx]; +- assert(m != NULL, "expect memory state"); +- if (m != prev_mem) { +- _memory_nodes.map(c->_idx, m); +- progress = true; +- } +- } +-#ifdef ASSERT +- if (trace) { tty->print("X %d", c->_idx); 
_memory_nodes[c->_idx]->dump(); } +-#endif +- } +- } +- +- // Replace existing phi with computed memory state for that region +- // if different (could be a new phi or a dominating memory node if +- // that phi was found to be useless). +- while (dead_phis.size() > 0) { +- Node* n = dead_phis.pop(); +- n->replace_by(_phase->C->top()); +- n->destruct(); +- } +- for (int i = rpo_list.size() - 1; i >= 0; i--) { +- Node* c = rpo_list.at(i); +- if (c->is_Region()) { +- Node* n = regions[c->_idx]; +- if (n->is_Phi() && n->_idx >= last && n->in(0) == c) { +- _phase->register_new_node(n, c); +- } +- } +- } +- for (int i = rpo_list.size() - 1; i >= 0; i--) { +- Node* c = rpo_list.at(i); +- if (c->is_Region()) { +- Node* n = regions[c->_idx]; +- for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) { +- Node* u = c->fast_out(i); +- if (u->is_Phi() && u->bottom_type() == Type::MEMORY && +- u != n) { +- if (u->adr_type() == TypePtr::BOTTOM) { +- fix_memory_uses(u, n, n, c); +- } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { +- _phase->lazy_replace(u, n); +- --i; --imax; +- } +- } +- } +- } +- } +-} +- +-Node* MemoryGraphFixer::get_ctrl(Node* n) const { +- Node* c = _phase->get_ctrl(n); +- if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) { +- assert(c == n->in(0), ""); +- CallNode* call = c->as_Call(); +- CallProjections projs; +- call->extract_projections(&projs, true, false); +- if (projs.catchall_memproj != NULL) { +- if (projs.fallthrough_memproj == n) { +- c = projs.fallthrough_catchproj; +- } else { +- assert(projs.catchall_memproj == n, ""); +- c = projs.catchall_catchproj; +- } +- } +- } +- return c; +-} +- +-Node* MemoryGraphFixer::ctrl_or_self(Node* n) const { +- if (_phase->has_ctrl(n)) +- return get_ctrl(n); +- else { +- assert (n->is_CFG(), "must be a CFG node"); +- return n; +- } +-} +- +-bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const { +- return m != NULL && get_ctrl(m) == c; +-} +- +-Node* 
MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const { +- assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, ""); +- Node* mem = _memory_nodes[ctrl->_idx]; +- Node* c = ctrl; +- while (!mem_is_valid(mem, c) && +- (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) { +- c = _phase->idom(c); +- mem = _memory_nodes[c->_idx]; +- } +- if (n != NULL && mem_is_valid(mem, c)) { +- while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) { +- mem = next_mem(mem, _alias); +- } +- if (mem->is_MergeMem()) { +- mem = mem->as_MergeMem()->memory_at(_alias); +- } +- if (!mem_is_valid(mem, c)) { +- do { +- c = _phase->idom(c); +- mem = _memory_nodes[c->_idx]; +- } while (!mem_is_valid(mem, c) && +- (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))); +- } +- } +- assert(mem->bottom_type() == Type::MEMORY, ""); +- return mem; +-} +- +-bool MemoryGraphFixer::has_mem_phi(Node* region) const { +- for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { +- Node* use = region->fast_out(i); +- if (use->is_Phi() && use->bottom_type() == Type::MEMORY && +- (_phase->C->get_alias_index(use->adr_type()) == _alias)) { +- return true; +- } +- } +- return false; +-} +- +-void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) { +- assert(_phase->ctrl_or_self(new_mem) == new_ctrl, ""); +- const bool trace = false; +- DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); }); +- DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); }); +- GrowableArray phis; +- if (mem_for_ctrl != mem) { +- Node* old = mem_for_ctrl; +- Node* prev = NULL; +- while (old != mem) { +- prev = old; +- if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) { +- assert(_alias == Compile::AliasIdxRaw, ""); +- old = old->in(MemNode::Memory); +- } else if (old->Opcode() == Op_SCMemProj) { 
+- assert(_alias == Compile::AliasIdxRaw, ""); +- old = old->in(0); +- } else { +- ShouldNotReachHere(); +- } +- } +- assert(prev != NULL, ""); +- if (new_ctrl != ctrl) { +- _memory_nodes.map(ctrl->_idx, mem); +- _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl); +- } +- uint input = (uint)MemNode::Memory; +- _phase->igvn().replace_input_of(prev, input, new_mem); +- } else { +- uses.clear(); +- _memory_nodes.map(new_ctrl->_idx, new_mem); +- uses.push(new_ctrl); +- for(uint next = 0; next < uses.size(); next++ ) { +- Node *n = uses.at(next); +- assert(n->is_CFG(), ""); +- DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); }); +- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { +- Node* u = n->fast_out(i); +- if (!u->is_Root() && u->is_CFG() && u != n) { +- Node* m = _memory_nodes[u->_idx]; +- if (u->is_Region() && +- !has_mem_phi(u) && +- u->unique_ctrl_out()->Opcode() != Op_Halt) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); }); +- DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); }); +- +- if (!mem_is_valid(m, u) || !m->is_Phi()) { +- bool push = true; +- bool create_phi = true; +- if (_phase->is_dominator(new_ctrl, u)) { +- create_phi = false; +- } +- if (create_phi) { +- Node* phi = new (_phase->C) PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias)); +- _phase->register_new_node(phi, u); +- phis.push(phi); +- DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); }); +- if (!mem_is_valid(m, u)) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); }); +- _memory_nodes.map(u->_idx, phi); +- } else { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); }); +- for (;;) { +- assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), ""); +- Node* next = NULL; +- if (m->is_Proj()) { +- next = m->in(0); +- } else { +- assert(m->is_Mem() || m->is_LoadStore(), ""); +- assert(_alias == Compile::AliasIdxRaw, ""); +- next = m->in(MemNode::Memory); +- } +- 
if (_phase->get_ctrl(next) != u) { +- break; +- } +- if (next->is_MergeMem()) { +- assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, ""); +- break; +- } +- if (next->is_Phi()) { +- assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, ""); +- break; +- } +- m = next; +- } +- +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); }); +- assert(m->is_Mem() || m->is_LoadStore(), ""); +- uint input = (uint)MemNode::Memory; +- _phase->igvn().replace_input_of(m, input, phi); +- push = false; +- } +- } else { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); }); +- } +- if (push) { +- uses.push(u); +- } +- } +- } else if (!mem_is_valid(m, u) && +- !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) { +- uses.push(u); +- } +- } +- } +- } +- for (int i = 0; i < phis.length(); i++) { +- Node* n = phis.at(i); +- Node* r = n->in(0); +- DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); }); +- for (uint j = 1; j < n->req(); j++) { +- Node* m = find_mem(r->in(j), NULL); +- _phase->igvn().replace_input_of(n, j, m); +- DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); }); +- } +- } +- } +- uint last = _phase->C->unique(); +- MergeMemNode* mm = NULL; +- int alias = _alias; +- DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); }); +- // Process loads first to not miss an anti-dependency: if the memory +- // edge of a store is updated before a load is processed then an +- // anti-dependency may be missed. 
+- for (DUIterator i = mem->outs(); mem->has_out(i); i++) { +- Node* u = mem->out(i); +- if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) { +- Node* m = find_mem(_phase->get_ctrl(u), u); +- if (m != mem) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); +- _phase->igvn().replace_input_of(u, MemNode::Memory, m); +- --i; +- } +- } +- } +- for (DUIterator i = mem->outs(); mem->has_out(i); i++) { +- Node* u = mem->out(i); +- if (u->_idx < last) { +- if (u->is_Mem()) { +- if (_phase->C->get_alias_index(u->adr_type()) == alias) { +- Node* m = find_mem(_phase->get_ctrl(u), u); +- if (m != mem) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); +- _phase->igvn().replace_input_of(u, MemNode::Memory, m); +- --i; +- } +- } +- } else if (u->is_MergeMem()) { +- MergeMemNode* u_mm = u->as_MergeMem(); +- if (u_mm->memory_at(alias) == mem) { +- MergeMemNode* newmm = NULL; +- for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { +- Node* uu = u->fast_out(j); +- assert(!uu->is_MergeMem(), "chain of MergeMems?"); +- if (uu->is_Phi()) { +- assert(uu->adr_type() == TypePtr::BOTTOM, ""); +- Node* region = uu->in(0); +- int nb = 0; +- for (uint k = 1; k < uu->req(); k++) { +- if (uu->in(k) == u) { +- Node* m = find_mem(region->in(k), NULL); +- if (m != mem) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); }); +- newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); +- if (newmm != u) { +- _phase->igvn().replace_input_of(uu, k, newmm); +- nb++; +- --jmax; +- } +- } +- } +- } +- if (nb > 0) { +- --j; +- } +- } else { +- Node* m = find_mem(_phase->ctrl_or_self(uu), uu); +- if (m != mem) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); }); +- newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i); +- if (newmm != u) { +- _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); 
+- --j, --jmax; +- } +- } +- } +- } +- } +- } else if (u->is_Phi()) { +- assert(u->bottom_type() == Type::MEMORY, "what else?"); +- if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) { +- Node* region = u->in(0); +- bool replaced = false; +- for (uint j = 1; j < u->req(); j++) { +- if (u->in(j) == mem) { +- Node* m = find_mem(region->in(j), NULL); +- Node* nnew = m; +- if (m != mem) { +- if (u->adr_type() == TypePtr::BOTTOM) { +- mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m)); +- nnew = mm; +- } +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); }); +- _phase->igvn().replace_input_of(u, j, nnew); +- replaced = true; +- } +- } +- } +- if (replaced) { +- --i; +- } +- } +- } else if ((u->adr_type() == TypePtr::BOTTOM) || +- u->adr_type() == NULL) { +- assert(u->adr_type() != NULL || +- u->Opcode() == Op_Rethrow || +- u->Opcode() == Op_Return || +- u->Opcode() == Op_SafePoint || +- (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || +- (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || +- u->Opcode() == Op_CallLeaf, ""); +- Node* m = find_mem(_phase->ctrl_or_self(u), u); +- if (m != mem) { +- mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m)); +- _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); +- --i; +- } +- } else if (_phase->C->get_alias_index(u->adr_type()) == alias) { +- Node* m = find_mem(_phase->ctrl_or_self(u), u); +- if (m != mem) { +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); +- _phase->igvn().replace_input_of(u, u->find_edge(mem), m); +- --i; +- } +- } else if (u->adr_type() != TypePtr::BOTTOM && +- _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) { +- Node* m = find_mem(_phase->ctrl_or_self(u), u); +- assert(m != mem, ""); +- // u is on the wrong slice... 
+- assert(u->is_ClearArray(), ""); +- DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); }); +- _phase->igvn().replace_input_of(u, u->find_edge(mem), m); +- --i; +- } +- } +- } +-#ifdef ASSERT +- assert(new_mem->outcnt() > 0, ""); +- for (int i = 0; i < phis.length(); i++) { +- Node* n = phis.at(i); +- assert(n->outcnt() > 0, "new phi must have uses now"); +- } +-#endif +-} +- +-MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const { +- MergeMemNode* mm = MergeMemNode::make(_phase->C, mem); +- mm->set_memory_at(_alias, rep_proj); +- _phase->register_new_node(mm, rep_ctrl); +- return mm; +-} +- +-MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const { +- MergeMemNode* newmm = NULL; +- MergeMemNode* u_mm = u->as_MergeMem(); +- Node* c = _phase->get_ctrl(u); +- if (_phase->is_dominator(c, rep_ctrl)) { +- c = rep_ctrl; +- } else { +- assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other"); +- } +- if (u->outcnt() == 1) { +- if (u->req() > (uint)_alias && u->in(_alias) == mem) { +- _phase->igvn().replace_input_of(u, _alias, rep_proj); +- --i; +- } else { +- _phase->igvn().rehash_node_delayed(u); +- u_mm->set_memory_at(_alias, rep_proj); +- } +- newmm = u_mm; +- _phase->set_ctrl_and_loop(u, c); +- } else { +- // can't simply clone u and then change one of its input because +- // it adds and then removes an edge which messes with the +- // DUIterator +- newmm = MergeMemNode::make(_phase->C, u_mm->base_memory()); +- for (uint j = 0; j < u->req(); j++) { +- if (j < newmm->req()) { +- if (j == (uint)_alias) { +- newmm->set_req(j, rep_proj); +- } else if (newmm->in(j) != u->in(j)) { +- newmm->set_req(j, u->in(j)); +- } +- } else if (j == (uint)_alias) { +- newmm->add_req(rep_proj); +- } else { +- newmm->add_req(u->in(j)); +- } +- } +- if ((uint)_alias >= u->req()) { +- newmm->set_memory_at(_alias, rep_proj); +- } +- 
_phase->register_new_node(newmm, c); +- } +- return newmm; +-} +- +-bool MemoryGraphFixer::should_process_phi(Node* phi) const { +- if (phi->adr_type() == TypePtr::BOTTOM) { +- Node* region = phi->in(0); +- for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) { +- Node* uu = region->fast_out(j); +- if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) { +- return false; +- } +- } +- return true; +- } +- return _phase->C->get_alias_index(phi->adr_type()) == _alias; +-} +- +-void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const { +- uint last = _phase-> C->unique(); +- MergeMemNode* mm = NULL; +- assert(mem->bottom_type() == Type::MEMORY, ""); +- for (DUIterator i = mem->outs(); mem->has_out(i); i++) { +- Node* u = mem->out(i); +- if (u != replacement && u->_idx < last) { +- if (u->is_MergeMem()) { +- MergeMemNode* u_mm = u->as_MergeMem(); +- if (u_mm->memory_at(_alias) == mem) { +- MergeMemNode* newmm = NULL; +- for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) { +- Node* uu = u->fast_out(j); +- assert(!uu->is_MergeMem(), "chain of MergeMems?"); +- if (uu->is_Phi()) { +- if (should_process_phi(uu)) { +- Node* region = uu->in(0); +- int nb = 0; +- for (uint k = 1; k < uu->req(); k++) { +- if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) { +- if (newmm == NULL) { +- newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i); +- } +- if (newmm != u) { +- _phase->igvn().replace_input_of(uu, k, newmm); +- nb++; +- --jmax; +- } +- } +- } +- if (nb > 0) { +- --j; +- } +- } +- } else { +- if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) { +- if (newmm == NULL) { +- newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i); +- } +- if (newmm != u) { +- _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm); +- --j, --jmax; +- } 
+- } +- } +- } +- } +- } else if (u->is_Phi()) { +- assert(u->bottom_type() == Type::MEMORY, "what else?"); +- Node* region = u->in(0); +- if (should_process_phi(u)) { +- bool replaced = false; +- for (uint j = 1; j < u->req(); j++) { +- if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) { +- Node* nnew = rep_proj; +- if (u->adr_type() == TypePtr::BOTTOM) { +- if (mm == NULL) { +- mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); +- } +- nnew = mm; +- } +- _phase->igvn().replace_input_of(u, j, nnew); +- replaced = true; +- } +- } +- if (replaced) { +- --i; +- } +- +- } +- } else if ((u->adr_type() == TypePtr::BOTTOM) || +- u->adr_type() == NULL) { +- assert(u->adr_type() != NULL || +- u->Opcode() == Op_Rethrow || +- u->Opcode() == Op_Return || +- u->Opcode() == Op_SafePoint || +- u->Opcode() == Op_StoreIConditional || +- u->Opcode() == Op_StoreLConditional || +- (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) || +- (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) || +- u->Opcode() == Op_CallLeaf, err_msg("%s", u->Name())); +- if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { +- if (mm == NULL) { +- mm = allocate_merge_mem(mem, rep_proj, rep_ctrl); +- } +- _phase->igvn().replace_input_of(u, u->find_edge(mem), mm); +- --i; +- } +- } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) { +- if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) { +- _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj); +- --i; +- } +- } +- } +- } +-} +- +-ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj) +-: Node(ctrl, obj) { +- Compile::current()->add_shenandoah_barrier(this); +-} +- +-const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const { +- if (in(ValueIn) == NULL || in(ValueIn)->is_top()) { +- return 
Type::TOP; +- } +- const Type* t = in(ValueIn)->bottom_type(); +- if (t == TypePtr::NULL_PTR) { +- return t; +- } +- return t->is_oopptr(); +-} +- +-const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseTransform *phase) const { +- // Either input is TOP ==> the result is TOP +- const Type *t2 = phase->type(in(ValueIn)); +- if( t2 == Type::TOP ) return Type::TOP; +- +- if (t2 == TypePtr::NULL_PTR) { +- return t2; +- } +- +- const Type* type = t2->is_oopptr(); +- return type; +-} +- +-Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseTransform *phase) { +- Node* value = in(ValueIn); +- if (!needs_barrier(phase, value)) { +- return value; +- } +- return this; +-} +- +-bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseTransform* phase, Node* n) { +- Unique_Node_List visited; +- return needs_barrier_impl(phase, n, visited); +-} +- +-bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseTransform* phase, Node* n, Unique_Node_List &visited) { +- if (n == NULL) return false; +- if (visited.member(n)) { +- return false; // Been there. +- } +- visited.push(n); +- +- if (n->is_Allocate()) { +- // tty->print_cr("optimize barrier on alloc"); +- return false; +- } +- if (n->is_Call()) { +- // tty->print_cr("optimize barrier on call"); +- return false; +- } +- +- const Type* type = phase->type(n); +- if (type == Type::TOP) { +- return false; +- } +- if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) { +- // tty->print_cr("optimize barrier on null"); +- return false; +- } +- // Impl detail: Need to check isa_(narrow)oop before calling to make_oopptr on potentially non-oop types +- // in 8u, otherwise make_oopptr would assert. make_oopptr is fixed later during JDK-8078629. +- if ((type->isa_oopptr() || type->isa_narrowoop()) && type->make_oopptr()->const_oop() != NULL) { +- // tty->print_cr("optimize barrier on constant"); +- return false; +- } +- +- switch (n->Opcode()) { +- case Op_AddP: +- return true; // TODO: Can refine? 
+- case Op_LoadP: +- case Op_GetAndSetN: +- case Op_GetAndSetP: +- return true; +- case Op_Phi: { +- for (uint i = 1; i < n->req(); i++) { +- if (needs_barrier_impl(phase, n->in(i), visited)) return true; +- } +- return false; +- } +- case Op_CheckCastPP: +- case Op_CastPP: +- return needs_barrier_impl(phase, n->in(1), visited); +- case Op_Proj: +- return needs_barrier_impl(phase, n->in(0), visited); +- case Op_ShenandoahLoadReferenceBarrier: +- // tty->print_cr("optimize barrier on barrier"); +- return false; +- case Op_Parm: +- // tty->print_cr("optimize barrier on input arg"); +- return false; +- case Op_DecodeN: +- case Op_EncodeP: +- return needs_barrier_impl(phase, n->in(1), visited); +- case Op_LoadN: +- return true; +- case Op_CMoveN: +- case Op_CMoveP: +- return needs_barrier_impl(phase, n->in(2), visited) || +- needs_barrier_impl(phase, n->in(3), visited); +- case Op_CreateEx: +- return false; +- default: +- break; +- } +-#ifdef ASSERT +- tty->print("need barrier on?: "); +- tty->print_cr("ins:"); +- n->dump(2); +- tty->print_cr("outs:"); +- n->dump(-2); +- ShouldNotReachHere(); +-#endif +- return true; +-} +- +-bool ShenandoahLoadReferenceBarrierNode::is_redundant() { +- Unique_Node_List visited; +- Node_Stack stack(0); +- stack.push(this, 0); +- +- // Check if the barrier is actually useful: go over nodes looking for useful uses +- // (e.g. memory accesses). Stop once we detected a required use. Otherwise, walk +- // until we ran out of nodes, and then declare the barrier redundant. 
+- while (stack.size() > 0) { +- Node* n = stack.node(); +- if (visited.member(n)) { +- stack.pop(); +- continue; +- } +- visited.push(n); +- bool visit_users = false; +- switch (n->Opcode()) { +- case Op_CallStaticJava: +- case Op_CallDynamicJava: +- case Op_CallLeaf: +- case Op_CallLeafNoFP: +- case Op_CompareAndSwapL: +- case Op_CompareAndSwapI: +- case Op_CompareAndSwapN: +- case Op_CompareAndSwapP: +- case Op_ShenandoahCompareAndSwapN: +- case Op_ShenandoahCompareAndSwapP: +- case Op_GetAndSetL: +- case Op_GetAndSetI: +- case Op_GetAndSetP: +- case Op_GetAndSetN: +- case Op_GetAndAddL: +- case Op_GetAndAddI: +- case Op_FastLock: +- case Op_FastUnlock: +- case Op_Rethrow: +- case Op_Return: +- case Op_StoreB: +- case Op_StoreC: +- case Op_StoreD: +- case Op_StoreF: +- case Op_StoreL: +- case Op_StoreLConditional: +- case Op_StoreI: +- case Op_StoreIConditional: +- case Op_StoreN: +- case Op_StoreP: +- case Op_StoreVector: +- case Op_EncodeP: +- case Op_CastP2X: +- case Op_SafePoint: +- case Op_EncodeISOArray: +- case Op_AryEq: +- case Op_StrEquals: +- case Op_StrComp: +- case Op_StrIndexOf: +- // Known to require barriers +- return false; +- case Op_CmpP: { +- if (n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) || +- n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) { +- // One of the sides is known null, no need for barrier. 
+- } else { +- return false; +- } +- break; +- } +- case Op_LoadB: +- case Op_LoadUB: +- case Op_LoadUS: +- case Op_LoadD: +- case Op_LoadF: +- case Op_LoadL: +- case Op_LoadI: +- case Op_LoadS: +- case Op_LoadN: +- case Op_LoadP: +- case Op_LoadVector: { +- const TypePtr* adr_type = n->adr_type(); +- int alias_idx = Compile::current()->get_alias_index(adr_type); +- Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx); +- ciField* field = alias_type->field(); +- bool is_static = field != NULL && field->is_static(); +- bool is_final = field != NULL && field->is_final(); +- +- if (ShenandoahOptimizeStaticFinals && is_static && is_final) { +- // Loading the constant does not require barriers: it should be handled +- // as part of GC roots already. +- } else { +- return false; +- } +- break; +- } +- case Op_Conv2B: +- case Op_LoadRange: +- case Op_LoadKlass: +- case Op_LoadNKlass: +- // Do not require barriers +- break; +- case Op_AddP: +- case Op_CheckCastPP: +- case Op_CastPP: +- case Op_CMoveP: +- case Op_Phi: +- case Op_ShenandoahLoadReferenceBarrier: +- // Whether or not these need the barriers depends on their users +- visit_users = true; +- break; +- default: { +-#ifdef ASSERT +- fatal(err_msg("Unknown node in is_redundant: %s", NodeClassNames[n->Opcode()])); +-#else +- // Default to have excess barriers, rather than miss some. +- return false; +-#endif +- } +- } +- +- stack.pop(); +- if (visit_users) { +- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { +- Node* user = n->fast_out(i); +- if (user != NULL) { +- stack.push(user, 0); +- } +- } +- } +- } +- +- // No need for barrier found. 
+- return true; +-} +- +-CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) { +- Node* val = in(ValueIn); +- +- const Type* val_t = igvn.type(val); +- +- if (val_t->meet(TypePtr::NULL_PTR) != val_t && +- val->Opcode() == Op_CastPP && +- val->in(0) != NULL && +- val->in(0)->Opcode() == Op_IfTrue && +- val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && +- val->in(0)->in(0)->is_If() && +- val->in(0)->in(0)->in(1)->Opcode() == Op_Bool && +- val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && +- val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && +- val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) && +- val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { +- assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), ""); +- CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); +- return unc; +- } +- return NULL; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/c2/shenandoahSupport.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,186 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP +-#define SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP +- +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "memory/allocation.hpp" +-#include "opto/addnode.hpp" +-#include "opto/graphKit.hpp" +-#include "opto/machnode.hpp" +-#include "opto/memnode.hpp" +-#include "opto/multnode.hpp" +-#include "opto/node.hpp" +- +-class IdealLoopTree; +-class PhaseGVN; +-class MemoryGraphFixer; +- +-class ShenandoahBarrierC2Support : public AllStatic { +-private: +-#ifdef ASSERT +- enum verify_type { +- ShenandoahLoad, +- ShenandoahStore, +- ShenandoahValue, +- ShenandoahOopStore, +- ShenandoahNone +- }; +- +- static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used); +- static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL); +-public: +- static void verify_raw_mem(RootNode* root); +-private: +-#endif +- static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase); +- static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase); +- static bool is_gc_state_test(Node* iff, int mask); +- static bool has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase); +- static Node* find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase); +- static void follow_barrier_uses(Node* n, Node* ctrl, 
Unique_Node_List& uses, PhaseIdealLoop* phase); +- static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase); +- static void test_gc_state(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl, +- PhaseIdealLoop* phase, int flags); +- static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase); +- static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase); +- static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses, +- PhaseIdealLoop* phase); +- static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase); +- static void move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase); +- static void merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase); +- static bool identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase); +- static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase); +- static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase); +- +- static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb); +-public: +- static bool is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase); +- static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase); +- +- static bool is_gc_state_load(Node* n); +- static bool is_heap_stable_test(Node* iff); +- +- static bool expand(Compile* C, PhaseIterGVN& igvn); +- static void pin_and_expand(PhaseIdealLoop* phase); +- static void optimize_after_expansion(VectorSet& visited, Node_Stack& nstack, Node_List& old_new, PhaseIdealLoop* phase); +- +-#ifdef ASSERT +- static void verify(RootNode* root); +-#endif +-}; +- +-class MemoryGraphFixer : public ResourceObj { +-private: +- Node_List 
_memory_nodes; +- int _alias; +- PhaseIdealLoop* _phase; +- bool _include_lsm; +- +- void collect_memory_nodes(); +- Node* get_ctrl(Node* n) const; +- Node* ctrl_or_self(Node* n) const; +- bool mem_is_valid(Node* m, Node* c) const; +- MergeMemNode* allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const; +- MergeMemNode* clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const; +- void fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const; +- bool should_process_phi(Node* phi) const; +- bool has_mem_phi(Node* region) const; +- +-public: +- MemoryGraphFixer(int alias, bool include_lsm, PhaseIdealLoop* phase) : +- _alias(alias), _phase(phase), _include_lsm(include_lsm) { +- assert(_alias != Compile::AliasIdxBot, "unsupported"); +- collect_memory_nodes(); +- } +- +- Node* find_mem(Node* ctrl, Node* n) const; +- void fix_mem(Node* ctrl, Node* region, Node* mem, Node* mem_for_ctrl, Node* mem_phi, Unique_Node_List& uses); +- int alias() const { return _alias; } +-}; +- +-class ShenandoahCompareAndSwapPNode : public CompareAndSwapPNode { +-public: +- ShenandoahCompareAndSwapPNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex) +- : CompareAndSwapPNode(c, mem, adr, val, ex) { } +- +- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { +- if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) { +- return new (phase->C) CompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn)); +- } +- return NULL; +- } +- +- virtual int Opcode() const; +-}; +- +-class ShenandoahCompareAndSwapNNode : public CompareAndSwapNNode { +-public: +- ShenandoahCompareAndSwapNNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex) +- : CompareAndSwapNNode(c, mem, adr, val, ex) { } +- +- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { +- if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) 
{ +- return new (phase->C) CompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn)); +- } +- return NULL; +- } +- +- virtual int Opcode() const; +-}; +- +-class ShenandoahLoadReferenceBarrierNode : public Node { +-public: +- enum { +- Control, +- ValueIn +- }; +- +- ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* val); +- +- virtual int Opcode() const; +- virtual const Type* bottom_type() const; +- virtual const Type* Value(PhaseTransform *phase) const; +- virtual const class TypePtr *adr_type() const { return TypeOopPtr::BOTTOM; } +- virtual uint match_edge(uint idx) const { +- return idx >= ValueIn; +- } +- virtual uint ideal_reg() const { return Op_RegP; } +- +- virtual Node* Identity(PhaseTransform *phase); +- +- uint size_of() const { +- return sizeof(*this); +- } +- +- bool is_redundant(); +- CallStaticJavaNode* pin_and_expand_null_check(PhaseIterGVN& igvn); +- +-private: +- bool needs_barrier(PhaseTransform* phase, Node* n); +- bool needs_barrier_impl(PhaseTransform* phase, Node* n, Unique_Node_List &visited); +-}; +- +- +-#endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHSUPPORT_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,166 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "utilities/quickSort.hpp" +- +-ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() : +- ShenandoahHeuristics() {} +- +-ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {} +- +-void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t actual_free) { +- size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; +- +- // The logic for cset selection in adaptive is as follows: +- // +- // 1. We cannot get cset larger than available free space. Otherwise we guarantee OOME +- // during evacuation, and thus guarantee full GC. In practice, we also want to let +- // application to allocate something. 
This is why we limit CSet to some fraction of +- // available space. In non-overloaded heap, max_cset would contain all plausible candidates +- // over garbage threshold. +- // +- // 2. We should not get cset too low so that free threshold would not be met right +- // after the cycle. Otherwise we get back-to-back cycles for no reason if heap is +- // too fragmented. In non-overloaded non-fragmented heap min_garbage would be around zero. +- // +- // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates +- // before we meet min_garbage. Then we add all candidates that fit with a garbage threshold before +- // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme, +- // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_garbage is hit. +- +- size_t capacity = ShenandoahHeap::heap()->soft_max_capacity(); +- size_t max_cset = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste); +- size_t free_target = (capacity / 100 * ShenandoahMinFreeThreshold) + max_cset; +- size_t min_garbage = (free_target > actual_free ? (free_target - actual_free) : 0); +- +- log_info(gc, ergo)("Adaptive CSet Selection. 
Target Free: " SIZE_FORMAT "%s, Actual Free: " +- SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target), +- byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), +- byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), +- byte_size_in_proper_unit(min_garbage), proper_unit_for_byte_size(min_garbage)); +- +- // Better select garbage-first regions +- QuickSort::sort(data, (int)size, compare_by_garbage, false); +- +- size_t cur_cset = 0; +- size_t cur_garbage = 0; +- +- for (size_t idx = 0; idx < size; idx++) { +- ShenandoahHeapRegion* r = data[idx]._region; +- +- size_t new_cset = cur_cset + r->get_live_data_bytes(); +- size_t new_garbage = cur_garbage + r->garbage(); +- +- if (new_cset > max_cset) { +- break; +- } +- +- if ((new_garbage < min_garbage) || (r->garbage() > garbage_threshold)) { +- cset->add_region(r); +- cur_cset = new_cset; +- cur_garbage = new_garbage; +- } +- } +-} +- +-void ShenandoahAdaptiveHeuristics::record_cycle_start() { +- ShenandoahHeuristics::record_cycle_start(); +-} +- +-bool ShenandoahAdaptiveHeuristics::should_start_gc() const { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- size_t max_capacity = heap->max_capacity(); +- size_t capacity = heap->soft_max_capacity(); +- size_t available = heap->free_set()->available(); +- +- // Make sure the code below treats available without the soft tail. +- size_t soft_tail = max_capacity - capacity; +- available = (available > soft_tail) ? (available - soft_tail) : 0; +- +- // Check if we are falling below the worst limit, time to trigger the GC, regardless of +- // anything else. 
+- size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; +- if (available < min_threshold) { +- log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", +- byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), +- byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); +- return true; +- } +- +- // Check if are need to learn a bit about the application +- const size_t max_learn = ShenandoahLearningSteps; +- if (_gc_times_learned < max_learn) { +- size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; +- if (available < init_threshold) { +- log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", +- _gc_times_learned + 1, max_learn, +- byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), +- byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); +- return true; +- } +- } +- +- // Check if allocation headroom is still okay. This also factors in: +- // 1. Some space to absorb allocation spikes +- // 2. 
Accumulated penalties from Degenerated and Full GC +- +- size_t allocation_headroom = available; +- +- size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor; +- size_t penalties = capacity / 100 * _gc_time_penalties; +- +- allocation_headroom -= MIN2(allocation_headroom, spike_headroom); +- allocation_headroom -= MIN2(allocation_headroom, penalties); +- +- // TODO: Allocation rate is way too averaged to be useful during state changes +- +- double average_gc = _gc_time_history->avg(); +- double time_since_last = time_since_last_gc(); +- double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last; +- +- if (average_gc > allocation_headroom / allocation_rate) { +- log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s)", +- average_gc * 1000, +- byte_size_in_proper_unit(allocation_rate), proper_unit_for_byte_size(allocation_rate), +- byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); +- log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), +- byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), +- byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), +- byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom)); +- return true; +- } +- +- return ShenandoahHeuristics::should_start_gc(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,50 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "utilities/numberSeq.hpp" +- +-class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics { +-public: +- ShenandoahAdaptiveHeuristics(); +- +- virtual ~ShenandoahAdaptiveHeuristics(); +- +- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t actual_free); +- +- void record_cycle_start(); +- +- virtual bool should_start_gc() const; +- +- virtual const char* name() { return "Adaptive"; } +- virtual bool is_diagnostic() { return false; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,73 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "runtime/os.hpp" +- +-ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() { +- // Do not shortcut evacuation +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100); +- +- // Aggressive evacuates everything, so it needs as much evac space as it can get +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow); +- +- // If class unloading is globally enabled, aggressive does unloading even with +- // concurrent cycles. 
+- if (ClassUnloading) { +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1); +- } +-} +- +-void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t free) { +- for (size_t idx = 0; idx < size; idx++) { +- ShenandoahHeapRegion* r = data[idx]._region; +- if (r->garbage() > 0) { +- cset->add_region(r); +- } +- } +-} +- +-bool ShenandoahAggressiveHeuristics::should_start_gc() const { +- log_info(gc)("Trigger: Start next cycle immediately"); +- return true; +-} +- +-bool ShenandoahAggressiveHeuristics::should_process_references() { +- if (!can_process_references()) return false; +- // Randomly process refs with 50% chance. +- return (os::random() & 1) == 1; +-} +- +-bool ShenandoahAggressiveHeuristics::should_unload_classes() { +- if (!can_unload_classes_normal()) return false; +- if (has_metaspace_oom()) return true; +- // Randomly unload classes with 50% chance. +- return (os::random() & 1) == 1; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,48 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +- +-class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics { +-public: +- ShenandoahAggressiveHeuristics(); +- +- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t free); +- +- virtual bool should_start_gc() const; +- +- virtual bool should_process_references(); +- +- virtual bool should_unload_classes(); +- +- virtual const char* name() { return "Aggressive"; } +- virtual bool is_diagnostic() { return true; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,98 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-ShenandoahCompactHeuristics::ShenandoahCompactHeuristics() : ShenandoahHeuristics() { +- SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahUncommit); +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahAlwaysClearSoftRefs); +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 10); +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100); +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUncommitDelay, 1000); +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGuaranteedGCInterval, 30000); +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold, 10); +-} +- +-bool ShenandoahCompactHeuristics::should_start_gc() const { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- size_t max_capacity = heap->max_capacity(); +- size_t capacity = heap->soft_max_capacity(); +- size_t available = heap->free_set()->available(); +- +- // Make sure the code below treats available without the soft tail. +- size_t soft_tail = max_capacity - capacity; +- available = (available > soft_tail) ? 
(available - soft_tail) : 0; +- +- size_t threshold_bytes_allocated = capacity / 100 * ShenandoahAllocationThreshold; +- size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; +- +- if (available < min_threshold) { +- log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", +- byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), +- byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); +- return true; +- } +- +- size_t bytes_allocated = heap->bytes_allocated_since_gc_start(); +- if (bytes_allocated > threshold_bytes_allocated) { +- log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", +- byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), +- byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); +- return true; +- } +- +- return ShenandoahHeuristics::should_start_gc(); +-} +- +-void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t actual_free) { +- // Do not select too large CSet that would overflow the available free space +- size_t max_cset = actual_free * 3 / 4; +- +- log_info(gc, ergo)("CSet Selection. 
Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), +- byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); +- +- size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; +- +- size_t live_cset = 0; +- for (size_t idx = 0; idx < size; idx++) { +- ShenandoahHeapRegion* r = data[idx]._region; +- size_t new_cset = live_cset + r->get_live_data_bytes(); +- if (new_cset < max_cset && r->garbage() > threshold) { +- live_cset = new_cset; +- cset->add_region(r); +- } +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,44 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +- +-class ShenandoahCompactHeuristics : public ShenandoahHeuristics { +-public: +- ShenandoahCompactHeuristics(); +- +- virtual bool should_start_gc() const; +- +- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t actual_free); +- +- virtual const char* name() { return "Compact"; } +- virtual bool is_diagnostic() { return false; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,300 +0,0 @@ +-/* +- * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_interface/gcCause.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +- +-int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) { +- if (a._garbage > b._garbage) +- return -1; +- else if (a._garbage < b._garbage) +- return 1; +- else return 0; +-} +- +-ShenandoahHeuristics::ShenandoahHeuristics() : +- _region_data(NULL), +- _degenerated_cycles_in_a_row(0), +- _successful_cycles_in_a_row(0), +- _cycle_start(os::elapsedTime()), +- _last_cycle_end(0), +- _gc_times_learned(0), +- _gc_time_penalties(0), +- _gc_time_history(new TruncatedSeq(5)), +- _metaspace_oom() +-{ +- // No unloading during concurrent mark? 
Communicate that to heuristics +- if (!ClassUnloadingWithConcurrentMark) { +- FLAG_SET_DEFAULT(ShenandoahUnloadClassesFrequency, 0); +- } +- +- size_t num_regions = ShenandoahHeap::heap()->num_regions(); +- assert(num_regions > 0, "Sanity"); +- +- _region_data = NEW_C_HEAP_ARRAY(RegionData, num_regions, mtGC); +-} +- +-ShenandoahHeuristics::~ShenandoahHeuristics() { +- FREE_C_HEAP_ARRAY(RegionGarbage, _region_data, mtGC); +-} +- +-void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) { +- assert(collection_set->count() == 0, "Must be empty"); +- start_choose_collection_set(); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- // Check all pinned regions have updated status before choosing the collection set. +- heap->assert_pinned_region_status(); +- +- // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away. +- +- size_t num_regions = heap->num_regions(); +- +- RegionData* candidates = _region_data; +- +- size_t cand_idx = 0; +- +- size_t total_garbage = 0; +- +- size_t immediate_garbage = 0; +- size_t immediate_regions = 0; +- +- size_t free = 0; +- size_t free_regions = 0; +- +- ShenandoahMarkingContext* const ctx = heap->complete_marking_context(); +- +- for (size_t i = 0; i < num_regions; i++) { +- ShenandoahHeapRegion* region = heap->get_region(i); +- +- size_t garbage = region->garbage(); +- total_garbage += garbage; +- +- if (region->is_empty()) { +- free_regions++; +- free += ShenandoahHeapRegion::region_size_bytes(); +- } else if (region->is_regular()) { +- if (!region->has_live()) { +- // We can recycle it right away and put it in the free set. +- immediate_regions++; +- immediate_garbage += garbage; +- region->make_trash_immediate(); +- } else { +- // This is our candidate for later consideration. 
+- candidates[cand_idx]._region = region; +- candidates[cand_idx]._garbage = garbage; +- cand_idx++; +- } +- } else if (region->is_humongous_start()) { +- // Reclaim humongous regions here, and count them as the immediate garbage +-#ifdef ASSERT +- bool reg_live = region->has_live(); +- bool bm_live = ctx->is_marked(oop(region->bottom())); +- assert(reg_live == bm_live, +- err_msg("Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT, +- BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words())); +-#endif +- if (!region->has_live()) { +- heap->trash_humongous_region_at(region); +- +- // Count only the start. Continuations would be counted on "trash" path +- immediate_regions++; +- immediate_garbage += garbage; +- } +- } else if (region->is_trash()) { +- // Count in just trashed collection set, during coalesced CM-with-UR +- immediate_regions++; +- immediate_garbage += garbage; +- } +- } +- +- // Step 2. Look back at garbage statistics, and decide if we want to collect anything, +- // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. +- +- assert (immediate_garbage <= total_garbage, +- err_msg("Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), +- byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage))); +- +- size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage); +- +- if (immediate_percent <= ShenandoahImmediateThreshold) { +- choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free); +- } +- +- size_t cset_percent = (total_garbage == 0) ? 
0 : (collection_set->garbage() * 100 / total_garbage); +- +- size_t collectable_garbage = collection_set->garbage() + immediate_garbage; +- size_t collectable_garbage_percent = (total_garbage == 0) ? 0 : (collectable_garbage * 100 / total_garbage); +- +- log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " +- "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " +- "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%)", +- +- byte_size_in_proper_unit(collectable_garbage), +- proper_unit_for_byte_size(collectable_garbage), +- collectable_garbage_percent, +- +- byte_size_in_proper_unit(immediate_garbage), +- proper_unit_for_byte_size(immediate_garbage), +- immediate_percent, +- +- byte_size_in_proper_unit(collection_set->garbage()), +- proper_unit_for_byte_size(collection_set->garbage()), +- cset_percent); +-} +- +-void ShenandoahHeuristics::record_cycle_start() { +- _cycle_start = os::elapsedTime(); +-} +- +-void ShenandoahHeuristics::record_cycle_end() { +- _last_cycle_end = os::elapsedTime(); +-} +- +-bool ShenandoahHeuristics::should_degenerate_cycle() { +- return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold; +-} +- +-void ShenandoahHeuristics::adjust_penalty(intx step) { +- assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100, +- err_msg("In range before adjustment: " INTX_FORMAT, _gc_time_penalties)); +- +- intx new_val = _gc_time_penalties + step; +- if (new_val < 0) { +- new_val = 0; +- } +- if (new_val > 100) { +- new_val = 100; +- } +- _gc_time_penalties = new_val; +- +- assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100, +- err_msg("In range after adjustment: " INTX_FORMAT, _gc_time_penalties)); +-} +- +-void ShenandoahHeuristics::record_success_concurrent() { +- _degenerated_cycles_in_a_row = 0; +- _successful_cycles_in_a_row++; +- +- _gc_time_history->add(time_since_last_gc()); +- _gc_times_learned++; +- +- adjust_penalty(Concurrent_Adjust); +-} +- +-void ShenandoahHeuristics::record_success_degenerated() { 
+- _degenerated_cycles_in_a_row++; +- _successful_cycles_in_a_row = 0; +- +- adjust_penalty(Degenerated_Penalty); +-} +- +-void ShenandoahHeuristics::record_success_full() { +- _degenerated_cycles_in_a_row = 0; +- _successful_cycles_in_a_row++; +- +- adjust_penalty(Full_Penalty); +-} +- +-void ShenandoahHeuristics::record_allocation_failure_gc() { +- // Do nothing. +-} +- +-void ShenandoahHeuristics::record_requested_gc() { +- // Assume users call System.gc() when external state changes significantly, +- // which forces us to re-learn the GC timings and allocation rates. +- _gc_times_learned = 0; +-} +- +-bool ShenandoahHeuristics::can_process_references() { +- if (ShenandoahRefProcFrequency == 0) return false; +- return true; +-} +- +-bool ShenandoahHeuristics::should_process_references() { +- if (!can_process_references()) return false; +- size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter(); +- // Process references every Nth GC cycle. +- return cycle % ShenandoahRefProcFrequency == 0; +-} +- +-bool ShenandoahHeuristics::can_unload_classes() { +- if (!ClassUnloading) return false; +- return true; +-} +- +-bool ShenandoahHeuristics::can_unload_classes_normal() { +- if (!can_unload_classes()) return false; +- if (has_metaspace_oom()) return true; +- if (!ClassUnloadingWithConcurrentMark) return false; +- if (ShenandoahUnloadClassesFrequency == 0) return false; +- return true; +-} +- +-bool ShenandoahHeuristics::should_unload_classes() { +- if (!can_unload_classes_normal()) return false; +- if (has_metaspace_oom()) return true; +- size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter(); +- // Unload classes every Nth GC cycle. +- // This should not happen in the same cycle as process_references to amortize costs. +- // Offsetting by one is enough to break the rendezvous when periods are equal. +- // When periods are not equal, offsetting by one is just as good as any other guess. 
+- return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0; +-} +- +-void ShenandoahHeuristics::initialize() { +- // Nothing to do by default. +-} +- +-double ShenandoahHeuristics::time_since_last_gc() const { +- return os::elapsedTime() - _cycle_start; +-} +- +-bool ShenandoahHeuristics::should_start_gc() const { +- // Perform GC to cleanup metaspace +- if (has_metaspace_oom()) { +- // Some of vmTestbase/metaspace tests depend on following line to count GC cycles +- log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold)); +- return true; +- } +- +- if (ShenandoahGuaranteedGCInterval > 0) { +- double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; +- if (last_time_ms > ShenandoahGuaranteedGCInterval) { +- log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", +- last_time_ms, ShenandoahGuaranteedGCInterval); +- return true; +- } +- } +- +- return false; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,143 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp" +-#include "memory/allocation.hpp" +-#include "runtime/globals_extension.hpp" +-#include "runtime/java.hpp" +- +-#define SHENANDOAH_ERGO_DISABLE_FLAG(name) \ +- do { \ +- if (FLAG_IS_DEFAULT(name) && (name)) { \ +- log_info(gc)("Heuristics ergonomically sets -XX:-" #name); \ +- FLAG_SET_DEFAULT(name, false); \ +- } \ +- } while (0) +- +-#define SHENANDOAH_ERGO_ENABLE_FLAG(name) \ +- do { \ +- if (FLAG_IS_DEFAULT(name) && !(name)) { \ +- log_info(gc)("Heuristics ergonomically sets -XX:+" #name); \ +- FLAG_SET_DEFAULT(name, true); \ +- } \ +- } while (0) +- +-#define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value) \ +- do { \ +- if (FLAG_IS_DEFAULT(name)) { \ +- log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value); \ +- FLAG_SET_DEFAULT(name, value); \ +- } \ +- } while (0) +- +-class ShenandoahCollectionSet; +-class ShenandoahHeapRegion; +- +-class ShenandoahHeuristics : public CHeapObj { +- static const intx Concurrent_Adjust = -1; // recover from penalties +- static const intx Degenerated_Penalty = 10; // how much to penalize average GC duration history on Degenerated 
GC +- static const intx Full_Penalty = 20; // how much to penalize average GC duration history on Full GC +- +-protected: +- typedef struct { +- ShenandoahHeapRegion* _region; +- size_t _garbage; +- } RegionData; +- +- RegionData* _region_data; +- +- uint _degenerated_cycles_in_a_row; +- uint _successful_cycles_in_a_row; +- +- double _cycle_start; +- double _last_cycle_end; +- +- size_t _gc_times_learned; +- intx _gc_time_penalties; +- TruncatedSeq* _gc_time_history; +- +- // There may be many threads that contend to set this flag +- ShenandoahSharedFlag _metaspace_oom; +- +- static int compare_by_garbage(RegionData a, RegionData b); +- +- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, +- RegionData* data, size_t data_size, +- size_t free) = 0; +- +- void adjust_penalty(intx step); +- +-public: +- ShenandoahHeuristics(); +- virtual ~ShenandoahHeuristics(); +- +- void record_metaspace_oom() { _metaspace_oom.set(); } +- void clear_metaspace_oom() { _metaspace_oom.unset(); } +- bool has_metaspace_oom() const { return _metaspace_oom.is_set(); } +- +- virtual void record_cycle_start(); +- +- virtual void record_cycle_end(); +- +- virtual bool should_start_gc() const; +- +- virtual bool should_degenerate_cycle(); +- +- virtual void record_success_concurrent(); +- +- virtual void record_success_degenerated(); +- +- virtual void record_success_full(); +- +- virtual void record_allocation_failure_gc(); +- +- virtual void record_requested_gc(); +- +- virtual void start_choose_collection_set() { +- } +- virtual void end_choose_collection_set() { +- } +- +- virtual void choose_collection_set(ShenandoahCollectionSet* collection_set); +- +- virtual bool can_process_references(); +- virtual bool should_process_references(); +- +- virtual bool can_unload_classes(); +- virtual bool can_unload_classes_normal(); +- virtual bool should_unload_classes(); +- +- virtual const char* name() = 0; +- virtual bool is_diagnostic() = 0; +- virtual bool 
is_experimental() = 0; +- virtual void initialize(); +- +- double time_since_last_gc() const; +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,78 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-bool ShenandoahPassiveHeuristics::should_start_gc() const { +- // Never do concurrent GCs. +- return false; +-} +- +-bool ShenandoahPassiveHeuristics::should_process_references() { +- // Always process references, if we can. +- return can_process_references(); +-} +- +-bool ShenandoahPassiveHeuristics::should_unload_classes() { +- // Always unload classes, if we can. +- return can_unload_classes(); +-} +- +-bool ShenandoahPassiveHeuristics::should_degenerate_cycle() { +- // Always fail to Degenerated GC, if enabled +- return ShenandoahDegeneratedGC; +-} +- +-void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t actual_free) { +- assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC"); +- +- // Do not select too large CSet that would overflow the available free space. +- // Take at least the entire evacuation reserve, and be free to overflow to free space. +- size_t max_capacity = ShenandoahHeap::heap()->max_capacity(); +- size_t available = MAX2(max_capacity / 100 * ShenandoahEvacReserve, actual_free); +- size_t max_cset = (size_t)(available / ShenandoahEvacWaste); +- +- log_info(gc, ergo)("CSet Selection. 
Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), +- byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); +- +- size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; +- +- size_t live_cset = 0; +- for (size_t idx = 0; idx < size; idx++) { +- ShenandoahHeapRegion* r = data[idx]._region; +- size_t new_cset = live_cset + r->get_live_data_bytes(); +- if (new_cset < max_cset && r->garbage() > threshold) { +- live_cset = new_cset; +- cset->add_region(r); +- } +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,48 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +- +-class ShenandoahPassiveHeuristics : public ShenandoahHeuristics { +-public: +- virtual bool should_start_gc() const; +- +- virtual bool should_process_references(); +- +- virtual bool should_unload_classes(); +- +- virtual bool should_degenerate_cycle(); +- +- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, +- RegionData* data, size_t data_size, +- size_t free); +- +- virtual const char* name() { return "Passive"; } +- virtual bool is_diagnostic() { return true; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,73 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics() { +- SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); +-} +- +-ShenandoahStaticHeuristics::~ShenandoahStaticHeuristics() {} +- +-bool ShenandoahStaticHeuristics::should_start_gc() const { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- size_t max_capacity = heap->max_capacity(); +- size_t capacity = heap->soft_max_capacity(); +- size_t available = heap->free_set()->available(); +- +- // Make sure the code below treats available without the soft tail. +- size_t soft_tail = max_capacity - capacity; +- available = (available > soft_tail) ? 
(available - soft_tail) : 0; +- +- size_t threshold_available = capacity / 100 * ShenandoahMinFreeThreshold; +- +- if (available < threshold_available) { +- log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", +- byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), +- byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); +- return true; +- } +- return ShenandoahHeuristics::should_start_gc(); +-} +- +-void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t free) { +- size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100; +- +- for (size_t idx = 0; idx < size; idx++) { +- ShenandoahHeapRegion* r = data[idx]._region; +- if (r->garbage() > threshold) { +- cset->add_region(r); +- } +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,46 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP +- +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +- +-class ShenandoahStaticHeuristics : public ShenandoahHeuristics { +-public: +- ShenandoahStaticHeuristics(); +- +- virtual ~ShenandoahStaticHeuristics(); +- +- virtual bool should_start_gc() const; +- +- virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset, +- RegionData* data, size_t size, +- size_t free); +- +- virtual const char* name() { return "Static"; } +- virtual bool is_diagnostic() { return false; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,73 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +-#include "gc_implementation/shenandoah/mode/shenandoahIUMode.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-void ShenandoahIUMode::initialize_flags() const { +- if (FLAG_IS_CMDLINE(ClassUnloadingWithConcurrentMark) && ClassUnloading) { +- log_warning(gc)("Shenandoah I-U mode sets -XX:-ClassUnloadingWithConcurrentMark; see JDK-8261341 for details"); +- } +- FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false); +- +- if (FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier)) { +- FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true); +- } +- if (FLAG_IS_DEFAULT(ShenandoahSATBBarrier)) { +- FLAG_SET_DEFAULT(ShenandoahSATBBarrier, 
false); +- } +- +- SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); +- +- // Final configuration checks +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); +- SHENANDOAH_CHECK_FLAG_UNSET(ShenandoahSATBBarrier); +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahStoreValEnqueueBarrier); +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +-} +- +-ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const { +- if (ShenandoahGCHeuristics != NULL) { +- if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { +- return new ShenandoahAggressiveHeuristics(); +- } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { +- return new ShenandoahStaticHeuristics(); +- } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { +- return new ShenandoahAdaptiveHeuristics(); +- } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { +- return new ShenandoahCompactHeuristics(); +- } else { +- vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); +- } +- } +- ShouldNotReachHere(); +- return NULL; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahIUMode.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,42 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHIUMODE_HPP +-#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHIUMODE_HPP +- +-#include "gc_implementation/shenandoah/mode/shenandoahMode.hpp" +- +-class ShenandoahHeuristics; +- +-class ShenandoahIUMode : public ShenandoahMode { +-public: +- virtual void initialize_flags() const; +- virtual ShenandoahHeuristics* initialize_heuristics() const; +- +- virtual const char* name() { return "Incremental-Update (IU)"; } +- virtual bool is_diagnostic() { return false; } +- virtual bool is_experimental() { return true; } +-}; +- +-#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHIUMODE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahMode.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahMode.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahMode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahMode.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,56 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP +- +-#include "memory/allocation.hpp" +- +-class ShenandoahHeuristics; +- +-#define SHENANDOAH_CHECK_FLAG_SET(name) \ +- do { \ +- if (!(name)) { \ +- err_msg message("GC mode needs -XX:+" #name " to work correctly"); \ +- vm_exit_during_initialization("Error", message); \ +- } \ +- } while (0) +- +-#define SHENANDOAH_CHECK_FLAG_UNSET(name) \ +- do { \ +- if ((name)) { \ +- err_msg message("GC mode needs -XX:-" #name " to work correctly"); \ +- vm_exit_during_initialization("Error", message); \ +- } \ +- } while (0) +- +-class ShenandoahMode : public CHeapObj { +-public: +- virtual void initialize_flags() const = 0; +- virtual ShenandoahHeuristics* initialize_heuristics() const = 0; +- virtual const char* name() = 0; +- virtual bool is_diagnostic() = 0; +- virtual bool is_experimental() = 0; +-}; +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP +diff -uNr 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,58 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-void ShenandoahPassiveMode::initialize_flags() const { +- // Do not allow concurrent cycles. 
+- FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false); +- FLAG_SET_DEFAULT(ShenandoahImplicitGCInvokesConcurrent, false); +- +- // Passive runs with max speed for allocation, because GC is always STW +- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing); +- +- // No need for evacuation reserve with Full GC, only for Degenerated GC. +- if (!ShenandoahDegeneratedGC) { +- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahEvacReserve, 0); +- } +- +- // Disable known barriers by default. +- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahLoadRefBarrier); +- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier); +- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier); +- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier); +- SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier); +- +- // Final configuration checks +- // No barriers are required to run. +-} +-ShenandoahHeuristics* ShenandoahPassiveMode::initialize_heuristics() const { +- if (ShenandoahGCHeuristics != NULL) { +- return new ShenandoahPassiveHeuristics(); +- } +- ShouldNotReachHere(); +- return NULL; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,39 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHPASSIVEMODE_HPP +-#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHPASSIVEMODE_HPP +- +-#include "gc_implementation/shenandoah/mode/shenandoahMode.hpp" +- +-class ShenandoahPassiveMode : public ShenandoahMode { +-public: +- virtual void initialize_flags() const; +- virtual ShenandoahHeuristics* initialize_heuristics() const; +- +- virtual const char* name() { return "Passive"; } +- virtual bool is_diagnostic() { return true; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHPASSIVEMODE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,59 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp" +-#include "gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-void ShenandoahSATBMode::initialize_flags() const { +- SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent); +- SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent); +- +- // Final configuration checks +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier); +- SHENANDOAH_CHECK_FLAG_UNSET(ShenandoahStoreValEnqueueBarrier); +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier); +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier); +- SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier); +-} +- +-ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const { +- if (ShenandoahGCHeuristics != NULL) { +- if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) { +- return new ShenandoahAggressiveHeuristics(); +- } 
else if (strcmp(ShenandoahGCHeuristics, "static") == 0) { +- return new ShenandoahStaticHeuristics(); +- } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) { +- return new ShenandoahAdaptiveHeuristics(); +- } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) { +- return new ShenandoahCompactHeuristics(); +- } else { +- vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option"); +- } +- } +- ShouldNotReachHere(); +- return NULL; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,40 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHSATBMODE_HPP +-#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHSATBMODE_HPP +- +-#include "gc_implementation/shenandoah/mode/shenandoahMode.hpp" +- +-class ShenandoahHeuristics; +- +-class ShenandoahSATBMode : public ShenandoahMode { +-public: +- virtual void initialize_flags() const; +- virtual ShenandoahHeuristics* initialize_heuristics() const; +- virtual const char* name() { return "Snapshot-At-The-Beginning (SATB)"; } +- virtual bool is_diagnostic() { return false; } +- virtual bool is_experimental() { return false; } +-}; +- +-#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHSATBMODE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,156 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/preservedMarks.inline.hpp" +-#include "utilities/workgroup.hpp" +-#include "memory/allocation.inline.hpp" +-#include "memory/resourceArea.hpp" +-#include "oops/oop.inline.hpp" +-#include "utilities/macros.hpp" +- +-void PreservedMarks::restore() { +- while (!_stack.is_empty()) { +- const OopAndMarkOop elem = _stack.pop(); +- elem.set_mark(); +- } +- assert_empty(); +-} +- +-void PreservedMarks::adjust_during_full_gc() { +- StackIterator iter(_stack); +- while (!iter.is_empty()) { +- OopAndMarkOop* elem = iter.next_addr(); +- +- oop obj = elem->get_oop(); +- if (obj->is_forwarded()) { +- elem->set_oop(obj->forwardee()); +- } +- } +-} +- +-void PreservedMarks::restore_and_increment(volatile size_t* const total_size_addr) { +- const size_t stack_size = size(); +- restore(); +- // Only do the atomic add if the size is > 0. 
+- if (stack_size > 0) { +- Atomic::add(stack_size, (volatile jlong*)total_size_addr); +- } +-} +- +-#ifndef PRODUCT +-void PreservedMarks::assert_empty() { +- assert(_stack.is_empty(), err_msg("stack expected to be empty, size = " SIZE_FORMAT, +- _stack.size())); +- assert(_stack.cache_size() == 0, +- err_msg("stack expected to have no cached segments, cache size = " SIZE_FORMAT, +- _stack.cache_size())); +-} +-#endif // ndef PRODUCT +- +-void RemoveForwardedPointerClosure::do_object(oop obj) { +- if (obj->is_forwarded()) { +- PreservedMarks::init_forwarded_mark(obj); +- } +-} +- +-void PreservedMarksSet::init(uint num) { +- assert(_stacks == NULL && _num == 0, "do not re-initialize"); +- assert(num > 0, "pre-condition"); +- if (_in_c_heap) { +- _stacks = NEW_C_HEAP_ARRAY(Padded, num, mtGC); +- } else { +- _stacks = NEW_RESOURCE_ARRAY(Padded, num); +- } +- for (uint i = 0; i < num; i += 1) { +- ::new (_stacks + i) PreservedMarks(); +- } +- _num = num; +- +- assert_empty(); +-} +- +-class ParRestoreTask : public AbstractGangTask { +-private: +- PreservedMarksSet* const _preserved_marks_set; +- SequentialSubTasksDone _sub_tasks; +- volatile size_t* const _total_size_addr; +- +-public: +- virtual void work(uint worker_id) { +- uint task_id = 0; +- while (!_sub_tasks.is_task_claimed(/* reference */ task_id)) { +- _preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr); +- } +- _sub_tasks.all_tasks_completed(); +- } +- +- ParRestoreTask(uint worker_num, +- PreservedMarksSet* preserved_marks_set, +- volatile size_t* total_size_addr) +- : AbstractGangTask("Parallel Preserved Mark Restoration"), +- _preserved_marks_set(preserved_marks_set), +- _total_size_addr(total_size_addr) { +- _sub_tasks.set_n_threads(worker_num); +- _sub_tasks.set_n_tasks(preserved_marks_set->num()); +- } +-}; +- +-void PreservedMarksSet::reclaim() { +- assert_empty(); +- +- for (uint i = 0; i < _num; i += 1) { +- _stacks[i].~Padded(); +- } +- +- if (_in_c_heap) { +- 
FREE_C_HEAP_ARRAY(Padded, _stacks, mtGC); +- } else { +- // the array was resource-allocated, so nothing to do +- } +- _stacks = NULL; +- _num = 0; +-} +- +-#ifndef PRODUCT +-void PreservedMarksSet::assert_empty() { +- assert(_stacks != NULL && _num > 0, "should have been initialized"); +- for (uint i = 0; i < _num; i += 1) { +- get(i)->assert_empty(); +- } +-} +-#endif // ndef PRODUCT +- +-void SharedRestorePreservedMarksTaskExecutor::restore(PreservedMarksSet* preserved_marks_set, +- volatile size_t* total_size_addr) { +- if (_workers == NULL) { +- for (uint i = 0; i < preserved_marks_set->num(); i += 1) { +- *total_size_addr += preserved_marks_set->get(i)->size(); +- preserved_marks_set->get(i)->restore(); +- } +- } else { +- ParRestoreTask task(_workers->active_workers(), preserved_marks_set, total_size_addr); +- _workers->run_task(&task); +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,149 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHARED_PRESERVEDMARKS_HPP +-#define SHARE_VM_GC_SHARED_PRESERVEDMARKS_HPP +- +-#include "memory/allocation.hpp" +-#include "memory/padded.hpp" +-#include "oops/oop.hpp" +-#include "utilities/stack.hpp" +- +-class PreservedMarksSet; +-class WorkGang; +- +-class PreservedMarks { +-private: +- class OopAndMarkOop { +- private: +- oop _o; +- markOop _m; +- +- public: +- OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) { } +- +- oop get_oop() { return _o; } +- inline void set_mark() const; +- void set_oop(oop obj) { _o = obj; } +- }; +- typedef Stack OopAndMarkOopStack; +- +- OopAndMarkOopStack _stack; +- +- inline bool should_preserve_mark(oop obj, markOop m) const; +- +-public: +- size_t size() const { return _stack.size(); } +- inline void push(oop obj, markOop m); +- inline void push_if_necessary(oop obj, markOop m); +- // Iterate over the stack, restore all preserved marks, and +- // reclaim the memory taken up by the stack segments. +- void restore(); +- // Iterate over the stack, adjust all preserved marks according +- // to their forwarding location stored in the mark. +- void adjust_during_full_gc(); +- +- void restore_and_increment(volatile size_t* const _total_size_addr); +- inline static void init_forwarded_mark(oop obj); +- +- // Assert the stack is empty and has no cached segments. 
+- void assert_empty() PRODUCT_RETURN; +- +- inline PreservedMarks(); +- ~PreservedMarks() { assert_empty(); } +-}; +- +-class RemoveForwardedPointerClosure: public ObjectClosure { +-public: +- virtual void do_object(oop obj); +-}; +- +-class RestorePreservedMarksTaskExecutor { +-public: +- void virtual restore(PreservedMarksSet* preserved_marks_set, +- volatile size_t* total_size_addr) = 0; +-}; +- +-class SharedRestorePreservedMarksTaskExecutor : public RestorePreservedMarksTaskExecutor { +-private: +- WorkGang* _workers; +- +-public: +- SharedRestorePreservedMarksTaskExecutor(WorkGang* workers) : _workers(workers) { } +- +- void restore(PreservedMarksSet* preserved_marks_set, +- volatile size_t* total_size_addr); +- +-}; +- +-class PreservedMarksSet : public CHeapObj { +-private: +- // true -> _stacks will be allocated in the C heap +- // false -> _stacks will be allocated in the resource arena +- const bool _in_c_heap; +- +- // Number of stacks we have allocated (typically, one stack per GC worker). +- // This should be >= 1 if the stacks have been initialized, +- // or == 0 if they have not. +- uint _num; +- +- // Stack array (typically, one stack per GC worker) of length _num. +- // This should be != NULL if the stacks have been initialized, +- // or == NULL if they have not. +- Padded* _stacks; +- +-public: +- uint num() const { return _num; } +- +- // Return the i'th stack. +- PreservedMarks* get(uint i = 0) const { +- assert(_num > 0 && _stacks != NULL, "stacks should have been initialized"); +- assert(i < _num, "pre-condition"); +- return (_stacks + i); +- } +- +- // Allocate stack array. +- void init(uint num); +- +- // Iterate over all stacks, restore all preserved marks, and reclaim +- // the memory taken up by the stack segments. +- // Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, CMS, G1), +- // PSRestorePreservedMarksTaskExecutor (PS). 
+- inline void restore(RestorePreservedMarksTaskExecutor* executor); +- +- // Reclaim stack array. +- void reclaim(); +- +- // Assert all the stacks are empty and have no cached segments. +- void assert_empty() PRODUCT_RETURN; +- +- PreservedMarksSet(bool in_c_heap) +- : _in_c_heap(in_c_heap), _num(0), _stacks(NULL) { } +- +- ~PreservedMarksSet() { +- assert(_stacks == NULL && _num == 0, "stacks should have been reclaimed"); +- } +-}; +- +-#endif // SHARE_VM_GC_SHARED_PRESERVEDMARKS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/preservedMarks.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,86 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018 Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP +-#define SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP +- +-#include "gc_implementation/shenandoah/preservedMarks.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "oops/oop.inline.hpp" +-#include "utilities/stack.inline.hpp" +- +-inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const { +- return m->must_be_preserved_for_promotion_failure(obj); +-} +- +-inline void PreservedMarks::push(oop obj, markOop m) { +- assert(should_preserve_mark(obj, m), "pre-condition"); +- OopAndMarkOop elem(obj, m); +- _stack.push(elem); +-} +- +-inline void PreservedMarks::push_if_necessary(oop obj, markOop m) { +- if (should_preserve_mark(obj, m)) { +- push(obj, m); +- } +-} +- +-inline void PreservedMarks::init_forwarded_mark(oop obj) { +- obj->init_mark(); +-} +- +-inline void PreservedMarksSet::restore(RestorePreservedMarksTaskExecutor* executor) { +- volatile size_t total_size = 0; +- +-#ifdef ASSERT +- // This is to make sure the total_size we'll calculate below is correct. +- size_t total_size_before = 0; +- for (uint i = 0; i < _num; i += 1) { +- total_size_before += get(i)->size(); +- } +-#endif // def ASSERT +- +- executor->restore(this, &total_size); +- assert_empty(); +- +- assert(total_size == total_size_before, +- err_msg("total_size = " SIZE_FORMAT " before = " SIZE_FORMAT, +- total_size, total_size_before)); +- +- log_trace(gc)("Restored " SIZE_FORMAT " marks", total_size); +-} +- +-inline PreservedMarks::PreservedMarks() +- : _stack(OopAndMarkOopStack::default_segment_size(), +- // This stack should be used very infrequently so there's +- // no point in caching stack segments (there will be a +- // waste of space most of the time). So we set the max +- // cache size to 0. 
+- 0 /* max_cache_size */) { } +- +-void PreservedMarks::OopAndMarkOop::set_mark() const { +- _o->set_mark(_m); +-} +- +-#endif // SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAllocRequest.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAllocRequest.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAllocRequest.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAllocRequest.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,162 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP +- +-#include "memory/allocation.hpp" +- +-class ShenandoahAllocRequest : public StackObj { +-public: +- enum Type { +- _alloc_shared, // Allocate common, outside of TLAB +- _alloc_shared_gc, // Allocate common, outside of GCLAB +- _alloc_tlab, // Allocate TLAB +- _alloc_gclab, // Allocate GCLAB +- _ALLOC_LIMIT +- }; +- +- static const char* alloc_type_to_string(Type type) { +- switch (type) { +- case _alloc_shared: +- return "Shared"; +- case _alloc_shared_gc: +- return "Shared GC"; +- case _alloc_tlab: +- return "TLAB"; +- case _alloc_gclab: +- return "GCLAB"; +- default: +- ShouldNotReachHere(); +- return ""; +- } +- } +- +-private: +- size_t _min_size; +- size_t _requested_size; +- size_t _actual_size; +- Type _alloc_type; +-#ifdef ASSERT +- bool _actual_size_set; +-#endif +- +- ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type) : +- _min_size(_min_size), _requested_size(_requested_size), +- _actual_size(0), _alloc_type(_alloc_type) +-#ifdef ASSERT +- , _actual_size_set(false) +-#endif +- {} +- +-public: +- static inline ShenandoahAllocRequest for_tlab(size_t requested_size) { +- return ShenandoahAllocRequest(requested_size, requested_size, _alloc_tlab); +- } +- +- static inline ShenandoahAllocRequest for_gclab(size_t min_size, size_t requested_size) { +- return ShenandoahAllocRequest(min_size, requested_size, _alloc_gclab); +- } +- +- static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size) { +- return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc); +- } +- +- static inline ShenandoahAllocRequest for_shared(size_t requested_size) { +- return ShenandoahAllocRequest(0, requested_size, _alloc_shared); +- } +- +- inline size_t size() { +- return _requested_size; +- } +- +- inline Type type() { +- return _alloc_type; +- } +- +- inline const char* type_string() { +- return 
alloc_type_to_string(_alloc_type); +- } +- +- inline size_t min_size() { +- assert (is_lab_alloc(), "Only access for LAB allocs"); +- return _min_size; +- } +- +- inline size_t actual_size() { +- assert (_actual_size_set, "Should be set"); +- return _actual_size; +- } +- +- inline void set_actual_size(size_t v) { +-#ifdef ASSERT +- assert (!_actual_size_set, "Should not be set"); +- _actual_size_set = true; +-#endif +- _actual_size = v; +- } +- +- inline bool is_mutator_alloc() { +- switch (_alloc_type) { +- case _alloc_tlab: +- case _alloc_shared: +- return true; +- case _alloc_gclab: +- case _alloc_shared_gc: +- return false; +- default: +- ShouldNotReachHere(); +- return false; +- } +- } +- +- inline bool is_gc_alloc() { +- switch (_alloc_type) { +- case _alloc_tlab: +- case _alloc_shared: +- return false; +- case _alloc_gclab: +- case _alloc_shared_gc: +- return true; +- default: +- ShouldNotReachHere(); +- return false; +- } +- } +- +- inline bool is_lab_alloc() { +- switch (_alloc_type) { +- case _alloc_tlab: +- case _alloc_gclab: +- return true; +- case _alloc_shared: +- case _alloc_shared_gc: +- return false; +- default: +- ShouldNotReachHere(); +- return false; +- } +- } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,398 +0,0 @@ +-/* +- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "memory/resourceArea.hpp" +- +-void print_raw_memory(ShenandoahMessageBuffer &msg, void* loc) { +- // Be extra safe. Only access data that is guaranteed to be safe: +- // should be in heap, in known committed region, within that region. 
+- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (!heap->is_in(loc)) return; +- +- ShenandoahHeapRegion* r = heap->heap_region_containing(loc); +- if (r != NULL && r->is_committed()) { +- address start = MAX2((address) r->bottom(), (address) loc - 32); +- address end = MIN2((address) r->end(), (address) loc + 128); +- if (start >= end) return; +- +- stringStream ss; +- os::print_hex_dump(&ss, start, end, 4); +- msg.append("\n"); +- msg.append("Raw heap memory:\n%s", ss.as_string()); +- } +-} +- +-void ShenandoahAsserts::print_obj(ShenandoahMessageBuffer& msg, oop obj) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahHeapRegion *r = heap->heap_region_containing(obj); +- +- ResourceMark rm; +- stringStream ss; +- r->print_on(&ss); +- +- stringStream mw_ss; +- obj->mark()->print_on(&mw_ss); +- +- ShenandoahMarkingContext* const ctx = heap->marking_context(); +- +- msg.append(" " PTR_FORMAT " - klass " PTR_FORMAT " %s\n", p2i(obj), p2i(obj->klass()), obj->klass()->external_name()); +- msg.append(" %3s allocated after mark start\n", ctx->allocated_after_mark_start((HeapWord *) obj) ? "" : "not"); +- msg.append(" %3s after update watermark\n", cast_from_oop(obj) >= r->get_update_watermark() ? "" : "not"); +- msg.append(" %3s marked \n", ctx->is_marked(obj) ? "" : "not"); +- msg.append(" %3s in collection set\n", heap->in_collection_set(obj) ? "" : "not"); +- msg.append(" mark:%s\n", mw_ss.as_string()); +- msg.append(" region: %s", ss.as_string()); +-} +- +-void ShenandoahAsserts::print_non_obj(ShenandoahMessageBuffer& msg, void* loc) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (heap->is_in(loc)) { +- msg.append(" inside Java heap\n"); +- ShenandoahHeapRegion *r = heap->heap_region_containing(loc); +- stringStream ss; +- r->print_on(&ss); +- +- msg.append(" %3s in collection set\n", heap->in_collection_set_loc(loc) ? 
"" : "not"); +- msg.append(" region: %s", ss.as_string()); +- } else { +- msg.append(" outside of Java heap\n"); +- stringStream ss; +- os::print_location(&ss, (intptr_t) loc, false); +- msg.append(" %s", ss.as_string()); +- } +-} +- +-void ShenandoahAsserts::print_obj_safe(ShenandoahMessageBuffer& msg, void* loc) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- msg.append(" " PTR_FORMAT " - safe print, no details\n", p2i(loc)); +- if (heap->is_in(loc)) { +- ShenandoahHeapRegion* r = heap->heap_region_containing(loc); +- if (r != NULL) { +- stringStream ss; +- r->print_on(&ss); +- msg.append(" region: %s", ss.as_string()); +- print_raw_memory(msg, loc); +- } +- } +-} +- +-void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_loc, oop loc, +- const char* phase, const char* label, +- const char* file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ResourceMark rm; +- +- bool loc_in_heap = (loc != NULL && heap->is_in(loc)); +- +- ShenandoahMessageBuffer msg("%s; %s\n\n", phase, label); +- +- msg.append("Referenced from:\n"); +- if (interior_loc != NULL) { +- msg.append(" interior location: " PTR_FORMAT "\n", p2i(interior_loc)); +- if (loc_in_heap) { +- print_obj(msg, loc); +- } else { +- print_non_obj(msg, interior_loc); +- } +- } else { +- msg.append(" no interior location recorded (probably a plain heap scan, or detached oop)\n"); +- } +- msg.append("\n"); +- +- msg.append("Object:\n"); +- if (level >= _safe_oop) { +- print_obj(msg, obj); +- } else { +- print_obj_safe(msg, obj); +- } +- msg.append("\n"); +- +- if (level >= _safe_oop) { +- oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); +- msg.append("Forwardee:\n"); +- if (obj != fwd) { +- if (level >= _safe_oop_fwd) { +- print_obj(msg, fwd); +- } else { +- print_obj_safe(msg, fwd); +- } +- } else { +- msg.append(" (the object itself)"); +- } +- msg.append("\n"); +- } +- +- if (level >= _safe_oop_fwd) { +- oop fwd = (oop) 
ShenandoahForwarding::get_forwardee_raw_unchecked(obj); +- oop fwd2 = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); +- if (fwd != fwd2) { +- msg.append("Second forwardee:\n"); +- print_obj_safe(msg, fwd2); +- msg.append("\n"); +- } +- } +- +- report_vm_error(file, line, msg.buffer()); +-} +- +-void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (!heap->is_in(obj)) { +- print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap failed", +- "oop must point to a heap address", +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char* file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- // Step 1. Check that obj is correct. +- // After this step, it is safe to call heap_region_containing(). +- if (!heap->is_in(obj)) { +- print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "oop must point to a heap address", +- file, line); +- } +- +- Klass* obj_klass = obj->klass_or_null(); +- if (obj_klass == NULL) { +- print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Object klass pointer should not be NULL", +- file,line); +- } +- +- if (!Metaspace::contains(obj_klass)) { +- print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Object klass pointer must go to metaspace", +- file,line); +- } +- +- oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj)); +- +- if (obj != fwd) { +- // When Full GC moves the objects, we cannot trust fwdptrs. If we got here, it means something +- // tries fwdptr manipulation when Full GC is running. The only exception is using the fwdptr +- // that still points to the object itself. 
+- if (heap->is_full_gc_move_in_progress()) { +- print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Non-trivial forwarding pointer during Full GC moves, probable bug.", +- file, line); +- } +- +- // Step 2. Check that forwardee is correct +- if (!heap->is_in(fwd)) { +- print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Forwardee must point to a heap address", +- file, line); +- } +- +- if (obj_klass != fwd->klass()) { +- print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Forwardee klass disagrees with object class", +- file, line); +- } +- +- // Step 3. Check that forwardee points to correct region +- if (heap->heap_region_index_containing(fwd) == heap->heap_region_index_containing(obj)) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Non-trivial forwardee should in another region", +- file, line); +- } +- +- // Step 4. Check for multiple forwardings +- oop fwd2 = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(fwd)); +- if (fwd != fwd2) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed", +- "Multiple forwardings", +- file, line); +- } +- } +-} +- +-void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line) { +- assert_correct(interior_loc, obj, file, line); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahHeapRegion* r = heap->heap_region_containing(obj); +- if (!r->is_active()) { +- print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed", +- "Object must reside in active region", +- file, line); +- } +- +- size_t alloc_size = obj->size(); +- if (alloc_size > ShenandoahHeapRegion::humongous_threshold_words()) { +- size_t idx = r->index(); +- size_t num_regions = ShenandoahHeapRegion::required_regions(alloc_size * HeapWordSize); +- for (size_t i = idx; i 
< idx + num_regions; i++) { +- ShenandoahHeapRegion* chain_reg = heap->get_region(i); +- if (i == idx && !chain_reg->is_humongous_start()) { +- print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed", +- "Object must reside in humongous start", +- file, line); +- } +- if (i != idx && !chain_reg->is_humongous_continuation()) { +- print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed", +- "Humongous continuation should be of proper size", +- file, line); +- } +- } +- } +-} +- +-void ShenandoahAsserts::assert_forwarded(void* interior_loc, oop obj, const char* file, int line) { +- assert_correct(interior_loc, obj, file, line); +- oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj)); +- +- if (obj == fwd) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_forwarded failed", +- "Object should be forwarded", +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_not_forwarded(void* interior_loc, oop obj, const char* file, int line) { +- assert_correct(interior_loc, obj, file, line); +- oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj)); +- +- if (obj != fwd) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_forwarded failed", +- "Object should not be forwarded", +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_marked(void *interior_loc, oop obj, const char *file, int line) { +- assert_correct(interior_loc, obj, file, line); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (!heap->marking_context()->is_marked(obj)) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked failed", +- "Object should be marked", +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_in_cset(void* interior_loc, oop obj, const char* file, int line) { +- assert_correct(interior_loc, obj, file, line); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if 
(!heap->in_collection_set(obj)) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_in_cset failed", +- "Object should be in collection set", +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line) { +- assert_correct(interior_loc, obj, file, line); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (heap->in_collection_set(obj)) { +- print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_in_cset failed", +- "Object should not be in collection set", +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_not_in_cset_loc(void* interior_loc, const char* file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (heap->in_collection_set_loc(interior_loc)) { +- print_failure(_safe_unknown, NULL, interior_loc, NULL, "Shenandoah assert_not_in_cset_loc failed", +- "Interior location should not be in collection set", +- file, line); +- } +-} +- +-void ShenandoahAsserts::print_rp_failure(const char *label, BoolObjectClosure* actual, +- const char *file, int line) { +- ShenandoahMessageBuffer msg("%s\n", label); +- msg.append(" Actual: " PTR_FORMAT "\n", p2i(actual)); +- report_vm_error(file, line, msg.buffer()); +-} +- +-void ShenandoahAsserts::assert_heaplocked(const char* file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (heap->lock()->owned_by_self()) { +- return; +- } +- +- ShenandoahMessageBuffer msg("Heap lock must be owned by current thread"); +- report_vm_error(file, line, msg.buffer()); +-} +- +-void ShenandoahAsserts::assert_not_heaplocked(const char* file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (!heap->lock()->owned_by_self()) { +- return; +- } +- +- ShenandoahMessageBuffer msg("Heap lock must not be owned by current thread"); +- report_vm_error(file, line, msg.buffer()); +-} +- +-void ShenandoahAsserts::assert_heaplocked_or_safepoint(const char* file, int line) { 
+- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (heap->lock()->owned_by_self()) { +- return; +- } +- +- if (ShenandoahSafepoint::is_at_shenandoah_safepoint() && Thread::current()->is_VM_thread()) { +- return; +- } +- +- ShenandoahMessageBuffer msg("Heap lock must be owned by current thread, or be at safepoint"); +- report_vm_error(file, line, msg.buffer()); +-} +- +-void ShenandoahAsserts::assert_rp_isalive_not_installed(const char *file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ReferenceProcessor* rp = heap->ref_processor(); +- if (rp->is_alive_non_header() != NULL) { +- print_rp_failure("Shenandoah assert_rp_isalive_not_installed failed", rp->is_alive_non_header(), +- file, line); +- } +-} +- +-void ShenandoahAsserts::assert_rp_isalive_installed(const char *file, int line) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ReferenceProcessor* rp = heap->ref_processor(); +- if (rp->is_alive_non_header() == NULL) { +- print_rp_failure("Shenandoah assert_rp_isalive_installed failed", rp->is_alive_non_header(), +- file, line); +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahAsserts.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,187 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP +- +-#include "oops/oop.hpp" +-#include "utilities/debug.hpp" +- +-typedef FormatBuffer<8192> ShenandoahMessageBuffer; +- +-class ShenandoahAsserts { +-public: +- enum SafeLevel { +- _safe_unknown, +- _safe_oop, +- _safe_oop_fwd, +- _safe_all +- }; +- +- static void print_obj(ShenandoahMessageBuffer &msg, oop obj); +- +- static void print_non_obj(ShenandoahMessageBuffer &msg, void *loc); +- +- static void print_obj_safe(ShenandoahMessageBuffer &msg, void *loc); +- +- static void print_failure(SafeLevel level, oop obj, void *interior_loc, oop loc, +- const char *phase, const char *label, +- const char *file, int line); +- +- static void print_rp_failure(const char *label, BoolObjectClosure* actual, +- const char *file, int line); +- +- static void assert_in_heap(void* interior_loc, oop obj, const char* file, int line); +- static void assert_in_correct_region(void* interior_loc, oop obj, const char* file, int line); +- +- static void assert_correct(void* interior_loc, oop obj, const char* file, int line); +- static void assert_forwarded(void* interior_loc, oop obj, const char* file, int line); +- static void assert_not_forwarded(void* interior_loc, oop obj, const char* file, int line); +- static void assert_marked(void* interior_loc, oop obj, const char* file, int line); +- static void assert_in_cset(void* 
interior_loc, oop obj, const char* file, int line); +- static void assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line); +- static void assert_not_in_cset_loc(void* interior_loc, const char* file, int line); +- +- static void assert_rp_isalive_not_installed(const char *file, int line); +- static void assert_rp_isalive_installed(const char *file, int line); +- +- static void assert_heaplocked(const char* file, int line); +- static void assert_not_heaplocked(const char* file, int line); +- static void assert_heaplocked_or_safepoint(const char* file, int line); +- +-#ifdef ASSERT +-#define shenandoah_assert_in_heap(interior_loc, obj) \ +- ShenandoahAsserts::assert_in_heap(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_in_correct_region(interior_loc, obj) \ +- ShenandoahAsserts::assert_in_correct_region(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_correct_if(interior_loc, obj, condition) \ +- if (condition) ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_correct_except(interior_loc, obj, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_correct(interior_loc, obj) \ +- ShenandoahAsserts::assert_correct(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_forwarded_if(interior_loc, obj, condition) \ +- if (condition) ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_forwarded_except(interior_loc, obj, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_forwarded(interior_loc, obj) \ +- ShenandoahAsserts::assert_forwarded(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_not_forwarded_if(interior_loc, obj, condition) \ +- if (condition) ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, 
__FILE__, __LINE__) +-#define shenandoah_assert_not_forwarded_except(interior_loc, obj, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_not_forwarded(interior_loc, obj) \ +- ShenandoahAsserts::assert_not_forwarded(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_marked_if(interior_loc, obj, condition) \ +- if (condition) ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_marked_except(interior_loc, obj, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_marked(interior_loc, obj) \ +- ShenandoahAsserts::assert_marked(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_in_cset_if(interior_loc, obj, condition) \ +- if (condition) ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_in_cset_except(interior_loc, obj, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_in_cset(interior_loc, obj) \ +- ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_not_in_cset_if(interior_loc, obj, condition) \ +- if (condition) ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_not_in_cset_except(interior_loc, obj, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__) +-#define shenandoah_assert_not_in_cset(interior_loc, obj) \ +- ShenandoahAsserts::assert_not_in_cset(interior_loc, obj, __FILE__, __LINE__) +- +-#define shenandoah_assert_not_in_cset_loc_if(interior_loc, condition) \ +- if (condition) ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__) +-#define 
shenandoah_assert_not_in_cset_loc_except(interior_loc, exception) \ +- if (!(exception)) ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__) +-#define shenandoah_assert_not_in_cset_loc(interior_loc) \ +- ShenandoahAsserts::assert_not_in_cset_loc(interior_loc, __FILE__, __LINE__) +- +-#define shenandoah_assert_rp_isalive_installed() \ +- ShenandoahAsserts::assert_rp_isalive_installed(__FILE__, __LINE__) +-#define shenandoah_assert_rp_isalive_not_installed() \ +- ShenandoahAsserts::assert_rp_isalive_not_installed(__FILE__, __LINE__) +- +-#define shenandoah_assert_heaplocked() \ +- ShenandoahAsserts::assert_heaplocked(__FILE__, __LINE__) +- +-#define shenandoah_assert_not_heaplocked() \ +- ShenandoahAsserts::assert_not_heaplocked(__FILE__, __LINE__) +- +-#define shenandoah_assert_heaplocked_or_safepoint() \ +- ShenandoahAsserts::assert_heaplocked_or_safepoint(__FILE__, __LINE__) +-#else +-#define shenandoah_assert_in_heap(interior_loc, obj) +-#define shenandoah_assert_in_correct_region(interior_loc, obj) +- +-#define shenandoah_assert_correct_if(interior_loc, obj, condition) +-#define shenandoah_assert_correct_except(interior_loc, obj, exception) +-#define shenandoah_assert_correct(interior_loc, obj) +- +-#define shenandoah_assert_forwarded_if(interior_loc, obj, condition) +-#define shenandoah_assert_forwarded_except(interior_loc, obj, exception) +-#define shenandoah_assert_forwarded(interior_loc, obj) +- +-#define shenandoah_assert_not_forwarded_if(interior_loc, obj, condition) +-#define shenandoah_assert_not_forwarded_except(interior_loc, obj, exception) +-#define shenandoah_assert_not_forwarded(interior_loc, obj) +- +-#define shenandoah_assert_marked_if(interior_loc, obj, condition) +-#define shenandoah_assert_marked_except(interior_loc, obj, exception) +-#define shenandoah_assert_marked(interior_loc, obj) +- +-#define shenandoah_assert_in_cset_if(interior_loc, obj, condition) +-#define shenandoah_assert_in_cset_except(interior_loc, obj, 
exception) +-#define shenandoah_assert_in_cset(interior_loc, obj) +- +-#define shenandoah_assert_not_in_cset_if(interior_loc, obj, condition) +-#define shenandoah_assert_not_in_cset_except(interior_loc, obj, exception) +-#define shenandoah_assert_not_in_cset(interior_loc, obj) +- +-#define shenandoah_assert_not_in_cset_loc_if(interior_loc, condition) +-#define shenandoah_assert_not_in_cset_loc_except(interior_loc, exception) +-#define shenandoah_assert_not_in_cset_loc(interior_loc) +- +-#define shenandoah_assert_rp_isalive_installed() +-#define shenandoah_assert_rp_isalive_not_installed() +- +-#define shenandoah_assert_heaplocked() +-#define shenandoah_assert_not_heaplocked() +-#define shenandoah_assert_heaplocked_or_safepoint() +-#endif +- +-#define shenandoah_not_implemented \ +- { fatal("Deliberately not implemented."); } +-#define shenandoah_not_implemented_return(v) \ +- { fatal("Deliberately not implemented."); return v; } +- +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHASSERTS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetAssembler_stub.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetAssembler_stub.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetAssembler_stub.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetAssembler_stub.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,34 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_STUB_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_STUB_HPP +- +-#include "asm/macroAssembler.hpp" +-#include "memory/allocation.hpp" +- +-class ShenandoahBarrierSetAssembler : public CHeapObj { +- +-}; +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETASSEMBLER_STUB_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,113 +0,0 @@ +-/* +- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "memory/iterator.hpp" +-#include "oops/oop.inline.hpp" +- +-template +-class ShenandoahUpdateRefsForOopClosure: public ExtendedOopClosure { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahBarrierSet* const _bs; +- const ShenandoahCollectionSet* const _cset; +- Thread* const _thread; +- +- template +- inline void do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- if (HAS_FWD && _cset->is_in(obj)) { +- oop fwd = _bs->resolve_forwarded_not_null(obj); +- if (EVAC && obj == fwd) { +- fwd = _heap->evacuate_object(obj, _thread); +- } +- assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded"); +- ShenandoahHeap::cas_oop(fwd, p, o); +- obj = fwd; +- } +- if (ENQUEUE) { +- _bs->enqueue(obj); +- } +- } +- } +-public: +- ShenandoahUpdateRefsForOopClosure() : +- _heap(ShenandoahHeap::heap()), +- _bs(ShenandoahBarrierSet::barrier_set()), +- _cset(_heap->collection_set()), +- 
_thread(Thread::current()) { +- } +- +- virtual void do_oop(oop* p) { do_oop_work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_work(p); } +-}; +- +-void ShenandoahBarrierSet::clone_marking(oop obj) { +- assert(_heap->is_concurrent_mark_in_progress(), "only during marking"); +- assert(ShenandoahStoreValEnqueueBarrier, "only with incremental-update"); +- if (!_heap->marking_context()->allocated_after_mark_start(obj)) { +- ShenandoahUpdateRefsForOopClosure cl; +- obj->oop_iterate(&cl); +- } +-} +- +-void ShenandoahBarrierSet::clone_evacuation(oop obj) { +- assert(_heap->is_evacuation_in_progress(), "only during evacuation"); +- if (need_bulk_update(cast_from_oop(obj))) { +- ShenandoahEvacOOMScope oom_evac_scope; +- ShenandoahUpdateRefsForOopClosure cl; +- obj->oop_iterate(&cl); +- } +-} +- +-void ShenandoahBarrierSet::clone_update(oop obj) { +- assert(_heap->is_update_refs_in_progress(), "only during update-refs"); +- if (need_bulk_update(cast_from_oop(obj))) { +- ShenandoahUpdateRefsForOopClosure cl; +- obj->oop_iterate(&cl); +- } +-} +- +-void ShenandoahBarrierSet::clone_barrier(oop obj) { +- assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled"); +- shenandoah_assert_correct(NULL, obj); +- +- int gc_state = _heap->gc_state(); +- if ((gc_state & ShenandoahHeap::MARKING) != 0) { +- clone_marking(obj); +- } else if ((gc_state & ShenandoahHeap::EVACUATION) != 0) { +- clone_evacuation(obj); +- } else { +- clone_update(obj); +- } +-} +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETCLONE_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,270 +0,0 @@ +-/* +- 
* Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "runtime/interfaceSupport.hpp" +-#include "utilities/macros.hpp" +- +-#ifdef COMPILER1 +-#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp" +-#endif +-#ifdef COMPILER2 +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#endif +- +-#if defined(TARGET_ARCH_aarch64) +-#include "shenandoahBarrierSetAssembler_aarch64.hpp" +-#elif defined(TARGET_ARCH_x86) +-#include "shenandoahBarrierSetAssembler_x86.hpp" +-#else +-#include "shenandoahBarrierSetAssembler_stub.hpp" +-#endif +- +-ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) : +- BarrierSet(), +- _heap(heap), +- _bsasm(new ShenandoahBarrierSetAssembler()), +- _bsc1(COMPILER1_PRESENT(new ShenandoahBarrierSetC1()) NOT_COMPILER1(NULL)), +- _bsc2(COMPILER2_PRESENT(new ShenandoahBarrierSetC2()) NOT_COMPILER2(NULL)) +-{ +- _kind = BarrierSet::ShenandoahBarrierSet; +-} +- +-ShenandoahBarrierSetAssembler* ShenandoahBarrierSet::bsasm() const { +- return _bsasm; +-} +- +-ShenandoahBarrierSetC1* ShenandoahBarrierSet::bsc1() const { +- return _bsc1; +-} +- +-ShenandoahBarrierSetC2* ShenandoahBarrierSet::bsc2() const { +- return _bsc2; +-} +- +-void ShenandoahBarrierSet::print_on(outputStream* st) const { +- st->print("ShenandoahBarrierSet"); +-} +- +-bool ShenandoahBarrierSet::is_a(BarrierSet::Name bsn) { +- return bsn == BarrierSet::ShenandoahBarrierSet; +-} +- +-bool ShenandoahBarrierSet::has_read_prim_array_opt() { +- return true; +-} +- 
+-bool ShenandoahBarrierSet::has_read_prim_barrier() { +- return false; +-} +- +-bool ShenandoahBarrierSet::has_read_ref_array_opt() { +- return true; +-} +- +-bool ShenandoahBarrierSet::has_read_ref_barrier() { +- return false; +-} +- +-bool ShenandoahBarrierSet::has_read_region_opt() { +- return true; +-} +- +-bool ShenandoahBarrierSet::has_write_prim_array_opt() { +- return true; +-} +- +-bool ShenandoahBarrierSet::has_write_prim_barrier() { +- return false; +-} +- +-bool ShenandoahBarrierSet::has_write_ref_array_opt() { +- return true; +-} +- +-bool ShenandoahBarrierSet::has_write_ref_barrier() { +- return true; +-} +- +-bool ShenandoahBarrierSet::has_write_ref_pre_barrier() { +- return true; +-} +- +-bool ShenandoahBarrierSet::has_write_region_opt() { +- return true; +-} +- +-bool ShenandoahBarrierSet::is_aligned(HeapWord* hw) { +- return true; +-} +- +-bool ShenandoahBarrierSet::read_prim_needs_barrier(HeapWord* hw, size_t s) { +- return false; +-} +- +-void ShenandoahBarrierSet::read_ref_field(void* v) { +- // tty->print_cr("read_ref_field: v = "PTR_FORMAT, v); +- // return *v; +-} +- +-template +-inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop newVal) { +- newVal = load_reference_barrier(newVal); +- storeval_barrier(newVal); +- if (ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) { +- T heap_oop = oopDesc::load_heap_oop(field); +- shenandoah_assert_not_in_cset_loc_except(field, ShenandoahHeap::heap()->cancelled_gc()); +- if (!oopDesc::is_null(heap_oop)) { +- ShenandoahBarrierSet::barrier_set()->enqueue(oopDesc::decode_heap_oop(heap_oop)); +- } +- } +-} +- +-// These are the more general virtual versions. 
+-void ShenandoahBarrierSet::write_ref_field_pre_work(oop* field, oop new_val) { +- inline_write_ref_field_pre(field, new_val); +-} +- +-void ShenandoahBarrierSet::write_ref_field_pre_work(narrowOop* field, oop new_val) { +- inline_write_ref_field_pre(field, new_val); +-} +- +-void ShenandoahBarrierSet::write_ref_field_work(void* v, oop o, bool release) { +- shenandoah_assert_not_in_cset_loc_except(v, _heap->cancelled_gc()); +- shenandoah_assert_not_forwarded_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress()); +- shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress()); +-} +- +-oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) { +- assert(obj != NULL, ""); +- if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) { +- return load_reference_barrier_impl(obj); +- } else { +- return obj; +- } +-} +- +-oop ShenandoahBarrierSet::load_reference_barrier(oop obj) { +- if (obj != NULL) { +- return load_reference_barrier_not_null(obj); +- } else { +- return obj; +- } +-} +- +- +-oop ShenandoahBarrierSet::load_reference_barrier_impl(oop obj) { +- assert(ShenandoahLoadRefBarrier, "should be enabled"); +- if (!oopDesc::is_null(obj)) { +- oop fwd = resolve_forwarded_not_null(obj); +- if (_heap->is_evacuation_in_progress() && +- _heap->in_collection_set(obj) && +- obj == fwd) { +- Thread *t = Thread::current(); +- ShenandoahEvacOOMScope oom_evac_scope; +- return _heap->evacuate_object(obj, t); +- } else { +- return fwd; +- } +- } else { +- return obj; +- } +-} +- +-void ShenandoahBarrierSet::storeval_barrier(oop obj) { +- if (ShenandoahStoreValEnqueueBarrier && !oopDesc::is_null(obj) && _heap->is_concurrent_mark_in_progress()) { +- enqueue(obj); +- } +-} +- +-void ShenandoahBarrierSet::keep_alive_barrier(oop obj) { +- if (_heap->is_concurrent_mark_in_progress()) { +- enqueue(obj); +- } +-} +- +-void ShenandoahBarrierSet::enqueue(oop obj) { +- 
assert(JavaThread::satb_mark_queue_set().shared_satb_queue()->is_active(), "only get here when SATB active"); +- +- // Filter marked objects before hitting the SATB queues. The same predicate would +- // be used by SATBMQ::filter to eliminate already marked objects downstream, but +- // filtering here helps to avoid wasteful SATB queueing work to begin with. +- if (!_heap->requires_marking(obj)) return; +- +- Thread* thr = Thread::current(); +- if (thr->is_Java_thread()) { +- JavaThread* jt = (JavaThread*)thr; +- jt->satb_mark_queue().enqueue_known_active(obj); +- } else { +- MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag); +- JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue_known_active(obj); +- } +-} +- +-oop ShenandoahBarrierSet::atomic_compare_exchange_oop(oop exchange_value, +- volatile HeapWord *dest, +- oop compare_value) { +- if (UseCompressedOops) { +- // encode exchange and compare value from oop to T +- narrowOop val = oopDesc::encode_heap_oop(exchange_value); +- narrowOop cmp = oopDesc::encode_heap_oop(compare_value); +- +- narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp); +- // decode old from T to oop +- return oopDesc::decode_heap_oop(old); +- } else { +- return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value); +- } +-} +- +-oop ShenandoahBarrierSet::oop_atomic_cmpxchg_in_heap(oop new_value, volatile HeapWord* dest, oop compare_value) { +- oop expected; +- bool success; +- do { +- expected = compare_value; +- compare_value = atomic_compare_exchange_oop(new_value, dest, expected); +- success = (compare_value == expected); +- } while ((! 
success) && resolve_forwarded(compare_value) == resolve_forwarded(expected)); +- oop result = load_reference_barrier(compare_value); +- if (ShenandoahSATBBarrier && success && result != NULL && +- ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) { +- enqueue(result); +- } +- if (new_value != NULL) { +- storeval_barrier(new_value); +- } +- return result; +-} +- +-void ShenandoahBarrierSet::clone_barrier_runtime(oop src) { +- if (_heap->has_forwarded_objects() || (ShenandoahStoreValEnqueueBarrier && _heap->is_concurrent_mark_in_progress())) { +- clone_barrier(src); +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,146 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP +- +-#include "memory/barrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +- +-class ShenandoahBarrierSetAssembler; +-class ShenandoahBarrierSetC1; +-class ShenandoahBarrierSetC2; +-class ShenandoahHeap; +- +-class ShenandoahBarrierSet: public BarrierSet { +-private: +- ShenandoahHeap* _heap; +- ShenandoahBarrierSetAssembler* const _bsasm; +- ShenandoahBarrierSetC1* const _bsc1; +- ShenandoahBarrierSetC2* const _bsc2; +- +- inline bool need_bulk_update(HeapWord* dst); +-public: +- ShenandoahBarrierSet(ShenandoahHeap* heap); +- +- inline static ShenandoahBarrierSet* barrier_set() { +- BarrierSet *bs = oopDesc::bs(); +- assert(bs->kind() == BarrierSet::ShenandoahBarrierSet, "sanity"); +- return (ShenandoahBarrierSet*)bs; +- } +- +- ShenandoahBarrierSetAssembler* bsasm() const; +- ShenandoahBarrierSetC1* bsc1() const; +- ShenandoahBarrierSetC2* bsc2() const; +- +- void print_on(outputStream* st) const; +- +- bool is_a(BarrierSet::Name bsn); +- +- bool has_read_prim_array_opt(); +- bool has_read_prim_barrier(); +- bool has_read_ref_array_opt(); +- bool has_read_ref_barrier(); +- bool has_read_region_opt(); +- bool has_write_prim_array_opt(); +- bool has_write_prim_barrier(); +- bool has_write_ref_array_opt(); +- bool has_write_ref_barrier(); +- bool has_write_ref_pre_barrier(); +- bool has_write_region_opt(); +- bool is_aligned(HeapWord* hw); +- void read_prim_array(MemRegion mr) shenandoah_not_implemented; +- void read_prim_field(HeapWord* hw, size_t s) shenandoah_not_implemented; +- bool read_prim_needs_barrier(HeapWord* hw, size_t s); +- void read_ref_array(MemRegion mr) shenandoah_not_implemented; +- +- void read_ref_field(void* v); +- +- 
bool read_ref_needs_barrier(void* v) shenandoah_not_implemented_return(false); +- void read_region(MemRegion mr) shenandoah_not_implemented; +- void resize_covered_region(MemRegion mr) shenandoah_not_implemented; +- void write_prim_array(MemRegion mr) shenandoah_not_implemented; +- void write_prim_field(HeapWord* hw, size_t s , juint x, juint y) shenandoah_not_implemented; +- bool write_prim_needs_barrier(HeapWord* hw, size_t s, juint x, juint y) shenandoah_not_implemented_return(false); +- +- void write_ref_array_work(MemRegion mr) {} +- +- template +- inline void arraycopy_barrier(T* src, T* dst, size_t count); +- inline void clone_barrier(oop src); +- void clone_barrier_runtime(oop src); +- +- // We export this to make it available in cases where the static +- // type of the barrier set is known. Note that it is non-virtual. +- template inline void inline_write_ref_field_pre(T* field, oop newVal); +- +- // These are the more general virtual versions. +- void write_ref_field_pre_work(oop* field, oop new_val); +- void write_ref_field_pre_work(narrowOop* field, oop new_val); +- void write_ref_field_pre_work(void* field, oop new_val) shenandoah_not_implemented; +- +- void write_ref_field_work(void* v, oop o, bool release = false); +- void write_region_work(MemRegion mr) {}; +- +- static inline oop resolve_forwarded_not_null(oop p); +- static inline oop resolve_forwarded_not_null_mutator(oop p); +- static inline oop resolve_forwarded(oop p); +- +- void storeval_barrier(oop obj); +- +- oop load_reference_barrier(oop obj); +- oop load_reference_barrier_not_null(oop obj); +- inline oop load_reference_barrier_mutator(oop obj, oop* load_addr); +- inline oop load_reference_barrier_mutator(oop obj, narrowOop* load_addr); +- +- template +- inline oop load_reference_barrier_mutator_work(oop obj, T* load_addr); +- +- oop oop_atomic_cmpxchg_in_heap(oop new_value, volatile HeapWord* dest, oop compare_value); +- +- void enqueue(oop obj); +- void keep_alive_barrier(oop obj); +- 
+-private: +- template +- inline void arraycopy_marking(T* src, T* dst, size_t count); +- template +- inline void arraycopy_evacuation(T* src, size_t count); +- template +- inline void arraycopy_update(T* src, size_t count); +- +- inline void clone_marking(oop src); +- inline void clone_evacuation(oop src); +- inline void clone_update(oop src); +- +- template +- inline void arraycopy_work(T* src, size_t count); +- +- oop load_reference_barrier_impl(oop obj); +- +- oop atomic_compare_exchange_oop(oop exchange_value, +- volatile HeapWord *dest, +- oop compare_value); +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,154 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "memory/iterator.inline.hpp" +-#include "oops/oop.inline.hpp" +- +-inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) { +- return ShenandoahForwarding::get_forwardee(p); +-} +- +-inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) { +- if (((HeapWord*) p) != NULL) { +- return resolve_forwarded_not_null(p); +- } else { +- return p; +- } +-} +- +-inline oop ShenandoahBarrierSet::resolve_forwarded_not_null_mutator(oop p) { +- return ShenandoahForwarding::get_forwardee_mutator(p); +-} +- +-inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) { +- return load_reference_barrier_mutator_work(obj, load_addr); +-} +- +-inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) { +- return load_reference_barrier_mutator_work(obj, load_addr); +-} +- +-template +-oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) { +- assert(ShenandoahLoadRefBarrier, "should be enabled"); +- shenandoah_assert_in_cset(load_addr, obj); +- +- oop fwd = resolve_forwarded_not_null_mutator(obj); +- if (obj == fwd) { +- assert(_heap->is_evacuation_in_progress(), +- "evac should be in progress"); +- ShenandoahEvacOOMScope scope; +- fwd = _heap->evacuate_object(obj, Thread::current()); +- } +- 
+- if (load_addr != NULL && fwd != obj) { +- // Since we are here and we know the load address, update the reference. +- ShenandoahHeap::cas_oop(fwd, load_addr, obj); +- } +- +- return fwd; +-} +- +-template +-void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) { +- assert(HAS_FWD == _heap->has_forwarded_objects(), "Forwarded object status is sane"); +- +- JavaThread* thread = JavaThread::current(); +- ObjPtrQueue& queue = thread->satb_mark_queue(); +- ShenandoahMarkingContext* ctx = _heap->marking_context(); +- const ShenandoahCollectionSet* const cset = _heap->collection_set(); +- T* end = src + count; +- for (T* elem_ptr = src; elem_ptr < end; elem_ptr++) { +- T o = oopDesc::load_heap_oop(elem_ptr); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- if (HAS_FWD && cset->is_in(obj)) { +- oop fwd = resolve_forwarded_not_null(obj); +- if (EVAC && obj == fwd) { +- fwd = _heap->evacuate_object(obj, thread); +- } +- assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded"); +- oop witness = ShenandoahHeap::cas_oop(fwd, elem_ptr, o); +- obj = fwd; +- } +- if (ENQUEUE && !ctx->is_marked(obj)) { +- queue.enqueue_known_active(obj); +- } +- } +- } +-} +- +-template +-void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) { +- if (count == 0) { +- return; +- } +- int gc_state = _heap->gc_state(); +- if ((gc_state & ShenandoahHeap::MARKING) != 0) { +- arraycopy_marking(src, dst, count); +- } else if ((gc_state & ShenandoahHeap::EVACUATION) != 0) { +- arraycopy_evacuation(src, count); +- } else if ((gc_state & ShenandoahHeap::UPDATEREFS) != 0) { +- arraycopy_update(src, count); +- } +-} +- +-template +-void ShenandoahBarrierSet::arraycopy_marking(T* src, T* dst, size_t count) { +- assert(_heap->is_concurrent_mark_in_progress(), "only during marking"); +- T* array = ShenandoahSATBBarrier ? 
dst : src; +- if (!_heap->marking_context()->allocated_after_mark_start(reinterpret_cast(array))) { +- arraycopy_work(array, count); +- } +-} +- +-inline bool ShenandoahBarrierSet::need_bulk_update(HeapWord* ary) { +- return ary < _heap->heap_region_containing(ary)->get_update_watermark(); +-} +- +-template +-void ShenandoahBarrierSet::arraycopy_evacuation(T* src, size_t count) { +- assert(_heap->is_evacuation_in_progress(), "only during evacuation"); +- if (need_bulk_update(reinterpret_cast(src))) { +- ShenandoahEvacOOMScope oom_evac; +- arraycopy_work(src, count); +- } +-} +- +-template +-void ShenandoahBarrierSet::arraycopy_update(T* src, size_t count) { +- assert(_heap->is_update_refs_in_progress(), "only during update-refs"); +- if (need_bulk_update(reinterpret_cast(src))) { +- arraycopy_work(src, count); +- } +-} +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,94 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP +- +-#include "memory/iterator.hpp" +- +-class ShenandoahHeap; +-class ShenandoahMarkingContext; +-class Thread; +- +-class ShenandoahForwardedIsAliveClosure: public BoolObjectClosure { +-private: +- ShenandoahMarkingContext* const _mark_context; +-public: +- inline ShenandoahForwardedIsAliveClosure(); +- inline bool do_object_b(oop obj); +-}; +- +-class ShenandoahIsAliveClosure: public BoolObjectClosure { +-private: +- ShenandoahMarkingContext* const _mark_context; +-public: +- inline ShenandoahIsAliveClosure(); +- inline bool do_object_b(oop obj); +-}; +- +-class ShenandoahIsAliveSelector : public StackObj { +-private: +- ShenandoahIsAliveClosure _alive_cl; +- ShenandoahForwardedIsAliveClosure _fwd_alive_cl; +-public: +- inline BoolObjectClosure* is_alive_closure(); +-}; +- +-class ShenandoahUpdateRefsClosure: public OopClosure { +-private: +- ShenandoahHeap* _heap; +-public: +- inline ShenandoahUpdateRefsClosure(); +- inline void do_oop(oop* p); +- inline void do_oop(narrowOop* p); +-private: +- template +- inline void do_oop_work(T* p); +-}; +- +-class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure { +-private: +- ShenandoahHeap* _heap; +- Thread* _thread; +-public: +- inline ShenandoahEvacuateUpdateRootsClosure(); +- inline void do_oop(oop* p); +- inline void do_oop(narrowOop* p); +- +-private: +- template +- inline void do_oop_work(T* p); +-}; +- +-#ifdef ASSERT +-class ShenandoahAssertNotForwardedClosure : public OopClosure { 
+-private: +- template +- inline void do_oop_work(T* p); +- +-public: +- inline void do_oop(narrowOop* p); +- inline void do_oop(oop* p); +-}; +-#endif // ASSERT +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahClosures.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,122 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahClosures.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "runtime/thread.hpp" +- +-ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() : +- _mark_context(ShenandoahHeap::heap()->marking_context()) { +-} +- +-bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) { +- if (oopDesc::is_null(obj)) { +- return false; +- } +- obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); +- shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress()); +- return _mark_context->is_marked(obj); +-} +- +-ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() : +- _mark_context(ShenandoahHeap::heap()->marking_context()) { +-} +- +-bool ShenandoahIsAliveClosure::do_object_b(oop obj) { +- if (oopDesc::is_null(obj)) { +- return false; +- } +- shenandoah_assert_not_forwarded(NULL, obj); +- return _mark_context->is_marked(obj); +-} +- +-BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() { +- return ShenandoahHeap::heap()->has_forwarded_objects() ? 
+- reinterpret_cast(&_fwd_alive_cl) : +- reinterpret_cast(&_alive_cl); +-} +- +-ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : +- _heap(ShenandoahHeap::heap()) { +-} +- +-template +-void ShenandoahUpdateRefsClosure::do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- _heap->update_with_forwarded_not_null(p, obj); +- } +-} +- +-void ShenandoahUpdateRefsClosure::do_oop(oop* p) { do_oop_work(p); } +-void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); } +- +-ShenandoahEvacuateUpdateRootsClosure::ShenandoahEvacuateUpdateRootsClosure() : +- _heap(ShenandoahHeap::heap()), _thread(Thread::current()) { +-} +- +-template +-void ShenandoahEvacuateUpdateRootsClosure::do_oop_work(T* p) { +- assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress"); +- +- T o = oopDesc::load_heap_oop(p); +- if (! oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- if (_heap->in_collection_set(obj)) { +- shenandoah_assert_marked(p, obj); +- oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); +- if (resolved == obj) { +- resolved = _heap->evacuate_object(obj, _thread); +- } +- oopDesc::encode_store_heap_oop(p, resolved); +- } +- } +-} +- +-void ShenandoahEvacuateUpdateRootsClosure::do_oop(oop* p) { +- do_oop_work(p); +-} +- +-void ShenandoahEvacuateUpdateRootsClosure::do_oop(narrowOop* p) { +- do_oop_work(p); +-} +- +-#ifdef ASSERT +-template +-void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- shenandoah_assert_not_forwarded(p, obj); +- } +-} +- +-void ShenandoahAssertNotForwardedClosure::do_oop(narrowOop* p) { do_oop_work(p); } +-void ShenandoahAssertNotForwardedClosure::do_oop(oop* p) { do_oop_work(p); } +-#endif +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP 
+diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,311 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "code/codeCache.hpp" +-#include "code/nmethod.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp" +-#include "memory/resourceArea.hpp" +-#include "runtime/vmThread.hpp" +- +-ShenandoahParallelCodeCacheIterator::ShenandoahParallelCodeCacheIterator() : +- _claimed_idx(0), _finished(false) { +-} +- +-void ShenandoahParallelCodeCacheIterator::parallel_blobs_do(CodeBlobClosure* f) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); +- +- /* +- * Parallel code heap walk. +- * +- * This code makes all threads scan all code heaps, but only one thread would execute the +- * closure on given blob. This is achieved by recording the "claimed" blocks: if a thread +- * had claimed the block, it can process all blobs in it. Others have to fast-forward to +- * next attempt without processing. +- * +- * Late threads would return immediately if iterator is finished. +- */ +- +- if (_finished) { +- return; +- } +- +- int stride = 256; // educated guess +- int stride_mask = stride - 1; +- assert (is_power_of_2(stride), "sanity"); +- +- int count = 0; +- bool process_block = true; +- +- for (CodeBlob *cb = CodeCache::first(); cb != NULL; cb = CodeCache::next(cb)) { +- int current = count++; +- if ((current & stride_mask) == 0) { +- process_block = (current >= _claimed_idx) && +- (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current); +- } +- if (process_block) { +- if (cb->is_alive()) { +- f->do_code_blob(cb); +-#ifdef ASSERT +- if (cb->is_nmethod()) +- ((nmethod*)cb)->verify_scavenge_root_oops(); +-#endif +- } +- } +- } +- +- _finished = true; +-} +- +-class ShenandoahNMethodOopDetector : public OopClosure { +-private: +- ResourceMark rm; // For growable array allocation below. 
+- GrowableArray _oops; +- +-public: +- ShenandoahNMethodOopDetector() : _oops(10) {}; +- +- void do_oop(oop* o) { +- _oops.append(o); +- } +- +- void do_oop(narrowOop* o) { +- fatal("NMethods should not have compressed oops embedded."); +- } +- +- GrowableArray* oops() { +- return &_oops; +- } +- +- bool has_oops() { +- return !_oops.is_empty(); +- } +-}; +- +-ShenandoahCodeRoots::PaddedLock ShenandoahCodeRoots::_recorded_nms_lock; +-GrowableArray* ShenandoahCodeRoots::_recorded_nms; +- +-void ShenandoahCodeRoots::initialize() { +- _recorded_nms_lock._lock = 0; +- _recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray(100, true, mtGC); +-} +- +-void ShenandoahCodeRoots::add_nmethod(nmethod* nm) { +- switch (ShenandoahCodeRootsStyle) { +- case 0: +- case 1: +- break; +- case 2: { +- ShenandoahNMethodOopDetector detector; +- nm->oops_do(&detector); +- +- if (detector.has_oops()) { +- ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops()); +- nmr->assert_alive_and_correct(); +- +- ShenandoahCodeRootsLock lock(true); +- +- int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); +- if (idx != -1) { +- ShenandoahNMethod* old = _recorded_nms->at(idx); +- _recorded_nms->at_put(idx, nmr); +- delete old; +- } else { +- _recorded_nms->append(nmr); +- } +- } +- break; +- } +- default: +- ShouldNotReachHere(); +- } +-}; +- +-void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) { +- switch (ShenandoahCodeRootsStyle) { +- case 0: +- case 1: { +- break; +- } +- case 2: { +- ShenandoahNMethodOopDetector detector; +- nm->oops_do(&detector, /* allow_zombie = */ true); +- +- if (detector.has_oops()) { +- ShenandoahCodeRootsLock lock(true); +- +- int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod); +- assert(idx != -1, err_msg("nmethod " PTR_FORMAT " should be registered", p2i(nm))); +- ShenandoahNMethod* old = _recorded_nms->at(idx); +- old->assert_same_oops(detector.oops()); +- _recorded_nms->delete_at(idx); +- delete old; +- 
} +- break; +- } +- default: +- ShouldNotReachHere(); +- } +-} +- +-ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : +- _heap(ShenandoahHeap::heap()), +- _claimed(0) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); +- assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers"); +- switch (ShenandoahCodeRootsStyle) { +- case 0: +- case 1: +- break; +- case 2: +- ShenandoahCodeRoots::acquire_lock(false); +- break; +- default: +- ShouldNotReachHere(); +- } +-} +- +-ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() { +- switch (ShenandoahCodeRootsStyle) { +- case 0: +- case 1: { +- // No need to do anything here +- break; +- } +- case 2: { +- ShenandoahCodeRoots::release_lock(false); +- break; +- } +- default: +- ShouldNotReachHere(); +- } +-} +- +-template +-void ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(CodeBlobClosure *f) { +- switch (ShenandoahCodeRootsStyle) { +- case 0: { +- if (_seq_claimed.try_set()) { +- CodeCache::blobs_do(f); +- } +- break; +- } +- case 1: { +- _par_iterator.parallel_blobs_do(f); +- break; +- } +- case 2: { +- ShenandoahCodeRootsIterator::fast_parallel_blobs_do(f); +- break; +- } +- default: +- ShouldNotReachHere(); +- } +-} +- +-void ShenandoahAllCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) { +- ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(f); +-} +- +-void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) { +- ShenandoahCodeRootsIterator::dispatch_parallel_blobs_do(f); +-} +- +-template +-void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); +- +- size_t stride = 256; // educated guess +- +- GrowableArray* list = ShenandoahCodeRoots::_recorded_nms; +- +- jlong max = list->length(); +- while (_claimed < max) { +- size_t cur = (size_t)Atomic::add(stride, &_claimed); // Note: in JDK 8, add() 
returns old value +- size_t start = cur; +- size_t end = MIN2(cur + stride, (size_t) max); +- if (start >= (size_t)max) break; +- +- for (size_t idx = start; idx < end; idx++) { +- ShenandoahNMethod* nmr = list->at((int) idx); +- nmr->assert_alive_and_correct(); +- +- if (CSET_FILTER && !nmr->has_cset_oops(_heap)) { +- continue; +- } +- +- f->do_code_blob(nmr->nm()); +- } +- } +-} +- +-ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray* oops) { +- _nm = nm; +- _oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC); +- _oops_count = oops->length(); +- for (int c = 0; c < _oops_count; c++) { +- _oops[c] = oops->at(c); +- } +-} +- +-ShenandoahNMethod::~ShenandoahNMethod() { +- if (_oops != NULL) { +- FREE_C_HEAP_ARRAY(oop*, _oops, mtGC); +- } +-} +- +-bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) { +- for (int c = 0; c < _oops_count; c++) { +- oop o = oopDesc::load_heap_oop(_oops[c]); +- if (heap->in_collection_set(o)) { +- return true; +- } +- } +- return false; +-} +- +-#ifdef ASSERT +-void ShenandoahNMethod::assert_alive_and_correct() { +- assert(_nm->is_alive(), "only alive nmethods here"); +- assert(_oops_count > 0, "should have filtered nmethods without oops before"); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- for (int c = 0; c < _oops_count; c++) { +- oop *loc = _oops[c]; +- assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*"); +- oop o = oopDesc::load_heap_oop(loc); +- shenandoah_assert_correct_except(loc, o, +- o == NULL || +- heap->is_full_gc_move_in_progress() || +- (VMThread::vm_operation() != NULL) && (VMThread::vm_operation()->type() == VM_Operation::VMOp_HeapWalkOperation) +- ); +- } +-} +- +-void ShenandoahNMethod::assert_same_oops(GrowableArray* oops) { +- assert(_oops_count == oops->length(), "should have the same number of oop*"); +- for (int c = 0; c < _oops_count; c++) { +- assert(_oops[c] == oops->at(c), "should be the same oop*"); +- } +-} +-#endif +diff -uNr 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCodeRoots.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,181 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP +- +-#include "code/codeCache.hpp" +-#include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "memory/allocation.hpp" +-#include "memory/iterator.hpp" +- +-class ShenandoahHeap; +-class ShenandoahHeapRegion; +-class ShenandoahCodeRootsLock; +- +-class ShenandoahParallelCodeCacheIterator VALUE_OBJ_CLASS_SPEC { +- friend class CodeCache; +-private: +- shenandoah_padding(0); +- volatile int _claimed_idx; +- volatile bool _finished; +- shenandoah_padding(1); +- +- // Noncopyable. +- ShenandoahParallelCodeCacheIterator(const ShenandoahParallelCodeCacheIterator& o); +- ShenandoahParallelCodeCacheIterator& operator=(const ShenandoahParallelCodeCacheIterator& o); +-public: +- ShenandoahParallelCodeCacheIterator(); +- void parallel_blobs_do(CodeBlobClosure* f); +-}; +- +-// ShenandoahNMethod tuple records the internal locations of oop slots within the nmethod. +-// This allows us to quickly scan the oops without doing the nmethod-internal scans, that +-// sometimes involves parsing the machine code. Note it does not record the oops themselves, +-// because it would then require handling these tuples as the new class of roots. 
+-class ShenandoahNMethod : public CHeapObj { +-private: +- nmethod* _nm; +- oop** _oops; +- int _oops_count; +- +-public: +- ShenandoahNMethod(nmethod *nm, GrowableArray* oops); +- ~ShenandoahNMethod(); +- +- nmethod* nm() { +- return _nm; +- } +- +- bool has_cset_oops(ShenandoahHeap* heap); +- +- void assert_alive_and_correct() NOT_DEBUG_RETURN; +- void assert_same_oops(GrowableArray* oops) NOT_DEBUG_RETURN; +- +- static bool find_with_nmethod(void* nm, ShenandoahNMethod* other) { +- return other->_nm == nm; +- } +-}; +- +-class ShenandoahCodeRootsIterator VALUE_OBJ_CLASS_SPEC { +- friend class ShenandoahCodeRoots; +-protected: +- ShenandoahHeap* _heap; +- ShenandoahParallelCodeCacheIterator _par_iterator; +- ShenandoahSharedFlag _seq_claimed; +- char _pad0[DEFAULT_CACHE_LINE_SIZE]; +- volatile jlong _claimed; +- char _pad1[DEFAULT_CACHE_LINE_SIZE]; +-protected: +- ShenandoahCodeRootsIterator(); +- ~ShenandoahCodeRootsIterator(); +- +- template +- void dispatch_parallel_blobs_do(CodeBlobClosure *f); +- +- template +- void fast_parallel_blobs_do(CodeBlobClosure *f); +-}; +- +-class ShenandoahAllCodeRootsIterator : public ShenandoahCodeRootsIterator { +-public: +- ShenandoahAllCodeRootsIterator() : ShenandoahCodeRootsIterator() {}; +- void possibly_parallel_blobs_do(CodeBlobClosure *f); +-}; +- +-class ShenandoahCsetCodeRootsIterator : public ShenandoahCodeRootsIterator { +-public: +- ShenandoahCsetCodeRootsIterator() : ShenandoahCodeRootsIterator() {}; +- void possibly_parallel_blobs_do(CodeBlobClosure* f); +-}; +- +-class ShenandoahCodeRoots : public AllStatic { +- friend class ShenandoahHeap; +- friend class ShenandoahCodeRootsLock; +- friend class ShenandoahCodeRootsIterator; +- +-public: +- static void initialize(); +- static void add_nmethod(nmethod* nm); +- static void remove_nmethod(nmethod* nm); +- +-private: +- struct PaddedLock { +- char _pad0[DEFAULT_CACHE_LINE_SIZE]; +- volatile int _lock; +- char _pad1[DEFAULT_CACHE_LINE_SIZE]; +- }; +- +- static 
PaddedLock _recorded_nms_lock; +- static GrowableArray* _recorded_nms; +- +- static void acquire_lock(bool write) { +- volatile int* loc = &_recorded_nms_lock._lock; +- if (write) { +- while ((OrderAccess::load_acquire(loc) != 0) || +- Atomic::cmpxchg(-1, loc, 0) != 0) { +- SpinPause(); +- } +- assert (*loc == -1, "acquired for write"); +- } else { +- while (true) { +- jint cur = OrderAccess::load_acquire(loc); +- if (cur >= 0) { +- if (Atomic::cmpxchg(cur + 1, loc, cur) == cur) { +- // Success! +- assert (*loc > 0, "acquired for read"); +- return; +- } +- } +- SpinPause(); +- } +- } +- } +- +- static void release_lock(bool write) { +- volatile int* loc = &ShenandoahCodeRoots::_recorded_nms_lock._lock; +- if (write) { +- OrderAccess::release_store_fence(loc, 0); +- } else { +- Atomic::dec(loc); +- } +- } +-}; +- +-// Very simple unranked read-write lock +-class ShenandoahCodeRootsLock : public StackObj { +- friend class ShenandoahCodeRoots; +-private: +- const bool _write; +-public: +- ShenandoahCodeRootsLock(bool write) : _write(write) { +- ShenandoahCodeRoots::acquire_lock(write); +- } +- +- ~ShenandoahCodeRootsLock() { +- ShenandoahCodeRoots::release_lock(_write); +- } +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,163 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "runtime/atomic.hpp" +-#include "services/memTracker.hpp" +-#include "utilities/copy.hpp" +- +-ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) : +- _map_size(heap->num_regions()), +- _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()), +- _map_space(space), +- _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)), +- _biased_cset_map(_map_space.base()), +- _heap(heap), +- _garbage(0), +- _used(0), +- _region_count(0), +- _current_index(0) { +- +- // The collection set map is reserved to cover the entire heap *and* zero addresses. +- // This is needed to accept in-cset checks for both heap oops and NULLs, freeing +- // high-performance code from checking for NULL first. 
+- // +- // Since heap_base can be far away, committing the entire map would waste memory. +- // Therefore, we only commit the parts that are needed to operate: the heap view, +- // and the zero page. +- // +- // Note: we could instead commit the entire map, and piggyback on OS virtual memory +- // subsystem for mapping not-yet-written-to pages to a single physical backing page, +- // but this is not guaranteed, and would confuse NMT and other memory accounting tools. +- +- MemTracker::record_virtual_memory_type(_map_space.base(), mtGC); +- +- size_t page_size = (size_t)os::vm_page_size(); +- +- if (!_map_space.special()) { +- // Commit entire pages that cover the heap cset map. +- char* bot_addr = (char*)align_ptr_down(_cset_map, page_size); +- char* top_addr = (char*)align_ptr_up(_cset_map + _map_size, page_size); +- os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false, +- "Unable to commit collection set bitmap: heap"); +- +- // Commit the zero page, if not yet covered by heap cset map. +- if (bot_addr != _biased_cset_map) { +- os::commit_memory_or_exit(_biased_cset_map, page_size, false, +- "Unable to commit collection set bitmap: zero page"); +- } +- } +- +- Copy::zero_to_bytes(_cset_map, _map_size); +- Copy::zero_to_bytes(_biased_cset_map, page_size); +-} +- +-void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- assert(Thread::current()->is_VM_thread(), "Must be VMThread"); +- assert(!is_in(r), "Already in collection set"); +- _cset_map[r->index()] = 1; +- _region_count++; +- _garbage += r->garbage(); +- _used += r->used(); +- +- // Update the region status too. State transition would be checked internally. 
+- r->make_cset(); +-} +- +-void ShenandoahCollectionSet::clear() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- Copy::zero_to_bytes(_cset_map, _map_size); +- +-#ifdef ASSERT +- for (size_t index = 0; index < _heap->num_regions(); index ++) { +- assert (!_heap->get_region(index)->is_cset(), "should have been cleared before"); +- } +-#endif +- +- _garbage = 0; +- _used = 0; +- +- _region_count = 0; +- _current_index = 0; +-} +- +-ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { +- size_t num_regions = _heap->num_regions(); +- if (_current_index >= (jint)num_regions) { +- return NULL; +- } +- +- jint saved_current = _current_index; +- size_t index = (size_t)saved_current; +- +- while(index < num_regions) { +- if (is_in(index)) { +- jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current); +- assert(cur >= (jint)saved_current, "Must move forward"); +- if (cur == saved_current) { +- assert(is_in(index), "Invariant"); +- return _heap->get_region(index); +- } else { +- index = (size_t)cur; +- saved_current = cur; +- } +- } else { +- index ++; +- } +- } +- return NULL; +-} +- +-ShenandoahHeapRegion* ShenandoahCollectionSet::next() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- assert(Thread::current()->is_VM_thread(), "Must be VMThread"); +- size_t num_regions = _heap->num_regions(); +- for (size_t index = (size_t)_current_index; index < num_regions; index ++) { +- if (is_in(index)) { +- _current_index = (jint)(index + 1); +- return _heap->get_region(index); +- } +- } +- +- return NULL; +-} +- +-void ShenandoahCollectionSet::print_on(outputStream* out) const { +- out->print_cr("Collection Set : " SIZE_FORMAT "", count()); +- +- debug_only(size_t regions = 0;) +- for (size_t index = 0; index < _heap->num_regions(); index ++) { +- if (is_in(index)) { +- _heap->get_region(index)->print_on(out); +- debug_only(regions ++;) +- } +- } +- assert(regions == 
count(), "Must match"); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,91 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP +- +-#include "memory/allocation.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +- +-class ShenandoahCollectionSet : public CHeapObj { +- friend class ShenandoahHeap; +-private: +- size_t const _map_size; +- size_t const _region_size_bytes_shift; +- ReservedSpace _map_space; +- char* const _cset_map; +- // Bias cset map's base address for fast test if an oop is in cset +- char* const _biased_cset_map; +- +- ShenandoahHeap* const _heap; +- +- size_t _garbage; +- size_t _used; +- size_t _region_count; +- +- shenandoah_padding(0); +- volatile jint _current_index; +- shenandoah_padding(1); +- +-public: +- ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base); +- +- // Add region to collection set +- void add_region(ShenandoahHeapRegion* r); +- +- // MT version +- ShenandoahHeapRegion* claim_next(); +- +- // Single-thread version +- ShenandoahHeapRegion* next(); +- +- size_t count() const { return _region_count; } +- bool is_empty() const { return _region_count == 0; } +- +- void clear_current_index() { +- _current_index = 0; +- } +- +- inline bool is_in(ShenandoahHeapRegion* r) const; +- inline bool is_in(size_t region_idx) const; +- inline bool is_in(oop obj) const; +- inline bool is_in_loc(void* loc) const; +- +- void print_on(outputStream* out) const; +- +- size_t used() const { return _used; } +- size_t garbage() const { return _garbage; } +- void clear(); +- +-private: +- char* map_address() const { +- return _cset_map; +- } +- char* biased_map_address() const { +- return _biased_cset_map; +- } +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_HPP +diff -uNr 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,53 +0,0 @@ +-/* +- * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +- +-bool ShenandoahCollectionSet::is_in(size_t region_idx) const { +- assert(region_idx < _heap->num_regions(), "Sanity"); +- return _cset_map[region_idx] == 1; +-} +- +-bool ShenandoahCollectionSet::is_in(ShenandoahHeapRegion* r) const { +- return is_in(r->index()); +-} +- +-bool ShenandoahCollectionSet::is_in(oop p) const { +- shenandoah_assert_in_heap(NULL, p); +- return is_in_loc(cast_from_oop(p)); +-} +- +-bool ShenandoahCollectionSet::is_in_loc(void* p) const { +- assert(_heap->is_in(p), "Must be in the heap"); +- uintx index = ((uintx) p) >> _region_size_bytes_shift; +- // no need to subtract the bottom of the heap from p, +- // _biased_cset_map is biased +- return _biased_cset_map[index] == 1; +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTIONSET_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,205 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_interface/gcCause.hpp" +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +- +-ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() : +- _success_concurrent_gcs(0), +- _success_degenerated_gcs(0), +- _success_full_gcs(0), +- _alloc_failure_degenerated(0), +- _alloc_failure_full(0), +- _alloc_failure_degenerated_upgrade_to_full(0), +- _explicit_concurrent(0), +- _explicit_full(0), +- _implicit_concurrent(0), +- _implicit_full(0), +- _cycle_counter(0) { +- +- Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT); +- +- ShenandoahHeapRegion::setup_sizes(max_heap_byte_size()); +- +- initialize_all(); +- +- _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer(); +-} +- +-BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() { +- return BarrierSet::ShenandoahBarrierSet; +-} +- +-HeapWord* 
ShenandoahCollectorPolicy::mem_allocate_work(size_t size, +- bool is_tlab, +- bool* gc_overhead_limit_was_exceeded) { +- guarantee(false, "Not using this policy feature yet."); +- return NULL; +-} +- +-HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) { +- guarantee(false, "Not using this policy feature yet."); +- return NULL; +-} +- +-MetaWord* ShenandoahCollectorPolicy::satisfy_failed_metadata_allocation(ClassLoaderData *loader_data, +- size_t size, +- Metaspace::MetadataType mdtype) { +- MetaWord* result; +- +- ShenandoahHeap* sh = ShenandoahHeap::heap(); +- +- // Inform metaspace OOM to GC heuristics if class unloading is possible. +- ShenandoahHeuristics* h = sh->heuristics(); +- if (h->can_unload_classes()) { +- h->record_metaspace_oom(); +- } +- +- // Expand and retry allocation +- result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); +- if (result != NULL) { +- return result; +- } +- +- // Start full GC +- sh->collect(GCCause::_shenandoah_metadata_gc_clear_softrefs); +- +- // Retry allocation +- result = loader_data->metaspace_non_null()->allocate(size, mdtype); +- if (result != NULL) { +- return result; +- } +- +- // Expand and retry allocation +- result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype); +- if (result != NULL) { +- return result; +- } +- +- // Out of memory +- return NULL; +-} +- +-void ShenandoahCollectorPolicy::initialize_alignments() { +- // This is expected by our algorithm for ShenandoahHeap::heap_region_containing(). 
+- size_t align = ShenandoahHeapRegion::region_size_bytes(); +- if (UseLargePages) { +- align = MAX2(align, os::large_page_size()); +- } +- _space_alignment = align; +- _heap_alignment = align; +-} +- +-void ShenandoahCollectorPolicy::record_explicit_to_concurrent() { +- _explicit_concurrent++; +-} +- +-void ShenandoahCollectorPolicy::record_explicit_to_full() { +- _explicit_full++; +-} +- +-void ShenandoahCollectorPolicy::record_implicit_to_concurrent() { +- _implicit_concurrent++; +-} +- +-void ShenandoahCollectorPolicy::record_implicit_to_full() { +- _implicit_full++; +-} +- +-void ShenandoahCollectorPolicy::record_alloc_failure_to_full() { +- _alloc_failure_full++; +-} +- +-void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) { +- assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity"); +- _alloc_failure_degenerated++; +- _degen_points[point]++; +-} +- +-void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() { +- _alloc_failure_degenerated_upgrade_to_full++; +-} +- +-void ShenandoahCollectorPolicy::record_success_concurrent() { +- _success_concurrent_gcs++; +-} +- +-void ShenandoahCollectorPolicy::record_success_degenerated() { +- _success_degenerated_gcs++; +-} +- +-void ShenandoahCollectorPolicy::record_success_full() { +- _success_full_gcs++; +-} +- +-size_t ShenandoahCollectorPolicy::cycle_counter() const { +- return _cycle_counter; +-} +- +-void ShenandoahCollectorPolicy::record_cycle_start() { +- _cycle_counter++; +-} +- +-void ShenandoahCollectorPolicy::record_shutdown() { +- _in_shutdown.set(); +-} +- +-bool ShenandoahCollectorPolicy::is_at_shutdown() { +- return _in_shutdown.is_set(); +-} +- +-void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { +- out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle"); +- out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. 
Increase heap size,"); +- out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate"); +- out->print_cr("to avoid Degenerated and Full GC cycles."); +- out->cr(); +- +- out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs", _success_concurrent_gcs); +- out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_concurrent); +- out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly", _implicit_concurrent); +- out->cr(); +- +- out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs", _success_degenerated_gcs); +- out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated); +- for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) { +- if (_degen_points[c] > 0) { +- const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c); +- out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_points[c], desc); +- } +- } +- out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC", _alloc_failure_degenerated_upgrade_to_full); +- out->cr(); +- +- out->print_cr(SIZE_FORMAT_W(5) " Full GCs", _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full); +- out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly", _explicit_full); +- out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly", _implicit_full); +- out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full); +- out->print_cr(" " SIZE_FORMAT_W(5) " upgraded from Degenerated GC", _alloc_failure_degenerated_upgrade_to_full); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ 
-1,104 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP +- +-#include "memory/collectorPolicy.hpp" +-#include "gc_implementation/shared/gcTrace.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "utilities/ostream.hpp" +- +-class STWGCTimer; +-class ConcurrentGCTimer; +- +-class ShenandoahTracer : public GCTracer { +-public: +- ShenandoahTracer() : GCTracer(Shenandoah) {} +-}; +- +-class ShenandoahCollectorPolicy: public CollectorPolicy { +-private: +- size_t _success_concurrent_gcs; +- size_t _success_degenerated_gcs; +- size_t _success_full_gcs; +- size_t _alloc_failure_degenerated; +- size_t _alloc_failure_degenerated_upgrade_to_full; +- size_t _alloc_failure_full; +- size_t _explicit_concurrent; +- size_t _explicit_full; +- size_t _implicit_concurrent; +- size_t _implicit_full; +- size_t _degen_points[ShenandoahHeap::_DEGENERATED_LIMIT]; +- +- ShenandoahSharedFlag _in_shutdown; +- +- ShenandoahTracer* _tracer; +- +- size_t _cycle_counter; +- +-public: +- ShenandoahCollectorPolicy(); +- +- void post_heap_initialize() {}; +- +- BarrierSet::Name barrier_set_name(); +- +- HeapWord* mem_allocate_work(size_t size, +- bool is_tlab, +- bool* gc_overhead_limit_was_exceeded); +- +- HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab); +- +- MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, +- size_t size, +- Metaspace::MetadataType mdtype); +- +- void initialize_alignments(); +- +- // TODO: This is different from gc_end: that one encompasses one VM operation. +- // These two encompass the entire cycle. 
+- void record_cycle_start(); +- +- void record_success_concurrent(); +- void record_success_degenerated(); +- void record_success_full(); +- void record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point); +- void record_alloc_failure_to_full(); +- void record_degenerated_upgrade_to_full(); +- void record_explicit_to_concurrent(); +- void record_explicit_to_full(); +- void record_implicit_to_concurrent(); +- void record_implicit_to_full(); +- +- void record_shutdown(); +- bool is_at_shutdown(); +- +- ShenandoahTracer* tracer() {return _tracer;} +- +- size_t cycle_counter() const; +- +- void print_gc_stats(outputStream* out) const; +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,1022 +0,0 @@ +-/* +- * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "classfile/symbolTable.hpp" +-#include "classfile/systemDictionary.hpp" +-#include "code/codeCache.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp" +- +-#include "memory/referenceProcessor.hpp" +-#include "memory/iterator.inline.hpp" +-#include "memory/metaspace.hpp" +-#include "memory/resourceArea.hpp" +-#include "oops/oop.inline.hpp" +- +-template +-class ShenandoahInitMarkRootsClosure : public OopClosure { +-private: +- ShenandoahObjToScanQueue* _queue; +- ShenandoahHeap* _heap; +- ShenandoahStrDedupQueue* _dedup_queue; +- ShenandoahMarkingContext* const _mark_context; +- +- template +- inline void do_oop_nv(T* p) { +- 
ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context, _dedup_queue); +- } +- +-public: +- ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) : +- _queue(q), +- _heap(ShenandoahHeap::heap()), +- _dedup_queue(dq), +- _mark_context(_heap->marking_context()) {}; +- +- void do_oop(narrowOop* p) { do_oop_nv(p); } +- void do_oop(oop* p) { do_oop_nv(p); } +-}; +- +-ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : +- MetadataAwareOopClosure(rp), +- _queue(q), +- _dedup_queue(NULL), +- _heap(ShenandoahHeap::heap()), +- _mark_context(_heap->marking_context()) +-{ } +- +-ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) : +- MetadataAwareOopClosure(rp), +- _queue(q), +- _dedup_queue(dq), +- _heap(ShenandoahHeap::heap()), +- _mark_context(_heap->marking_context()) +-{ } +- +-template +-class ShenandoahInitMarkRootsTask : public AbstractGangTask { +-private: +- ShenandoahAllRootScanner* _rp; +-public: +- ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp) : +- AbstractGangTask("Shenandoah init mark roots task"), +- _rp(rp) { +- } +- +- void work(uint worker_id) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- ShenandoahParallelWorkerSession worker_session(worker_id); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues(); +- assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id)); +- +- ShenandoahObjToScanQueue* q = queues->queue(worker_id); +- ShenandoahInitMarkRootsClosure mark_cl(q, NULL); +- do_work(heap, &mark_cl, worker_id); +- } +- +-private: +- void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) { +- // The rationale for selecting the roots to scan is as 
follows: +- // a. With unload_classes = true, we only want to scan the actual strong roots from the +- // code cache. This will allow us to identify the dead classes, unload them, *and* +- // invalidate the relevant code cache blobs. This could be only done together with +- // class unloading. +- // b. With unload_classes = false, we have to nominally retain all the references from code +- // cache, because there could be the case of embedded class/oop in the generated code, +- // which we will never visit during mark. Without code cache invalidation, as in (a), +- // we risk executing that code cache blob, and crashing. +- // c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here, +- // and instead do that in concurrent phase under the relevant lock. This saves init mark +- // pause time. +- ResourceMark m; +- if (heap->unload_classes()) { +- _rp->strong_roots_do(worker_id, oops); +- } else { +- _rp->roots_do(worker_id, oops); +- } +- } +-}; +- +-class ShenandoahUpdateRootsTask : public AbstractGangTask { +-private: +- ShenandoahRootUpdater* _root_updater; +-public: +- ShenandoahUpdateRootsTask(ShenandoahRootUpdater* _root_updater) : +- AbstractGangTask("Shenandoah update roots task"), +- _root_updater(_root_updater) { +- } +- +- void work(uint worker_id) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- ShenandoahParallelWorkerSession worker_session(worker_id); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahUpdateRefsClosure cl; +- ShenandoahIsAliveSelector is_alive; +- _root_updater->roots_do(worker_id, is_alive.is_alive_closure(), &cl); +- } +-}; +- +-class ShenandoahConcurrentMarkingTask : public AbstractGangTask { +-private: +- ShenandoahConcurrentMark* _cm; +- ShenandoahTaskTerminator* _terminator; +- +-public: +- ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) : +- AbstractGangTask("Root Region Scan"), _cm(cm), 
_terminator(terminator) { +- } +- +- void work(uint worker_id) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahConcurrentWorkerSession worker_session(worker_id); +- ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id); +- ReferenceProcessor* rp; +- if (heap->process_references()) { +- rp = heap->ref_processor(); +- shenandoah_assert_rp_isalive_installed(); +- } else { +- rp = NULL; +- } +- +- _cm->concurrent_scan_code_roots(worker_id, rp); +- _cm->mark_loop(worker_id, _terminator, rp, +- true, // cancellable +- ShenandoahStringDedup::is_enabled()); // perform string dedup +- } +-}; +- +-class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure { +-private: +- ShenandoahSATBBufferClosure* _satb_cl; +- OopClosure* const _cl; +- MarkingCodeBlobClosure* _code_cl; +- int _thread_parity; +- +-public: +- ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl, OopClosure* cl, MarkingCodeBlobClosure* code_cl) : +- _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl), +- _thread_parity(SharedHeap::heap()->strong_roots_parity()) {} +- +- void do_thread(Thread* thread) { +- if (thread->is_Java_thread()) { +- if (thread->claim_oops_do(true, _thread_parity)) { +- JavaThread* jt = (JavaThread*)thread; +- jt->satb_mark_queue().apply_closure_and_empty(_satb_cl); +- if (_cl != NULL) { +- ResourceMark rm; +- jt->oops_do(_cl, NULL, _code_cl); +- } else if (_code_cl != NULL) { +- // In theory it should not be neccessary to explicitly walk the nmethods to find roots for concurrent marking +- // however the liveness of oops reachable from nmethods have very complex lifecycles: +- // * Alive if on the stack of an executing method +- // * Weakly reachable otherwise +- // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be +- // live by the SATB invariant but other oops recorded in nmethods may behave differently. 
+- jt->nmethods_do(_code_cl); +- } +- } +- } else if (thread->is_VM_thread()) { +- if (thread->claim_oops_do(true, _thread_parity)) { +- JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl); +- } +- } +- } +-}; +- +-class ShenandoahFinalMarkingTask : public AbstractGangTask { +-private: +- ShenandoahConcurrentMark* _cm; +- ShenandoahTaskTerminator* _terminator; +- bool _dedup_string; +- ShenandoahSharedFlag _claimed_syncroots; +- +-public: +- ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) : +- AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) { +- } +- +- void work(uint worker_id) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- ReferenceProcessor* rp; +- if (heap->process_references()) { +- rp = heap->ref_processor(); +- shenandoah_assert_rp_isalive_installed(); +- } else { +- rp = NULL; +- } +- +- // First drain remaining SATB buffers. +- // Notice that this is not strictly necessary for mark-compact. But since +- // it requires a StrongRootsScope around the task, we need to claim the +- // threads, and performance-wise it doesn't really matter. Adds about 1ms to +- // full-gc. +- { +- ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id); +- ShenandoahStrDedupQueue *dq = NULL; +- if (ShenandoahStringDedup::is_enabled()) { +- dq = ShenandoahStringDedup::queue(worker_id); +- } +- ShenandoahSATBBufferClosure cl(q, dq); +- SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); +- while (satb_mq_set.apply_closure_to_completed_buffer(&cl)); +- bool do_nmethods = heap->unload_classes(); +- if (heap->has_forwarded_objects()) { +- ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp); +- MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations); +- ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, +- ShenandoahStoreValEnqueueBarrier ? 
&resolve_mark_cl : NULL, +- do_nmethods ? &blobsCl : NULL); +- Threads::threads_do(&tc); +- if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) { +- ObjectSynchronizer::oops_do(&resolve_mark_cl); +- } +- } else { +- ShenandoahMarkRefsClosure mark_cl(q, rp); +- MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations); +- ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl, +- ShenandoahStoreValEnqueueBarrier ? &mark_cl : NULL, +- do_nmethods ? &blobsCl : NULL); +- Threads::threads_do(&tc); +- if (ShenandoahStoreValEnqueueBarrier && _claimed_syncroots.try_set()) { +- ObjectSynchronizer::oops_do(&mark_cl); +- } +- } +- } +- +- if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) { +- // Full GC does not execute concurrent cycle. +- // Degenerated cycle may bypass concurrent cycle. +- // So code roots might not be scanned, let's scan here. +- _cm->concurrent_scan_code_roots(worker_id, rp); +- } +- +- _cm->mark_loop(worker_id, _terminator, rp, +- false, // not cancellable +- _dedup_string); +- +- assert(_cm->task_queues()->is_empty(), "Should be empty"); +- } +-}; +- +-void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) { +- assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- +- ShenandoahGCPhase phase(root_phase); +- +- WorkGang* workers = _heap->workers(); +- uint nworkers = workers->active_workers(); +- +- assert(nworkers <= task_queues()->size(), "Just check"); +- +- ShenandoahAllRootScanner root_proc(root_phase); +- TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); +- task_queues()->reserve(nworkers); +- +- if (_heap->has_forwarded_objects()) { +- ShenandoahInitMarkRootsTask mark_roots(&root_proc); +- workers->run_task(&mark_roots); +- } else { +- // No need to update references, which means the heap is stable. 
+- // Can save time not walking through forwarding pointers. +- ShenandoahInitMarkRootsTask mark_roots(&root_proc); +- workers->run_task(&mark_roots); +- } +- +- clear_claim_codecache(); +-} +- +-void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots || +- root_phase == ShenandoahPhaseTimings::degen_gc_update_roots, +- "Only for these phases"); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahGCPhase phase(root_phase); +- +- COMPILER2_PRESENT(DerivedPointerTable::clear()); +- +- uint nworkers = heap->workers()->active_workers(); +- +- ShenandoahRootUpdater root_updater(root_phase); +- ShenandoahUpdateRootsTask update_roots(&root_updater); +- _heap->workers()->run_task(&update_roots); +- +- COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); +-} +- +-class ShenandoahUpdateThreadRootsTask : public AbstractGangTask { +-private: +- SharedHeap::StrongRootsScope _srs; +- ShenandoahPhaseTimings::Phase _phase; +- ShenandoahGCWorkerPhase _worker_phase; +-public: +- ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) : +- AbstractGangTask("Shenandoah Update Thread Roots"), +- _srs(ShenandoahHeap::heap(), true), +- _phase(phase), +- _worker_phase(phase) {} +- +- void work(uint worker_id) { +- ShenandoahUpdateRefsClosure cl; +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id); +- ResourceMark rm; +- Threads::possibly_parallel_oops_do(&cl, NULL, NULL); +- } +-}; +- +-void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- +- ShenandoahGCPhase phase(root_phase); +- +- COMPILER2_PRESENT(DerivedPointerTable::clear()); +- +- WorkGang* workers = _heap->workers(); +- bool 
is_par = workers->active_workers() > 1; +- +- ShenandoahUpdateThreadRootsTask task(is_par, root_phase); +- workers->run_task(&task); +- +- COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); +-} +- +-void ShenandoahConcurrentMark::initialize(uint workers) { +- _heap = ShenandoahHeap::heap(); +- +- uint num_queues = MAX2(workers, 1U); +- +- _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues); +- +- for (uint i = 0; i < num_queues; ++i) { +- ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); +- task_queue->initialize(); +- _task_queues->register_queue(i, task_queue); +- } +- +- JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize); +-} +- +-void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) { +- if (claim_codecache()) { +- ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id); +- if (!_heap->unload_classes()) { +- MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); +- // TODO: We can not honor StringDeduplication here, due to lock ranking +- // inversion. So, we may miss some deduplication candidates. 
+- if (_heap->has_forwarded_objects()) { +- ShenandoahMarkResolveRefsClosure cl(q, rp); +- CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations); +- CodeCache::blobs_do(&blobs); +- } else { +- ShenandoahMarkRefsClosure cl(q, rp); +- CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations); +- CodeCache::blobs_do(&blobs); +- } +- } +- } +-} +- +-void ShenandoahConcurrentMark::mark_from_roots() { +- WorkGang* workers = _heap->workers(); +- uint nworkers = workers->active_workers(); +- +- ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark); +- +- if (_heap->process_references()) { +- ReferenceProcessor* rp = _heap->ref_processor(); +- rp->set_active_mt_degree(nworkers); +- +- // enable ("weak") refs discovery +- rp->enable_discovery(true /*verify_no_refs*/, true); +- rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs()); +- } +- +- shenandoah_assert_rp_isalive_not_installed(); +- ShenandoahIsAliveSelector is_alive; +- ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure()); +- +- task_queues()->reserve(nworkers); +- +- { +- ShenandoahTaskTerminator terminator(nworkers, task_queues()); +- ShenandoahConcurrentMarkingTask task(this, &terminator); +- workers->run_task(&task); +- } +- +- assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled"); +- if (!_heap->cancelled_gc()) { +- TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats()); +- } +- +- TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); +-} +- +-void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- +- uint nworkers = _heap->workers()->active_workers(); +- +- // Finally mark everything else we've got in our queues during the previous steps. +- // It does two different things for concurrent vs. 
mark-compact GC: +- // - For concurrent GC, it starts with empty task queues, drains the remaining +- // SATB buffers, and then completes the marking closure. +- // - For mark-compact GC, it starts out with the task queues seeded by initial +- // root scan, and completes the closure, thus marking through all live objects +- // The implementation is the same, so it's shared here. +- { +- ShenandoahGCPhase phase(full_gc ? +- ShenandoahPhaseTimings::full_gc_mark_finish_queues : +- ShenandoahPhaseTimings::finish_queues); +- task_queues()->reserve(nworkers); +- +- shenandoah_assert_rp_isalive_not_installed(); +- ShenandoahIsAliveSelector is_alive; +- ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure()); +- +- SharedHeap::StrongRootsScope scope(_heap, true); +- ShenandoahTaskTerminator terminator(nworkers, task_queues()); +- ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled()); +- _heap->workers()->run_task(&task); +- } +- +- assert(task_queues()->is_empty(), "Should be empty"); +- +- // Marking is completed, deactivate SATB barrier if it is active +- _heap->complete_marking(); +- +- // When we're done marking everything, we process weak references. +- // It is not obvious, but reference processing actually calls +- // JNIHandle::weak_oops_do() to cleanup JNI and JVMTI weak oops. +- if (_heap->process_references()) { +- weak_refs_work(full_gc); +- } +- +- // And finally finish class unloading +- if (_heap->unload_classes()) { +- // We don't mark through weak roots with class unloading cycle, +- // so process them here. 
+- weak_roots_work(full_gc); +- _heap->unload_classes_and_cleanup_tables(full_gc); +- } else if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::parallel_cleanup(); +- } +- assert(task_queues()->is_empty(), "Should be empty"); +- TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats()); +- TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats()); +- +- // Resize Metaspace +- MetaspaceGC::compute_new_size(); +-} +- +-// Weak Reference Closures +-class ShenandoahCMDrainMarkingStackClosure: public VoidClosure { +- uint _worker_id; +- ShenandoahTaskTerminator* _terminator; +- bool _reset_terminator; +- +-public: +- ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false): +- _worker_id(worker_id), +- _terminator(t), +- _reset_terminator(reset_terminator) { +- } +- +- void do_void() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- +- ShenandoahHeap* sh = ShenandoahHeap::heap(); +- ShenandoahConcurrentMark* scm = sh->concurrent_mark(); +- assert(sh->process_references(), "why else would we be here?"); +- ReferenceProcessor* rp = sh->ref_processor(); +- +- shenandoah_assert_rp_isalive_installed(); +- +- scm->mark_loop(_worker_id, _terminator, rp, +- false, // not cancellable +- false); // do not do strdedup +- +- if (_reset_terminator) { +- _terminator->reset_for_reuse(); +- } +- } +-}; +- +-class ShenandoahCMKeepAliveClosure : public OopClosure { +-private: +- ShenandoahObjToScanQueue* _queue; +- ShenandoahHeap* _heap; +- ShenandoahMarkingContext* const _mark_context; +- +- template +- inline void do_oop_nv(T* p) { +- ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context); +- } +- +-public: +- ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) : +- _queue(q), +- _heap(ShenandoahHeap::heap()), +- _mark_context(_heap->marking_context()) {} +- +- void do_oop(narrowOop* p) { do_oop_nv(p); } +- void do_oop(oop* p) { 
do_oop_nv(p); } +-}; +- +-class ShenandoahCMKeepAliveUpdateClosure : public OopClosure { +-private: +- ShenandoahObjToScanQueue* _queue; +- ShenandoahHeap* _heap; +- ShenandoahMarkingContext* const _mark_context; +- +- template +- inline void do_oop_nv(T* p) { +- ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context); +- } +- +-public: +- ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : +- _queue(q), +- _heap(ShenandoahHeap::heap()), +- _mark_context(_heap->marking_context()) {} +- +- void do_oop(narrowOop* p) { do_oop_nv(p); } +- void do_oop(oop* p) { do_oop_nv(p); } +-}; +- +-class ShenandoahRefProcTaskProxy : public AbstractGangTask { +-private: +- AbstractRefProcTaskExecutor::ProcessTask& _proc_task; +- ShenandoahTaskTerminator* _terminator; +- +-public: +- ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task, +- ShenandoahTaskTerminator* t) : +- AbstractGangTask("Process reference objects in parallel"), +- _proc_task(proc_task), +- _terminator(t) { +- } +- +- void work(uint worker_id) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator); +- if (heap->has_forwarded_objects()) { +- ShenandoahForwardedIsAliveClosure is_alive; +- ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id)); +- _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); +- } else { +- ShenandoahIsAliveClosure is_alive; +- ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id)); +- _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); +- } +- } +-}; +- +-class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask { +-private: +- AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task; +- +-public: +- ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) 
: +- AbstractGangTask("Enqueue reference objects in parallel"), +- _enqueue_task(enqueue_task) { +- } +- +- void work(uint worker_id) { +- _enqueue_task.work(worker_id); +- } +-}; +- +-class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor { +-private: +- WorkGang* _workers; +- +-public: +- ShenandoahRefProcTaskExecutor(WorkGang* workers) : +- _workers(workers) { +- } +- +- // Executes a task using worker threads. +- void execute(ProcessTask& task) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- +- // Shortcut execution if task is empty. +- // This should be replaced with the generic ReferenceProcessor shortcut, +- // see JDK-8181214, JDK-8043575, JDK-6938732. +- if (task.is_empty()) { +- return; +- } +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahConcurrentMark* cm = heap->concurrent_mark(); +- uint nworkers = _workers->active_workers(); +- cm->task_queues()->reserve(nworkers); +- +- ShenandoahTaskTerminator terminator(nworkers, cm->task_queues()); +- ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator); +- _workers->run_task(&proc_task_proxy); +- } +- +- void execute(EnqueueTask& task) { +- ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task); +- _workers->run_task(&enqueue_task_proxy); +- } +-}; +- +-void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) { +- assert(_heap->process_references(), "sanity"); +- +- ShenandoahPhaseTimings::Phase phase_root = +- full_gc ? +- ShenandoahPhaseTimings::full_gc_weakrefs : +- ShenandoahPhaseTimings::weakrefs; +- +- ShenandoahGCPhase phase(phase_root); +- +- ReferenceProcessor* rp = _heap->ref_processor(); +- weak_refs_work_doit(full_gc); +- +- rp->verify_no_references_recorded(); +- assert(!rp->discovery_enabled(), "Post condition"); +- +-} +- +-void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) { +- ReferenceProcessor* rp = _heap->ref_processor(); +- +- ShenandoahPhaseTimings::Phase phase_process = +- full_gc ? 
+- ShenandoahPhaseTimings::full_gc_weakrefs_process : +- ShenandoahPhaseTimings::weakrefs_process; +- +- ShenandoahPhaseTimings::Phase phase_enqueue = +- full_gc ? +- ShenandoahPhaseTimings::full_gc_weakrefs_enqueue : +- ShenandoahPhaseTimings::weakrefs_enqueue; +- +- shenandoah_assert_rp_isalive_not_installed(); +- ShenandoahIsAliveSelector is_alive; +- ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure()); +- +- WorkGang* workers = _heap->workers(); +- uint nworkers = workers->active_workers(); +- +- rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs()); +- rp->set_active_mt_degree(nworkers); +- +- assert(task_queues()->is_empty(), "Should be empty"); +- +- // complete_gc and keep_alive closures instantiated here are only needed for +- // single-threaded path in RP. They share the queue 0 for tracking work, which +- // simplifies implementation. Since RP may decide to call complete_gc several +- // times, we need to be able to reuse the terminator. 
+- uint serial_worker_id = 0; +- ShenandoahTaskTerminator terminator(1, task_queues()); +- ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true); +- +- ShenandoahRefProcTaskExecutor executor(workers); +- +- { +- ShenandoahGCPhase phase(phase_process); +- +- if (_heap->has_forwarded_objects()) { +- ShenandoahForwardedIsAliveClosure is_alive; +- ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id)); +- rp->process_discovered_references(&is_alive, &keep_alive, +- &complete_gc, &executor, +- NULL, _heap->shenandoah_policy()->tracer()->gc_id()); +- } else { +- ShenandoahIsAliveClosure is_alive; +- ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id)); +- rp->process_discovered_references(&is_alive, &keep_alive, +- &complete_gc, &executor, +- NULL, _heap->shenandoah_policy()->tracer()->gc_id()); +- } +- +- assert(task_queues()->is_empty(), "Should be empty"); +- } +- +- { +- ShenandoahGCPhase phase(phase_enqueue); +- rp->enqueue_discovered_references(&executor); +- } +-} +- +-class DoNothingClosure: public OopClosure { +- public: +- void do_oop(oop* p) {} +- void do_oop(narrowOop* p) {} +-}; +- +-class ShenandoahWeakUpdateClosure : public OopClosure { +-private: +- ShenandoahHeap* const _heap; +- +- template +- inline void do_oop_work(T* p) { +- oop o = _heap->maybe_update_with_forwarded(p); +- shenandoah_assert_marked_except(p, o, o == NULL); +- } +- +-public: +- ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {} +- +- void do_oop(narrowOop* p) { do_oop_work(p); } +- void do_oop(oop* p) { do_oop_work(p); } +-}; +- +-void ShenandoahConcurrentMark::weak_roots_work(bool full_gc) { +- ShenandoahPhaseTimings::Phase phase = full_gc ? 
+- ShenandoahPhaseTimings::full_gc_weak_roots : +- ShenandoahPhaseTimings::weak_roots; +- ShenandoahGCPhase root_phase(phase); +- ShenandoahGCWorkerPhase worker_phase(phase); +- +- ShenandoahIsAliveSelector is_alive; +- DoNothingClosure cl; +- ShenandoahWeakRoots weak_roots(phase); +- weak_roots.weak_oops_do(is_alive.is_alive_closure(), &cl, 0); +-} +- +-class ShenandoahCancelledGCYieldClosure : public YieldClosure { +-private: +- ShenandoahHeap* const _heap; +-public: +- ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}; +- virtual bool should_return() { return _heap->cancelled_gc(); } +-}; +- +-class ShenandoahPrecleanCompleteGCClosure : public VoidClosure { +-public: +- void do_void() { +- ShenandoahHeap* sh = ShenandoahHeap::heap(); +- ShenandoahConcurrentMark* scm = sh->concurrent_mark(); +- assert(sh->process_references(), "why else would we be here?"); +- ShenandoahTaskTerminator terminator(1, scm->task_queues()); +- +- ReferenceProcessor* rp = sh->ref_processor(); +- shenandoah_assert_rp_isalive_installed(); +- +- scm->mark_loop(0, &terminator, rp, +- false, // not cancellable +- false); // do not do strdedup +- } +-}; +- +-class ShenandoahPrecleanTask : public AbstractGangTask { +-private: +- ReferenceProcessor* _rp; +- +-public: +- ShenandoahPrecleanTask(ReferenceProcessor* rp) : +- AbstractGangTask("Precleaning task"), +- _rp(rp) {} +- +- void work(uint worker_id) { +- assert(worker_id == 0, "The code below is single-threaded, only one worker is expected"); +- ShenandoahParallelWorkerSession worker_session(worker_id); +- +- ShenandoahHeap* sh = ShenandoahHeap::heap(); +- assert(!sh->has_forwarded_objects(), "No forwarded objects expected here"); +- +- ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id); +- +- ShenandoahCancelledGCYieldClosure yield; +- ShenandoahPrecleanCompleteGCClosure complete_gc; +- +- ShenandoahIsAliveClosure is_alive; +- ShenandoahCMKeepAliveClosure keep_alive(q); +- ResourceMark rm; +- 
_rp->preclean_discovered_references(&is_alive, &keep_alive, +- &complete_gc, &yield, +- NULL, sh->shenandoah_policy()->tracer()->gc_id()); +- } +-}; +- +-void ShenandoahConcurrentMark::preclean_weak_refs() { +- // Pre-cleaning weak references before diving into STW makes sense at the +- // end of concurrent mark. This will filter out the references which referents +- // are alive. Note that ReferenceProcessor already filters out these on reference +- // discovery, and the bulk of work is done here. This phase processes leftovers +- // that missed the initial filtering, i.e. when referent was marked alive after +- // reference was discovered by RP. +- +- assert(_heap->process_references(), "sanity"); +- +- ReferenceProcessor* rp = _heap->ref_processor(); +- +- assert(task_queues()->is_empty(), "Should be empty"); +- +- ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false); +- +- shenandoah_assert_rp_isalive_not_installed(); +- ShenandoahIsAliveSelector is_alive; +- ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure()); +- +- // Execute precleaning in the worker thread: it will give us GCLABs, String dedup +- // queues and other goodies. When upstream ReferenceProcessor starts supporting +- // parallel precleans, we can extend this to more threads. +- WorkGang* workers = _heap->workers(); +- uint nworkers = workers->active_workers(); +- assert(nworkers == 1, "This code uses only a single worker"); +- task_queues()->reserve(nworkers); +- +- ShenandoahPrecleanTask task(rp); +- workers->run_task(&task); +- +- assert(task_queues()->is_empty(), "Should be empty"); +-} +- +-void ShenandoahConcurrentMark::cancel() { +- // Clean up marking stacks. +- ShenandoahObjToScanQueueSet* queues = task_queues(); +- queues->clear(); +- +- // Cancel SATB buffers. 
+- JavaThread::satb_mark_queue_set().abandon_partial_marking(); +-} +- +-ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) { +- assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id)); +- return _task_queues->queue(worker_id); +-} +- +-template +-void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp, +- bool strdedup) { +- ShenandoahObjToScanQueue* q = get_queue(w); +- +- ShenandoahLiveData* ld = _heap->get_liveness_cache(w); +- +- // TODO: We can clean up this if we figure out how to do templated oop closures that +- // play nice with specialized_oop_iterators. +- if (_heap->unload_classes()) { +- if (_heap->has_forwarded_objects()) { +- if (strdedup) { +- ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); +- ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp); +- mark_loop_work(&cl, ld, w, t); +- } else { +- ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp); +- mark_loop_work(&cl, ld, w, t); +- } +- } else { +- if (strdedup) { +- ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); +- ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp); +- mark_loop_work(&cl, ld, w, t); +- } else { +- ShenandoahMarkRefsMetadataClosure cl(q, rp); +- mark_loop_work(&cl, ld, w, t); +- } +- } +- } else { +- if (_heap->has_forwarded_objects()) { +- if (strdedup) { +- ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); +- ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp); +- mark_loop_work(&cl, ld, w, t); +- } else { +- ShenandoahMarkUpdateRefsClosure cl(q, rp); +- mark_loop_work(&cl, ld, w, t); +- } +- } else { +- if (strdedup) { +- ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w); +- ShenandoahMarkRefsDedupClosure cl(q, dq, rp); +- mark_loop_work(&cl, ld, w, t); +- } else { +- ShenandoahMarkRefsClosure cl(q, rp); +- mark_loop_work(&cl, ld, w, t); +- } +- } +- } +- +- 
_heap->flush_liveness_cache(w); +-} +- +-template +-void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) { +- int seed = 17; +- uintx stride = ShenandoahMarkLoopStride; +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahObjToScanQueueSet* queues = task_queues(); +- ShenandoahObjToScanQueue* q; +- ShenandoahMarkTask t; +- +- /* +- * Process outstanding queues, if any. +- * +- * There can be more queues than workers. To deal with the imbalance, we claim +- * extra queues first. Since marking can push new tasks into the queue associated +- * with this worker id, we come back to process this queue in the normal loop. +- */ +- assert(queues->get_reserved() == heap->workers()->active_workers(), +- "Need to reserve proper number of queues"); +- +- q = queues->claim_next(); +- while (q != NULL) { +- if (CANCELLABLE && heap->cancelled_gc()) { +- return; +- } +- +- for (uint i = 0; i < stride; i++) { +- if (q->pop(t)) { +- do_task(q, cl, live_data, &t); +- } else { +- assert(q->is_empty(), "Must be empty"); +- q = queues->claim_next(); +- break; +- } +- } +- } +- +- q = get_queue(worker_id); +- +- ShenandoahStrDedupQueue *dq = NULL; +- if (ShenandoahStringDedup::is_enabled()) { +- dq = ShenandoahStringDedup::queue(worker_id); +- } +- +- ShenandoahSATBBufferClosure drain_satb(q, dq); +- SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); +- +- /* +- * Normal marking loop: +- */ +- while (true) { +- if (CANCELLABLE && heap->cancelled_gc()) { +- return; +- } +- +- while (satb_mq_set.completed_buffers_num() > 0) { +- satb_mq_set.apply_closure_to_completed_buffer(&drain_satb); +- } +- +- uint work = 0; +- for (uint i = 0; i < stride; i++) { +- if (q->pop(t) || +- queues->steal(worker_id, &seed, t)) { +- do_task(q, cl, live_data, &t); +- work++; +- } else { +- break; +- } +- } +- +- if (work == 0) { +- // No work encountered in current stride, try to terminate. 
+- ShenandoahTerminatorTerminator tt(heap); +- if (terminator->offer_termination(&tt)) return; +- } +- } +-} +- +-bool ShenandoahConcurrentMark::claim_codecache() { +- return _claimed_codecache.try_set(); +-} +- +-void ShenandoahConcurrentMark::clear_claim_codecache() { +- _claimed_codecache.unset(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,112 +0,0 @@ +-/* +- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP +- +-#include "utilities/taskqueue.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +- +-class ShenandoahStrDedupQueue; +- +-class ShenandoahConcurrentMark: public CHeapObj { +-private: +- ShenandoahHeap* _heap; +- ShenandoahObjToScanQueueSet* _task_queues; +- +-public: +- void initialize(uint workers); +- void cancel(); +- +-// ---------- Marking loop and tasks +-// +-private: +- template +- inline void do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task); +- +- template +- inline void do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop array); +- +- template +- inline void do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop array, int chunk, int pow); +- +- inline void count_liveness(ShenandoahLiveData* live_data, oop obj); +- +- template +- void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, ShenandoahTaskTerminator *t); +- +- template +- void mark_loop_prework(uint worker_id, ShenandoahTaskTerminator *terminator, ReferenceProcessor *rp, bool strdedup); +- +-public: +- void mark_loop(uint worker_id, ShenandoahTaskTerminator* terminator, ReferenceProcessor *rp, +- bool cancellable, bool strdedup) { +- if (cancellable) { +- mark_loop_prework(worker_id, terminator, rp, strdedup); +- } else { +- mark_loop_prework(worker_id, terminator, rp, strdedup); +- } +- } +- +- template +- static inline void mark_through_ref(T* p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, ShenandoahStrDedupQueue* dq = NULL); +- +- void mark_from_roots(); +- void finish_mark_from_roots(bool full_gc); +- +- void mark_roots(ShenandoahPhaseTimings::Phase root_phase); 
+- void update_roots(ShenandoahPhaseTimings::Phase root_phase); +- void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase); +- +-// ---------- Weak references +-// +-private: +- void weak_refs_work(bool full_gc); +- void weak_refs_work_doit(bool full_gc); +- +-public: +- void weak_roots_work(bool full_gc); +- void preclean_weak_refs(); +- +-// ---------- Concurrent code cache +-// +-private: +- ShenandoahSharedFlag _claimed_codecache; +- +-public: +- void concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp); +- bool claim_codecache(); +- void clear_claim_codecache(); +- +-// ---------- Helpers +-// Used from closures, need to be public +-// +-public: +- ShenandoahObjToScanQueue* get_queue(uint worker_id); +- ShenandoahObjToScanQueueSet* task_queues() { return _task_queues; } +- +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,272 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp" +-#include "memory/iterator.inline.hpp" +-#include "oops/oop.inline.hpp" +-#include "runtime/prefetch.inline.hpp" +- +-template +-void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, ShenandoahMarkTask* task) { +- oop obj = task->obj(); +- +- shenandoah_assert_not_forwarded(NULL, obj); +- shenandoah_assert_marked(NULL, obj); +- shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_gc()); +- +- if (task->is_not_chunked()) { +- if (obj->is_instance()) { +- // Case 1: Normal oop, process as usual. +- obj->oop_iterate(cl); +- } else if (obj->is_objArray()) { +- // Case 2: Object array instance and no chunk is set. Must be the first +- // time we visit it, start the chunked processing. +- do_chunked_array_start(q, cl, obj); +- } else { +- // Case 3: Primitive array. Do nothing, no oops there. 
We use the same +- // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using: +- // We skip iterating over the klass pointer since we know that +- // Universe::TypeArrayKlass never moves. +- assert (obj->is_typeArray(), "should be type array"); +- } +- // Count liveness the last: push the outstanding work to the queues first +- count_liveness(live_data, obj); +- } else { +- // Case 4: Array chunk, has sensible chunk id. Process it. +- do_chunked_array(q, cl, obj, task->chunk(), task->pow()); +- } +-} +- +-inline void ShenandoahConcurrentMark::count_liveness(ShenandoahLiveData* live_data, oop obj) { +- size_t region_idx = _heap->heap_region_index_containing(obj); +- ShenandoahHeapRegion* region = _heap->get_region(region_idx); +- size_t size = obj->size(); +- +- if (!region->is_humongous_start()) { +- assert(!region->is_humongous(), "Cannot have continuations here"); +- ShenandoahLiveData cur = live_data[region_idx]; +- size_t new_val = size + cur; +- if (new_val >= SHENANDOAH_LIVEDATA_MAX) { +- // overflow, flush to region data +- region->increase_live_data_gc_words(new_val); +- live_data[region_idx] = 0; +- } else { +- // still good, remember in locals +- live_data[region_idx] = (ShenandoahLiveData) new_val; +- } +- } else { +- shenandoah_assert_in_correct_region(NULL, obj); +- size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); +- +- for (size_t i = region_idx; i < region_idx + num_regions; i++) { +- ShenandoahHeapRegion* chain_reg = _heap->get_region(i); +- assert(chain_reg->is_humongous(), "Expecting a humongous region"); +- chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize); +- } +- } +-} +- +-template +-inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) { +- assert(obj->is_objArray(), "expect object array"); +- objArrayOop array = objArrayOop(obj); +- int len = array->length(); +- +- if (len <= (int) ObjArrayMarkingStride*2) { +- // A few 
slices only, process directly +- array->oop_iterate_range(cl, 0, len); +- } else { +- int bits = log2_long(len); +- // Compensate for non-power-of-two arrays, cover the array in excess: +- if (len != (1 << bits)) bits++; +- +- // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to +- // boundaries against array->length(), touching the array header on every chunk. +- // +- // To do this, we cut the prefix in full-sized chunks, and submit them on the queue. +- // If the array is not divided in chunk sizes, then there would be an irregular tail, +- // which we will process separately. +- +- int last_idx = 0; +- +- int chunk = 1; +- int pow = bits; +- +- // Handle overflow +- if (pow >= 31) { +- assert (pow == 31, "sanity"); +- pow--; +- chunk = 2; +- last_idx = (1 << pow); +- bool pushed = q->push(ShenandoahMarkTask(array, 1, pow)); +- assert(pushed, "overflow queue should always succeed pushing"); +- } +- +- // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last +- // successful right boundary to figure out the irregular tail. 
+- while ((1 << pow) > (int)ObjArrayMarkingStride && +- (chunk*2 < ShenandoahMarkTask::chunk_size())) { +- pow--; +- int left_chunk = chunk*2 - 1; +- int right_chunk = chunk*2; +- int left_chunk_end = left_chunk * (1 << pow); +- if (left_chunk_end < len) { +- bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow)); +- assert(pushed, "overflow queue should always succeed pushing"); +- chunk = right_chunk; +- last_idx = left_chunk_end; +- } else { +- chunk = left_chunk; +- } +- } +- +- // Process the irregular tail, if present +- int from = last_idx; +- if (from < len) { +- array->oop_iterate_range(cl, from, len); +- } +- } +-} +- +-template +-inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) { +- assert(obj->is_objArray(), "expect object array"); +- objArrayOop array = objArrayOop(obj); +- +- assert (ObjArrayMarkingStride > 0, "sanity"); +- +- // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that +- // are known to start beyond the array. 
+- while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) { +- pow--; +- chunk *= 2; +- bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow)); +- assert(pushed, "overflow queue should always succeed pushing"); +- } +- +- int chunk_size = 1 << pow; +- +- int from = (chunk - 1) * chunk_size; +- int to = chunk * chunk_size; +- +-#ifdef ASSERT +- int len = array->length(); +- assert (0 <= from && from < len, err_msg("from is sane: %d/%d", from, len)); +- assert (0 < to && to <= len, err_msg("to is sane: %d/%d", to, len)); +-#endif +- +- array->oop_iterate_range(cl, from, to); +-} +- +-class ShenandoahSATBBufferClosure : public SATBBufferClosure { +-private: +- ShenandoahObjToScanQueue* _queue; +- ShenandoahStrDedupQueue* _dedup_queue; +- ShenandoahHeap* _heap; +- ShenandoahMarkingContext* const _mark_context; +-public: +- ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) : +- _queue(q), +- _dedup_queue(dq), +- _heap(ShenandoahHeap::heap()), +- _mark_context(_heap->marking_context()) +- { +- } +- +- void do_buffer(void **buffer, size_t size) { +- assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here"); +- if (ShenandoahStringDedup::is_enabled()) { +- do_buffer_impl(buffer, size); +- } else { +- do_buffer_impl(buffer, size); +- } +- } +- +- template +- void do_buffer_impl(void **buffer, size_t size) { +- for (size_t i = 0; i < size; ++i) { +- oop *p = (oop *) &buffer[i]; +- ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context, _dedup_queue); +- } +- } +-}; +- +-template +-inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, ShenandoahStrDedupQueue* dq) { +- T o = oopDesc::load_heap_oop(p); +- if (! 
oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- switch (UPDATE_REFS) { +- case NONE: +- break; +- case RESOLVE: +- obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); +- break; +- case SIMPLE: +- // We piggy-back reference updating to the marking tasks. +- obj = heap->update_with_forwarded_not_null(p, obj); +- break; +- case CONCURRENT: +- obj = heap->maybe_update_with_forwarded_not_null(p, obj); +- break; +- default: +- ShouldNotReachHere(); +- } +- +- // Note: Only when concurrently updating references can obj be different +- // (that is, really different, not just different from-/to-space copies of the same) +- // from the one we originally loaded. Mutator thread can beat us by writing something +- // else into the location. In that case, we would mark through that updated value, +- // on the off-chance it is not handled by other means (e.g. via SATB). However, +- // if that write was NULL, we don't need to do anything else. +- if (UPDATE_REFS != CONCURRENT || !oopDesc::is_null(obj)) { +- shenandoah_assert_not_forwarded(p, obj); +- shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc()); +- +- if (mark_context->mark(obj)) { +- bool pushed = q->push(ShenandoahMarkTask(obj)); +- assert(pushed, "overflow queue should always succeed pushing"); +- +- if ((STRING_DEDUP == ENQUEUE_DEDUP) && ShenandoahStringDedup::is_candidate(obj)) { +- assert(ShenandoahStringDedup::is_enabled(), "Must be enabled"); +- assert(dq != NULL, "Dedup queue not set"); +- ShenandoahStringDedup::enqueue_candidate(obj, dq); +- } +- } +- +- shenandoah_assert_marked(p, obj); +- } +- } +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.cpp 2023-04-19 
05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,719 +0,0 @@ +-/* +- * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp" +-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahControlThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "memory/iterator.hpp" +-#include "memory/universe.hpp" +- +-#ifdef _WINDOWS +-#pragma warning(disable : 4355) +-#endif +- +-SurrogateLockerThread* ShenandoahControlThread::_slt = NULL; +- +-ShenandoahControlThread::ShenandoahControlThread() : +- ConcurrentGCThread(), +- _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true), +- _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true), +- _periodic_task(this), +- _requested_gc_cause(GCCause::_no_cause_specified), +- _degen_point(ShenandoahHeap::_degenerated_outside_cycle), +- _allocs_seen(0) { +- +- reset_gc_id(); +- if (os::create_thread(this, os::cgc_thread)) { +- os::set_native_priority(this, os::java_to_os_priority[NearMaxPriority]); +- if (!_should_terminate && !DisableStartThread) { +- os::start_thread(this); +- } +- } +- +- _periodic_task.enroll(); +- _periodic_satb_flush_task.enroll(); +- if (ShenandoahPacing) { +- _periodic_pacer_notify_task.enroll(); +- } +-} +- +-ShenandoahControlThread::~ShenandoahControlThread() { +- // This is here so that 
super is called. +-} +- +-void ShenandoahPeriodicTask::task() { +- _thread->handle_force_counters_update(); +- _thread->handle_counters_update(); +-} +- +-void ShenandoahPeriodicSATBFlushTask::task() { +- ShenandoahHeap::heap()->force_satb_flush_all_threads(); +-} +- +-void ShenandoahPeriodicPacerNotify::task() { +- assert(ShenandoahPacing, "Should not be here otherwise"); +- ShenandoahHeap::heap()->pacer()->notify_waiters(); +-} +- +-void ShenandoahControlThread::run() { +- initialize_in_thread(); +- +- wait_for_universe_init(); +- +- // Wait until we have the surrogate locker thread in place. +- { +- MutexLockerEx x(CGC_lock, true); +- while(_slt == NULL && !_should_terminate) { +- CGC_lock->wait(true, 200); +- } +- } +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- GCMode default_mode = concurrent_normal; +- GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc; +- int sleep = ShenandoahControlIntervalMin; +- +- double last_shrink_time = os::elapsedTime(); +- double last_sleep_adjust_time = os::elapsedTime(); +- +- // Shrink period avoids constantly polling regions for shrinking. +- // Having a period 10x lower than the delay would mean we hit the +- // shrinking with lag of less than 1/10-th of true delay. +- // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds. +- double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; +- +- ShenandoahCollectorPolicy* policy = heap->shenandoah_policy(); +- +- ShenandoahHeuristics* heuristics = heap->heuristics(); +- while (!in_graceful_shutdown() && !_should_terminate) { +- // Figure out if we have pending requests. +- bool alloc_failure_pending = _alloc_failure_gc.is_set(); +- bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause); +- bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause); +- +- // This control loop iteration have seen this much allocations. 
+- intptr_t allocs_seen = (intptr_t)(Atomic::xchg_ptr(0, &_allocs_seen)); +- +- // Check if we have seen a new target for soft max heap size. +- bool soft_max_changed = check_soft_max_changed(); +- +- // Choose which GC mode to run in. The block below should select a single mode. +- GCMode mode = none; +- GCCause::Cause cause = GCCause::_last_gc_cause; +- ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset; +- +- if (alloc_failure_pending) { +- // Allocation failure takes precedence: we have to deal with it first thing +- log_info(gc)("Trigger: Handle Allocation Failure"); +- +- cause = GCCause::_allocation_failure; +- +- // Consume the degen point, and seed it with default value +- degen_point = _degen_point; +- _degen_point = ShenandoahHeap::_degenerated_outside_cycle; +- +- if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) { +- heuristics->record_allocation_failure_gc(); +- policy->record_alloc_failure_to_degenerated(degen_point); +- mode = stw_degenerated; +- } else { +- heuristics->record_allocation_failure_gc(); +- policy->record_alloc_failure_to_full(); +- mode = stw_full; +- } +- +- } else if (explicit_gc_requested) { +- cause = _requested_gc_cause; +- log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause)); +- +- heuristics->record_requested_gc(); +- +- if (ExplicitGCInvokesConcurrent) { +- policy->record_explicit_to_concurrent(); +- mode = default_mode; +- // Unload and clean up everything +- heap->set_process_references(heuristics->can_process_references()); +- heap->set_unload_classes(heuristics->can_unload_classes()); +- } else { +- policy->record_explicit_to_full(); +- mode = stw_full; +- } +- } else if (implicit_gc_requested) { +- cause = _requested_gc_cause; +- log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause)); +- +- heuristics->record_requested_gc(); +- +- if (ShenandoahImplicitGCInvokesConcurrent) { +- policy->record_implicit_to_concurrent(); +- 
mode = default_mode; +- +- // Unload and clean up everything +- heap->set_process_references(heuristics->can_process_references()); +- heap->set_unload_classes(heuristics->can_unload_classes()); +- } else { +- policy->record_implicit_to_full(); +- mode = stw_full; +- } +- } else { +- // Potential normal cycle: ask heuristics if it wants to act +- if (heuristics->should_start_gc()) { +- mode = default_mode; +- cause = default_cause; +- } +- +- // Ask policy if this cycle wants to process references or unload classes +- heap->set_process_references(heuristics->should_process_references()); +- heap->set_unload_classes(heuristics->should_unload_classes()); +- } +- +- // Blow all soft references on this cycle, if handling allocation failure, +- // either implicit or explicit GC request, or we are requested to do so unconditionally. +- if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) { +- heap->collector_policy()->set_should_clear_all_soft_refs(true); +- } +- +- bool gc_requested = (mode != none); +- assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set"); +- +- if (gc_requested) { +- // GC is starting, bump the internal ID +- update_gc_id(); +- +- heap->reset_bytes_allocated_since_gc_start(); +- +- // Capture metaspace usage before GC. +- const size_t metadata_prev_used = MetaspaceAux::used_bytes(); +- +- // If GC was requested, we are sampling the counters even without actual triggers +- // from allocation machinery. This captures GC phases more accurately. 
+- set_forced_counters_update(true); +- +- // If GC was requested, we better dump freeset data for performance debugging +- { +- ShenandoahHeapLocker locker(heap->lock()); +- heap->free_set()->log_status(); +- } +- +- switch (mode) { +- case none: +- break; +- case concurrent_normal: +- service_concurrent_normal_cycle(cause); +- break; +- case stw_degenerated: +- service_stw_degenerated_cycle(cause, degen_point); +- break; +- case stw_full: +- service_stw_full_cycle(cause); +- break; +- default: +- ShouldNotReachHere(); +- } +- +- // If this was the requested GC cycle, notify waiters about it +- if (explicit_gc_requested || implicit_gc_requested) { +- notify_gc_waiters(); +- } +- +- // If this was the allocation failure GC cycle, notify waiters about it +- if (alloc_failure_pending) { +- notify_alloc_failure_waiters(); +- } +- +- // Report current free set state at the end of cycle, whether +- // it is a normal completion, or the abort. +- { +- ShenandoahHeapLocker locker(heap->lock()); +- heap->free_set()->log_status(); +- +- // Notify Universe about new heap usage. This has implications for +- // global soft refs policy, and we better report it every time heap +- // usage goes down. +- Universe::update_heap_info_at_gc(); +- } +- +- // Disable forced counters update, and update counters one more time +- // to capture the state at the end of GC session. 
+- handle_force_counters_update(); +- set_forced_counters_update(false); +- +- // Retract forceful part of soft refs policy +- heap->collector_policy()->set_should_clear_all_soft_refs(false); +- +- // Clear metaspace oom flag, if current cycle unloaded classes +- if (heap->unload_classes()) { +- heuristics->clear_metaspace_oom(); +- } +- +- // Commit worker statistics to cycle data +- heap->phase_timings()->flush_par_workers_to_cycle(); +- if (ShenandoahPacing) { +- heap->pacer()->flush_stats_to_cycle(); +- } +- +- // Print GC stats for current cycle +- if (PrintGCDetails) { +- ResourceMark rm; +- heap->phase_timings()->print_cycle_on(gclog_or_tty); +- if (ShenandoahPacing) { +- heap->pacer()->print_cycle_on(gclog_or_tty); +- } +- } +- +- // Commit statistics to globals +- heap->phase_timings()->flush_cycle_to_global(); +- +- // Print Metaspace change following GC (if logging is enabled). +- if (PrintGCDetails) { +- MetaspaceAux::print_metaspace_change(metadata_prev_used); +- } +- +- // GC is over, we are at idle now +- if (ShenandoahPacing) { +- heap->pacer()->setup_for_idle(); +- } +- } else { +- // Allow allocators to know we have seen this much regions +- if (ShenandoahPacing && (allocs_seen > 0)) { +- heap->pacer()->report_alloc(allocs_seen); +- } +- } +- +- double current = os::elapsedTime(); +- +- if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) { +- // Explicit GC tries to uncommit everything down to min capacity. +- // Soft max change tries to uncommit everything down to target capacity. +- // Periodic uncommit tries to uncommit suitable regions down to min capacity. +- +- double shrink_before = (explicit_gc_requested || soft_max_changed) ? +- current : +- current - (ShenandoahUncommitDelay / 1000.0); +- +- size_t shrink_until = soft_max_changed ? 
+- heap->soft_max_capacity() : +- heap->min_capacity(); +- +- service_uncommit(shrink_before, shrink_until); +- heap->phase_timings()->flush_cycle_to_global(); +- last_shrink_time = current; +- } +- +- // Wait before performing the next action. If allocation happened during this wait, +- // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle, +- // back off exponentially. +- if (_heap_changed.try_unset()) { +- sleep = ShenandoahControlIntervalMin; +- } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod){ +- sleep = MIN2(ShenandoahControlIntervalMax, MAX2(1, sleep * 2)); +- last_sleep_adjust_time = current; +- } +- os::naked_short_sleep(sleep); +- } +- +- // Wait for the actual stop(), can't leave run_service() earlier. +- while (! _should_terminate) { +- os::naked_short_sleep(ShenandoahControlIntervalMin); +- } +- terminate(); +-} +- +-bool ShenandoahControlThread::check_soft_max_changed() const { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- size_t new_soft_max = OrderAccess::load_acquire(&ShenandoahSoftMaxHeapSize); +- size_t old_soft_max = heap->soft_max_capacity(); +- if (new_soft_max != old_soft_max) { +- new_soft_max = MAX2(heap->min_capacity(), new_soft_max); +- new_soft_max = MIN2(heap->max_capacity(), new_soft_max); +- if (new_soft_max != old_soft_max) { +- log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max), +- byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max) +- ); +- heap->set_soft_max_capacity(new_soft_max); +- return true; +- } +- } +- return false; +-} +- +-void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) { +- // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during +- // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there. 
+- // If second allocation failure happens during Degenerated GC cycle (for example, when GC +- // tries to evac something and no memory is available), cycle degrades to Full GC. +- // +- // There are also a shortcut through the normal cycle: immediate garbage shortcut, when +- // heuristics says there are no regions to compact, and all the collection comes from immediately +- // reclaimable regions. +- // +- // ................................................................................................ +- // +- // (immediate garbage shortcut) Concurrent GC +- // /-------------------------------------------\ +- // | | +- // | | +- // | | +- // | v +- // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END] +- // | | | ^ +- // | (af) | (af) | (af) | +- // ..................|....................|.................|..............|....................... +- // | | | | +- // | | | | Degenerated GC +- // v v v | +- // STW Mark ----------> STW Evac ----> STW Update-Refs ----->o +- // | | | ^ +- // | (af) | (af) | (af) | +- // ..................|....................|.................|..............|....................... 
+- // | | | | +- // | v | | Full GC +- // \------------------->o<----------------/ | +- // | | +- // v | +- // Full GC --------------------------/ +- // +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return; +- +- ShenandoahGCSession session(cause); +- +- TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters()); +- +- // Reset for upcoming marking +- heap->entry_reset(); +- +- // Start initial mark under STW +- heap->vmop_entry_init_mark(); +- +- // Continue concurrent mark +- heap->entry_mark(); +- if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return; +- +- // If not cancelled, can try to concurrently pre-clean +- heap->entry_preclean(); +- +- // Complete marking under STW, and start evacuation +- heap->vmop_entry_final_mark(); +- +- // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim +- // the space. This would be the last action if there is nothing to evacuate. +- heap->entry_cleanup_early(); +- +- { +- ShenandoahHeapLocker locker(heap->lock()); +- heap->free_set()->log_status(); +- } +- +- // Continue the cycle with evacuation and optional update-refs. +- // This may be skipped if there is nothing to evacuate. +- // If so, evac_in_progress would be unset by collection set preparation code. +- if (heap->is_evacuation_in_progress()) { +- // Concurrently evacuate +- heap->entry_evac(); +- if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return; +- +- // Perform update-refs phase. +- heap->vmop_entry_init_updaterefs(); +- heap->entry_updaterefs(); +- if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return; +- +- heap->vmop_entry_final_updaterefs(); +- +- // Update references freed up collection set, kick the cleanup to reclaim the space. 
+- heap->entry_cleanup_complete(); +- } +- +- // Cycle is complete +- heap->heuristics()->record_success_concurrent(); +- heap->shenandoah_policy()->record_success_concurrent(); +-} +- +-bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (heap->cancelled_gc()) { +- assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting"); +- if (!in_graceful_shutdown()) { +- assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle, +- err_msg("Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point))); +- _degen_point = point; +- } +- return true; +- } +- return false; +-} +- +-void ShenandoahControlThread::stop() { +- { +- MutexLockerEx ml(Terminator_lock); +- _should_terminate = true; +- } +- +- { +- MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag); +- CGC_lock->notify_all(); +- } +- +- { +- MutexLockerEx ml(Terminator_lock); +- while (!_has_terminated) { +- Terminator_lock->wait(); +- } +- } +-} +- +-void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahGCSession session(cause); +- +- heap->vmop_entry_full(cause); +- +- heap->heuristics()->record_success_full(); +- heap->shenandoah_policy()->record_success_full(); +-} +- +-void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) { +- assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set"); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahGCSession session(cause); +- +- heap->vmop_degenerated(point); +- +- heap->heuristics()->record_success_degenerated(); +- heap->shenandoah_policy()->record_success_degenerated(); +-} +- +-void ShenandoahControlThread::service_uncommit(double shrink_before, size_t shrink_until) { +- 
ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- // Determine if there is work to do. This avoids taking heap lock if there is +- // no work available, avoids spamming logs with superfluous logging messages, +- // and minimises the amount of work while locks are taken. +- +- if (heap->committed() <= shrink_until) return; +- +- bool has_work = false; +- for (size_t i = 0; i < heap->num_regions(); i++) { +- ShenandoahHeapRegion *r = heap->get_region(i); +- if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { +- has_work = true; +- break; +- } +- } +- +- if (has_work) { +- heap->entry_uncommit(shrink_before, shrink_until); +- } +-} +- +-bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const { +- return GCCause::is_user_requested_gc(cause) || +- GCCause::is_serviceability_requested_gc(cause); +-} +- +-void ShenandoahControlThread::request_gc(GCCause::Cause cause) { +- assert(GCCause::is_user_requested_gc(cause) || +- GCCause::is_serviceability_requested_gc(cause) || +- cause == GCCause::_shenandoah_metadata_gc_clear_softrefs || +- cause == GCCause::_full_gc_alot || +- cause == GCCause::_scavenge_alot, +- "only requested GCs here"); +- +- if (is_explicit_gc(cause)) { +- if (!DisableExplicitGC) { +- handle_requested_gc(cause); +- } +- } else { +- handle_requested_gc(cause); +- } +-} +- +-void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) { +- // Make sure we have at least one complete GC cycle before unblocking +- // from the explicit GC request. +- // +- // This is especially important for weak references cleanup and/or native +- // resources (e.g. DirectByteBuffers) machinery: when explicit GC request +- // comes very late in the already running cycle, it would miss lots of new +- // opportunities for cleanup that were made available before the caller +- // requested the GC. 
+- +- MonitorLockerEx ml(&_gc_waiters_lock); +- size_t current_gc_id = get_gc_id(); +- size_t required_gc_id = current_gc_id + 1; +- while (current_gc_id < required_gc_id) { +- _gc_requested.set(); +- _requested_gc_cause = cause; +- ml.wait(); +- current_gc_id = get_gc_id(); +- } +-} +- +-void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- assert(current()->is_Java_thread(), "expect Java thread here"); +- +- if (try_set_alloc_failure_gc()) { +- // Only report the first allocation failure +- log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s", +- req.type_string(), +- byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize)); +- +- // Now that alloc failure GC is scheduled, we can abort everything else +- heap->cancel_gc(GCCause::_allocation_failure); +- } +- +- MonitorLockerEx ml(&_alloc_failure_waiters_lock); +- while (is_alloc_failure_gc()) { +- ml.wait(); +- } +-} +- +-void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) { +- Thread* t = Thread::current(); +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (try_set_alloc_failure_gc()) { +- // Only report the first allocation failure +- log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation", +- byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize)); +- } +- +- heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac); +-} +- +-void ShenandoahControlThread::notify_alloc_failure_waiters() { +- _alloc_failure_gc.unset(); +- MonitorLockerEx ml(&_alloc_failure_waiters_lock); +- ml.notify_all(); +-} +- +-bool ShenandoahControlThread::try_set_alloc_failure_gc() { +- return _alloc_failure_gc.try_set(); +-} +- +-bool ShenandoahControlThread::is_alloc_failure_gc() { +- return _alloc_failure_gc.is_set(); +-} +- +-void ShenandoahControlThread::notify_gc_waiters() { +- _gc_requested.unset(); +- 
MonitorLockerEx ml(&_gc_waiters_lock); +- ml.notify_all(); +-} +- +-void ShenandoahControlThread::handle_counters_update() { +- if (_do_counters_update.is_set()) { +- _do_counters_update.unset(); +- ShenandoahHeap::heap()->monitoring_support()->update_counters(); +- } +-} +- +-void ShenandoahControlThread::handle_force_counters_update() { +- if (_force_counters_update.is_set()) { +- _do_counters_update.unset(); // reset these too, we do update now! +- ShenandoahHeap::heap()->monitoring_support()->update_counters(); +- } +-} +- +-void ShenandoahControlThread::notify_heap_changed() { +- // This is called from allocation path, and thus should be fast. +- +- // Update monitoring counters when we took a new region. This amortizes the +- // update costs on slow path. +- if (_do_counters_update.is_unset()) { +- _do_counters_update.set(); +- } +- // Notify that something had changed. +- if (_heap_changed.is_unset()) { +- _heap_changed.set(); +- } +-} +- +-void ShenandoahControlThread::pacing_notify_alloc(size_t words) { +- assert(ShenandoahPacing, "should only call when pacing is enabled"); +- Atomic::add(words, &_allocs_seen); +-} +- +-void ShenandoahControlThread::set_forced_counters_update(bool value) { +- _force_counters_update.set_cond(value); +-} +- +-void ShenandoahControlThread::reset_gc_id() { +- OrderAccess::release_store_ptr_fence(&_gc_id, 0); +-} +- +-void ShenandoahControlThread::update_gc_id() { +- Atomic::add(1, &_gc_id); +-} +- +-size_t ShenandoahControlThread::get_gc_id() { +- return OrderAccess::load_acquire(&_gc_id); +-} +- +-void ShenandoahControlThread::print() const { +- print_on(tty); +-} +- +-void ShenandoahControlThread::print_on(outputStream* st) const { +- st->print("Shenandoah Concurrent Thread"); +- Thread::print_on(st); +- st->cr(); +-} +- +-void ShenandoahControlThread::start() { +- create_and_start(); +-} +- +-void ShenandoahControlThread::makeSurrogateLockerThread(TRAPS) { +- assert(UseShenandoahGC, "SLT thread needed only for concurrent 
GC"); +- assert(THREAD->is_Java_thread(), "must be a Java thread"); +- assert(_slt == NULL, "SLT already created"); +- _slt = SurrogateLockerThread::make(THREAD); +-} +- +-void ShenandoahControlThread::prepare_for_graceful_shutdown() { +- _graceful_shutdown.set(); +-} +- +-bool ShenandoahControlThread::in_graceful_shutdown() { +- return _graceful_shutdown.is_set(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahControlThread.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,168 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP +- +-#include "gc_interface/gcCause.hpp" +-#include "gc_implementation/shared/concurrentGCThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp" +-#include "runtime/task.hpp" +-#include "utilities/ostream.hpp" +- +-class ShenandoahControlThread; +- +-// Periodic task is useful for doing asynchronous things that do not require (heap) locks, +-// or synchronization with other parts of collector. These could run even when ShenandoahConcurrentThread +-// is busy driving the GC cycle. +-class ShenandoahPeriodicTask : public PeriodicTask { +-private: +- ShenandoahControlThread* _thread; +-public: +- ShenandoahPeriodicTask(ShenandoahControlThread* thread) : +- PeriodicTask(100), _thread(thread) {} +- virtual void task(); +-}; +- +-// Periodic task to flush SATB buffers periodically. +-class ShenandoahPeriodicSATBFlushTask : public PeriodicTask { +-public: +- ShenandoahPeriodicSATBFlushTask() : PeriodicTask(ShenandoahSATBBufferFlushInterval) {} +- virtual void task(); +-}; +- +-// Periodic task to notify blocked paced waiters. +-class ShenandoahPeriodicPacerNotify : public PeriodicTask { +-public: +- ShenandoahPeriodicPacerNotify() : PeriodicTask(PeriodicTask::min_interval) {} +- virtual void task(); +-}; +- +-class ShenandoahControlThread: public ConcurrentGCThread { +- friend class VMStructs; +- +-private: +- typedef enum { +- none, +- concurrent_normal, +- stw_degenerated, +- stw_full +- } GCMode; +- +- // While we could have a single lock for these, it may risk unblocking +- // GC waiters when alloc failure GC cycle finishes. We want instead +- // to make complete explicit cycle for for demanding customers. 
+- Monitor _alloc_failure_waiters_lock; +- Monitor _gc_waiters_lock; +- ShenandoahPeriodicTask _periodic_task; +- ShenandoahPeriodicSATBFlushTask _periodic_satb_flush_task; +- ShenandoahPeriodicPacerNotify _periodic_pacer_notify_task; +- +- private: +- static SurrogateLockerThread* _slt; +- +-public: +- void run(); +- void stop(); +- +-private: +- ShenandoahSharedFlag _gc_requested; +- ShenandoahSharedFlag _alloc_failure_gc; +- ShenandoahSharedFlag _graceful_shutdown; +- ShenandoahSharedFlag _heap_changed; +- ShenandoahSharedFlag _do_counters_update; +- ShenandoahSharedFlag _force_counters_update; +- GCCause::Cause _requested_gc_cause; +- ShenandoahHeap::ShenandoahDegenPoint _degen_point; +- +- shenandoah_padding(0); +- volatile intptr_t _allocs_seen; +- shenandoah_padding(1); +- volatile intptr_t _gc_id; +- shenandoah_padding(2); +- +- bool check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point); +- void service_concurrent_normal_cycle(GCCause::Cause cause); +- void service_stw_full_cycle(GCCause::Cause cause); +- void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point); +- void service_uncommit(double shrink_before, size_t shrink_until); +- +- bool try_set_alloc_failure_gc(); +- void notify_alloc_failure_waiters(); +- bool is_alloc_failure_gc(); +- +- void reset_gc_id(); +- void update_gc_id(); +- size_t get_gc_id(); +- +- void notify_gc_waiters(); +- +- // Handle GC request. +- // Blocks until GC is over. +- void handle_requested_gc(GCCause::Cause cause); +- +- bool is_explicit_gc(GCCause::Cause cause) const; +- +- bool check_soft_max_changed() const; +- +-public: +- // Constructor +- ShenandoahControlThread(); +- ~ShenandoahControlThread(); +- +- static void makeSurrogateLockerThread(TRAPS); +- static SurrogateLockerThread* slt() { return _slt; } +- +- // Handle allocation failure from normal allocation. +- // Blocks until memory is available. 
+- void handle_alloc_failure(ShenandoahAllocRequest& req); +- +- // Handle allocation failure from evacuation path. +- // Optionally blocks while collector is handling the failure. +- void handle_alloc_failure_evac(size_t words); +- +- void request_gc(GCCause::Cause cause); +- +- void handle_counters_update(); +- void handle_force_counters_update(); +- void set_forced_counters_update(bool value); +- +- void notify_heap_changed(); +- +- void pacing_notify_alloc(size_t words); +- +- void start(); +- void prepare_for_graceful_shutdown(); +- bool in_graceful_shutdown(); +- +- char* name() const { return (char*)"ShenandoahConcurrentThread";} +- +- // Printing +- void print_on(outputStream* st) const; +- void print() const; +- +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTTHREAD_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,122 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp" +- +-const jint ShenandoahEvacOOMHandler::OOM_MARKER_MASK = 0x80000000; +- +-ShenandoahEvacOOMHandler::ShenandoahEvacOOMHandler() : +- _threads_in_evac(0) { +-} +- +-void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() { +- while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) { +- os::naked_short_sleep(1); +- } +- // At this point we are sure that no threads can evacuate anything. Raise +- // the thread-local oom_during_evac flag to indicate that any attempt +- // to evacuate should simply return the forwarding pointer instead (which is safe now). 
+- Thread::current()->set_oom_during_evac(true); +-} +- +-void ShenandoahEvacOOMHandler::enter_evacuation() { +- jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac); +- +- assert(!Thread::current()->is_evac_allowed(), "sanity"); +- assert(!Thread::current()->is_oom_during_evac(), "TL oom-during-evac must not be set"); +- +- if ((threads_in_evac & OOM_MARKER_MASK) != 0) { +- wait_for_no_evac_threads(); +- return; +- } +- +- while (true) { +- jint other = Atomic::cmpxchg(threads_in_evac + 1, &_threads_in_evac, threads_in_evac); +- if (other == threads_in_evac) { +- // Success: caller may safely enter evacuation +- DEBUG_ONLY(Thread::current()->set_evac_allowed(true)); +- return; +- } else { +- // Failure: +- // - if offender has OOM_MARKER_MASK, then loop until no more threads in evac +- // - otherwise re-try CAS +- if ((other & OOM_MARKER_MASK) != 0) { +- wait_for_no_evac_threads(); +- return; +- } +- threads_in_evac = other; +- } +- } +-} +- +-void ShenandoahEvacOOMHandler::leave_evacuation() { +- if (!Thread::current()->is_oom_during_evac()) { +- assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity"); +- // NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive. +- Atomic::dec(&_threads_in_evac); +- } else { +- // If we get here, the current thread has already gone through the +- // OOM-during-evac protocol and has thus either never entered or successfully left +- // the evacuation region. Simply flip its TL oom-during-evac flag back off. 
+- Thread::current()->set_oom_during_evac(false); +- } +- DEBUG_ONLY(Thread::current()->set_evac_allowed(false)); +- assert(!Thread::current()->is_oom_during_evac(), "TL oom-during-evac must be turned off"); +-} +- +-void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() { +- assert(Thread::current()->is_evac_allowed(), "sanity"); +- assert(!Thread::current()->is_oom_during_evac(), "TL oom-during-evac must not be set"); +- +- jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac); +- while (true) { +- jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK, +- &_threads_in_evac, threads_in_evac); +- if (other == threads_in_evac) { +- // Success: wait for other threads to get out of the protocol and return. +- wait_for_no_evac_threads(); +- return; +- } else { +- // Failure: try again with updated new value. +- threads_in_evac = other; +- } +- } +-} +- +-void ShenandoahEvacOOMHandler::clear() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); +- assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity"); +- OrderAccess::release_store_fence(&_threads_in_evac, 0); +-} +- +-ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() { +- ShenandoahHeap::heap()->enter_evacuation(); +-} +- +-ShenandoahEvacOOMScope::~ShenandoahEvacOOMScope() { +- ShenandoahHeap::heap()->leave_evacuation(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,121 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +- +-/** +- * Provides safe handling of out-of-memory situations during evacuation. +- * +- * When a Java thread encounters out-of-memory while evacuating an object in a +- * write-barrier (i.e. it cannot copy the object to to-space), it does not necessarily +- * follow we can return immediately from the WB (and store to from-space). +- * +- * In very basic case, on such failure we may wait until the the evacuation is over, +- * and then resolve the forwarded copy, and to the store there. This is possible +- * because other threads might still have space in their GCLABs, and successfully +- * evacuate the object. +- * +- * But, there is a race due to non-atomic evac_in_progress transition. Consider +- * thread A is stuck waiting for the evacuation to be over -- it cannot leave with +- * from-space copy yet. 
Control thread drops evacuation_in_progress preparing for +- * next STW phase that has to recover from OOME. Thread B misses that update, and +- * successfully evacuates the object, does the write to to-copy. But, before +- * Thread B is able to install the fwdptr, thread A discovers evac_in_progress is +- * down, exits from here, reads the fwdptr, discovers old from-copy, and stores there. +- * Thread B then wakes up and installs to-copy. This breaks to-space invariant, and +- * silently corrupts the heap: we accepted two writes to separate copies of the object. +- * +- * The way it is solved here is to maintain a counter of threads inside the +- * 'evacuation path'. The 'evacuation path' is the part of evacuation that does the actual +- * allocation, copying and CASing of the copy object, and is protected by this +- * OOM-during-evac-handler. The handler allows multiple threads to enter and exit +- * evacuation path, but on OOME it requires all threads that experienced OOME to wait +- * for current threads to leave, and blocks other threads from entering. +- * +- * Detailed state change: +- * +- * Upon entry of the evac-path, entering thread will attempt to increase the counter, +- * using a CAS. Depending on the result of the CAS: +- * - success: carry on with evac +- * - failure: +- * - if offending value is a valid counter, then try again +- * - if offending value is OOM-during-evac special value: loop until +- * counter drops to 0, then exit with read-barrier +- * +- * Upon exit, exiting thread will decrease the counter using atomic dec. +- * +- * Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac +- * special value into the counter. 
Depending on result: +- * - success: busy-loop until counter drops to zero, then exit with RB +- * - failure: +- * - offender is valid counter update: try again +- * - offender is OOM-during-evac: busy loop until counter drops to +- * zero, then exit with RB +- */ +-class ShenandoahEvacOOMHandler { +-private: +- static const jint OOM_MARKER_MASK; +- +- shenandoah_padding(0); +- volatile jint _threads_in_evac; +- shenandoah_padding(1); +- +- void wait_for_no_evac_threads(); +- +-public: +- ShenandoahEvacOOMHandler(); +- +- /** +- * Attempt to enter the protected evacuation path. +- * +- * When this returns true, it is safe to continue with normal evacuation. +- * When this method returns false, evacuation must not be entered, and caller +- * may safely continue with a read-barrier (if Java thread). +- */ +- void enter_evacuation(); +- +- /** +- * Leave evacuation path. +- */ +- void leave_evacuation(); +- +- /** +- * Signal out-of-memory during evacuation. It will prevent any other threads +- * from entering the evacuation path, then wait until all threads have left the +- * evacuation path, and then return. It is then safe to continue with a read-barrier. +- */ +- void handle_out_of_memory_during_evacuation(); +- +- void clear(); +-}; +- +-class ShenandoahEvacOOMScope : public StackObj { +-public: +- ShenandoahEvacOOMScope(); +- ~ShenandoahEvacOOMScope(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,66 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP +- +-#include "oops/oop.hpp" +-#include "utilities/globalDefinitions.hpp" +- +-class ShenandoahForwarding { +-public: +- /* Gets forwardee from the given object. +- */ +- static inline oop get_forwardee(oop obj); +- +- /* Gets forwardee from the given object. Only from mutator thread. +- */ +- static inline oop get_forwardee_mutator(oop obj); +- +- /* Returns the raw value from forwardee slot. +- */ +- static inline HeapWord* get_forwardee_raw(oop obj); +- +- /* Returns the raw value from forwardee slot without any checks. +- * Used for quick verification. +- */ +- static inline HeapWord* get_forwardee_raw_unchecked(oop obj); +- +- /** +- * Returns true if the object is forwarded, false otherwise. +- */ +- static inline bool is_forwarded(oop obj); +- +- /* Tries to atomically update forwardee in $holder object to $update. +- * Assumes $holder points at itself. +- * Asserts $holder is in from-space. 
+- * Asserts $update is in to-space. +- * +- * Returns the new object 'update' upon success, or +- * the new forwardee that a competing thread installed. +- */ +- static inline oop try_update_forwardee(oop obj, oop update); +- +-}; +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahForwarding.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,94 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "runtime/atomic.hpp" +-#include "runtime/thread.hpp" +- +-inline HeapWord* ShenandoahForwarding::get_forwardee_raw(oop obj) { +- shenandoah_assert_in_heap(NULL, obj); +- return get_forwardee_raw_unchecked(obj); +-} +- +-inline HeapWord* ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) { +- // JVMTI use mark words for marking objects for their needs. +- // On this path, we can encounter the "marked" object, but with NULL +- // fwdptr. That object is still not forwarded, and we need to return +- // the object itself. +- markOop mark = obj->mark(); +- if (mark->is_marked()) { +- HeapWord* fwdptr = (HeapWord*) mark->clear_lock_bits(); +- if (fwdptr != NULL) { +- return fwdptr; +- } +- } +- return (HeapWord*) obj; +-} +- +-inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) { +- // Same as above, but mutator thread cannot ever see NULL forwardee. 
+- shenandoah_assert_correct(NULL, obj); +- assert(Thread::current()->is_Java_thread(), "Must be a mutator thread"); +- +- markOop mark = obj->mark(); +- if (mark->is_marked()) { +- HeapWord* fwdptr = (HeapWord*) mark->clear_lock_bits(); +- assert(fwdptr != NULL, "Forwarding pointer is never null here"); +- return oop(fwdptr); +- } else { +- return obj; +- } +-} +- +-inline oop ShenandoahForwarding::get_forwardee(oop obj) { +- shenandoah_assert_correct(NULL, obj); +- return oop(get_forwardee_raw_unchecked(obj)); +-} +- +-inline bool ShenandoahForwarding::is_forwarded(oop obj) { +- return obj->mark()->is_marked(); +-} +- +-inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) { +- markOop old_mark = obj->mark(); +- if (old_mark->is_marked()) { +- return (oop) old_mark->clear_lock_bits(); +- } +- +- markOop new_mark = markOopDesc::encode_pointer_as_mark(update); +- markOop prev_mark = obj->cas_set_mark(new_mark, old_mark); +- if (prev_mark == old_mark) { +- return update; +- } else { +- return (oop) prev_mark->clear_lock_bits(); +- } +-} +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,699 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +- +-ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap,size_t max_regions) : +- _heap(heap), +- _mutator_free_bitmap(max_regions, /* in_resource_area = */ false), +- _collector_free_bitmap(max_regions, /* in_resource_area = */ false), +- _max(max_regions) +-{ +- clear_internal(); +-} +- +-void ShenandoahFreeSet::increase_used(size_t num_bytes) { +- shenandoah_assert_heaplocked(); +- _used += num_bytes; +- +- assert(_used <= _capacity, err_msg("must not use more than we have: used: " SIZE_FORMAT +- ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, +- _used, _capacity, num_bytes)); +-} +- +-bool ShenandoahFreeSet::is_mutator_free(size_t idx) const { +- assert (idx < _max, +- err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")", +- idx, _max, _mutator_leftmost, _mutator_rightmost)); +- return _mutator_free_bitmap.at(idx); +-} +- +-bool ShenandoahFreeSet::is_collector_free(size_t idx) const { +- assert (idx < _max, +- err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", 
right: " SIZE_FORMAT ")", +- idx, _max, _collector_leftmost, _collector_rightmost)); +- return _collector_free_bitmap.at(idx); +-} +- +-HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) { +- // Scan the bitmap looking for a first fit. +- // +- // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally, +- // we would find the region to allocate at right away. +- // +- // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs +- // go to the end. This makes application allocation faster, because we would clear lots +- // of regions from the beginning most of the time. +- // +- // Free set maintains mutator and collector views, and normally they allocate in their views only, +- // unless we special cases for stealing and mixed allocations. +- +- switch (req.type()) { +- case ShenandoahAllocRequest::_alloc_tlab: +- case ShenandoahAllocRequest::_alloc_shared: { +- +- // Try to allocate in the mutator view +- for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) { +- if (is_mutator_free(idx)) { +- HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region); +- if (result != NULL) { +- return result; +- } +- } +- } +- +- // There is no recovery. Mutator does not touch collector view at all. +- break; +- } +- case ShenandoahAllocRequest::_alloc_gclab: +- case ShenandoahAllocRequest::_alloc_shared_gc: { +- // size_t is unsigned, need to dodge underflow when _leftmost = 0 +- +- // Fast-path: try to allocate in the collector view first +- for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) { +- size_t idx = c - 1; +- if (is_collector_free(idx)) { +- HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region); +- if (result != NULL) { +- return result; +- } +- } +- } +- +- // No dice. Can we borrow space from mutator view? 
+- if (!ShenandoahEvacReserveOverflow) { +- return NULL; +- } +- +- // Try to steal the empty region from the mutator view +- for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) { +- size_t idx = c - 1; +- if (is_mutator_free(idx)) { +- ShenandoahHeapRegion* r = _heap->get_region(idx); +- if (is_empty_or_trash(r)) { +- flip_to_gc(r); +- HeapWord *result = try_allocate_in(r, req, in_new_region); +- if (result != NULL) { +- return result; +- } +- } +- } +- } +- +- // No dice. Do not try to mix mutator and GC allocations, because +- // URWM moves due to GC allocations would expose unparsable mutator +- // allocations. +- +- break; +- } +- default: +- ShouldNotReachHere(); +- } +- +- return NULL; +-} +- +-HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) { +- assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index())); +- +- try_recycle_trashed(r); +- +- in_new_region = r->is_empty(); +- +- HeapWord* result = NULL; +- size_t size = req.size(); +- +- if (ShenandoahElasticTLAB && req.is_lab_alloc()) { +- size_t free = align_size_down(r->free() >> LogHeapWordSize, MinObjAlignment); +- if (size > free) { +- size = free; +- } +- if (size >= req.min_size()) { +- result = r->allocate(size, req.type()); +- assert (result != NULL, err_msg("Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size)); +- } +- } else { +- result = r->allocate(size, req.type()); +- } +- +- if (result != NULL) { +- // Allocation successful, bump stats: +- if (req.is_mutator_alloc()) { +- increase_used(size * HeapWordSize); +- } +- +- // Record actual allocation size +- req.set_actual_size(size); +- +- if (req.is_gc_alloc()) { +- r->set_update_watermark(r->top()); +- } +- } +- +- if (result == NULL || has_no_alloc_capacity(r)) { +- // Region cannot afford this or future allocations. Retire it. 
+- // +- // While this seems a bit harsh, especially in the case when this large allocation does not +- // fit, but the next small one would, we are risking to inflate scan times when lots of +- // almost-full regions precede the fully-empty region where we want allocate the entire TLAB. +- // TODO: Record first fully-empty region, and use that for large allocations +- +- // Record the remainder as allocation waste +- if (req.is_mutator_alloc()) { +- size_t waste = r->free(); +- if (waste > 0) { +- increase_used(waste); +- _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true); +- } +- } +- +- size_t num = r->index(); +- _collector_free_bitmap.clear_bit(num); +- _mutator_free_bitmap.clear_bit(num); +- // Touched the bounds? Need to update: +- if (touches_bounds(num)) { +- adjust_bounds(); +- } +- assert_bounds(); +- } +- return result; +-} +- +-bool ShenandoahFreeSet::touches_bounds(size_t num) const { +- return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost; +-} +- +-void ShenandoahFreeSet::recompute_bounds() { +- // Reset to the most pessimistic case: +- _mutator_rightmost = _max - 1; +- _mutator_leftmost = 0; +- _collector_rightmost = _max - 1; +- _collector_leftmost = 0; +- +- // ...and adjust from there +- adjust_bounds(); +-} +- +-void ShenandoahFreeSet::adjust_bounds() { +- // Rewind both mutator bounds until the next bit. +- while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) { +- _mutator_leftmost++; +- } +- while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) { +- _mutator_rightmost--; +- } +- // Rewind both collector bounds until the next bit. 
+- while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) { +- _collector_leftmost++; +- } +- while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) { +- _collector_rightmost--; +- } +-} +- +-HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { +- shenandoah_assert_heaplocked(); +- +- size_t words_size = req.size(); +- size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); +- +- // No regions left to satisfy allocation, bye. +- if (num > mutator_count()) { +- return NULL; +- } +- +- // Find the continuous interval of $num regions, starting from $beg and ending in $end, +- // inclusive. Contiguous allocations are biased to the beginning. +- +- size_t beg = _mutator_leftmost; +- size_t end = beg; +- +- while (true) { +- if (end >= _max) { +- // Hit the end, goodbye +- return NULL; +- } +- +- // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward. +- // If region is not completely free, the current [beg; end] is useless, and we may fast-forward. 
+- if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) { +- end++; +- beg = end; +- continue; +- } +- +- if ((end - beg + 1) == num) { +- // found the match +- break; +- } +- +- end++; +- }; +- +- size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask(); +- +- // Initialize regions: +- for (size_t i = beg; i <= end; i++) { +- ShenandoahHeapRegion* r = _heap->get_region(i); +- try_recycle_trashed(r); +- +- assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); +- assert(r->is_empty(), "Should be empty"); +- +- if (i == beg) { +- r->make_humongous_start(); +- } else { +- r->make_humongous_cont(); +- } +- +- // Trailing region may be non-full, record the remainder there +- size_t used_words; +- if ((i == end) && (remainder != 0)) { +- used_words = remainder; +- } else { +- used_words = ShenandoahHeapRegion::region_size_words(); +- } +- +- r->set_top(r->bottom() + used_words); +- +- _mutator_free_bitmap.clear_bit(r->index()); +- } +- +- // While individual regions report their true use, all humongous regions are +- // marked used in the free set. +- increase_used(ShenandoahHeapRegion::region_size_bytes() * num); +- +- if (remainder != 0) { +- // Record this remainder as allocation waste +- _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); +- } +- +- // Allocated at left/rightmost? Move the bounds appropriately. 
+- if (beg == _mutator_leftmost || end == _mutator_rightmost) { +- adjust_bounds(); +- } +- assert_bounds(); +- +- req.set_actual_size(words_size); +- return _heap->get_region(beg)->bottom(); +-} +- +-bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) { +- return r->is_empty() || r->is_trash(); +-} +- +-size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) { +- if (r->is_trash()) { +- // This would be recycled on allocation path +- return ShenandoahHeapRegion::region_size_bytes(); +- } else { +- return r->free(); +- } +-} +- +-bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) { +- return alloc_capacity(r) == 0; +-} +- +-void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) { +- if (r->is_trash()) { +- _heap->decrease_used(r->used()); +- r->recycle(); +- } +-} +- +-void ShenandoahFreeSet::recycle_trash() { +- // lock is not reentrable, check we don't have it +- shenandoah_assert_not_heaplocked(); +- +- for (size_t i = 0; i < _heap->num_regions(); i++) { +- ShenandoahHeapRegion* r = _heap->get_region(i); +- if (r->is_trash()) { +- ShenandoahHeapLocker locker(_heap->lock()); +- try_recycle_trashed(r); +- } +- SpinPause(); // allow allocators to take the lock +- } +-} +- +-void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { +- size_t idx = r->index(); +- +- assert(_mutator_free_bitmap.at(idx), "Should be in mutator view"); +- assert(is_empty_or_trash(r), "Should not be allocated"); +- +- _mutator_free_bitmap.clear_bit(idx); +- _collector_free_bitmap.set_bit(idx); +- _collector_leftmost = MIN2(idx, _collector_leftmost); +- _collector_rightmost = MAX2(idx, _collector_rightmost); +- +- _capacity -= alloc_capacity(r); +- +- if (touches_bounds(idx)) { +- adjust_bounds(); +- } +- assert_bounds(); +-} +- +-void ShenandoahFreeSet::clear() { +- shenandoah_assert_heaplocked(); +- clear_internal(); +-} +- +-void ShenandoahFreeSet::clear_internal() { +- _mutator_free_bitmap.clear(); +- 
_collector_free_bitmap.clear(); +- _mutator_leftmost = _max; +- _mutator_rightmost = 0; +- _collector_leftmost = _max; +- _collector_rightmost = 0; +- _capacity = 0; +- _used = 0; +-} +- +-void ShenandoahFreeSet::rebuild() { +- shenandoah_assert_heaplocked(); +- clear(); +- +- for (size_t idx = 0; idx < _heap->num_regions(); idx++) { +- ShenandoahHeapRegion* region = _heap->get_region(idx); +- if (region->is_alloc_allowed() || region->is_trash()) { +- assert(!region->is_cset(), "Shouldn't be adding those to the free set"); +- +- // Do not add regions that would surely fail allocation +- if (has_no_alloc_capacity(region)) continue; +- +- _capacity += alloc_capacity(region); +- assert(_used <= _capacity, "must not use more than we have"); +- +- assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already"); +- _mutator_free_bitmap.set_bit(idx); +- } +- } +- +- // Evac reserve: reserve trailing space for evacuations +- size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve; +- size_t reserved = 0; +- +- for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) { +- if (reserved >= to_reserve) break; +- +- ShenandoahHeapRegion* region = _heap->get_region(idx); +- if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) { +- _mutator_free_bitmap.clear_bit(idx); +- _collector_free_bitmap.set_bit(idx); +- size_t ac = alloc_capacity(region); +- _capacity -= ac; +- reserved += ac; +- } +- } +- +- recompute_bounds(); +- assert_bounds(); +-} +- +-void ShenandoahFreeSet::log_status() { +- shenandoah_assert_heaplocked(); +- +- if (ShenandoahLogInfo || PrintGCDetails) { +- ResourceMark rm; +- outputStream* ls = gclog_or_tty; +- +- { +- size_t last_idx = 0; +- size_t max = 0; +- size_t max_contig = 0; +- size_t empty_contig = 0; +- +- size_t total_used = 0; +- size_t total_free = 0; +- size_t total_free_ext = 0; +- +- for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) { +- if (is_mutator_free(idx)) { +- 
ShenandoahHeapRegion *r = _heap->get_region(idx); +- size_t free = alloc_capacity(r); +- +- max = MAX2(max, free); +- +- if (r->is_empty()) { +- total_free_ext += free; +- if (last_idx + 1 == idx) { +- empty_contig++; +- } else { +- empty_contig = 1; +- } +- } else { +- empty_contig = 0; +- } +- +- total_used += r->used(); +- total_free += free; +- +- max_contig = MAX2(max_contig, empty_contig); +- last_idx = idx; +- } +- } +- +- size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes(); +- +- ls->print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ", +- byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), +- byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), +- byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous) +- ); +- +- ls->print("Frag: "); +- size_t frag_ext; +- if (total_free_ext > 0) { +- frag_ext = 100 - (100 * max_humongous / total_free_ext); +- } else { +- frag_ext = 0; +- } +- ls->print(SIZE_FORMAT "%% external, ", frag_ext); +- +- size_t frag_int; +- if (mutator_count() > 0) { +- frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes()); +- } else { +- frag_int = 0; +- } +- ls->print(SIZE_FORMAT "%% internal; ", frag_int); +- } +- +- { +- size_t max = 0; +- size_t total_free = 0; +- +- for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) { +- if (is_collector_free(idx)) { +- ShenandoahHeapRegion *r = _heap->get_region(idx); +- size_t free = alloc_capacity(r); +- max = MAX2(max, free); +- total_free += free; +- } +- } +- +- ls->print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), +- byte_size_in_proper_unit(max), proper_unit_for_byte_size(max)); +- } +- } +-} +- +-HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) { +- 
shenandoah_assert_heaplocked(); +- assert_bounds(); +- +- if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) { +- switch (req.type()) { +- case ShenandoahAllocRequest::_alloc_shared: +- case ShenandoahAllocRequest::_alloc_shared_gc: +- in_new_region = true; +- return allocate_contiguous(req); +- case ShenandoahAllocRequest::_alloc_gclab: +- case ShenandoahAllocRequest::_alloc_tlab: +- in_new_region = false; +- assert(false, err_msg("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT, +- req.size(), ShenandoahHeapRegion::humongous_threshold_words())); +- return NULL; +- default: +- ShouldNotReachHere(); +- return NULL; +- } +- } else { +- return allocate_single(req, in_new_region); +- } +-} +- +-size_t ShenandoahFreeSet::unsafe_peek_free() const { +- // Deliberately not locked, this method is unsafe when free set is modified. +- +- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { +- if (index < _max && is_mutator_free(index)) { +- ShenandoahHeapRegion* r = _heap->get_region(index); +- if (r->free() >= MinTLABSize) { +- return r->free(); +- } +- } +- } +- +- // It appears that no regions left +- return 0; +-} +- +-void ShenandoahFreeSet::print_on(outputStream* out) const { +- out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count()); +- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { +- if (is_mutator_free(index)) { +- _heap->get_region(index)->print_on(out); +- } +- } +- out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count()); +- for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) { +- if (is_collector_free(index)) { +- _heap->get_region(index)->print_on(out); +- } +- } +-} +- +-/* +- * Internal fragmentation metric: describes how fragmented the heap regions are. 
+- * +- * It is derived as: +- * +- * sum(used[i]^2, i=0..k) +- * IF = 1 - ------------------------------ +- * C * sum(used[i], i=0..k) +- * +- * ...where k is the number of regions in computation, C is the region capacity, and +- * used[i] is the used space in the region. +- * +- * The non-linearity causes IF to be lower for the cases where the same total heap +- * used is densely packed. For example: +- * a) Heap is completely full => IF = 0 +- * b) Heap is half full, first 50% regions are completely full => IF = 0 +- * c) Heap is half full, each region is 50% full => IF = 1/2 +- * d) Heap is quarter full, first 50% regions are completely full => IF = 0 +- * e) Heap is quarter full, each region is 25% full => IF = 3/4 +- * f) Heap has one small object per each region => IF =~ 1 +- */ +-double ShenandoahFreeSet::internal_fragmentation() { +- double squared = 0; +- double linear = 0; +- int count = 0; +- +- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { +- if (is_mutator_free(index)) { +- ShenandoahHeapRegion* r = _heap->get_region(index); +- size_t used = r->used(); +- squared += used * used; +- linear += used; +- count++; +- } +- } +- +- if (count > 0) { +- double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear); +- return 1 - s; +- } else { +- return 0; +- } +-} +- +-/* +- * External fragmentation metric: describes how fragmented the heap is. 
+- * +- * It is derived as: +- * +- * EF = 1 - largest_contiguous_free / total_free +- * +- * For example: +- * a) Heap is completely empty => EF = 0 +- * b) Heap is completely full => EF = 0 +- * c) Heap is first-half full => EF = 1/2 +- * d) Heap is half full, full and empty regions interleave => EF =~ 1 +- */ +-double ShenandoahFreeSet::external_fragmentation() { +- size_t last_idx = 0; +- size_t max_contig = 0; +- size_t empty_contig = 0; +- +- size_t free = 0; +- +- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) { +- if (is_mutator_free(index)) { +- ShenandoahHeapRegion* r = _heap->get_region(index); +- if (r->is_empty()) { +- free += ShenandoahHeapRegion::region_size_bytes(); +- if (last_idx + 1 == index) { +- empty_contig++; +- } else { +- empty_contig = 1; +- } +- } else { +- empty_contig = 0; +- } +- +- max_contig = MAX2(max_contig, empty_contig); +- last_idx = index; +- } +- } +- +- if (free > 0) { +- return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free); +- } else { +- return 0; +- } +-} +- +-#ifdef ASSERT +-void ShenandoahFreeSet::assert_bounds() const { +- // Performance invariants. Failing these would not break the free set, but performance +- // would suffer. 
+- assert (_mutator_leftmost <= _max, err_msg("leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost, _max)); +- assert (_mutator_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max)); +- +- assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost), err_msg("leftmost region should be free: " SIZE_FORMAT, _mutator_leftmost)); +- assert (_mutator_rightmost == 0 || is_mutator_free(_mutator_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost)); +- +- size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0); +- size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1); +- assert (beg_off >= _mutator_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost)); +- assert (end_off == _max, err_msg("free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost)); +- +- assert (_collector_leftmost <= _max, err_msg("leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost, _max)); +- assert (_collector_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max)); +- +- assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost), err_msg("leftmost region should be free: " SIZE_FORMAT, _collector_leftmost)); +- assert (_collector_rightmost == 0 || is_collector_free(_collector_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _collector_rightmost)); +- +- beg_off = _collector_free_bitmap.get_next_one_offset(0); +- end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1); +- assert (beg_off >= _collector_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost)); +- assert (end_off == _max, err_msg("free regions past the rightmost: " SIZE_FORMAT ", bound 
" SIZE_FORMAT, end_off, _collector_rightmost)); +-} +-#endif +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahFreeSet.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,99 +0,0 @@ +- +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +- +-class ShenandoahFreeSet : public CHeapObj { +-private: +- ShenandoahHeap* const _heap; +- BitMap _mutator_free_bitmap; +- BitMap _collector_free_bitmap; +- size_t _max; +- +- // Left-most and right-most region indexes. 
There are no free regions outside +- // of [left-most; right-most] index intervals +- size_t _mutator_leftmost, _mutator_rightmost; +- size_t _collector_leftmost, _collector_rightmost; +- +- size_t _capacity; +- size_t _used; +- +- void assert_bounds() const NOT_DEBUG_RETURN; +- +- bool is_mutator_free(size_t idx) const; +- bool is_collector_free(size_t idx) const; +- +- HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region); +- HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region); +- HeapWord* allocate_contiguous(ShenandoahAllocRequest& req); +- +- void flip_to_gc(ShenandoahHeapRegion* r); +- +- void recompute_bounds(); +- void adjust_bounds(); +- bool touches_bounds(size_t num) const; +- +- void increase_used(size_t amount); +- void clear_internal(); +- +- size_t collector_count() const { return _collector_free_bitmap.count_one_bits(); } +- size_t mutator_count() const { return _mutator_free_bitmap.count_one_bits(); } +- +- void try_recycle_trashed(ShenandoahHeapRegion *r); +- +- bool is_empty_or_trash(ShenandoahHeapRegion *r); +- size_t alloc_capacity(ShenandoahHeapRegion *r); +- bool has_no_alloc_capacity(ShenandoahHeapRegion *r); +- +-public: +- ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions); +- +- void clear(); +- void rebuild(); +- +- void recycle_trash(); +- +- void log_status(); +- +- size_t capacity() const { return _capacity; } +- size_t used() const { return _used; } +- size_t available() const { +- assert(_used <= _capacity, "must use less than capacity"); +- return _capacity - _used; +- } +- +- HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region); +- size_t unsafe_peek_free() const; +- +- double internal_fragmentation(); +- double external_fragmentation(); +- +- void print_on(outputStream* out) const; +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHFREESET_HPP +diff -uNr 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,110 +0,0 @@ +-/* +- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shared/gcTrace.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp" +-#include "runtime/globals.hpp" +-#include "runtime/os.hpp" +-#include "runtime/safepoint.hpp" +-#include "runtime/thread.inline.hpp" +-#include "runtime/timer.hpp" +-#include "utilities/ostream.hpp" +-#include "utilities/ticks.hpp" +- +-ShenandoahGCTraceTime::ShenandoahGCTraceTime(const char* title, bool doit, GCTimer* timer, GCId gc_id, bool print_heap) : +- _title(title), _doit(doit), _timer(timer), _start_counter(), _heap(ShenandoahHeap::heap()), _print_heap(print_heap), _gc_id(gc_id) { +- if (_doit || _timer != NULL) { +- _start_counter.stamp(); +- } +- +- if (_timer != NULL) { +- _timer->register_gc_phase_start(title, _start_counter); +- } +- +- if (_doit) { +- _bytes_before = _heap->used(); +- +- gclog_or_tty->date_stamp(PrintGCDateStamps); +- gclog_or_tty->stamp(PrintGCTimeStamps); +- if (PrintGCID && !_gc_id.is_undefined()) { +- gclog_or_tty->print("#%u: ", _gc_id.id()); +- } +- gclog_or_tty->print("[%s", title); +- +- // Detailed view prints the "start" message +- if (PrintGCDetails) { +- gclog_or_tty->print_cr(", start]"); +- } +- +- gclog_or_tty->flush(); +- gclog_or_tty->inc(); +- } +-} +- +-ShenandoahGCTraceTime::~ShenandoahGCTraceTime() { +- Ticks stop_counter; +- +- if (_doit || _timer != NULL) { +- stop_counter.stamp(); +- } +- +- if (_timer != NULL) { +- _timer->register_gc_phase_end(stop_counter); +- } +- +- if (_doit) { +- const Tickspan duration = stop_counter - _start_counter; +- double secs = duration.seconds(); +- +- size_t bytes_after = _heap->used(); +- size_t capacity = _heap->capacity(); +- +- // Detailed view has to restart the logging here, because "start" was printed +- if (PrintGCDetails) { +- gclog_or_tty->date_stamp(PrintGCDateStamps); +- 
gclog_or_tty->stamp(PrintGCTimeStamps); +- if (PrintGCID && !_gc_id.is_undefined()) { +- gclog_or_tty->print("#%u: ", _gc_id.id()); +- } +- gclog_or_tty->print("[%s", _title); +- } +- +- if (_print_heap) { +- gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)", +- byte_size_in_proper_unit(_bytes_before), +- proper_unit_for_byte_size(_bytes_before), +- byte_size_in_proper_unit(bytes_after), +- proper_unit_for_byte_size(bytes_after), +- byte_size_in_proper_unit(capacity), +- proper_unit_for_byte_size(capacity)); +- } +- +- gclog_or_tty->dec(); +- gclog_or_tty->print_cr(", %.3f ms]", secs * 1000); +- gclog_or_tty->flush(); +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahGCTraceTime.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,51 +0,0 @@ +-/* +- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHGCTRACETIME_HPP +-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHGCTRACETIME_HPP +- +-#include "gc_implementation/shared/gcTrace.hpp" +-#include "prims/jni_md.h" +-#include "utilities/ticks.hpp" +- +-class GCTimer; +- +-class ShenandoahGCTraceTime { +- ShenandoahHeap* _heap; +- const char* _title; +- bool _doit; +- bool _print_heap; +- GCTimer* _timer; +- Ticks _start_counter; +- size_t _bytes_before; +- GCId _gc_id; +- +- public: +- ShenandoahGCTraceTime(const char* title, bool doit, GCTimer* timer, GCId gc_id, bool print_heap = false); +- ~ShenandoahGCTraceTime(); +-}; +- +-typedef ShenandoahGCTraceTime GCTraceTime; +- +-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHGCTRACETIME_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,35 +0,0 @@ +-/* +- * Copyright (c) 2016, 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoah_globals.hpp" +- +-SHENANDOAH_FLAGS(MATERIALIZE_DEVELOPER_FLAG, \ +- MATERIALIZE_PD_DEVELOPER_FLAG, \ +- MATERIALIZE_PRODUCT_FLAG, \ +- MATERIALIZE_PD_PRODUCT_FLAG, \ +- MATERIALIZE_DIAGNOSTIC_FLAG, \ +- MATERIALIZE_EXPERIMENTAL_FLAG, \ +- MATERIALIZE_NOTPRODUCT_FLAG, \ +- MATERIALIZE_MANAGEABLE_FLAG, \ +- MATERIALIZE_PRODUCT_RW_FLAG) +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_globals.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,365 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP +- +-#include "runtime/globals.hpp" +- +-#define SHENANDOAH_FLAGS(develop, \ +- develop_pd, \ +- product, \ +- product_pd, \ +- diagnostic, \ +- experimental, \ +- notproduct, \ +- manageable, \ +- product_rw) \ +- \ +- experimental(uintx, ShenandoahRegionSize, 0, \ +- "Static heap region size. Set zero to enable automatic sizing.") \ +- \ +- experimental(uintx, ShenandoahTargetNumRegions, 2048, \ +- "With automatic region sizing, this is the approximate number " \ +- "of regions that would be used, within min/max region size " \ +- "limits.") \ +- \ +- experimental(uintx, ShenandoahMinRegionSize, 256 * K, \ +- "With automatic region sizing, the regions would be at least " \ +- "this large.") \ +- \ +- experimental(uintx, ShenandoahMaxRegionSize, 32 * M, \ +- "With automatic region sizing, the regions would be at most " \ +- "this large.") \ +- \ +- experimental(intx, ShenandoahHumongousThreshold, 100, \ +- "Humongous objects are allocated in separate regions. " \ +- "This setting defines how large the object should be to be " \ +- "deemed humongous. Value is in percents of heap region size. 
" \ +- "This also caps the maximum TLAB size.") \ +- \ +- product(ccstr, ShenandoahGCMode, "satb", \ +- "GC mode to use. Among other things, this defines which " \ +- "barriers are in in use. Possible values are:" \ +- " satb - snapshot-at-the-beginning concurrent GC (three pass mark-evac-update);" \ +- " iu - incremental-update concurrent GC (three pass mark-evac-update);" \ +- " passive - stop the world GC only (either degenerated or full)") \ +- \ +- product(ccstr, ShenandoahGCHeuristics, "adaptive", \ +- "GC heuristics to use. This fine-tunes the GC mode selected, " \ +- "by choosing when to start the GC, how much to process on each " \ +- "cycle, and what other features to automatically enable. " \ +- "Possible values are:" \ +- " adaptive - adapt to maintain the given amount of free heap " \ +- "at all times, even during the GC cycle;" \ +- " static - trigger GC when free heap falls below the threshold;" \ +- " aggressive - run GC continuously, try to evacuate everything;" \ +- " compact - run GC more frequently and with deeper targets to " \ +- "free up more memory.") \ +- \ +- experimental(uintx, ShenandoahRefProcFrequency, 5, \ +- "Process process weak (soft, phantom, finalizers) references " \ +- "every Nth cycle. Normally affects concurrent GC cycles only, " \ +- "as degenerated and full GCs would try to process references " \ +- "regardless. Set to zero to disable reference processing " \ +- "completely.") \ +- \ +- experimental(uintx, ShenandoahUnloadClassesFrequency, 100, \ +- "Unload the classes every Nth cycle. Normally affects concurrent "\ +- "GC cycles, as degenerated and full GCs would try to unload " \ +- "classes regardless. Set to zero to disable class unloading.") \ +- \ +- experimental(uintx, ShenandoahGarbageThreshold, 25, \ +- "How much garbage a region has to contain before it would be " \ +- "taken for collection. This a guideline only, as GC heuristics " \ +- "may select the region for collection even if it has little " \ +- "garbage. 
This also affects how much internal fragmentation the " \ +- "collector accepts. In percents of heap region size.") \ +- \ +- experimental(uintx, ShenandoahInitFreeThreshold, 70, \ +- "How much heap should be free before some heuristics trigger the "\ +- "initial (learning) cycles. Affects cycle frequency on startup " \ +- "and after drastic state changes, e.g. after degenerated/full " \ +- "GC cycles. In percents of (soft) max heap size.") \ +- \ +- experimental(uintx, ShenandoahMinFreeThreshold, 10, \ +- "How much heap should be free before most heuristics trigger the "\ +- "collection, even without other triggers. Provides the safety " \ +- "margin for many heuristics. In percents of (soft) max heap size.")\ +- \ +- experimental(uintx, ShenandoahAllocationThreshold, 0, \ +- "How many new allocations should happen since the last GC cycle " \ +- "before some heuristics trigger the collection. In percents of " \ +- "(soft) max heap size. Set to zero to effectively disable.") \ +- \ +- experimental(uintx, ShenandoahAllocSpikeFactor, 5, \ +- "How much of heap should some heuristics reserve for absorbing " \ +- "the allocation spikes. Larger value wastes more memory in " \ +- "non-emergency cases, but provides more safety in emergency " \ +- "cases. In percents of (soft) max heap size.") \ +- \ +- experimental(uintx, ShenandoahLearningSteps, 5, \ +- "The number of cycles some heuristics take to collect in order " \ +- "to learn application and GC performance.") \ +- \ +- experimental(uintx, ShenandoahImmediateThreshold, 90, \ +- "The cycle may shortcut when enough garbage can be reclaimed " \ +- "from the immediate garbage (completely garbage regions). " \ +- "In percents of total garbage found. Setting this threshold " \ +- "to 100 effectively disables the shortcut.") \ +- \ +- experimental(uintx, ShenandoahGuaranteedGCInterval, 5*60*1000, \ +- "Many heuristics would guarantee a concurrent GC cycle at " \ +- "at least with this interval. 
This is useful when large idle" \ +- " intervals are present, where GC can run without stealing " \ +- "time from active application. Time is in milliseconds." \ +- "Setting this to 0 disables the feature.") \ +- \ +- experimental(bool, ShenandoahAlwaysClearSoftRefs, false, \ +- "Unconditionally clear soft references, instead of using any " \ +- "other cleanup policy. This minimizes footprint at expense of" \ +- "more soft reference churn in applications.") \ +- \ +- experimental(bool, ShenandoahUncommit, true, \ +- "Allow to uncommit memory under unused regions and metadata. " \ +- "This optimizes footprint at expense of allocation latency in " \ +- "regions that require committing back. Uncommits would be " \ +- "disabled by some heuristics, or with static heap size.") \ +- \ +- experimental(uintx, ShenandoahUncommitDelay, 5*60*1000, \ +- "Uncommit memory for regions that were not used for more than " \ +- "this time. First use after that would incur allocation stalls. " \ +- "Actively used regions would never be uncommitted, because they " \ +- "do not become unused longer than this delay. Time is in " \ +- "milliseconds. Setting this delay to 0 effectively uncommits " \ +- "regions almost immediately after they become unused.") \ +- \ +- experimental(bool, ShenandoahRegionSampling, false, \ +- "Provide heap region sampling data via jvmstat.") \ +- \ +- experimental(int, ShenandoahRegionSamplingRate, 40, \ +- "Sampling rate for heap region sampling. In milliseconds between "\ +- "the samples. Higher values provide more fidelity, at expense " \ +- "of more sampling overhead.") \ +- \ +- experimental(uintx, ShenandoahControlIntervalMin, 1, \ +- "The minimum sleep interval for the control loop that drives " \ +- "the cycles. Lower values would increase GC responsiveness " \ +- "to changing heap conditions, at the expense of higher perf " \ +- "overhead. 
Time is in milliseconds.") \ +- \ +- experimental(uintx, ShenandoahControlIntervalMax, 10, \ +- "The maximum sleep interval for control loop that drives " \ +- "the cycles. Lower values would increase GC responsiveness " \ +- "to changing heap conditions, at the expense of higher perf " \ +- "overhead. Time is in milliseconds.") \ +- \ +- experimental(uintx, ShenandoahControlIntervalAdjustPeriod, 1000, \ +- "The time period for one step in control loop interval " \ +- "adjustment. Lower values make adjustments faster, at the " \ +- "expense of higher perf overhead. Time is in milliseconds.") \ +- \ +- diagnostic(bool, ShenandoahVerify, false, \ +- "Enable internal verification. This would catch many GC bugs, " \ +- "but it would also stall the collector during the verification, " \ +- "which prolongs the pauses and might hide other bugs.") \ +- \ +- diagnostic(intx, ShenandoahVerifyLevel, 4, \ +- "Verification level, higher levels check more, taking more time. "\ +- "Accepted values are:" \ +- " 0 = basic heap checks; " \ +- " 1 = previous level, plus basic region checks; " \ +- " 2 = previous level, plus all roots; " \ +- " 3 = previous level, plus all reachable objects; " \ +- " 4 = previous level, plus all marked objects") \ +- \ +- diagnostic(bool, ShenandoahElasticTLAB, true, \ +- "Use Elastic TLABs with Shenandoah") \ +- \ +- experimental(uintx, ShenandoahEvacReserve, 5, \ +- "How much of heap to reserve for evacuations. Larger values make "\ +- "GC evacuate more live objects on every cycle, while leaving " \ +- "less headroom for application to allocate in. In percents of " \ +- "total heap size.") \ +- \ +- experimental(double, ShenandoahEvacWaste, 1.2, \ +- "How much waste evacuations produce within the reserved space. 
" \ +- "Larger values make evacuations more resilient against " \ +- "evacuation conflicts, at expense of evacuating less on each " \ +- "GC cycle.") \ +- \ +- experimental(bool, ShenandoahEvacReserveOverflow, true, \ +- "Allow evacuations to overflow the reserved space. Enabling it " \ +- "will make evacuations more resilient when evacuation " \ +- "reserve/waste is incorrect, at the risk that application " \ +- "runs out of memory too early.") \ +- \ +- experimental(bool, ShenandoahPacing, true, \ +- "Pace application allocations to give GC chance to start " \ +- "and complete before allocation failure is reached.") \ +- \ +- experimental(uintx, ShenandoahPacingMaxDelay, 10, \ +- "Max delay for pacing application allocations. Larger values " \ +- "provide more resilience against out of memory, at expense at " \ +- "hiding the GC latencies in the allocation path. Time is in " \ +- "milliseconds. Setting it to arbitrarily large value makes " \ +- "GC effectively stall the threads indefinitely instead of going " \ +- "to degenerated or Full GC.") \ +- \ +- experimental(uintx, ShenandoahPacingIdleSlack, 2, \ +- "How much of heap counted as non-taxable allocations during idle "\ +- "phases. Larger value makes the pacing milder when collector is " \ +- "idle, requiring less rendezvous with control thread. Lower " \ +- "value makes the pacing control less responsive to out-of-cycle " \ +- "allocs. In percent of total heap size.") \ +- \ +- experimental(uintx, ShenandoahPacingCycleSlack, 10, \ +- "How much of free space to take as non-taxable allocations " \ +- "the GC cycle. Larger value makes the pacing milder at the " \ +- "beginning of the GC cycle. Lower value makes the pacing less " \ +- "uniform during the cycle. In percent of free space.") \ +- \ +- experimental(double, ShenandoahPacingSurcharge, 1.1, \ +- "Additional pacing tax surcharge to help unclutter the heap. " \ +- "Larger values makes the pacing more aggressive. 
Lower values " \ +- "risk GC cycles finish with less memory than were available at " \ +- "the beginning of it.") \ +- \ +- experimental(uintx, ShenandoahCriticalFreeThreshold, 1, \ +- "How much of the heap needs to be free after recovery cycles, " \ +- "either Degenerated or Full GC to be claimed successful. If this "\ +- "much space is not available, next recovery step would be " \ +- "triggered.") \ +- \ +- diagnostic(bool, ShenandoahDegeneratedGC, true, \ +- "Enable Degenerated GC as the graceful degradation step. " \ +- "Disabling this option leads to degradation to Full GC instead. " \ +- "When running in passive mode, this can be toggled to measure " \ +- "either Degenerated GC or Full GC costs.") \ +- \ +- experimental(uintx, ShenandoahFullGCThreshold, 3, \ +- "How many back-to-back Degenerated GCs should happen before " \ +- "going to a Full GC.") \ +- \ +- experimental(bool, ShenandoahImplicitGCInvokesConcurrent, false, \ +- "Should internally-caused GC requests invoke concurrent cycles, " \ +- "should they do the stop-the-world (Degenerated / Full GC)? " \ +- "Many heuristics automatically enable this. This option is " \ +- "similar to global ExplicitGCInvokesConcurrent.") \ +- \ +- diagnostic(bool, ShenandoahHumongousMoves, true, \ +- "Allow moving humongous regions. This makes GC more resistant " \ +- "to external fragmentation that may otherwise fail other " \ +- "humongous allocations, at the expense of higher GC copying " \ +- "costs. 
Currently affects stop-the-world (Full) cycle only.") \ +- \ +- diagnostic(bool, ShenandoahOOMDuringEvacALot, false, \ +- "Testing: simulate OOM during evacuation.") \ +- \ +- diagnostic(bool, ShenandoahAllocFailureALot, false, \ +- "Testing: make lots of artificial allocation failures.") \ +- \ +- experimental(intx, ShenandoahMarkScanPrefetch, 32, \ +- "How many objects to prefetch ahead when traversing mark bitmaps."\ +- "Set to 0 to disable prefetching.") \ +- \ +- experimental(uintx, ShenandoahMarkLoopStride, 1000, \ +- "How many items to process during one marking iteration before " \ +- "checking for cancellation, yielding, etc. Larger values improve "\ +- "marking performance at expense of responsiveness.") \ +- \ +- experimental(uintx, ShenandoahParallelRegionStride, 1024, \ +- "How many regions to process at once during parallel region " \ +- "iteration. Affects heaps with lots of regions.") \ +- \ +- experimental(intx, ShenandoahSATBBufferSize, 1 * K, \ +- "Number of entries in an SATB log buffer.") \ +- \ +- experimental(uintx, ShenandoahSATBBufferFlushInterval, 100, \ +- "Forcefully flush non-empty SATB buffers at this interval. 
" \ +- "Time is in milliseconds.") \ +- \ +- diagnostic(bool, ShenandoahPreclean, true, \ +- "Do concurrent preclean phase before final mark: process " \ +- "definitely alive references to avoid dealing with them during " \ +- "pause.") \ +- \ +- diagnostic(bool, ShenandoahSATBBarrier, true, \ +- "Turn on/off SATB barriers in Shenandoah") \ +- \ +- diagnostic(bool, ShenandoahStoreValEnqueueBarrier, false, \ +- "Turn on/off enqueuing of oops for storeval barriers") \ +- \ +- diagnostic(bool, ShenandoahCASBarrier, true, \ +- "Turn on/off CAS barriers in Shenandoah") \ +- \ +- diagnostic(bool, ShenandoahCloneBarrier, true, \ +- "Turn on/off clone barriers in Shenandoah") \ +- \ +- diagnostic(bool, ShenandoahLoadRefBarrier, true, \ +- "Turn on/off load-reference barriers in Shenandoah") \ +- \ +- diagnostic(uintx, ShenandoahCodeRootsStyle, 2, \ +- "Use this style to scan the code cache roots:" \ +- " 0 - sequential iterator;" \ +- " 1 - parallel iterator;" \ +- " 2 - parallel iterator with cset filters;") \ +- \ +- diagnostic(bool, ShenandoahOptimizeStaticFinals, true, \ +- "Optimize barriers on static final fields. " \ +- "Turn it off for maximum compatibility with reflection or JNI " \ +- "code that manipulates final fields.") \ +- \ +- develop(bool, ShenandoahVerifyOptoBarriers, false, \ +- "Verify no missing barriers in C2.") \ +- \ +- product(bool, ShenandoahLogTrace, false, \ +- "Turns on logging in Shenandoah at trace level. ") \ +- \ +- product(bool, ShenandoahLogDebug, false, \ +- "Turns on logging in Shenandoah at debug level. ") \ +- \ +- product(bool, ShenandoahLogInfo, false, \ +- "Turns on logging in Shenandoah at info level. ") \ +- \ +- product(bool, ShenandoahLogWarning, false, \ +- "Turns on logging in Shenandoah at warning level. ") \ +- \ +- diagnostic(bool, ShenandoahSelfFixing, true, \ +- "Fix references with load reference barrier. 
Disabling this " \ +- "might degrade performance.") \ +- \ +- manageable(uintx, ShenandoahSoftMaxHeapSize, 0, \ +- "Soft limit for maximum heap size (in bytes)") \ +- +-SHENANDOAH_FLAGS(DECLARE_DEVELOPER_FLAG, \ +- DECLARE_PD_DEVELOPER_FLAG, \ +- DECLARE_PRODUCT_FLAG, \ +- DECLARE_PD_PRODUCT_FLAG, \ +- DECLARE_DIAGNOSTIC_FLAG, \ +- DECLARE_EXPERIMENTAL_FLAG, \ +- DECLARE_NOTPRODUCT_FLAG, \ +- DECLARE_MANAGEABLE_FLAG, \ +- DECLARE_PRODUCT_RW_FLAG) +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,2738 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "memory/allocation.hpp" +- +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahControlThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp" +-#include "gc_implementation/shenandoah/shenandoahMetrics.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoahVerifier.hpp" +-#include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp" +-#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp" +-#include 
"gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "gc_implementation/shenandoah/mode/shenandoahIUMode.hpp" +-#include "gc_implementation/shenandoah/mode/shenandoahPassiveMode.hpp" +-#include "gc_implementation/shenandoah/mode/shenandoahSATBMode.hpp" +-#if INCLUDE_JFR +-#include "gc_implementation/shenandoah/shenandoahJfrSupport.hpp" +-#endif +- +-#include "memory/metaspace.hpp" +-#include "runtime/vmThread.hpp" +-#include "services/mallocTracker.hpp" +- +-ShenandoahHeap* ShenandoahHeap::_heap = NULL; +- +-class ShenandoahPretouchHeapTask : public AbstractGangTask { +-private: +- ShenandoahRegionIterator _regions; +- const size_t _page_size; +-public: +- ShenandoahPretouchHeapTask(size_t page_size) : +- AbstractGangTask("Shenandoah Pretouch Heap"), +- _page_size(page_size) {} +- +- virtual void work(uint worker_id) { +- ShenandoahHeapRegion* r = _regions.next(); +- while (r != NULL) { +- if (r->is_committed()) { +- os::pretouch_memory((char *) r->bottom(), (char *) r->end()); +- } +- r = _regions.next(); +- } +- } +-}; +- +-class ShenandoahPretouchBitmapTask : public AbstractGangTask { +-private: +- ShenandoahRegionIterator _regions; +- char* _bitmap_base; +- const size_t _bitmap_size; +- const size_t _page_size; +-public: +- ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) : +- AbstractGangTask("Shenandoah Pretouch Bitmap"), +- _bitmap_base(bitmap_base), +- _bitmap_size(bitmap_size), +- _page_size(page_size) {} +- +- virtual void work(uint worker_id) { +- ShenandoahHeapRegion* r = _regions.next(); +- while (r != NULL) { +- size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); +- size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); +- assert (end <= _bitmap_size, 
err_msg("end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size)); +- +- if (r->is_committed()) { +- os::pretouch_memory(_bitmap_base + start, _bitmap_base + end); +- } +- +- r = _regions.next(); +- } +- } +-}; +- +-jint ShenandoahHeap::initialize() { +- CollectedHeap::pre_initialize(); +- +- // +- // Figure out heap sizing +- // +- +- size_t init_byte_size = collector_policy()->initial_heap_byte_size(); +- size_t min_byte_size = collector_policy()->min_heap_byte_size(); +- size_t max_byte_size = collector_policy()->max_heap_byte_size(); +- size_t heap_alignment = collector_policy()->heap_alignment(); +- +- size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes(); +- +- Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap"); +- Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap"); +- +- _num_regions = ShenandoahHeapRegion::region_count(); +- assert(_num_regions == (max_byte_size / reg_size_bytes), +- err_msg("Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT, +- _num_regions, max_byte_size, reg_size_bytes)); +- // Now we know the number of regions, initialize the heuristics. +- initialize_heuristics(); +- +- size_t num_committed_regions = init_byte_size / reg_size_bytes; +- num_committed_regions = MIN2(num_committed_regions, _num_regions); +- assert(num_committed_regions <= _num_regions, "sanity"); +- _initial_size = num_committed_regions * reg_size_bytes; +- +- size_t num_min_regions = min_byte_size / reg_size_bytes; +- num_min_regions = MIN2(num_min_regions, _num_regions); +- assert(num_min_regions <= _num_regions, "sanity"); +- _minimum_size = num_min_regions * reg_size_bytes; +- +- // Default to max heap size. +- _soft_max_size = _num_regions * reg_size_bytes; +- +- _committed = _initial_size; +- +- size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); +- size_t bitmap_page_size = UseLargePages ? 
(size_t)os::large_page_size() : (size_t)os::vm_page_size(); +- size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size(); +- +- // +- // Reserve and commit memory for heap +- // +- +- ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment); +- _reserved.set_word_size(0); +- _reserved.set_start((HeapWord*)heap_rs.base()); +- _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); +- _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize); +- _heap_region_special = heap_rs.special(); +- +- assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0, +- err_msg("Misaligned heap: " PTR_FORMAT, p2i(base()))); +- +-#if SHENANDOAH_OPTIMIZED_MARKTASK +- // The optimized ObjArrayChunkedTask takes some bits away from the full object bits. +- // Fail if we ever attempt to address more than we can. +- if ((uintptr_t)(heap_rs.base() + heap_rs.size()) >= ShenandoahMarkTask::max_addressable()) { +- FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n" +- "but max object address is " PTR_FORMAT ". 
Try to reduce heap size, or try other \n" +- "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).", +- p2i(heap_rs.base()), p2i(heap_rs.base() + heap_rs.size()), ShenandoahMarkTask::max_addressable()); +- vm_exit_during_initialization("Fatal Error", buf); +- } +-#endif +- +- ReservedSpace sh_rs = heap_rs.first_part(max_byte_size); +- if (!_heap_region_special) { +- os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false, +- "Cannot commit heap memory"); +- } +- +- // +- // Reserve and commit memory for bitmap(s) +- // +- +- _bitmap_size = MarkBitMap::compute_size(heap_rs.size()); +- _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size); +- +- size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor(); +- +- guarantee(bitmap_bytes_per_region != 0, +- err_msg("Bitmap bytes per region should not be zero")); +- guarantee(is_power_of_2(bitmap_bytes_per_region), +- err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region)); +- +- if (bitmap_page_size > bitmap_bytes_per_region) { +- _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; +- _bitmap_bytes_per_slice = bitmap_page_size; +- } else { +- _bitmap_regions_per_slice = 1; +- _bitmap_bytes_per_slice = bitmap_bytes_per_region; +- } +- +- guarantee(_bitmap_regions_per_slice >= 1, +- err_msg("Should have at least one region per slice: " SIZE_FORMAT, +- _bitmap_regions_per_slice)); +- +- guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, +- err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, +- _bitmap_bytes_per_slice, bitmap_page_size)); +- +- ReservedSpace bitmap(_bitmap_size, bitmap_page_size); +- MemTracker::record_virtual_memory_type(bitmap.base(), mtGC); +- _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize); +- _bitmap_region_special = bitmap.special(); +- +- size_t 
bitmap_init_commit = _bitmap_bytes_per_slice * +- align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice; +- bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit); +- if (!_bitmap_region_special) { +- os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false, +- "Cannot commit bitmap memory"); +- } +- +- _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions); +- +- if (ShenandoahVerify) { +- ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size); +- if (!verify_bitmap.special()) { +- os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false, +- "Cannot commit verification bitmap memory"); +- } +- MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC); +- MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize); +- _verification_bit_map.initialize(_heap_region, verify_bitmap_region); +- _verifier = new ShenandoahVerifier(this, &_verification_bit_map); +- } +- +- // Reserve aux bitmap for use in object_iterate(). We don't commit it here. 
+- ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size); +- MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC); +- _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize); +- _aux_bitmap_region_special = aux_bitmap.special(); +- _aux_bit_map.initialize(_heap_region, _aux_bitmap_region); +- +- // +- // Create regions and region sets +- // +- size_t region_align = align_size_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE); +- size_t region_storage_size = align_size_up(region_align * _num_regions, region_page_size); +- region_storage_size = align_size_up(region_storage_size, os::vm_allocation_granularity()); +- +- ReservedSpace region_storage(region_storage_size, region_page_size); +- MemTracker::record_virtual_memory_type(region_storage.base(), mtGC); +- if (!region_storage.special()) { +- os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false, +- "Cannot commit region memory"); +- } +- +- // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks. +- // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there. +- // If not successful, bite a bullet and allocate at whatever address. 
+- { +- size_t cset_align = MAX2(os::vm_page_size(), os::vm_allocation_granularity()); +- size_t cset_size = align_size_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align); +- +- uintptr_t min = ShenandoahUtils::round_up_power_of_2(cset_align); +- uintptr_t max = (1u << 30u); +- +- for (uintptr_t addr = min; addr <= max; addr <<= 1u) { +- char* req_addr = (char*)addr; +- assert(is_ptr_aligned(req_addr, cset_align), "Should be aligned"); +- ReservedSpace cset_rs(cset_size, cset_align, false, req_addr); +- if (cset_rs.is_reserved()) { +- assert(cset_rs.base() == req_addr, err_msg("Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr)); +- _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base()); +- break; +- } +- } +- +- if (_collection_set == NULL) { +- ReservedSpace cset_rs(cset_size, cset_align, false); +- _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base()); +- } +- } +- +- _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC); +- _free_set = new ShenandoahFreeSet(this, _num_regions); +- +- { +- ShenandoahHeapLocker locker(lock()); +- +- for (size_t i = 0; i < _num_regions; i++) { +- HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i; +- bool is_committed = i < num_committed_regions; +- void* loc = region_storage.base() + i * region_align; +- +- ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed); +- assert(is_ptr_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity"); +- +- _marking_context->initialize_top_at_mark_start(r); +- _regions[i] = r; +- assert(!collection_set()->is_in(i), "New region should not be in collection set"); +- } +- +- // Initialize to complete +- _marking_context->mark_complete(); +- +- _free_set->rebuild(); +- } +- +- if (AlwaysPreTouch) { +- // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads, +- // 
before initialize() below zeroes it with initializing thread. For any given region, +- // we touch the region and the corresponding bitmaps from the same thread. +- ShenandoahPushWorkerScope scope(workers(), _max_workers, false); +- +- _pretouch_heap_page_size = heap_page_size; +- _pretouch_bitmap_page_size = bitmap_page_size; +- +-#ifdef LINUX +- // UseTransparentHugePages would madvise that backing memory can be coalesced into huge +- // pages. But, the kernel needs to know that every small page is used, in order to coalesce +- // them into huge one. Therefore, we need to pretouch with smaller pages. +- if (UseTransparentHugePages) { +- _pretouch_heap_page_size = (size_t)os::vm_page_size(); +- _pretouch_bitmap_page_size = (size_t)os::vm_page_size(); +- } +-#endif +- +- // OS memory managers may want to coalesce back-to-back pages. Make their jobs +- // simpler by pre-touching continuous spaces (heap and bitmap) separately. +- +- ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size); +- _workers->run_task(&bcl); +- +- ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size); +- _workers->run_task(&hcl); +- } +- +- // +- // Initialize the rest of GC subsystems +- // +- +- set_barrier_set(new ShenandoahBarrierSet(this)); +- +- _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC); +- for (uint worker = 0; worker < _max_workers; worker++) { +- _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC); +- Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData)); +- } +- +- // The call below uses stuff (the SATB* things) that are in G1, but probably +- // belong into a shared location. 
+- JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon, +- SATB_Q_FL_lock, +- 20 /*G1SATBProcessCompletedThreshold */, +- Shared_SATB_Q_lock); +- +- _monitoring_support = new ShenandoahMonitoringSupport(this); +- _phase_timings = new ShenandoahPhaseTimings(max_workers()); +- ShenandoahStringDedup::initialize(); +- ShenandoahCodeRoots::initialize(); +- +- if (ShenandoahPacing) { +- _pacer = new ShenandoahPacer(this); +- _pacer->setup_for_idle(); +- } else { +- _pacer = NULL; +- } +- +- _control_thread = new ShenandoahControlThread(); +- +- log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max", +- byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size), +- byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size), +- byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()) +- ); +- +- return JNI_OK; +-} +- +-#ifdef _MSC_VER +-#pragma warning( push ) +-#pragma warning( disable:4355 ) // 'this' : used in base member initializer list +-#endif +- +-void ShenandoahHeap::initialize_heuristics() { +- if (ShenandoahGCMode != NULL) { +- if (strcmp(ShenandoahGCMode, "satb") == 0) { +- _gc_mode = new ShenandoahSATBMode(); +- } else if (strcmp(ShenandoahGCMode, "iu") == 0) { +- _gc_mode = new ShenandoahIUMode(); +- } else if (strcmp(ShenandoahGCMode, "passive") == 0) { +- _gc_mode = new ShenandoahPassiveMode(); +- } else { +- vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option"); +- } +- } else { +- ShouldNotReachHere(); +- } +- _gc_mode->initialize_flags(); +- if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) { +- vm_exit_during_initialization( +- err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", +- _gc_mode->name())); +- } +- if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) { +- vm_exit_during_initialization( +- err_msg("GC mode \"%s\" is 
experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", +- _gc_mode->name())); +- } +- log_info(gc, init)("Shenandoah GC mode: %s", +- _gc_mode->name()); +- +- _heuristics = _gc_mode->initialize_heuristics(); +- +- if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) { +- vm_exit_during_initialization( +- err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.", +- _heuristics->name())); +- } +- if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) { +- vm_exit_during_initialization( +- err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.", +- _heuristics->name())); +- } +- log_info(gc, init)("Shenandoah heuristics: %s", +- _heuristics->name()); +-} +- +-ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : +- SharedHeap(policy), +- _shenandoah_policy(policy), +- _heap_region_special(false), +- _regions(NULL), +- _free_set(NULL), +- _collection_set(NULL), +- _update_refs_iterator(this), +- _bytes_allocated_since_gc_start(0), +- _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)), +- _ref_processor(NULL), +- _marking_context(NULL), +- _bitmap_size(0), +- _bitmap_regions_per_slice(0), +- _bitmap_bytes_per_slice(0), +- _bitmap_region_special(false), +- _aux_bitmap_region_special(false), +- _liveness_cache(NULL), +- _aux_bit_map(), +- _verifier(NULL), +- _pacer(NULL), +- _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()), +- _phase_timings(NULL) +-{ +- _heap = this; +- +- log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads); +- +- _scm = new ShenandoahConcurrentMark(); +- +- _full_gc = new ShenandoahMarkCompact(); +- _used = 0; +- +- _max_workers = MAX2(_max_workers, 1U); +- +- // SharedHeap did not initialize this for us, and we want our own workgang anyway. 
+- assert(SharedHeap::_workers == NULL && _workers == NULL, "Should not be initialized yet"); +- _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers, +- /* are_GC_task_threads */true, +- /* are_ConcurrentGC_threads */false); +- if (_workers == NULL) { +- vm_exit_during_initialization("Failed necessary allocation."); +- } else { +- _workers->initialize_workers(); +- } +- assert(SharedHeap::_workers == _workers, "Sanity: initialized the correct field"); +-} +- +-#ifdef _MSC_VER +-#pragma warning( pop ) +-#endif +- +-class ShenandoahResetBitmapTask : public AbstractGangTask { +-private: +- ShenandoahRegionIterator _regions; +- +-public: +- ShenandoahResetBitmapTask() : +- AbstractGangTask("Parallel Reset Bitmap Task") {} +- +- void work(uint worker_id) { +- ShenandoahHeapRegion* region = _regions.next(); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahMarkingContext* const ctx = heap->marking_context(); +- while (region != NULL) { +- if (heap->is_bitmap_slice_committed(region)) { +- ctx->clear_bitmap(region); +- } +- region = _regions.next(); +- } +- } +-}; +- +-void ShenandoahHeap::reset_mark_bitmap() { +- assert_gc_workers(_workers->active_workers()); +- mark_incomplete_marking_context(); +- +- ShenandoahResetBitmapTask task; +- _workers->run_task(&task); +-} +- +-void ShenandoahHeap::print_on(outputStream* st) const { +- st->print_cr("Shenandoah Heap"); +- st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used", +- byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()), +- byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()), +- byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()), +- byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); +- st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions", +- num_regions(), +- 
byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()), +- proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes())); +- +- st->print("Status: "); +- if (has_forwarded_objects()) st->print("has forwarded objects, "); +- if (is_concurrent_mark_in_progress()) st->print("marking, "); +- if (is_evacuation_in_progress()) st->print("evacuating, "); +- if (is_update_refs_in_progress()) st->print("updating refs, "); +- if (is_degenerated_gc_in_progress()) st->print("degenerated gc, "); +- if (is_full_gc_in_progress()) st->print("full gc, "); +- if (is_full_gc_move_in_progress()) st->print("full gc move, "); +- +- if (cancelled_gc()) { +- st->print("cancelled"); +- } else { +- st->print("not cancelled"); +- } +- st->cr(); +- +- st->print_cr("Reserved region:"); +- st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ", +- p2i(reserved_region().start()), +- p2i(reserved_region().end())); +- +- ShenandoahCollectionSet* cset = collection_set(); +- st->print_cr("Collection set:"); +- if (cset != NULL) { +- st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address())); +- st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address())); +- } else { +- st->print_cr(" (NULL)"); +- } +- +- st->cr(); +- MetaspaceAux::print_on(st); +- +- if (Verbose) { +- print_heap_regions_on(st); +- } +-} +- +-class ShenandoahInitGCLABClosure : public ThreadClosure { +-public: +- void do_thread(Thread* thread) { +- assert(thread == NULL || !thread->is_Java_thread(), "Don't expect JavaThread this early"); +- if (thread != NULL && thread->is_Worker_thread()) { +- thread->gclab().initialize(true); +- } +- } +-}; +- +-void ShenandoahHeap::post_initialize() { +- if (UseTLAB) { +- MutexLocker ml(Threads_lock); +- +- ShenandoahInitGCLABClosure init_gclabs; +- Threads::threads_do(&init_gclabs); +- } +- +- _scm->initialize(_max_workers); +- _full_gc->initialize(_gc_timer); +- +- ref_processing_init(); +- +- _heuristics->initialize(); +- +- 
JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers()); +-} +- +-size_t ShenandoahHeap::used() const { +- OrderAccess::acquire(); +- return (size_t) _used; +-} +- +-size_t ShenandoahHeap::committed() const { +- OrderAccess::acquire(); +- return _committed; +-} +- +-void ShenandoahHeap::increase_committed(size_t bytes) { +- shenandoah_assert_heaplocked_or_safepoint(); +- _committed += bytes; +-} +- +-void ShenandoahHeap::decrease_committed(size_t bytes) { +- shenandoah_assert_heaplocked_or_safepoint(); +- _committed -= bytes; +-} +- +-void ShenandoahHeap::increase_used(size_t bytes) { +- Atomic::add(bytes, &_used); +-} +- +-void ShenandoahHeap::set_used(size_t bytes) { +- OrderAccess::release_store_fence(&_used, bytes); +-} +- +-void ShenandoahHeap::decrease_used(size_t bytes) { +- assert(used() >= bytes, "never decrease heap size by more than we've left"); +- Atomic::add(-(jlong)bytes, &_used); +-} +- +-void ShenandoahHeap::increase_allocated(size_t bytes) { +- Atomic::add(bytes, &_bytes_allocated_since_gc_start); +-} +- +-void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) { +- size_t bytes = words * HeapWordSize; +- if (!waste) { +- increase_used(bytes); +- } +- increase_allocated(bytes); +- if (ShenandoahPacing) { +- control_thread()->pacing_notify_alloc(words); +- if (waste) { +- pacer()->claim_for_alloc(words, true); +- } +- } +-} +- +-size_t ShenandoahHeap::capacity() const { +- return committed(); +-} +- +-size_t ShenandoahHeap::max_capacity() const { +- return _num_regions * ShenandoahHeapRegion::region_size_bytes(); +-} +- +-size_t ShenandoahHeap::soft_max_capacity() const { +- size_t v = OrderAccess::load_acquire((volatile size_t*)&_soft_max_size); +- assert(min_capacity() <= v && v <= max_capacity(), +- err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT, +- min_capacity(), v, max_capacity())); +- return v; +-} +- +-void ShenandoahHeap::set_soft_max_capacity(size_t v) { +- assert(min_capacity() 
<= v && v <= max_capacity(), +- err_msg("Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT, +- min_capacity(), v, max_capacity())); +- OrderAccess::release_store_fence(&_soft_max_size, v); +-} +- +-size_t ShenandoahHeap::min_capacity() const { +- return _minimum_size; +-} +- +-size_t ShenandoahHeap::initial_capacity() const { +- return _initial_size; +-} +- +-bool ShenandoahHeap::is_in(const void* p) const { +- HeapWord* heap_base = (HeapWord*) base(); +- HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions(); +- return p >= heap_base && p < last_region_end; +-} +- +-void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) { +- assert (ShenandoahUncommit, "should be enabled"); +- +- // Application allocates from the beginning of the heap, and GC allocates at +- // the end of it. It is more efficient to uncommit from the end, so that applications +- // could enjoy the near committed regions. GC allocations are much less frequent, +- // and therefore can accept the committing costs. +- +- size_t count = 0; +- for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow +- ShenandoahHeapRegion* r = get_region(i - 1); +- if (r->is_empty_committed() && (r->empty_time() < shrink_before)) { +- ShenandoahHeapLocker locker(lock()); +- if (r->is_empty_committed()) { +- if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) { +- break; +- } +- +- r->make_uncommitted(); +- count++; +- } +- } +- SpinPause(); // allow allocators to take the lock +- } +- +- if (count > 0) { +- _control_thread->notify_heap_changed(); +- } +-} +- +-HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) { +- // Retain tlab and allocate object in shared space if +- // the amount free in the tlab is too large to discard. 
+- if (thread->gclab().free() > thread->gclab().refill_waste_limit()) { +- thread->gclab().record_slow_allocation(size); +- return NULL; +- } +- +- // Discard gclab and allocate a new one. +- // To minimize fragmentation, the last GCLAB may be smaller than the rest. +- size_t new_gclab_size = thread->gclab().compute_size(size); +- +- thread->gclab().clear_before_allocation(); +- +- if (new_gclab_size == 0) { +- return NULL; +- } +- +- // Allocated object should fit in new GCLAB, and new_gclab_size should be larger than min +- size_t min_size = MAX2(size + ThreadLocalAllocBuffer::alignment_reserve(), ThreadLocalAllocBuffer::min_size()); +- new_gclab_size = MAX2(new_gclab_size, min_size); +- +- // Allocate a new GCLAB... +- size_t actual_size = 0; +- HeapWord* obj = allocate_new_gclab(min_size, new_gclab_size, &actual_size); +- +- if (obj == NULL) { +- return NULL; +- } +- +- assert (size <= actual_size, "allocation should fit"); +- +- if (ZeroTLAB) { +- // ..and clear it. +- Copy::zero_to_words(obj, actual_size); +- } else { +- // ...and zap just allocated object. +-#ifdef ASSERT +- // Skip mangling the space corresponding to the object header to +- // ensure that the returned space is not considered parsable by +- // any concurrent GC thread. 
+- size_t hdr_size = oopDesc::header_size(); +- Copy::fill_to_words(obj + hdr_size, actual_size - hdr_size, badHeapWordVal); +-#endif // ASSERT +- } +- thread->gclab().fill(obj, obj + size, actual_size); +- return obj; +-} +- +-HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) { +- ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(word_size); +- return allocate_memory(req); +-} +- +-HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size, +- size_t word_size, +- size_t* actual_size) { +- ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size); +- HeapWord* res = allocate_memory(req); +- if (res != NULL) { +- *actual_size = req.actual_size(); +- } else { +- *actual_size = 0; +- } +- return res; +-} +- +-HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { +- intptr_t pacer_epoch = 0; +- bool in_new_region = false; +- HeapWord* result = NULL; +- +- if (req.is_mutator_alloc()) { +- if (ShenandoahPacing) { +- pacer()->pace_for_alloc(req.size()); +- pacer_epoch = pacer()->epoch(); +- } +- +- if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) { +- result = allocate_memory_under_lock(req, in_new_region); +- } +- +- // Allocation failed, block until control thread reacted, then retry allocation. +- // +- // It might happen that one of the threads requesting allocation would unblock +- // way later after GC happened, only to fail the second allocation, because +- // other threads have already depleted the free storage. In this case, a better +- // strategy is to try again, as long as GC makes progress. +- // +- // Then, we need to make sure the allocation was retried after at least one +- // Full GC, which means we want to try more than ShenandoahFullGCThreshold times. 
+- +- size_t tries = 0; +- +- while (result == NULL && _progress_last_gc.is_set()) { +- tries++; +- control_thread()->handle_alloc_failure(req); +- result = allocate_memory_under_lock(req, in_new_region); +- } +- +- while (result == NULL && tries <= ShenandoahFullGCThreshold) { +- tries++; +- control_thread()->handle_alloc_failure(req); +- result = allocate_memory_under_lock(req, in_new_region); +- } +- +- } else { +- assert(req.is_gc_alloc(), "Can only accept GC allocs here"); +- result = allocate_memory_under_lock(req, in_new_region); +- // Do not call handle_alloc_failure() here, because we cannot block. +- // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac(). +- } +- +- if (in_new_region) { +- control_thread()->notify_heap_changed(); +- } +- +- if (result != NULL) { +- size_t requested = req.size(); +- size_t actual = req.actual_size(); +- +- assert (req.is_lab_alloc() || (requested == actual), +- err_msg("Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT, +- ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual)); +- +- if (req.is_mutator_alloc()) { +- notify_mutator_alloc_words(actual, false); +- +- // If we requested more than we were granted, give the rest back to pacer. +- // This only matters if we are in the same pacing epoch: do not try to unpace +- // over the budget for the other phase. 
+- if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) { +- pacer()->unpace_for_alloc(pacer_epoch, requested - actual); +- } +- } else { +- increase_used(actual*HeapWordSize); +- } +- } +- +- return result; +-} +- +-HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) { +- ShenandoahHeapLocker locker(lock()); +- return _free_set->allocate(req, in_new_region); +-} +- +-HeapWord* ShenandoahHeap::mem_allocate(size_t size, +- bool* gc_overhead_limit_was_exceeded) { +- ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size); +- return allocate_memory(req); +-} +- +-class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure { +-private: +- ShenandoahHeap* const _heap; +- Thread* const _thread; +-public: +- ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) : +- _heap(heap), _thread(Thread::current()) {} +- +- void do_object(oop p) { +- shenandoah_assert_marked(NULL, p); +- if (!p->is_forwarded()) { +- _heap->evacuate_object(p, _thread); +- } +- } +-}; +- +-class ShenandoahEvacuationTask : public AbstractGangTask { +-private: +- ShenandoahHeap* const _sh; +- ShenandoahCollectionSet* const _cs; +- bool _concurrent; +-public: +- ShenandoahEvacuationTask(ShenandoahHeap* sh, +- ShenandoahCollectionSet* cs, +- bool concurrent) : +- AbstractGangTask("Parallel Evacuation Task"), +- _sh(sh), +- _cs(cs), +- _concurrent(concurrent) +- {} +- +- void work(uint worker_id) { +- ShenandoahEvacOOMScope oom_evac_scope; +- if (_concurrent) { +- ShenandoahConcurrentWorkerSession worker_session(worker_id); +- do_work(); +- } else { +- ShenandoahParallelWorkerSession worker_session(worker_id); +- do_work(); +- } +- } +- +-private: +- void do_work() { +- ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh); +- ShenandoahHeapRegion* r; +- while ((r =_cs->claim_next()) != NULL) { +- assert(r->has_live(), err_msg("Region " SIZE_FORMAT " should have been reclaimed early", 
r->index())); +- _sh->marked_object_iterate(r, &cl); +- +- if (ShenandoahPacing) { +- _sh->pacer()->report_evac(r->used() >> LogHeapWordSize); +- } +- +- if (_sh->cancelled_gc()) { +- break; +- } +- } +- } +-}; +- +-void ShenandoahHeap::trash_cset_regions() { +- ShenandoahHeapLocker locker(lock()); +- +- ShenandoahCollectionSet* set = collection_set(); +- ShenandoahHeapRegion* r; +- set->clear_current_index(); +- while ((r = set->next()) != NULL) { +- r->make_trash(); +- } +- collection_set()->clear(); +-} +- +-void ShenandoahHeap::print_heap_regions_on(outputStream* st) const { +- st->print_cr("Heap Regions:"); +- st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned"); +- st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data"); +- st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start, UWM=update watermark"); +- st->print_cr("SN=alloc sequence number"); +- +- for (size_t i = 0; i < num_regions(); i++) { +- get_region(i)->print_on(st); +- } +-} +- +-void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) { +- assert(start->is_humongous_start(), "reclaim regions starting with the first one"); +- +- oop humongous_obj = oop(start->bottom()); +- size_t size = humongous_obj->size(); +- size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); +- size_t index = start->index() + required_regions - 1; +- +- assert(!start->has_live(), "liveness must be zero"); +- +- for(size_t i = 0; i < required_regions; i++) { +- // Reclaim from tail. Otherwise, assertion fails when printing region to trace log, +- // as it expects that every region belongs to a humongous region starting with a humongous start region. 
+- ShenandoahHeapRegion* region = get_region(index --); +- +- assert(region->is_humongous(), "expect correct humongous start or continuation"); +- assert(!region->is_cset(), "Humongous region should not be in collection set"); +- +- region->make_trash_immediate(); +- } +-} +- +-class ShenandoahRetireGCLABClosure : public ThreadClosure { +-private: +- bool _retire; +-public: +- ShenandoahRetireGCLABClosure(bool retire) : _retire(retire) {}; +- +- void do_thread(Thread* thread) { +- assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name())); +- thread->gclab().make_parsable(_retire); +- } +-}; +- +-void ShenandoahHeap::make_parsable(bool retire_tlabs) { +- if (UseTLAB) { +- CollectedHeap::ensure_parsability(retire_tlabs); +- ShenandoahRetireGCLABClosure cl(retire_tlabs); +- Threads::java_threads_do(&cl); +- _workers->threads_do(&cl); +- } +-} +- +-class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask { +-private: +- ShenandoahRootEvacuator* _rp; +- +-public: +- ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) : +- AbstractGangTask("Shenandoah evacuate and update roots"), +- _rp(rp) {} +- +- void work(uint worker_id) { +- ShenandoahParallelWorkerSession worker_session(worker_id); +- ShenandoahEvacOOMScope oom_evac_scope; +- ShenandoahEvacuateUpdateRootsClosure cl; +- +- MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); +- _rp->roots_do(worker_id, &cl); +- } +-}; +- +-void ShenandoahHeap::evacuate_and_update_roots() { +- COMPILER2_PRESENT(DerivedPointerTable::clear()); +- +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped"); +- +- { +- ShenandoahRootEvacuator rp(ShenandoahPhaseTimings::init_evac); +- ShenandoahEvacuateUpdateRootsTask roots_task(&rp); +- workers()->run_task(&roots_task); +- } +- +- COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); +-} +- +-size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread 
*thread) const { +- // Returns size in bytes +- return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes()); +-} +- +-size_t ShenandoahHeap::max_tlab_size() const { +- // Returns size in words +- return ShenandoahHeapRegion::max_tlab_size_words(); +-} +- +-class ShenandoahResizeGCLABClosure : public ThreadClosure { +-public: +- void do_thread(Thread* thread) { +- assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name())); +- thread->gclab().resize(); +- } +-}; +- +-void ShenandoahHeap::resize_all_tlabs() { +- CollectedHeap::resize_all_tlabs(); +- +- ShenandoahResizeGCLABClosure cl; +- Threads::java_threads_do(&cl); +- _workers->threads_do(&cl); +-} +- +-class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure { +-public: +- void do_thread(Thread* thread) { +- assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name())); +- thread->gclab().accumulate_statistics(); +- thread->gclab().initialize_statistics(); +- } +-}; +- +-void ShenandoahHeap::accumulate_statistics_all_gclabs() { +- ShenandoahAccumulateStatisticsGCLABClosure cl; +- Threads::java_threads_do(&cl); +- _workers->threads_do(&cl); +-} +- +-void ShenandoahHeap::collect(GCCause::Cause cause) { +- _control_thread->request_gc(cause); +-} +- +-void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) { +- //assert(false, "Shouldn't need to do full collections"); +-} +- +-CollectorPolicy* ShenandoahHeap::collector_policy() const { +- return _shenandoah_policy; +-} +- +-void ShenandoahHeap::resize_tlabs() { +- CollectedHeap::resize_all_tlabs(); +-} +- +-void ShenandoahHeap::accumulate_statistics_tlabs() { +- CollectedHeap::accumulate_statistics_all_tlabs(); +-} +- +-HeapWord* ShenandoahHeap::block_start(const void* addr) const { +- ShenandoahHeapRegion* r = heap_region_containing(addr); +- if (r != NULL) { +- return r->block_start(addr); +- } +- return NULL; +-} +- +-size_t 
ShenandoahHeap::block_size(const HeapWord* addr) const { +- ShenandoahHeapRegion* r = heap_region_containing(addr); +- return r->block_size(addr); +-} +- +-bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const { +- ShenandoahHeapRegion* r = heap_region_containing(addr); +- return r->block_is_obj(addr); +-} +- +-jlong ShenandoahHeap::millis_since_last_gc() { +- double v = heuristics()->time_since_last_gc() * 1000; +- assert(0 <= v && v <= max_jlong, err_msg("value should fit: %f", v)); +- return (jlong)v; +-} +- +-void ShenandoahHeap::prepare_for_verify() { +- if (SafepointSynchronize::is_at_safepoint()) { +- make_parsable(false); +- } +-} +- +-void ShenandoahHeap::print_gc_threads_on(outputStream* st) const { +- workers()->print_worker_threads_on(st); +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::print_worker_threads_on(st); +- } +-} +- +-void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const { +- workers()->threads_do(tcl); +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::threads_do(tcl); +- } +-} +- +-void ShenandoahHeap::print_tracing_info() const { +- if (PrintGC || TraceGen0Time || TraceGen1Time) { +- ResourceMark rm; +- outputStream* out = gclog_or_tty; +- phase_timings()->print_global_on(out); +- +- out->cr(); +- out->cr(); +- +- shenandoah_policy()->print_gc_stats(out); +- +- out->cr(); +- out->cr(); +- } +-} +- +-void ShenandoahHeap::verify(bool silent, VerifyOption vo) { +- if (ShenandoahSafepoint::is_at_shenandoah_safepoint() || ! UseTLAB) { +- if (ShenandoahVerify) { +- verifier()->verify_generic(vo); +- } else { +- // TODO: Consider allocating verification bitmaps on demand, +- // and turn this on unconditionally. 
+- } +- } +-} +-size_t ShenandoahHeap::tlab_capacity(Thread *thr) const { +- return _free_set->capacity(); +-} +- +-class ObjectIterateScanRootClosure : public ExtendedOopClosure { +-private: +- MarkBitMap* _bitmap; +- Stack* _oop_stack; +- +- template +- void do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- obj = (oop) ShenandoahBarrierSet::resolve_forwarded_not_null(obj); +- assert(obj->is_oop(), "must be a valid oop"); +- if (!_bitmap->isMarked((HeapWord*) obj)) { +- _bitmap->mark((HeapWord*) obj); +- _oop_stack->push(obj); +- } +- } +- } +-public: +- ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack* oop_stack) : +- _bitmap(bitmap), _oop_stack(oop_stack) {} +- void do_oop(oop* p) { do_oop_work(p); } +- void do_oop(narrowOop* p) { do_oop_work(p); } +-}; +- +-/* +- * This is public API, used in preparation of object_iterate(). +- * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't +- * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can +- * control, we call SH::make_parsable(). +- */ +-void ShenandoahHeap::ensure_parsability(bool retire_tlabs) { +- // No-op. +-} +- +-/* +- * Iterates objects in the heap. This is public API, used for, e.g., heap dumping. +- * +- * We cannot safely iterate objects by doing a linear scan at random points in time. Linear +- * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g. +- * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear +- * scanning therefore depends on having a valid marking bitmap to support it. However, we only +- * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid +- * marking bitmap during marking, after aborted marking or during/after cleanup (when we just +- * wiped the bitmap in preparation for next marking). 
+- * +- * For all those reasons, we implement object iteration as a single marking traversal, reporting +- * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap +- * is allowed to report dead objects, but is not required to do so. +- */ +-void ShenandoahHeap::object_iterate(ObjectClosure* cl) { +- assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); +- if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) { +- log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration"); +- return; +- } +- +- // Reset bitmap +- _aux_bit_map.clear(); +- +- Stack oop_stack; +- +- ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack); +- +- { +- // First, we process GC roots according to current GC cycle. +- // This populates the work stack with initial objects. +- // It is important to relinquish the associated locks before diving +- // into heap dumper. +- ShenandoahHeapIterationRootScanner rp; +- rp.roots_do(&oops); +- } +- +- // Work through the oop stack to traverse heap. +- while (! 
oop_stack.is_empty()) { +- oop obj = oop_stack.pop(); +- assert(obj->is_oop(), "must be a valid oop"); +- cl->do_object(obj); +- obj->oop_iterate(&oops); +- } +- +- assert(oop_stack.is_empty(), "should be empty"); +- +- if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) { +- log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration"); +- } +-} +- +-void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) { +- assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints"); +- object_iterate(cl); +-} +- +-void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) { +- ObjectToOopClosure cl2(cl); +- object_iterate(&cl2); +-} +- +-void ShenandoahHeap::gc_prologue(bool b) { +- Unimplemented(); +-} +- +-void ShenandoahHeap::gc_epilogue(bool b) { +- Unimplemented(); +-} +- +-void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { +- for (size_t i = 0; i < num_regions(); i++) { +- ShenandoahHeapRegion* current = get_region(i); +- blk->heap_region_do(current); +- } +-} +- +-class ShenandoahParallelHeapRegionTask : public AbstractGangTask { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahHeapRegionClosure* const _blk; +- +- shenandoah_padding(0); +- volatile jint _index; +- shenandoah_padding(1); +- +-public: +- ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) : +- AbstractGangTask("Parallel Region Task"), +- _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {} +- +- void work(uint worker_id) { +- jint stride = (jint)ShenandoahParallelRegionStride; +- +- jint max = (jint)_heap->num_regions(); +- while (_index < max) { +- jint cur = Atomic::add(stride, &_index) - stride; +- jint start = cur; +- jint end = MIN2(cur + stride, max); +- if (start >= max) break; +- +- for (jint i = cur; i < end; i++) { +- ShenandoahHeapRegion* current = _heap->get_region((size_t)i); +- 
_blk->heap_region_do(current); +- } +- } +- } +-}; +- +-void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const { +- assert(blk->is_thread_safe(), "Only thread-safe closures here"); +- if (num_regions() > ShenandoahParallelRegionStride) { +- ShenandoahParallelHeapRegionTask task(blk); +- workers()->run_task(&task); +- } else { +- heap_region_iterate(blk); +- } +-} +- +-class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { +-private: +- ShenandoahMarkingContext* const _ctx; +-public: +- ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- assert(!r->has_live(), +- err_msg("Region " SIZE_FORMAT " should have no live data", r->index())); +- if (r->is_active()) { +- // Check if region needs updating its TAMS. We have updated it already during concurrent +- // reset, so it is very likely we don't need to do another write here. 
+- if (_ctx->top_at_mark_start(r) != r->top()) { +- _ctx->capture_top_at_mark_start(r); +- } +- } else { +- assert(_ctx->top_at_mark_start(r) == r->top(), +- err_msg("Region " SIZE_FORMAT " should already have correct TAMS", r->index())); +- } +- } +- +- bool is_thread_safe() { return true; } +-}; +- +-void ShenandoahHeap::op_init_mark() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); +- assert(Thread::current()->is_VM_thread(), "can only do this in VMThread"); +- +- assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap"); +- assert(!marking_context()->is_complete(), "should not be complete"); +- assert(!has_forwarded_objects(), "No forwarded objects on this path"); +- +- if (ShenandoahVerify) { +- verifier()->verify_before_concmark(); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats); +- accumulate_statistics_tlabs(); +- } +- +- if (VerifyBeforeGC) { +- Universe::verify(); +- } +- +- set_concurrent_mark_in_progress(true); +- // We need to reset all TLABs because we'd lose marks on all objects allocated in them. 
+- if (UseTLAB) { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable); +- make_parsable(true); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states); +- ShenandoahInitMarkUpdateRegionStateClosure cl; +- parallel_heap_region_iterate(&cl); +- } +- +- // Make above changes visible to worker threads +- OrderAccess::fence(); +- +- concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots); +- +- if (UseTLAB) { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs); +- resize_tlabs(); +- } +- +- if (ShenandoahPacing) { +- pacer()->setup_for_mark(); +- } +-} +- +-void ShenandoahHeap::op_mark() { +- concurrent_mark()->mark_from_roots(); +-} +- +-class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { +-private: +- ShenandoahMarkingContext* const _ctx; +- ShenandoahHeapLock* const _lock; +- +-public: +- ShenandoahFinalMarkUpdateRegionStateClosure() : +- _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {} +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- if (r->is_active()) { +- // All allocations past TAMS are implicitly live, adjust the region data. +- // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap. +- HeapWord *tams = _ctx->top_at_mark_start(r); +- HeapWord *top = r->top(); +- if (top > tams) { +- r->increase_live_data_alloc_words(pointer_delta(top, tams)); +- } +- +- // We are about to select the collection set, make sure it knows about +- // current pinning status. Also, this allows trashing more regions that +- // now have their pinning status dropped. +- if (r->is_pinned()) { +- if (r->pin_count() == 0) { +- ShenandoahHeapLocker locker(_lock); +- r->make_unpinned(); +- } +- } else { +- if (r->pin_count() > 0) { +- ShenandoahHeapLocker locker(_lock); +- r->make_pinned(); +- } +- } +- +- // Remember limit for updating refs. 
It's guaranteed that we get no +- // from-space-refs written from here on. +- r->set_update_watermark_at_safepoint(r->top()); +- } else { +- assert(!r->has_live(), +- err_msg("Region " SIZE_FORMAT " should have no live data", r->index())); +- assert(_ctx->top_at_mark_start(r) == r->top(), +- err_msg("Region " SIZE_FORMAT " should have correct TAMS", r->index())); +- } +- } +- +- bool is_thread_safe() { return true; } +-}; +- +-void ShenandoahHeap::op_final_mark() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint"); +- assert(!has_forwarded_objects(), "No forwarded objects on this path"); +- +- // It is critical that we +- // evacuate roots right after finishing marking, so that we don't +- // get unmarked objects in the roots. +- +- if (!cancelled_gc()) { +- concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false); +- +- TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->reset_taskqueue_stats()); +- +- if (ShenandoahVerify) { +- verifier()->verify_roots_no_forwarded(); +- } +- +- TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->print_taskqueue_stats()); +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states); +- ShenandoahFinalMarkUpdateRegionStateClosure cl; +- parallel_heap_region_iterate(&cl); +- +- assert_pinned_region_status(); +- } +- +- // Force the threads to reacquire their TLABs outside the collection set. +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::retire_tlabs); +- make_parsable(true); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset); +- ShenandoahHeapLocker locker(lock()); +- _collection_set->clear(); +- heuristics()->choose_collection_set(_collection_set); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset); +- ShenandoahHeapLocker locker(lock()); +- _free_set->rebuild(); +- } +- +- // If collection set has candidates, start evacuation. +- // Otherwise, bypass the rest of the cycle. 
+- if (!collection_set()->is_empty()) { +- ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac); +- +- if (ShenandoahVerify) { +- verifier()->verify_before_evacuation(); +- } +- +- set_evacuation_in_progress(true); +- // From here on, we need to update references. +- set_has_forwarded_objects(true); +- +- if (!is_degenerated_gc_in_progress()) { +- evacuate_and_update_roots(); +- } +- +- if (ShenandoahPacing) { +- pacer()->setup_for_evac(); +- } +- +- if (ShenandoahVerify) { +- verifier()->verify_roots_no_forwarded(); +- verifier()->verify_during_evacuation(); +- } +- } else { +- if (ShenandoahVerify) { +- verifier()->verify_after_concmark(); +- } +- +- if (VerifyAfterGC) { +- Universe::verify(); +- } +- } +- +- } else { +- concurrent_mark()->cancel(); +- complete_marking(); +- +- if (process_references()) { +- // Abandon reference processing right away: pre-cleaning must have failed. +- ReferenceProcessor *rp = ref_processor(); +- rp->disable_discovery(); +- rp->abandon_partial_discovery(); +- rp->verify_no_references_recorded(); +- } +- } +-} +- +-void ShenandoahHeap::op_conc_evac() { +- ShenandoahEvacuationTask task(this, _collection_set, true); +- workers()->run_task(&task); +-} +- +-void ShenandoahHeap::op_stw_evac() { +- ShenandoahEvacuationTask task(this, _collection_set, false); +- workers()->run_task(&task); +-} +- +-void ShenandoahHeap::op_updaterefs() { +- update_heap_references(true); +-} +- +-void ShenandoahHeap::op_cleanup_early() { +- free_set()->recycle_trash(); +-} +- +-void ShenandoahHeap::op_cleanup_complete() { +- free_set()->recycle_trash(); +-} +- +-class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { +-private: +- ShenandoahMarkingContext* const _ctx; +-public: +- ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- if (r->is_active()) { +- // Reset live data and set TAMS optimistically. 
We would recheck these under the pause +- // anyway to capture any updates that happened since now. +- r->clear_live_data(); +- _ctx->capture_top_at_mark_start(r); +- } +- } +- +- bool is_thread_safe() { return true; } +-}; +- +-void ShenandoahHeap::op_reset() { +- if (ShenandoahPacing) { +- pacer()->setup_for_reset(); +- } +- reset_mark_bitmap(); +- +- ShenandoahResetUpdateRegionStateClosure cl; +- parallel_heap_region_iterate(&cl); +-} +- +-void ShenandoahHeap::op_preclean() { +- if (ShenandoahPacing) { +- pacer()->setup_for_preclean(); +- } +- concurrent_mark()->preclean_weak_refs(); +-} +- +-void ShenandoahHeap::op_full(GCCause::Cause cause) { +- ShenandoahMetricsSnapshot metrics; +- metrics.snap_before(); +- +- full_gc()->do_it(cause); +- +- metrics.snap_after(); +- +- if (metrics.is_good_progress()) { +- _progress_last_gc.set(); +- } else { +- // Nothing to do. Tell the allocation path that we have failed to make +- // progress, and it can finally fail. +- _progress_last_gc.unset(); +- } +-} +- +-void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) { +- // Degenerated GC is STW, but it can also fail. Current mechanics communicates +- // GC failure via cancelled_concgc() flag. So, if we detect the failure after +- // some phase, we have to upgrade the Degenerate GC to Full GC. +- +- clear_cancelled_gc(); +- +- ShenandoahMetricsSnapshot metrics; +- metrics.snap_before(); +- +- switch (point) { +- // The cases below form the Duff's-like device: it describes the actual GC cycle, +- // but enters it at different points, depending on which concurrent phase had +- // degenerated. +- +- case _degenerated_outside_cycle: +- // We have degenerated from outside the cycle, which means something is bad with +- // the heap, most probably heavy humongous fragmentation, or we are very low on free +- // space. 
It makes little sense to wait for Full GC to reclaim as much as it can, when +- // we can do the most aggressive degen cycle, which includes processing references and +- // class unloading, unless those features are explicitly disabled. +- // +- // Note that we can only do this for "outside-cycle" degens, otherwise we would risk +- // changing the cycle parameters mid-cycle during concurrent -> degenerated handover. +- set_process_references(heuristics()->can_process_references()); +- set_unload_classes(heuristics()->can_unload_classes()); +- +- if (_heap->process_references()) { +- ReferenceProcessor* rp = _heap->ref_processor(); +- rp->set_active_mt_degree(_heap->workers()->active_workers()); +- +- // enable ("weak") refs discovery +- rp->enable_discovery(true /*verify_no_refs*/, true); +- rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs()); +- } +- +- op_reset(); +- +- op_init_mark(); +- if (cancelled_gc()) { +- op_degenerated_fail(); +- return; +- } +- +- case _degenerated_mark: +- op_final_mark(); +- if (cancelled_gc()) { +- op_degenerated_fail(); +- return; +- } +- +- op_cleanup_early(); +- +- case _degenerated_evac: +- // If heuristics thinks we should do the cycle, this flag would be set, +- // and we can do evacuation. Otherwise, it would be the shortcut cycle. +- if (is_evacuation_in_progress()) { +- +- // Degeneration under oom-evac protocol might have left some objects in +- // collection set un-evacuated. Restart evacuation from the beginning to +- // capture all objects. For all the objects that are already evacuated, +- // it would be a simple check, which is supposed to be fast. This is also +- // safe to do even without degeneration, as CSet iterator is at beginning +- // in preparation for evacuation anyway. +- // +- // Before doing that, we need to make sure we never had any cset-pinned +- // regions. 
This may happen if allocation failure happened when evacuating +- // the about-to-be-pinned object, oom-evac protocol left the object in +- // the collection set, and then the pin reached the cset region. If we continue +- // the cycle here, we would trash the cset and alive objects in it. To avoid +- // it, we fail degeneration right away and slide into Full GC to recover. +- +- { +- sync_pinned_region_status(); +- collection_set()->clear_current_index(); +- +- ShenandoahHeapRegion* r; +- while ((r = collection_set()->next()) != NULL) { +- if (r->is_pinned()) { +- cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); +- op_degenerated_fail(); +- return; +- } +- } +- +- collection_set()->clear_current_index(); +- } +- +- op_stw_evac(); +- if (cancelled_gc()) { +- op_degenerated_fail(); +- return; +- } +- } +- +- // If heuristics thinks we should do the cycle, this flag would be set, +- // and we need to do update-refs. Otherwise, it would be the shortcut cycle. +- if (has_forwarded_objects()) { +- op_init_updaterefs(); +- if (cancelled_gc()) { +- op_degenerated_fail(); +- return; +- } +- } +- +- case _degenerated_updaterefs: +- if (has_forwarded_objects()) { +- op_final_updaterefs(); +- if (cancelled_gc()) { +- op_degenerated_fail(); +- return; +- } +- } +- +- op_cleanup_complete(); +- break; +- +- default: +- ShouldNotReachHere(); +- } +- +- if (ShenandoahVerify) { +- verifier()->verify_after_degenerated(); +- } +- +- if (VerifyAfterGC) { +- Universe::verify(); +- } +- +- metrics.snap_after(); +- +- // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles, +- // because that probably means the heap is overloaded and/or fragmented. 
+- if (!metrics.is_good_progress()) { +- _progress_last_gc.unset(); +- cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc); +- op_degenerated_futile(); +- } else { +- _progress_last_gc.set(); +- } +-} +- +-void ShenandoahHeap::op_degenerated_fail() { +- log_info(gc)("Cannot finish degeneration, upgrading to Full GC"); +- shenandoah_policy()->record_degenerated_upgrade_to_full(); +- op_full(GCCause::_shenandoah_upgrade_to_full_gc); +-} +- +-void ShenandoahHeap::op_degenerated_futile() { +- shenandoah_policy()->record_degenerated_upgrade_to_full(); +- op_full(GCCause::_shenandoah_upgrade_to_full_gc); +-} +- +-void ShenandoahHeap::complete_marking() { +- if (is_concurrent_mark_in_progress()) { +- set_concurrent_mark_in_progress(false); +- } +- +- if (!cancelled_gc()) { +- // If we needed to update refs, and concurrent marking has been cancelled, +- // we need to finish updating references. +- set_has_forwarded_objects(false); +- mark_complete_marking_context(); +- } +-} +- +-void ShenandoahHeap::force_satb_flush_all_threads() { +- if (!is_concurrent_mark_in_progress()) { +- // No need to flush SATBs +- return; +- } +- +- // Do not block if Threads lock is busy. This avoids the potential deadlock +- // when this code is called from the periodic task, and something else is +- // expecting the periodic task to complete without blocking. On the off-chance +- // Threads lock is busy momentarily, try to acquire several times. +- for (int t = 0; t < 10; t++) { +- if (Threads_lock->try_lock()) { +- JavaThread::set_force_satb_flush_all_threads(true); +- Threads_lock->unlock(); +- +- // The threads are not "acquiring" their thread-local data, but it does not +- // hurt to "release" the updates here anyway. 
+- OrderAccess::fence(); +- break; +- } +- os::naked_short_sleep(1); +- } +-} +- +-void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint"); +- _gc_state.set_cond(mask, value); +- JavaThread::set_gc_state_all_threads(_gc_state.raw_value()); +-} +- +-void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) { +- if (has_forwarded_objects()) { +- set_gc_state_mask(MARKING | UPDATEREFS, in_progress); +- } else { +- set_gc_state_mask(MARKING, in_progress); +- } +- JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress); +-} +- +-void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint"); +- set_gc_state_mask(EVACUATION, in_progress); +-} +- +-void ShenandoahHeap::ref_processing_init() { +- MemRegion mr = reserved_region(); +- +- assert(_max_workers > 0, "Sanity"); +- +- bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1); +- bool mt_discovery = _max_workers > 1; +- +- _ref_processor = +- new ReferenceProcessor(mr, // span +- mt_processing, // MT processing +- _max_workers, // Degree of MT processing +- mt_discovery, // MT discovery +- _max_workers, // Degree of MT discovery +- false, // Reference discovery is not atomic +- NULL); // No closure, should be installed before use +- +- log_info(gc, init)("Reference processing: %s discovery, %s processing", +- mt_discovery ? "parallel" : "serial", +- mt_processing ? 
"parallel" : "serial"); +- +- shenandoah_assert_rp_isalive_not_installed(); +-} +- +-void ShenandoahHeap::acquire_pending_refs_lock() { +- _control_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL); +-} +- +-void ShenandoahHeap::release_pending_refs_lock() { +- _control_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL); +-} +- +-GCTracer* ShenandoahHeap::tracer() { +- return shenandoah_policy()->tracer(); +-} +- +-size_t ShenandoahHeap::tlab_used(Thread* thread) const { +- return _free_set->used(); +-} +- +-void ShenandoahHeap::cancel_gc(GCCause::Cause cause) { +- if (try_cancel_gc()) { +- FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause)); +- log_info(gc)("%s", msg.buffer()); +- Events::log(Thread::current(), "%s", msg.buffer()); +- } +-} +- +-uint ShenandoahHeap::max_workers() { +- return _max_workers; +-} +- +-void ShenandoahHeap::stop() { +- // The shutdown sequence should be able to terminate when GC is running. +- +- // Step 0. Notify policy to disable event recording. +- _shenandoah_policy->record_shutdown(); +- +- // Step 1. Notify control thread that we are in shutdown. +- // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown. +- // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below. +- _control_thread->prepare_for_graceful_shutdown(); +- +- // Step 2. Notify GC workers that we are cancelling GC. +- cancel_gc(GCCause::_shenandoah_stop_vm); +- +- // Step 3. Wait until GC worker exits normally. +- _control_thread->stop(); +- +- // Step 4. Stop String Dedup thread if it is active +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::stop(); +- } +-} +- +-void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) { +- assert(heuristics()->can_unload_classes(), "Class unloading should be enabled"); +- +- ShenandoahGCPhase root_phase(full_gc ? 
+- ShenandoahPhaseTimings::full_gc_purge : +- ShenandoahPhaseTimings::purge); +- +- ShenandoahIsAliveSelector alive; +- BoolObjectClosure* is_alive = alive.is_alive_closure(); +- +- // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack +- // part is too slow to be done serially, so it is handled during the ShenandoahParallelCleaning phase. +- // Defer the cleaning until we have complete on_stack data. +- MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */); +- +- bool purged_class; +- +- // Unload classes and purge SystemDictionary. +- { +- ShenandoahGCPhase phase(full_gc ? +- ShenandoahPhaseTimings::full_gc_purge_class_unload : +- ShenandoahPhaseTimings::purge_class_unload); +- purged_class = SystemDictionary::do_unloading(is_alive, +- false /* Defer klass cleaning */); +- } +- { +- ShenandoahGCPhase phase(full_gc ? +- ShenandoahPhaseTimings::full_gc_purge_par : +- ShenandoahPhaseTimings::purge_par); +- uint active = _workers->active_workers(); +- ShenandoahParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class); +- _workers->run_task(&unlink_task); +- } +- +- { +- ShenandoahGCPhase phase(full_gc ? +- ShenandoahPhaseTimings::full_gc_purge_metadata : +- ShenandoahPhaseTimings::purge_metadata); +- ClassLoaderDataGraph::free_deallocate_lists(); +- } +- +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahGCPhase phase(full_gc ? +- ShenandoahPhaseTimings::full_gc_purge_string_dedup : +- ShenandoahPhaseTimings::purge_string_dedup); +- ShenandoahStringDedup::parallel_cleanup(); +- } +- +- { +- ShenandoahGCPhase phase(full_gc ? 
+- ShenandoahPhaseTimings::full_gc_purge_cldg : +- ShenandoahPhaseTimings::purge_cldg); +- ClassLoaderDataGraph::purge(); +- } +-} +- +-void ShenandoahHeap::set_has_forwarded_objects(bool cond) { +- set_gc_state_mask(HAS_FORWARDED, cond); +-} +- +-void ShenandoahHeap::set_process_references(bool pr) { +- _process_references.set_cond(pr); +-} +- +-void ShenandoahHeap::set_unload_classes(bool uc) { +- _unload_classes.set_cond(uc); +-} +- +-bool ShenandoahHeap::process_references() const { +- return _process_references.is_set(); +-} +- +-bool ShenandoahHeap::unload_classes() const { +- return _unload_classes.is_set(); +-} +- +-address ShenandoahHeap::in_cset_fast_test_addr() { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- assert(heap->collection_set() != NULL, "Sanity"); +- return (address) heap->collection_set()->biased_map_address(); +-} +- +-address ShenandoahHeap::cancelled_gc_addr() { +- return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of(); +-} +- +-address ShenandoahHeap::gc_state_addr() { +- return (address) ShenandoahHeap::heap()->_gc_state.addr_of(); +-} +- +-size_t ShenandoahHeap::conservative_max_heap_alignment() { +- size_t align = ShenandoahMaxRegionSize; +- if (UseLargePages) { +- align = MAX2(align, os::large_page_size()); +- } +- return align; +-} +- +-size_t ShenandoahHeap::bytes_allocated_since_gc_start() { +- return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start); +-} +- +-void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { +- OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0); +-} +- +-void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { +- _degenerated_gc_in_progress.set_cond(in_progress); +-} +- +-void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) { +- _full_gc_in_progress.set_cond(in_progress); +-} +- +-void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) { +- assert (is_full_gc_in_progress(), "should be"); +- 
_full_gc_move_in_progress.set_cond(in_progress); +-} +- +-void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) { +- set_gc_state_mask(UPDATEREFS, in_progress); +-} +- +-void ShenandoahHeap::register_nmethod(nmethod* nm) { +- ShenandoahCodeRoots::add_nmethod(nm); +-} +- +-void ShenandoahHeap::unregister_nmethod(nmethod* nm) { +- ShenandoahCodeRoots::remove_nmethod(nm); +-} +- +-oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) { +- heap_region_containing(o)->record_pin(); +- return o; +-} +- +-void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) { +- heap_region_containing(o)->record_unpin(); +-} +- +-void ShenandoahHeap::sync_pinned_region_status() { +- ShenandoahHeapLocker locker(lock()); +- +- for (size_t i = 0; i < num_regions(); i++) { +- ShenandoahHeapRegion *r = get_region(i); +- if (r->is_active()) { +- if (r->is_pinned()) { +- if (r->pin_count() == 0) { +- r->make_unpinned(); +- } +- } else { +- if (r->pin_count() > 0) { +- r->make_pinned(); +- } +- } +- } +- } +- +- assert_pinned_region_status(); +-} +- +-#ifdef ASSERT +-void ShenandoahHeap::assert_pinned_region_status() { +- for (size_t i = 0; i < num_regions(); i++) { +- ShenandoahHeapRegion* r = get_region(i); +- assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0), +- err_msg("Region " SIZE_FORMAT " pinning status is inconsistent", i)); +- } +-} +-#endif +- +-GCTimer* ShenandoahHeap::gc_timer() const { +- return _gc_timer; +-} +- +-#ifdef ASSERT +-void ShenandoahHeap::assert_gc_workers(uint nworkers) { +- assert(nworkers > 0 && nworkers <= max_workers(), "Sanity"); +- +- if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) { +- if (UseDynamicNumberOfGCThreads || +- (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) { +- assert(nworkers <= ParallelGCThreads, "Cannot use more than it has"); +- } else { +- // Use ParallelGCThreads inside safepoints +- assert(nworkers == ParallelGCThreads, "Use ParalleGCThreads within 
safepoints"); +- } +- } else { +- if (UseDynamicNumberOfGCThreads || +- (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) { +- assert(nworkers <= ConcGCThreads, "Cannot use more than it has"); +- } else { +- // Use ConcGCThreads outside safepoints +- assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints"); +- } +- } +-} +-#endif +- +-ShenandoahVerifier* ShenandoahHeap::verifier() { +- guarantee(ShenandoahVerify, "Should be enabled"); +- assert (_verifier != NULL, "sanity"); +- return _verifier; +-} +- +-ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() : +- _heap(ShenandoahHeap::heap()) {} +- +-class ShenandoahUpdateHeapRefsTask : public AbstractGangTask { +-private: +- ShenandoahHeap* _heap; +- ShenandoahRegionIterator* _regions; +- bool _concurrent; +- +-public: +- ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) : +- AbstractGangTask("Concurrent Update References Task"), +- _heap(ShenandoahHeap::heap()), +- _regions(regions), +- _concurrent(concurrent) { +- } +- +- void work(uint worker_id) { +- ShenandoahConcurrentWorkerSession worker_session(worker_id); +- ShenandoahUpdateHeapRefsClosure cl; +- ShenandoahHeapRegion* r = _regions->next(); +- ShenandoahMarkingContext* const ctx = _heap->complete_marking_context(); +- while (r != NULL) { +- HeapWord* update_watermark = r->get_update_watermark(); +- assert (update_watermark >= r->bottom(), "sanity"); +- if (r->is_active() && !r->is_cset()) { +- _heap->marked_object_oop_iterate(r, &cl, update_watermark); +- } +- if (ShenandoahPacing) { +- _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom())); +- } +- if (_heap->cancelled_gc()) { +- return; +- } +- r = _regions->next(); +- } +- } +-}; +- +-void ShenandoahHeap::update_heap_references(bool concurrent) { +- ShenandoahUpdateHeapRefsTask task(&_update_refs_iterator, concurrent); +- workers()->run_task(&task); +-} +- +-void ShenandoahHeap::op_init_updaterefs() { 
+- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); +- +- set_evacuation_in_progress(false); +- +- if (ShenandoahVerify) { +- if (!is_degenerated_gc_in_progress()) { +- verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots); +- } +- verifier()->verify_before_updaterefs(); +- } +- +- set_update_refs_in_progress(true); +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_prepare); +- +- make_parsable(true); +- +- // Reset iterator. +- _update_refs_iterator.reset(); +- } +- +- if (ShenandoahPacing) { +- pacer()->setup_for_updaterefs(); +- } +-} +- +-class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure { +-private: +- ShenandoahHeapLock* const _lock; +- +-public: +- ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {} +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- // Drop unnecessary "pinned" state from regions that does not have CP marks +- // anymore, as this would allow trashing them. +- +- if (r->is_active()) { +- if (r->is_pinned()) { +- if (r->pin_count() == 0) { +- ShenandoahHeapLocker locker(_lock); +- r->make_unpinned(); +- } +- } else { +- if (r->pin_count() > 0) { +- ShenandoahHeapLocker locker(_lock); +- r->make_pinned(); +- } +- } +- } +- } +- +- bool is_thread_safe() { return true; } +-}; +- +-void ShenandoahHeap::op_final_updaterefs() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); +- +- // Check if there is left-over work, and finish it +- if (_update_refs_iterator.has_next()) { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work); +- +- // Finish updating references where we left off. +- clear_cancelled_gc(); +- update_heap_references(false); +- } +- +- // Clear cancelled GC, if set. On cancellation path, the block before would handle +- // everything. On degenerated paths, cancelled gc would not be set anyway. 
+- if (cancelled_gc()) { +- clear_cancelled_gc(); +- } +- assert(!cancelled_gc(), "Should have been done right before"); +- +- if (ShenandoahVerify && !is_degenerated_gc_in_progress()) { +- verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots); +- } +- +- if (is_degenerated_gc_in_progress()) { +- concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots); +- } else { +- concurrent_mark()->update_thread_roots(ShenandoahPhaseTimings::final_update_refs_roots); +- } +- +- // Has to be done before cset is clear +- if (ShenandoahVerify) { +- verifier()->verify_roots_in_to_space(); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset); +- trash_cset_regions(); +- } +- +- set_has_forwarded_objects(false); +- set_update_refs_in_progress(false); +- +- if (ShenandoahVerify) { +- verifier()->verify_after_updaterefs(); +- } +- +- if (VerifyAfterGC) { +- Universe::verify(); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states); +- ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl; +- parallel_heap_region_iterate(&cl); +- +- assert_pinned_region_status(); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset); +- ShenandoahHeapLocker locker(lock()); +- _free_set->rebuild(); +- } +-} +- +-void ShenandoahHeap::print_extended_on(outputStream *st) const { +- print_on(st); +- print_heap_regions_on(st); +-} +- +-bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) { +- size_t slice = r->index() / _bitmap_regions_per_slice; +- +- size_t regions_from = _bitmap_regions_per_slice * slice; +- size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1)); +- for (size_t g = regions_from; g < regions_to; g++) { +- assert (g / _bitmap_regions_per_slice == slice, "same slice"); +- if (skip_self && g == r->index()) continue; +- if 
(get_region(g)->is_committed()) { +- return true; +- } +- } +- return false; +-} +- +-bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) { +- shenandoah_assert_heaplocked(); +- +- // Bitmaps in special regions do not need commits +- if (_bitmap_region_special) { +- return true; +- } +- +- if (is_bitmap_slice_committed(r, true)) { +- // Some other region from the group is already committed, meaning the bitmap +- // slice is already committed, we exit right away. +- return true; +- } +- +- // Commit the bitmap slice: +- size_t slice = r->index() / _bitmap_regions_per_slice; +- size_t off = _bitmap_bytes_per_slice * slice; +- size_t len = _bitmap_bytes_per_slice; +- char* start = (char*) _bitmap_region.start() + off; +- +- if (!os::commit_memory(start, len, false)) { +- return false; +- } +- +- if (AlwaysPreTouch) { +- os::pretouch_memory(start, start + len); +- } +- +- return true; +-} +- +-bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { +- shenandoah_assert_heaplocked(); +- +- // Bitmaps in special regions do not need uncommits +- if (_bitmap_region_special) { +- return true; +- } +- +- if (is_bitmap_slice_committed(r, true)) { +- // Some other region from the group is still committed, meaning the bitmap +- // slice is should stay committed, exit right away. 
+- return true; +- } +- +- // Uncommit the bitmap slice: +- size_t slice = r->index() / _bitmap_regions_per_slice; +- size_t off = _bitmap_bytes_per_slice * slice; +- size_t len = _bitmap_bytes_per_slice; +- if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) { +- return false; +- } +- return true; +-} +- +-void ShenandoahHeap::vmop_entry_init_mark() { +- TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross); +- +- try_inject_alloc_failure(); +- VM_ShenandoahInitMark op; +- VMThread::execute(&op); // jump to entry_init_mark() under safepoint +-} +- +-void ShenandoahHeap::vmop_entry_final_mark() { +- TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross); +- +- try_inject_alloc_failure(); +- VM_ShenandoahFinalMarkStartEvac op; +- VMThread::execute(&op); // jump to entry_final_mark under safepoint +-} +- +-void ShenandoahHeap::vmop_entry_init_updaterefs() { +- TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross); +- +- try_inject_alloc_failure(); +- VM_ShenandoahInitUpdateRefs op; +- VMThread::execute(&op); +-} +- +-void ShenandoahHeap::vmop_entry_final_updaterefs() { +- TraceCollectorStats tcs(monitoring_support()->stw_collection_counters()); +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross); +- +- try_inject_alloc_failure(); +- VM_ShenandoahFinalUpdateRefs op; +- VMThread::execute(&op); +-} +- +-void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) { +- TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross); +- +- try_inject_alloc_failure(); +- VM_ShenandoahFullGC op(cause); +- VMThread::execute(&op); +-} +- +-void 
ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) { +- TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters()); +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross); +- +- VM_ShenandoahDegeneratedGC degenerated_gc((int)point); +- VMThread::execute(°enerated_gc); +-} +- +-void ShenandoahHeap::entry_init_mark() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark); +- +- const char* msg = init_mark_event_message(); +- GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_init_marking(), +- "init marking"); +- +- op_init_mark(); +-} +- +-void ShenandoahHeap::entry_final_mark() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark); +- +- const char* msg = final_mark_event_message(); +- GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_final_marking(), +- "final marking"); +- +- op_final_mark(); +-} +- +-void ShenandoahHeap::entry_init_updaterefs() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs); +- +- static const char* msg = "Pause Init Update Refs"; +- GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- // No workers used in this phase, no setup required +- +- op_init_updaterefs(); +-} +- +-void ShenandoahHeap::entry_final_updaterefs() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs); +- +- static const char* msg = "Pause Final Update Refs"; +- GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(), +- "final reference update"); +- +- op_final_updaterefs(); +-} +- +-void ShenandoahHeap::entry_full(GCCause::Cause cause) { 
+- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc); +- +- static const char* msg = "Pause Full"; +- GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_fullgc(), +- "full gc"); +- +- op_full(cause); +-} +- +-void ShenandoahHeap::entry_degenerated(int point) { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc); +- +- ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point; +- const char* msg = degen_event_message(dpoint); +- GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(), +- "stw degenerated gc"); +- +- set_degenerated_gc_in_progress(true); +- op_degenerated(dpoint); +- set_degenerated_gc_in_progress(false); +-} +- +-void ShenandoahHeap::entry_mark() { +- TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); +- +- const char* msg = conc_mark_event_message(); +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_conc_marking(), +- "concurrent marking"); +- +- try_inject_alloc_failure(); +- op_mark(); +-} +- +-void ShenandoahHeap::entry_evac() { +- ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac); +- TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters()); +- +- static const char *msg = "Concurrent evacuation"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_conc_evac(), +- "concurrent evacuation"); +- +- try_inject_alloc_failure(); +- op_conc_evac(); +-} +- +-void ShenandoahHeap::entry_updaterefs() { +- ShenandoahGCPhase 
phase(ShenandoahPhaseTimings::conc_update_refs); +- +- static const char* msg = "Concurrent update references"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(), +- "concurrent reference update"); +- +- try_inject_alloc_failure(); +- op_updaterefs(); +-} +- +-void ShenandoahHeap::entry_cleanup_early() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_early); +- +- static const char* msg = "Concurrent cleanup"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); +- EventMark em("%s", msg); +- +- // This phase does not use workers, no need for setup +- +- try_inject_alloc_failure(); +- op_cleanup_early(); +-} +- +-void ShenandoahHeap::entry_cleanup_complete() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup_complete); +- +- static const char* msg = "Concurrent cleanup"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); +- EventMark em("%s", msg); +- +- // This phase does not use workers, no need for setup +- +- try_inject_alloc_failure(); +- op_cleanup_complete(); +-} +- +-void ShenandoahHeap::entry_reset() { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset); +- +- static const char* msg = "Concurrent reset"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- ShenandoahWorkerPolicy::calc_workers_for_conc_reset(), +- "concurrent reset"); +- +- try_inject_alloc_failure(); +- op_reset(); +-} +- +-void ShenandoahHeap::entry_preclean() { +- if (ShenandoahPreclean && process_references()) { +- ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean); +- +- static const char* msg = "Concurrent precleaning"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id()); +- EventMark em("%s", msg); +- +- ShenandoahWorkerScope scope(workers(), +- 
ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(), +- "concurrent preclean", +- /* check_workers = */ false); +- +- try_inject_alloc_failure(); +- op_preclean(); +- } +-} +- +-void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) { +- static const char *msg = "Concurrent uncommit"; +- GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true); +- EventMark em("%s", msg); +- +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit); +- +- op_uncommit(shrink_before, shrink_until); +-} +- +-void ShenandoahHeap::try_inject_alloc_failure() { +- if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) { +- _inject_alloc_failure.set(); +- os::naked_short_sleep(1); +- if (cancelled_gc()) { +- log_info(gc)("Allocation failure was successfully injected"); +- } +- } +-} +- +-bool ShenandoahHeap::should_inject_alloc_failure() { +- return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset(); +-} +- +-void ShenandoahHeap::enter_evacuation() { +- _oom_evac_handler.enter_evacuation(); +-} +- +-void ShenandoahHeap::leave_evacuation() { +- _oom_evac_handler.leave_evacuation(); +-} +- +-ShenandoahRegionIterator::ShenandoahRegionIterator() : +- _heap(ShenandoahHeap::heap()), +- _index(0) {} +- +-ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) : +- _heap(heap), +- _index(0) {} +- +-void ShenandoahRegionIterator::reset() { +- _index = 0; +-} +- +-bool ShenandoahRegionIterator::has_next() const { +- return _index < (jint)_heap->num_regions(); +-} +- +-char ShenandoahHeap::gc_state() { +- return _gc_state.raw_value(); +-} +- +-const char* ShenandoahHeap::init_mark_event_message() const { +- assert(!has_forwarded_objects(), "Should not have forwarded objects here"); +- +- bool proc_refs = process_references(); +- bool unload_cls = unload_classes(); +- +- if (proc_refs && unload_cls) { +- return "Pause Init Mark (process weakrefs) (unload classes)"; +- } else if (proc_refs) { +- 
return "Pause Init Mark (process weakrefs)"; +- } else if (unload_cls) { +- return "Pause Init Mark (unload classes)"; +- } else { +- return "Pause Init Mark"; +- } +-} +- +-const char* ShenandoahHeap::final_mark_event_message() const { +- assert(!has_forwarded_objects(), "Should not have forwarded objects here"); +- +- bool proc_refs = process_references(); +- bool unload_cls = unload_classes(); +- +- if (proc_refs && unload_cls) { +- return "Pause Final Mark (process weakrefs) (unload classes)"; +- } else if (proc_refs) { +- return "Pause Final Mark (process weakrefs)"; +- } else if (unload_cls) { +- return "Pause Final Mark (unload classes)"; +- } else { +- return "Pause Final Mark"; +- } +-} +- +-const char* ShenandoahHeap::conc_mark_event_message() const { +- assert(!has_forwarded_objects(), "Should not have forwarded objects here"); +- +- bool proc_refs = process_references(); +- bool unload_cls = unload_classes(); +- +- if (proc_refs && unload_cls) { +- return "Concurrent marking (process weakrefs) (unload classes)"; +- } else if (proc_refs) { +- return "Concurrent marking (process weakrefs)"; +- } else if (unload_cls) { +- return "Concurrent marking (unload classes)"; +- } else { +- return "Concurrent marking"; +- } +-} +- +-const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const { +- switch (point) { +- case _degenerated_unset: +- return "Pause Degenerated GC ()"; +- case _degenerated_outside_cycle: +- return "Pause Degenerated GC (Outside of Cycle)"; +- case _degenerated_mark: +- return "Pause Degenerated GC (Mark)"; +- case _degenerated_evac: +- return "Pause Degenerated GC (Evacuation)"; +- case _degenerated_updaterefs: +- return "Pause Degenerated GC (Update Refs)"; +- default: +- ShouldNotReachHere(); +- return "ERROR"; +- } +-} +- +-ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) { +-#ifdef ASSERT +- assert(_liveness_cache != NULL, "sanity"); +- assert(worker_id < _max_workers, "sanity"); +- for (uint 
i = 0; i < num_regions(); i++) { +- assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty"); +- } +-#endif +- return _liveness_cache[worker_id]; +-} +- +-void ShenandoahHeap::flush_liveness_cache(uint worker_id) { +- assert(worker_id < _max_workers, "sanity"); +- assert(_liveness_cache != NULL, "sanity"); +- ShenandoahLiveData* ld = _liveness_cache[worker_id]; +- for (uint i = 0; i < num_regions(); i++) { +- ShenandoahLiveData live = ld[i]; +- if (live > 0) { +- ShenandoahHeapRegion* r = get_region(i); +- r->increase_live_data_gc_words(live); +- ld[i] = 0; +- } +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,670 +0,0 @@ +-/* +- * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP +- +-#include "gc_implementation/shared/markBitMap.hpp" +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahAllocRequest.hpp" +-#include "gc_implementation/shenandoah/shenandoahLock.hpp" +-#include "gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp" +- +-class ConcurrentGCTimer; +- +-class ShenandoahCollectionSet; +-class ShenandoahCollectorPolicy; +-class ShenandoahConcurrentMark; +-class ShenandoahControlThread; +-class ShenandoahGCSession; +-class ShenandoahGCStateResetter; +-class ShenandoahFreeSet; +-class ShenandoahHeapRegion; +-class ShenandoahHeapRegionClosure; +-class ShenandoahMarkCompact; +-class ShenandoahMonitoringSupport; +-class ShenandoahHeuristics; +-class ShenandoahMarkingContext; +-class ShenandoahMode; +-class ShenandoahPhaseTimings; +-class ShenandoahPacer; +-class ShenandoahVerifier; +-class ShenandoahWorkGang; +-class VMStructs; +- +-// Used for buffering per-region liveness data. +-// Needed since ShenandoahHeapRegion uses atomics to update liveness. +-// The ShenandoahHeap array has max-workers elements, each of which is an array of +-// uint16_t * max_regions. The choice of uint16_t is not accidental: +-// there is a tradeoff between static/dynamic footprint that translates +-// into cache pressure (which is already high during marking), and +-// too many atomic updates. uint32_t is too large, uint8_t is too small. 
+-typedef uint16_t ShenandoahLiveData; +-#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1) +- +-class ShenandoahRegionIterator : public StackObj { +-private: +- ShenandoahHeap* _heap; +- +- shenandoah_padding(0); +- volatile jint _index; +- shenandoah_padding(1); +- +- // No implicit copying: iterators should be passed by reference to capture the state +- ShenandoahRegionIterator(const ShenandoahRegionIterator& that); +- ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o); +- +-public: +- ShenandoahRegionIterator(); +- ShenandoahRegionIterator(ShenandoahHeap* heap); +- +- // Reset iterator to default state +- void reset(); +- +- // Returns next region, or NULL if there are no more regions. +- // This is multi-thread-safe. +- inline ShenandoahHeapRegion* next(); +- +- // This is *not* MT safe. However, in the absence of multithreaded access, it +- // can be used to determine if there is more work to do. +- bool has_next() const; +-}; +- +-class ShenandoahHeapRegionClosure : public StackObj { +-public: +- virtual void heap_region_do(ShenandoahHeapRegion* r) = 0; +- virtual bool is_thread_safe() { return false; } +-}; +- +-typedef ShenandoahLock ShenandoahHeapLock; +-typedef ShenandoahLocker ShenandoahHeapLocker; +- +-// Shenandoah GC is low-pause concurrent GC that uses Brooks forwarding pointers +-// to encode forwarding data. See BrooksPointer for details on forwarding data encoding. +-// See ShenandoahControlThread for GC cycle structure. 
+-// +-class ShenandoahHeap : public SharedHeap { +- friend class ShenandoahAsserts; +- friend class VMStructs; +- friend class ShenandoahGCSession; +- friend class ShenandoahGCStateResetter; +- friend class ShenandoahSafepoint; +- +-// ---------- Locks that guard important data structures in Heap +-// +-private: +- ShenandoahHeapLock _lock; +- +-public: +- ShenandoahHeapLock* lock() { +- return &_lock; +- } +- +-// ---------- Initialization, termination, identification, printing routines +-// +-private: +- static ShenandoahHeap* _heap; +- +-public: +- static ShenandoahHeap* heap(); +- static size_t conservative_max_heap_alignment(); +- +- const char* name() const { return "Shenandoah"; } +- ShenandoahHeap::Name kind() const { return CollectedHeap::ShenandoahHeap; } +- +- ShenandoahHeap(ShenandoahCollectorPolicy* policy); +- jint initialize(); +- void post_initialize(); +- void initialize_heuristics(); +- +- void print_on(outputStream* st) const; +- void print_extended_on(outputStream *st) const; +- void print_tracing_info() const; +- void print_gc_threads_on(outputStream* st) const; +- void print_heap_regions_on(outputStream* st) const; +- +- void stop(); +- +- void prepare_for_verify(); +- void verify(bool silent, VerifyOption vo); +- +-// ---------- Heap counters and metrics +-// +-private: +- size_t _initial_size; +- size_t _minimum_size; +- volatile size_t _soft_max_size; +- shenandoah_padding(0); +- volatile jlong _used; +- volatile size_t _committed; +- volatile jlong _bytes_allocated_since_gc_start; +- shenandoah_padding(1); +- +-public: +- void increase_used(size_t bytes); +- void decrease_used(size_t bytes); +- void set_used(size_t bytes); +- +- void increase_committed(size_t bytes); +- void decrease_committed(size_t bytes); +- void increase_allocated(size_t bytes); +- +- size_t bytes_allocated_since_gc_start(); +- void reset_bytes_allocated_since_gc_start(); +- +- size_t min_capacity() const; +- size_t max_capacity() const; +- size_t soft_max_capacity() 
const; +- size_t initial_capacity() const; +- size_t capacity() const; +- size_t used() const; +- size_t committed() const; +- +- void set_soft_max_capacity(size_t v); +- +-// ---------- Workers handling +-// +-private: +- uint _max_workers; +- +-public: +- uint max_workers(); +- void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN; +- +- ShenandoahWorkGang* workers() const; +- +- void gc_threads_do(ThreadClosure* tcl) const; +- +-// ---------- Heap regions handling machinery +-// +-private: +- MemRegion _heap_region; +- bool _heap_region_special; +- size_t _num_regions; +- ShenandoahHeapRegion** _regions; +- ShenandoahRegionIterator _update_refs_iterator; +- +-public: +- inline size_t num_regions() const { return _num_regions; } +- inline bool is_heap_region_special() { return _heap_region_special; } +- +- inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const; +- inline size_t heap_region_index_containing(const void* addr) const; +- +- inline ShenandoahHeapRegion* const get_region(size_t region_idx) const; +- +- void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; +- void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const; +- +-// ---------- GC state machinery +-// +-// GC state describes the important parts of collector state, that may be +-// used to make barrier selection decisions in the native and generated code. +-// Multiple bits can be set at once. +-// +-// Important invariant: when GC state is zero, the heap is stable, and no barriers +-// are required. +-// +-public: +- enum GCStateBitPos { +- // Heap has forwarded objects: need RB, ACMP, CAS barriers. +- HAS_FORWARDED_BITPOS = 0, +- +- // Heap is under marking: needs SATB barriers. +- MARKING_BITPOS = 1, +- +- // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE) +- EVACUATION_BITPOS = 2, +- +- // Heap is under updating: needs SVRB/SVWB barriers. 
+- UPDATEREFS_BITPOS = 3, +- }; +- +- enum GCState { +- STABLE = 0, +- HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS, +- MARKING = 1 << MARKING_BITPOS, +- EVACUATION = 1 << EVACUATION_BITPOS, +- UPDATEREFS = 1 << UPDATEREFS_BITPOS, +- }; +- +-private: +- ShenandoahSharedBitmap _gc_state; +- ShenandoahSharedFlag _degenerated_gc_in_progress; +- ShenandoahSharedFlag _full_gc_in_progress; +- ShenandoahSharedFlag _full_gc_move_in_progress; +- ShenandoahSharedFlag _progress_last_gc; +- +- void set_gc_state_mask(uint mask, bool value); +- +-public: +- char gc_state(); +- static address gc_state_addr(); +- +- void set_concurrent_mark_in_progress(bool in_progress); +- void set_evacuation_in_progress(bool in_progress); +- void set_update_refs_in_progress(bool in_progress); +- void set_degenerated_gc_in_progress(bool in_progress); +- void set_full_gc_in_progress(bool in_progress); +- void set_full_gc_move_in_progress(bool in_progress); +- void set_has_forwarded_objects(bool cond); +- +- inline bool is_stable() const; +- inline bool is_idle() const; +- inline bool is_concurrent_mark_in_progress() const; +- inline bool is_update_refs_in_progress() const; +- inline bool is_evacuation_in_progress() const; +- inline bool is_degenerated_gc_in_progress() const; +- inline bool is_full_gc_in_progress() const; +- inline bool is_full_gc_move_in_progress() const; +- inline bool has_forwarded_objects() const; +- inline bool is_gc_in_progress_mask(uint mask) const; +- +-// ---------- GC cancellation and degeneration machinery +-// +-// Cancelled GC flag is used to notify concurrent phases that they should terminate. 
+-// +-public: +- enum ShenandoahDegenPoint { +- _degenerated_unset, +- _degenerated_outside_cycle, +- _degenerated_mark, +- _degenerated_evac, +- _degenerated_updaterefs, +- _DEGENERATED_LIMIT +- }; +- +- static const char* degen_point_to_string(ShenandoahDegenPoint point) { +- switch (point) { +- case _degenerated_unset: +- return ""; +- case _degenerated_outside_cycle: +- return "Outside of Cycle"; +- case _degenerated_mark: +- return "Mark"; +- case _degenerated_evac: +- return "Evacuation"; +- case _degenerated_updaterefs: +- return "Update Refs"; +- default: +- ShouldNotReachHere(); +- return "ERROR"; +- } +- }; +- +-private: +- ShenandoahSharedFlag _cancelled_gc; +- inline bool try_cancel_gc(); +- +-public: +- static address cancelled_gc_addr(); +- +- inline bool cancelled_gc() const; +- +- inline void clear_cancelled_gc(); +- +- void cancel_gc(GCCause::Cause cause); +- +-// ---------- GC operations entry points +-// +-public: +- // Entry points to STW GC operations, these cause a related safepoint, that then +- // call the entry method below +- void vmop_entry_init_mark(); +- void vmop_entry_final_mark(); +- void vmop_entry_init_updaterefs(); +- void vmop_entry_final_updaterefs(); +- void vmop_entry_full(GCCause::Cause cause); +- void vmop_degenerated(ShenandoahDegenPoint point); +- +- // Entry methods to normally STW GC operations. These set up logging, monitoring +- // and workers for net VM operation +- void entry_init_mark(); +- void entry_final_mark(); +- void entry_init_updaterefs(); +- void entry_final_updaterefs(); +- void entry_full(GCCause::Cause cause); +- void entry_degenerated(int point); +- +- // Entry methods to normally concurrent GC operations. These set up logging, monitoring +- // for concurrent operation. 
+- void entry_reset(); +- void entry_mark(); +- void entry_preclean(); +- void entry_cleanup_early(); +- void entry_evac(); +- void entry_updaterefs(); +- void entry_cleanup_complete(); +- void entry_uncommit(double shrink_before, size_t shrink_until); +- +-private: +- // Actual work for the phases +- void op_init_mark(); +- void op_final_mark(); +- void op_init_updaterefs(); +- void op_final_updaterefs(); +- void op_full(GCCause::Cause cause); +- void op_degenerated(ShenandoahDegenPoint point); +- void op_degenerated_fail(); +- void op_degenerated_futile(); +- +- void op_reset(); +- void op_mark(); +- void op_preclean(); +- void op_cleanup_early(); +- void op_conc_evac(); +- void op_stw_evac(); +- void op_updaterefs(); +- void op_cleanup_complete(); +- void op_uncommit(double shrink_before, size_t shrink_until); +- +- // Messages for GC trace event, they have to be immortal for +- // passing around the logging/tracing systems +- const char* init_mark_event_message() const; +- const char* final_mark_event_message() const; +- const char* conc_mark_event_message() const; +- const char* degen_event_message(ShenandoahDegenPoint point) const; +- +-// ---------- GC subsystems +-// +-private: +- ShenandoahControlThread* _control_thread; +- ShenandoahCollectorPolicy* _shenandoah_policy; +- ShenandoahMode* _gc_mode; +- ShenandoahHeuristics* _heuristics; +- ShenandoahFreeSet* _free_set; +- ShenandoahConcurrentMark* _scm; +- ShenandoahMarkCompact* _full_gc; +- ShenandoahPacer* _pacer; +- ShenandoahVerifier* _verifier; +- +- ShenandoahPhaseTimings* _phase_timings; +- +- ShenandoahControlThread* control_thread() { return _control_thread; } +- ShenandoahMarkCompact* full_gc() { return _full_gc; } +- +-public: +- ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; } +- ShenandoahHeuristics* heuristics() const { return _heuristics; } +- ShenandoahFreeSet* free_set() const { return _free_set; } +- ShenandoahConcurrentMark* concurrent_mark() { return 
_scm; } +- ShenandoahPacer* pacer() const { return _pacer; } +- +- ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; } +- +- ShenandoahVerifier* verifier(); +- +-// ---------- VM subsystem bindings +-// +-private: +- ShenandoahMonitoringSupport* _monitoring_support; +- ConcurrentGCTimer* _gc_timer; +- +-public: +- ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; } +- +- GCTracer* tracer(); +- GCTimer* gc_timer() const; +- CollectorPolicy* collector_policy() const; +- +-// ---------- Reference processing +-// +-private: +- ReferenceProcessor* _ref_processor; +- ShenandoahSharedFlag _process_references; +- +- void ref_processing_init(); +- +-public: +- ReferenceProcessor* ref_processor() { return _ref_processor;} +- void set_process_references(bool pr); +- bool process_references() const; +- +-// ---------- Class Unloading +-// +-private: +- ShenandoahSharedFlag _unload_classes; +- +-public: +- void set_unload_classes(bool uc); +- bool unload_classes() const; +- +- // Delete entries for dead interned string and clean up unreferenced symbols +- // in symbol table, possibly in parallel. +- void unload_classes_and_cleanup_tables(bool full_gc); +- +-// ---------- Generic interface hooks +-// Minor things that super-interface expects us to implement to play nice with +-// the rest of runtime. Some of the things here are not required to be implemented, +-// and can be stubbed out. 
+-// +-public: +- AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL); +- bool is_maximal_no_gc() const shenandoah_not_implemented_return(false); +- +- bool is_in(const void* p) const; +- +- // All objects can potentially move +- bool is_scavengable(const void* addr) { return true; } +- +- void collect(GCCause::Cause cause); +- void do_full_collection(bool clear_all_soft_refs); +- +- // Used for parsing heap during error printing +- HeapWord* block_start(const void* addr) const; +- size_t block_size(const HeapWord* addr) const; +- bool block_is_obj(const HeapWord* addr) const; +- +- // Used for native heap walkers: heap dumpers, mostly +- void object_iterate(ObjectClosure* cl); +- void safe_object_iterate(ObjectClosure* cl); +- void space_iterate(SpaceClosure* scl) shenandoah_not_implemented; +- void oop_iterate(ExtendedOopClosure* cl); +- Space* space_containing(const void* oop) const shenandoah_not_implemented_return(NULL); +- +- // Used by RMI +- jlong millis_since_last_gc(); +- +- bool can_elide_tlab_store_barriers() const { return true; } +- oop new_store_pre_barrier(JavaThread* thread, oop new_obj) { return new_obj; } +- bool can_elide_initializing_store_barrier(oop new_obj) { return true; } +- bool card_mark_must_follow_store() const { return false; } +- +- bool is_in_partial_collection(const void* p) shenandoah_not_implemented_return(false); +- bool supports_heap_inspection() const { return true; } +- +- void gc_prologue(bool b); +- void gc_epilogue(bool b); +- +- void acquire_pending_refs_lock(); +- void release_pending_refs_lock(); +- +-// ---------- Code roots handling hooks +-// +-public: +- void register_nmethod(nmethod* nm); +- void unregister_nmethod(nmethod* nm); +- +-// ---------- Pinning hooks +-// +-public: +- // Shenandoah supports per-object (per-region) pinning +- bool supports_object_pinning() const { return true; } +- +- oop pin_object(JavaThread* thread, oop obj); +- void unpin_object(JavaThread* thread, oop obj); +- 
+- void sync_pinned_region_status(); +- void assert_pinned_region_status() NOT_DEBUG_RETURN; +- +-// ---------- Allocation support +-// +-private: +- HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region); +- inline HeapWord* allocate_from_gclab(Thread* thread, size_t size); +- HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size); +- HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size); +- +-public: +- HeapWord* allocate_memory(ShenandoahAllocRequest& request); +- HeapWord* mem_allocate(size_t size, bool* what); +- +- void notify_mutator_alloc_words(size_t words, bool waste); +- +- // Shenandoah supports TLAB allocation +- bool supports_tlab_allocation() const { return true; } +- +- HeapWord* allocate_new_tlab(size_t word_size); +- size_t tlab_capacity(Thread *thr) const; +- size_t unsafe_max_tlab_alloc(Thread *thread) const; +- size_t max_tlab_size() const; +- size_t tlab_used(Thread* ignored) const; +- +- void resize_tlabs(); +- void resize_all_tlabs(); +- +- void accumulate_statistics_tlabs(); +- void accumulate_statistics_all_gclabs(); +- +- void make_parsable(bool retire_tlabs); +- void ensure_parsability(bool retire_tlabs); +- +-// ---------- Marking support +-// +-private: +- ShenandoahMarkingContext* _marking_context; +- MemRegion _bitmap_region; +- MemRegion _aux_bitmap_region; +- MarkBitMap _verification_bit_map; +- MarkBitMap _aux_bit_map; +- +- size_t _bitmap_size; +- size_t _bitmap_regions_per_slice; +- size_t _bitmap_bytes_per_slice; +- +- size_t _pretouch_heap_page_size; +- size_t _pretouch_bitmap_page_size; +- +- bool _bitmap_region_special; +- bool _aux_bitmap_region_special; +- +- ShenandoahLiveData** _liveness_cache; +- +-public: +- inline ShenandoahMarkingContext* complete_marking_context() const; +- inline ShenandoahMarkingContext* marking_context() const; +- inline void mark_complete_marking_context(); +- inline void mark_incomplete_marking_context(); +- +- template 
+- inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl); +- +- template +- inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); +- +- template +- inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit); +- +- void reset_mark_bitmap(); +- +- // SATB barriers hooks +- inline bool requires_marking(const void* entry) const; +- void force_satb_flush_all_threads(); +- +- // Support for bitmap uncommits +- bool commit_bitmap_slice(ShenandoahHeapRegion *r); +- bool uncommit_bitmap_slice(ShenandoahHeapRegion *r); +- bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false); +- +- // Liveness caching support +- ShenandoahLiveData* get_liveness_cache(uint worker_id); +- void flush_liveness_cache(uint worker_id); +- +- size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; } +- +-// ---------- Evacuation support +-// +-private: +- ShenandoahCollectionSet* _collection_set; +- ShenandoahEvacOOMHandler _oom_evac_handler; +- +- void evacuate_and_update_roots(); +- +-public: +- static address in_cset_fast_test_addr(); +- +- ShenandoahCollectionSet* collection_set() const { return _collection_set; } +- +- // Checks if object is in the collection set. +- inline bool in_collection_set(oop obj) const; +- +- // Checks if location is in the collection set. Can be interior pointer, not the oop itself. +- inline bool in_collection_set_loc(void* loc) const; +- +- // Evacuates object src. Returns the evacuated object, either evacuated +- // by this thread, or by some other thread. +- inline oop evacuate_object(oop src, Thread* thread); +- +- // Call before/after evacuation. 
+- void enter_evacuation(); +- void leave_evacuation(); +- +-// ---------- Helper functions +-// +-public: +- template +- inline oop evac_update_with_forwarded(T* p); +- +- template +- inline oop maybe_update_with_forwarded(T* p); +- +- template +- inline oop maybe_update_with_forwarded_not_null(T* p, oop obj); +- +- template +- inline oop update_with_forwarded_not_null(T* p, oop obj); +- +- static inline oop cas_oop(oop n, narrowOop* addr, oop c); +- static inline oop cas_oop(oop n, oop* addr, oop c); +- static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c); +- +- void trash_humongous_region_at(ShenandoahHeapRegion *r); +- +- void complete_marking(); +- +-private: +- void trash_cset_regions(); +- void update_heap_references(bool concurrent); +- +-// ---------- Testing helpers functions +-// +-private: +- ShenandoahSharedFlag _inject_alloc_failure; +- +- void try_inject_alloc_failure(); +- bool should_inject_alloc_failure(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeap.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,501 +0,0 @@ +-/* +- * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP +- +-#include "gc_implementation/shared/markBitMap.inline.hpp" +-#include "memory/threadLocalAllocBuffer.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahControlThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "oops/oop.inline.hpp" +-#include "runtime/atomic.hpp" +-#include "runtime/prefetch.hpp" +-#include "runtime/prefetch.inline.hpp" +-#include "utilities/copy.hpp" +-#include "utilities/globalDefinitions.hpp" +- +-inline ShenandoahHeap* ShenandoahHeap::heap() { +- assert(_heap != NULL, "Heap is not initialized yet"); +- return _heap; +-} +- +-inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() { +- size_t new_index = Atomic::add((size_t) 1, &_index); +- // get_region() 
provides the bounds-check and returns NULL on OOB. +- return _heap->get_region(new_index - 1); +-} +- +-inline bool ShenandoahHeap::has_forwarded_objects() const { +- return _gc_state.is_set(HAS_FORWARDED); +-} +- +-inline ShenandoahWorkGang* ShenandoahHeap::workers() const { +- return (ShenandoahWorkGang*)_workers; +-} +- +-inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const { +- uintptr_t region_start = ((uintptr_t) addr); +- uintptr_t index = (region_start - (uintptr_t) base()) >> ShenandoahHeapRegion::region_size_bytes_shift(); +- assert(index < num_regions(), err_msg("Region index is in bounds: " PTR_FORMAT, p2i(addr))); +- return index; +-} +- +-inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const { +- size_t index = heap_region_index_containing(addr); +- ShenandoahHeapRegion* const result = get_region(index); +- assert(addr >= result->bottom() && addr < result->end(), err_msg("Heap region contains the address: " PTR_FORMAT, p2i(addr))); +- return result; +-} +- +-template +-inline oop ShenandoahHeap::update_with_forwarded_not_null(T* p, oop obj) { +- if (in_collection_set(obj)) { +- shenandoah_assert_forwarded_except(p, obj, is_full_gc_in_progress() || cancelled_gc() || is_degenerated_gc_in_progress()); +- obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); +- oopDesc::encode_store_heap_oop(p, obj); +- } +-#ifdef ASSERT +- else { +- shenandoah_assert_not_forwarded(p, obj); +- } +-#endif +- return obj; +-} +- +-template +-inline oop ShenandoahHeap::maybe_update_with_forwarded(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (! 
oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- return maybe_update_with_forwarded_not_null(p, obj); +- } else { +- return NULL; +- } +-} +- +-template +-inline oop ShenandoahHeap::evac_update_with_forwarded(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop heap_oop = oopDesc::decode_heap_oop_not_null(o); +- if (in_collection_set(heap_oop)) { +- oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); +- if (forwarded_oop == heap_oop) { +- forwarded_oop = evacuate_object(heap_oop, Thread::current()); +- } +- oop prev = cas_oop(forwarded_oop, p, heap_oop); +- if (prev == heap_oop) { +- return forwarded_oop; +- } else { +- return NULL; +- } +- } +- return heap_oop; +- } else { +- return NULL; +- } +-} +- +-inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) { +- assert(is_ptr_aligned(addr, sizeof(narrowOop)), err_msg("Address should be aligned: " PTR_FORMAT, p2i(addr))); +- return (oop) Atomic::cmpxchg_ptr(n, addr, c); +-} +- +-inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) { +- narrowOop val = oopDesc::encode_heap_oop(n); +- return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, c)); +-} +- +-inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) { +- assert(is_ptr_aligned(addr, sizeof(narrowOop)), err_msg("Address should be aligned: " PTR_FORMAT, p2i(addr))); +- narrowOop cmp = oopDesc::encode_heap_oop(c); +- narrowOop val = oopDesc::encode_heap_oop(n); +- return oopDesc::decode_heap_oop((narrowOop) Atomic::cmpxchg(val, addr, cmp)); +-} +- +-template +-inline oop ShenandoahHeap::maybe_update_with_forwarded_not_null(T* p, oop heap_oop) { +- shenandoah_assert_not_in_cset_loc_except(p, !is_in(p) || is_full_gc_in_progress() || is_degenerated_gc_in_progress()); +- shenandoah_assert_correct(p, heap_oop); +- +- if (in_collection_set(heap_oop)) { +- oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); +- 
+- shenandoah_assert_forwarded_except(p, heap_oop, is_full_gc_in_progress() || is_degenerated_gc_in_progress()); +- shenandoah_assert_not_forwarded(p, forwarded_oop); +- shenandoah_assert_not_in_cset_except(p, forwarded_oop, cancelled_gc()); +- +- // If this fails, another thread wrote to p before us, it will be logged in SATB and the +- // reference be updated later. +- oop witness = cas_oop(forwarded_oop, p, heap_oop); +- +- if (witness != heap_oop) { +- // CAS failed, someone had beat us to it. Normally, we would return the failure witness, +- // because that would be the proper write of to-space object, enforced by strong barriers. +- // However, there is a corner case with arraycopy. It can happen that a Java thread +- // beats us with an arraycopy, which first copies the array, which potentially contains +- // from-space refs, and only afterwards updates all from-space refs to to-space refs, +- // which leaves a short window where the new array elements can be from-space. +- // In this case, we can just resolve the result again. As we resolve, we need to consider +- // the contended write might have been NULL. +- oop result = ShenandoahBarrierSet::resolve_forwarded(witness); +- shenandoah_assert_not_forwarded_except(p, result, (result == NULL)); +- shenandoah_assert_not_in_cset_except(p, result, (result == NULL) || cancelled_gc()); +- return result; +- } else { +- // Success! We have updated with known to-space copy. We have already asserted it is sane. 
+- return forwarded_oop; +- } +- } else { +- shenandoah_assert_not_forwarded(p, heap_oop); +- return heap_oop; +- } +-} +- +-inline bool ShenandoahHeap::cancelled_gc() const { +- return _cancelled_gc.is_set(); +-} +- +-inline bool ShenandoahHeap::try_cancel_gc() { +- return _cancelled_gc.try_set(); +-} +- +-inline void ShenandoahHeap::clear_cancelled_gc() { +- _cancelled_gc.unset(); +- _oom_evac_handler.clear(); +-} +- +-inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size) { +- assert(UseTLAB, "TLABs should be enabled"); +- +- if (!thread->gclab().is_initialized()) { +- assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), +- err_msg("Performance: thread should have GCLAB: %s", thread->name())); +- // No GCLABs in this thread, fallback to shared allocation +- return NULL; +- } +- HeapWord *obj = thread->gclab().allocate(size); +- if (obj != NULL) { +- return obj; +- } +- // Otherwise... +- return allocate_from_gclab_slow(thread, size); +-} +- +-inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) { +- if (Thread::current()->is_oom_during_evac()) { +- // This thread went through the OOM during evac protocol and it is safe to return +- // the forward pointer. It must not attempt to evacuate any more. 
+- return ShenandoahBarrierSet::resolve_forwarded(p); +- } +- +- assert(thread->is_evac_allowed(), "must be enclosed in in oom-evac scope"); +- +- size_t size = p->size(); +- +- assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects"); +- +- bool alloc_from_gclab = true; +- HeapWord* copy = NULL; +- +-#ifdef ASSERT +- if (ShenandoahOOMDuringEvacALot && +- (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call +- copy = NULL; +- } else { +-#endif +- if (UseTLAB) { +- copy = allocate_from_gclab(thread, size); +- } +- if (copy == NULL) { +- ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size); +- copy = allocate_memory(req); +- alloc_from_gclab = false; +- } +-#ifdef ASSERT +- } +-#endif +- +- if (copy == NULL) { +- control_thread()->handle_alloc_failure_evac(size); +- +- _oom_evac_handler.handle_out_of_memory_during_evacuation(); +- +- return ShenandoahBarrierSet::resolve_forwarded(p); +- } +- +- // Copy the object: +- Copy::aligned_disjoint_words((HeapWord*) p, copy, size); +- +- // Try to install the new forwarding pointer. +- oop copy_val = oop(copy); +- oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val); +- if (result == copy_val) { +- // Successfully evacuated. Our copy is now the public one! +- shenandoah_assert_correct(NULL, copy_val); +- return copy_val; +- } else { +- // Failed to evacuate. We need to deal with the object that is left behind. Since this +- // new allocation is certainly after TAMS, it will be considered live in the next cycle. +- // But if it happens to contain references to evacuated regions, those references would +- // not get updated for this stale copy during this cycle, and we will crash while scanning +- // it the next cycle. +- // +- // For GCLAB allocations, it is enough to rollback the allocation ptr. Either the next +- // object will overwrite this stale copy, or the filler object on LAB retirement will +- // do this. 
For non-GCLAB allocations, we have no way to retract the allocation, and +- // have to explicitly overwrite the copy with the filler object. With that overwrite, +- // we have to keep the fwdptr initialized and pointing to our (stale) copy. +- if (alloc_from_gclab) { +- thread->gclab().rollback(size); +- } else { +- fill_with_object(copy, size); +- shenandoah_assert_correct(NULL, copy_val); +- } +- shenandoah_assert_correct(NULL, result); +- return result; +- } +-} +- +-inline bool ShenandoahHeap::requires_marking(const void* entry) const { +- return !_marking_context->is_marked(oop(entry)); +-} +- +-inline bool ShenandoahHeap::in_collection_set(oop p) const { +- assert(collection_set() != NULL, "Sanity"); +- return collection_set()->is_in(p); +-} +- +-inline bool ShenandoahHeap::in_collection_set_loc(void* p) const { +- assert(collection_set() != NULL, "Sanity"); +- return collection_set()->is_in_loc(p); +-} +- +-inline bool ShenandoahHeap::is_stable() const { +- return _gc_state.is_clear(); +-} +- +-inline bool ShenandoahHeap::is_idle() const { +- return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS); +-} +- +-inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const { +- return _gc_state.is_set(MARKING); +-} +- +-inline bool ShenandoahHeap::is_evacuation_in_progress() const { +- return _gc_state.is_set(EVACUATION); +-} +- +-inline bool ShenandoahHeap::is_gc_in_progress_mask(uint mask) const { +- return _gc_state.is_set(mask); +-} +- +-inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const { +- return _degenerated_gc_in_progress.is_set(); +-} +- +-inline bool ShenandoahHeap::is_full_gc_in_progress() const { +- return _full_gc_in_progress.is_set(); +-} +- +-inline bool ShenandoahHeap::is_full_gc_move_in_progress() const { +- return _full_gc_move_in_progress.is_set(); +-} +- +-inline bool ShenandoahHeap::is_update_refs_in_progress() const { +- return _gc_state.is_set(UPDATEREFS); +-} +- +-template +-inline void 
ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) { +- marked_object_iterate(region, cl, region->top()); +-} +- +-template +-inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit) { +- assert(! region->is_humongous_continuation(), "no humongous continuation regions here"); +- +- ShenandoahMarkingContext* const ctx = complete_marking_context(); +- assert(ctx->is_complete(), "sanity"); +- +- MarkBitMap* mark_bit_map = ctx->mark_bit_map(); +- HeapWord* tams = ctx->top_at_mark_start(region); +- +- size_t skip_bitmap_delta = 1; +- HeapWord* start = region->bottom(); +- HeapWord* end = MIN2(tams, region->end()); +- +- // Step 1. Scan below the TAMS based on bitmap data. +- HeapWord* limit_bitmap = MIN2(limit, tams); +- +- // Try to scan the initial candidate. If the candidate is above the TAMS, it would +- // fail the subsequent "< limit_bitmap" checks, and fall through to Step 2. +- HeapWord* cb = mark_bit_map->getNextMarkedWordAddress(start, end); +- +- intx dist = ShenandoahMarkScanPrefetch; +- if (dist > 0) { +- // Batched scan that prefetches the oop data, anticipating the access to +- // either header, oop field, or forwarding pointer. Not that we cannot +- // touch anything in oop, while it still being prefetched to get enough +- // time for prefetch to work. This is why we try to scan the bitmap linearly, +- // disregarding the object size. However, since we know forwarding pointer +- // preceeds the object, we can skip over it. Once we cannot trust the bitmap, +- // there is no point for prefetching the oop contents, as oop->size() will +- // touch it prematurely. +- +- // No variable-length arrays in standard C++, have enough slots to fit +- // the prefetch distance. 
+- static const int SLOT_COUNT = 256; +- guarantee(dist <= SLOT_COUNT, "adjust slot count"); +- HeapWord* slots[SLOT_COUNT]; +- +- int avail; +- do { +- avail = 0; +- for (int c = 0; (c < dist) && (cb < limit_bitmap); c++) { +- Prefetch::read(cb, oopDesc::mark_offset_in_bytes()); +- slots[avail++] = cb; +- cb += skip_bitmap_delta; +- if (cb < limit_bitmap) { +- cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap); +- } +- } +- +- for (int c = 0; c < avail; c++) { +- assert (slots[c] < tams, err_msg("only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(tams))); +- assert (slots[c] < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(slots[c]), p2i(limit))); +- oop obj = oop(slots[c]); +- assert(!oopDesc::is_null(obj), "sanity"); +- assert(obj->is_oop(), "sanity"); +- assert(_marking_context->is_marked(obj), "object expected to be marked"); +- cl->do_object(obj); +- } +- } while (avail > 0); +- } else { +- while (cb < limit_bitmap) { +- assert (cb < tams, err_msg("only objects below TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(tams))); +- assert (cb < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cb), p2i(limit))); +- oop obj = oop(cb); +- assert(!oopDesc::is_null(obj), "sanity"); +- assert(obj->is_oop(), "sanity"); +- assert(_marking_context->is_marked(obj), "object expected to be marked"); +- cl->do_object(obj); +- cb += skip_bitmap_delta; +- if (cb < limit_bitmap) { +- cb = mark_bit_map->getNextMarkedWordAddress(cb, limit_bitmap); +- } +- } +- } +- +- // Step 2. Accurate size-based traversal, happens past the TAMS. +- // This restarts the scan at TAMS, which makes sure we traverse all objects, +- // regardless of what happened at Step 1. 
+- HeapWord* cs = tams; +- while (cs < limit) { +- assert (cs >= tams, err_msg("only objects past TAMS here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(tams))); +- assert (cs < limit, err_msg("only objects below limit here: " PTR_FORMAT " (" PTR_FORMAT ")", p2i(cs), p2i(limit))); +- oop obj = oop(cs); +- int size = obj->size(); +- assert(!oopDesc::is_null(obj), "sanity"); +- assert(obj->is_oop(), "sanity"); +- assert(_marking_context->is_marked(obj), "object expected to be marked"); +- cl->do_object(obj); +- cs += size; +- } +-} +- +-template +-class ShenandoahObjectToOopClosure : public ObjectClosure { +- T* _cl; +-public: +- ShenandoahObjectToOopClosure(T* cl) : _cl(cl) {} +- +- void do_object(oop obj) { +- obj->oop_iterate(_cl); +- } +-}; +- +-template +-class ShenandoahObjectToOopBoundedClosure : public ObjectClosure { +- T* _cl; +- MemRegion _bounds; +-public: +- ShenandoahObjectToOopBoundedClosure(T* cl, HeapWord* bottom, HeapWord* top) : +- _cl(cl), _bounds(bottom, top) {} +- +- void do_object(oop obj) { +- obj->oop_iterate(_cl, _bounds); +- } +-}; +- +-template +-inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* top) { +- if (region->is_humongous()) { +- HeapWord* bottom = region->bottom(); +- if (top > bottom) { +- region = region->humongous_start_region(); +- ShenandoahObjectToOopBoundedClosure objs(cl, bottom, top); +- marked_object_iterate(region, &objs); +- } +- } else { +- ShenandoahObjectToOopClosure objs(cl); +- marked_object_iterate(region, &objs, top); +- } +-} +- +-inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const { +- if (region_idx < _num_regions) { +- return _regions[region_idx]; +- } else { +- return NULL; +- } +-} +- +-inline void ShenandoahHeap::mark_complete_marking_context() { +- _marking_context->mark_complete(); +-} +- +-inline void ShenandoahHeap::mark_incomplete_marking_context() { +- _marking_context->mark_incomplete(); +-} +- +-inline 
ShenandoahMarkingContext* ShenandoahHeap::complete_marking_context() const { +- assert (_marking_context->is_complete()," sanity"); +- return _marking_context; +-} +- +-inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const { +- return _marking_context; +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,110 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp" +-#include "memory/resourceArea.hpp" +-#include "runtime/perfData.hpp" +- +-ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() : +- _last_sample_millis(0) +-{ +- if (UsePerfData && ShenandoahRegionSampling) { +- EXCEPTION_MARK; +- ResourceMark rm; +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- size_t num_regions = heap->num_regions(); +- const char* cns = PerfDataManager::name_space("shenandoah", "regions"); +- _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); +- strcpy(_name_space, cns); +- +- const char* cname = PerfDataManager::counter_name(_name_space, "timestamp"); +- _timestamp = PerfDataManager::create_long_variable(SUN_GC, cname, PerfData::U_None, CHECK); +- +- cname = PerfDataManager::counter_name(_name_space, "max_regions"); +- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, num_regions, CHECK); +- +- cname = PerfDataManager::counter_name(_name_space, "region_size"); +- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None, ShenandoahHeapRegion::region_size_bytes() >> 10, CHECK); +- +- cname = PerfDataManager::counter_name(_name_space, "status"); +- _status = PerfDataManager::create_long_variable(SUN_GC, cname, +- PerfData::U_None, CHECK); +- +- _regions_data = NEW_C_HEAP_ARRAY(PerfVariable*, num_regions, mtGC); +- for (uint i = 0; i < num_regions; i++) { +- const char* reg_name = PerfDataManager::name_space(_name_space, "region", i); +- const char* data_name = PerfDataManager::counter_name(reg_name, "data"); +- const char* ns = PerfDataManager::ns_to_string(SUN_GC); +- const char* fullname = PerfDataManager::counter_name(ns, data_name); +- 
assert(!PerfDataManager::exists(fullname), "must not exist"); +- _regions_data[i] = PerfDataManager::create_long_variable(SUN_GC, data_name, +- PerfData::U_None, CHECK); +- } +- } +-} +- +-ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() { +- if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC); +-} +- +-void ShenandoahHeapRegionCounters::update() { +- if (ShenandoahRegionSampling) { +- jlong current = os::javaTimeMillis(); +- jlong last = _last_sample_millis; +- if (current - last > ShenandoahRegionSamplingRate && +- Atomic::cmpxchg(current, &_last_sample_millis, last) == last) { +- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- jlong status = 0; +- if (heap->is_concurrent_mark_in_progress()) status |= 1 << 0; +- if (heap->is_evacuation_in_progress()) status |= 1 << 1; +- if (heap->is_update_refs_in_progress()) status |= 1 << 2; +- _status->set_value(status); +- +- _timestamp->set_value(os::elapsed_counter()); +- +- size_t num_regions = heap->num_regions(); +- +- { +- ShenandoahHeapLocker locker(heap->lock()); +- size_t rs = ShenandoahHeapRegion::region_size_bytes(); +- for (uint i = 0; i < num_regions; i++) { +- ShenandoahHeapRegion* r = heap->get_region(i); +- jlong data = 0; +- data |= ((100 * r->used() / rs) & PERCENT_MASK) << USED_SHIFT; +- data |= ((100 * r->get_live_data_bytes() / rs) & PERCENT_MASK) << LIVE_SHIFT; +- data |= ((100 * r->get_tlab_allocs() / rs) & PERCENT_MASK) << TLAB_SHIFT; +- data |= ((100 * r->get_gclab_allocs() / rs) & PERCENT_MASK) << GCLAB_SHIFT; +- data |= ((100 * r->get_shared_allocs() / rs) & PERCENT_MASK) << SHARED_SHIFT; +- data |= (r->state_ordinal() & STATUS_MASK) << STATUS_SHIFT; +- _regions_data[i]->set_value(data); +- } +- } +- } +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp +--- 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,83 +0,0 @@ +-/* +- * Copyright (c) 2016, 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP +- +-#include "memory/allocation.hpp" +- +-/** +- * This provides the following in JVMStat: +- * +- * constants: +- * - sun.gc.shenandoah.regions.timestamp the timestamp for this sample +- * - sun.gc.shenandoah.regions.max_regions maximum number of regions +- * - sun.gc.shenandoah.regions.region_size size per region, in kilobytes +- * +- * variables: +- * - sun.gc.shenandoah.regions.status current GC status: +- * - bit 0 set when marking in progress +- * - bit 1 set when evacuation in progress +- * - bit 2 set when update refs in progress +- * +- * one variable counter per region, with $max_regions (see above) counters: +- * - sun.gc.shenandoah.regions.region.$i.data +- * where $ is the region number from 0 <= i < $max_regions +- * +- * .data is in the following format: +- * - bits 0-6 used memory in percent +- * - bits 7-13 live memory in percent +- * - bits 14-20 tlab allocated memory in percent +- * - bits 21-27 gclab allocated memory in percent +- * - bits 28-34 shared allocated memory in percent +- * - bits 35-41 +- * - bits 42-50 +- * - bits 51-57 +- * - bits describe the state as recorded in ShenandoahHeapRegion +- */ +-class ShenandoahHeapRegionCounters : public CHeapObj { +-private: +- static const jlong PERCENT_MASK = 0x7f; +- static const jlong STATUS_MASK = 0x3f; +- +- static const jlong USED_SHIFT = 0; +- static const jlong LIVE_SHIFT = 7; +- static const jlong TLAB_SHIFT = 14; +- static const jlong GCLAB_SHIFT = 21; +- static const jlong SHARED_SHIFT = 28; +- +- static const jlong STATUS_SHIFT = 58; +- +- char* _name_space; +- PerfLongVariable** _regions_data; +- PerfLongVariable* _timestamp; +- PerfLongVariable* _status; +- volatile jlong _last_sample_millis; +- +-public: +- ShenandoahHeapRegionCounters(); +- ~ShenandoahHeapRegionCounters(); +- void update(); +-}; +- +-#endif // 
SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONCOUNTERS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,669 +0,0 @@ +-/* +- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "memory/allocation.hpp" +-#include "gc_implementation/shared/spaceDecorator.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "jfr/jfrEvents.hpp" +-#include "memory/space.inline.hpp" +-#include "memory/resourceArea.hpp" +-#include "memory/universe.hpp" +-#include "oops/oop.inline.hpp" +-#include "runtime/java.hpp" +-#include "runtime/mutexLocker.hpp" +-#include "runtime/os.hpp" +-#include "runtime/safepoint.hpp" +-#include "utilities/align.hpp" +- +-size_t ShenandoahHeapRegion::RegionCount = 0; +-size_t ShenandoahHeapRegion::RegionSizeBytes = 0; +-size_t ShenandoahHeapRegion::RegionSizeWords = 0; +-size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0; +-size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0; +-size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0; +-size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0; +-size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0; +-size_t ShenandoahHeapRegion::HumongousThresholdWords = 0; +-size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0; +-size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0; +- +-ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) : +- _index(index), +- _bottom(start), +- _end(start + RegionSizeWords), +- _new_top(NULL), +- _empty_time(os::elapsedTime()), +- _state(committed ? 
_empty_committed : _empty_uncommitted), +- _top(start), +- _tlab_allocs(0), +- _gclab_allocs(0), +- _live_data(0), +- _critical_pins(0), +- _update_watermark(start) { +- +- assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end), +- "invalid space boundaries"); +- if (ZapUnusedHeapArea && committed) { +- SpaceMangler::mangle_region(MemRegion(_bottom, _end)); +- } +-} +- +-void ShenandoahHeapRegion::report_illegal_transition(const char *method) { +- ResourceMark rm; +- stringStream ss; +- ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method); +- print_on(&ss); +- fatal(ss.as_string()); +-} +- +-void ShenandoahHeapRegion::make_regular_allocation() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _empty_uncommitted: +- do_commit(); +- case _empty_committed: +- set_state(_regular); +- case _regular: +- case _pinned: +- return; +- default: +- report_illegal_transition("regular allocation"); +- } +-} +- +-void ShenandoahHeapRegion::make_regular_bypass() { +- shenandoah_assert_heaplocked(); +- assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(), +- "only for full or degen GC"); +- +- switch (_state) { +- case _empty_uncommitted: +- do_commit(); +- case _empty_committed: +- case _cset: +- case _humongous_start: +- case _humongous_cont: +- set_state(_regular); +- return; +- case _pinned_cset: +- set_state(_pinned); +- return; +- case _regular: +- case _pinned: +- return; +- default: +- report_illegal_transition("regular bypass"); +- } +-} +- +-void ShenandoahHeapRegion::make_humongous_start() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _empty_uncommitted: +- do_commit(); +- case _empty_committed: +- set_state(_humongous_start); +- return; +- default: +- report_illegal_transition("humongous start allocation"); +- } +-} +- +-void ShenandoahHeapRegion::make_humongous_start_bypass() { +- 
shenandoah_assert_heaplocked(); +- assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC"); +- +- switch (_state) { +- case _empty_committed: +- case _regular: +- case _humongous_start: +- case _humongous_cont: +- set_state(_humongous_start); +- return; +- default: +- report_illegal_transition("humongous start bypass"); +- } +-} +- +-void ShenandoahHeapRegion::make_humongous_cont() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _empty_uncommitted: +- do_commit(); +- case _empty_committed: +- set_state(_humongous_cont); +- return; +- default: +- report_illegal_transition("humongous continuation allocation"); +- } +-} +- +-void ShenandoahHeapRegion::make_humongous_cont_bypass() { +- shenandoah_assert_heaplocked(); +- assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC"); +- +- switch (_state) { +- case _empty_committed: +- case _regular: +- case _humongous_start: +- case _humongous_cont: +- set_state(_humongous_cont); +- return; +- default: +- report_illegal_transition("humongous continuation bypass"); +- } +-} +- +-void ShenandoahHeapRegion::make_pinned() { +- shenandoah_assert_heaplocked(); +- assert(pin_count() > 0, err_msg("Should have pins: " SIZE_FORMAT, pin_count())); +- +- switch (_state) { +- case _regular: +- set_state(_pinned); +- case _pinned_cset: +- case _pinned: +- return; +- case _humongous_start: +- set_state(_pinned_humongous_start); +- case _pinned_humongous_start: +- return; +- case _cset: +- set_state(_pinned_cset); +- return; +- default: +- report_illegal_transition("pinning"); +- } +-} +- +-void ShenandoahHeapRegion::make_unpinned() { +- shenandoah_assert_heaplocked(); +- assert(pin_count() == 0, err_msg("Should not have pins: " SIZE_FORMAT, pin_count())); +- +- switch (_state) { +- case _pinned: +- set_state(_regular); +- return; +- case _regular: +- case _humongous_start: +- return; +- case _pinned_cset: +- set_state(_cset); +- return; +- case _pinned_humongous_start: +- 
set_state(_humongous_start); +- return; +- default: +- report_illegal_transition("unpinning"); +- } +-} +- +-void ShenandoahHeapRegion::make_cset() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _regular: +- set_state(_cset); +- case _cset: +- return; +- default: +- report_illegal_transition("cset"); +- } +-} +- +-void ShenandoahHeapRegion::make_trash() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _cset: +- // Reclaiming cset regions +- case _humongous_start: +- case _humongous_cont: +- // Reclaiming humongous regions +- case _regular: +- // Immediate region reclaim +- set_state(_trash); +- return; +- default: +- report_illegal_transition("trashing"); +- } +-} +- +-void ShenandoahHeapRegion::make_trash_immediate() { +- make_trash(); +- +- // On this path, we know there are no marked objects in the region, +- // tell marking context about it to bypass bitmap resets. +- ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this); +-} +- +-void ShenandoahHeapRegion::make_empty() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _trash: +- set_state(_empty_committed); +- _empty_time = os::elapsedTime(); +- return; +- default: +- report_illegal_transition("emptying"); +- } +-} +- +-void ShenandoahHeapRegion::make_uncommitted() { +- shenandoah_assert_heaplocked(); +- switch (_state) { +- case _empty_committed: +- do_uncommit(); +- set_state(_empty_uncommitted); +- return; +- default: +- report_illegal_transition("uncommiting"); +- } +-} +- +-void ShenandoahHeapRegion::make_committed_bypass() { +- shenandoah_assert_heaplocked(); +- assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC"); +- +- switch (_state) { +- case _empty_uncommitted: +- do_commit(); +- set_state(_empty_committed); +- return; +- default: +- report_illegal_transition("commit bypass"); +- } +-} +- +-void ShenandoahHeapRegion::reset_alloc_metadata() { +- _tlab_allocs = 0; +- _gclab_allocs = 0; +-} +- +-size_t 
ShenandoahHeapRegion::get_shared_allocs() const { +- return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize; +-} +- +-size_t ShenandoahHeapRegion::get_tlab_allocs() const { +- return _tlab_allocs * HeapWordSize; +-} +- +-size_t ShenandoahHeapRegion::get_gclab_allocs() const { +- return _gclab_allocs * HeapWordSize; +-} +- +-void ShenandoahHeapRegion::set_live_data(size_t s) { +- assert(Thread::current()->is_VM_thread(), "by VM thread"); +- size_t v = s >> LogHeapWordSize; +- assert(v < (size_t)max_jint, "sanity"); +- _live_data = (jint)v; +-} +- +-void ShenandoahHeapRegion::print_on(outputStream* st) const { +- st->print("|"); +- st->print(SIZE_FORMAT_W(5), this->_index); +- +- switch (_state) { +- case _empty_uncommitted: +- st->print("|EU "); +- break; +- case _empty_committed: +- st->print("|EC "); +- break; +- case _regular: +- st->print("|R "); +- break; +- case _humongous_start: +- st->print("|H "); +- break; +- case _pinned_humongous_start: +- st->print("|HP "); +- break; +- case _humongous_cont: +- st->print("|HC "); +- break; +- case _cset: +- st->print("|CS "); +- break; +- case _trash: +- st->print("|T "); +- break; +- case _pinned: +- st->print("|P "); +- break; +- case _pinned_cset: +- st->print("|CSP"); +- break; +- default: +- ShouldNotReachHere(); +- } +- st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12), +- p2i(bottom()), p2i(top()), p2i(end())); +- st->print("|TAMS " INTPTR_FORMAT_W(12), +- p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast(this)))); +- st->print("|UWM " INTPTR_FORMAT_W(12), +- p2i(_update_watermark)); +- st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); +- st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs())); +- st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), 
proper_unit_for_byte_size(get_gclab_allocs())); +- st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs())); +- st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes())); +- st->print("|CP " SIZE_FORMAT_W(3), pin_count()); +- st->cr(); +-} +- +-ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- assert(is_humongous(), "Must be a part of the humongous region"); +- size_t i = index(); +- ShenandoahHeapRegion* r = const_cast(this); +- while (!r->is_humongous_start()) { +- assert(i > 0, "Sanity"); +- i--; +- r = heap->get_region(i); +- assert(r->is_humongous(), "Must be a part of the humongous region"); +- } +- assert(r->is_humongous_start(), "Must be"); +- return r; +-} +- +-void ShenandoahHeapRegion::recycle() { +- set_top(bottom()); +- clear_live_data(); +- reset_alloc_metadata(); +- +- ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this); +- set_update_watermark(bottom()); +- +- make_empty(); +- +- if (ZapUnusedHeapArea) { +- SpaceMangler::mangle_region(MemRegion(bottom(), end())); +- } +-} +- +-HeapWord* ShenandoahHeapRegion::block_start(const void* p) const { +- assert(MemRegion(bottom(), end()).contains(p), +- err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")", +- p2i(p), p2i(bottom()), p2i(end()))); +- if (p >= top()) { +- return top(); +- } else { +- HeapWord* last = bottom(); +- HeapWord* cur = last; +- while (cur <= p) { +- last = cur; +- cur += oop(cur)->size(); +- } +- shenandoah_assert_correct(NULL, oop(last)); +- return last; +- } +-} +- +-size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const { +- assert(MemRegion(bottom(), end()).contains(p), +- err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")", +- p2i(p), p2i(bottom()), p2i(end()))); +- if 
(p < top()) { +- return oop(p)->size(); +- } else { +- assert(p == top(), "just checking"); +- return pointer_delta(end(), (HeapWord*) p); +- } +-} +- +-size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) { +- // Absolute minimums we should not ever break: +- static const size_t MIN_REGION_SIZE = 256*K; +- +- size_t region_size; +- if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) { +- if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) { +- err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " +- "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), +- MIN_NUM_REGIONS, +- byte_size_in_proper_unit(ShenandoahMinRegionSize), +- proper_unit_for_byte_size(ShenandoahMinRegionSize)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); +- } +- if (ShenandoahMinRegionSize < MIN_REGION_SIZE) { +- err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(ShenandoahMinRegionSize), +- proper_unit_for_byte_size(ShenandoahMinRegionSize), +- byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); +- } +- if (ShenandoahMinRegionSize < MinTLABSize) { +- err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size size (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(ShenandoahMinRegionSize), +- proper_unit_for_byte_size(ShenandoahMinRegionSize), +- byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); +- } +- if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) { +- err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).", +- 
byte_size_in_proper_unit(ShenandoahMaxRegionSize), +- proper_unit_for_byte_size(ShenandoahMaxRegionSize), +- byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message); +- } +- if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) { +- err_msg message("Minimum (" SIZE_FORMAT "%s) should be larger than maximum (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(ShenandoahMinRegionSize), +- proper_unit_for_byte_size(ShenandoahMinRegionSize), +- byte_size_in_proper_unit(ShenandoahMaxRegionSize), +- proper_unit_for_byte_size(ShenandoahMaxRegionSize)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message); +- } +- +- // We rapidly expand to max_heap_size in most scenarios, so that is the measure +- // for usual heap sizes. Do not depend on initial_heap_size here. +- region_size = max_heap_size / ShenandoahTargetNumRegions; +- +- // Now make sure that we don't go over or under our limits. 
+- region_size = MAX2(ShenandoahMinRegionSize, region_size); +- region_size = MIN2(ShenandoahMaxRegionSize, region_size); +- +- } else { +- if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) { +- err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " +- "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), +- MIN_NUM_REGIONS, +- byte_size_in_proper_unit(ShenandoahRegionSize), +- proper_unit_for_byte_size(ShenandoahRegionSize)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message); +- } +- if (ShenandoahRegionSize < ShenandoahMinRegionSize) { +- err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(ShenandoahRegionSize), +- proper_unit_for_byte_size(ShenandoahRegionSize), +- byte_size_in_proper_unit(ShenandoahMinRegionSize), +- proper_unit_for_byte_size(ShenandoahMinRegionSize)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message); +- } +- if (ShenandoahRegionSize > ShenandoahMaxRegionSize) { +- err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).", +- byte_size_in_proper_unit(ShenandoahRegionSize), +- proper_unit_for_byte_size(ShenandoahRegionSize), +- byte_size_in_proper_unit(ShenandoahMaxRegionSize), +- proper_unit_for_byte_size(ShenandoahMaxRegionSize)); +- vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message); +- } +- region_size = ShenandoahRegionSize; +- } +- +- if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) { +- vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]"); +- } +- +- // Make sure region size and heap size are page aligned. 
+- // If large pages are used, we ensure that region size is aligned to large page size if +- // heap size is large enough to accommodate minimal number of regions. Otherwise, we align +- // region size to regular page size. +- +- // Figure out page size to use, and aligns up heap to page size +- int page_size = os::vm_page_size(); +- if (UseLargePages) { +- size_t large_page_size = os::large_page_size(); +- max_heap_size = align_up(max_heap_size, large_page_size); +- if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) { +- page_size = (int)large_page_size; +- } else { +- // Should have been checked during argument initialization +- assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size"); +- } +- } else { +- max_heap_size = align_up(max_heap_size, page_size); +- } +- +- // Align region size to page size +- region_size = align_up(region_size, page_size); +- int region_size_log = log2_long((jlong) region_size); +- // Recalculate the region size to make sure it's a power of +- // 2. This means that region_size is the largest power of 2 that's +- // <= what we've calculated so far. +- region_size = size_t(1) << region_size_log; +- +- // Now, set up the globals. 
+- guarantee(RegionSizeBytesShift == 0, "we should only set it once"); +- RegionSizeBytesShift = (size_t)region_size_log; +- +- guarantee(RegionSizeWordsShift == 0, "we should only set it once"); +- RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize; +- +- guarantee(RegionSizeBytes == 0, "we should only set it once"); +- RegionSizeBytes = region_size; +- RegionSizeWords = RegionSizeBytes >> LogHeapWordSize; +- assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity"); +- +- guarantee(RegionSizeWordsMask == 0, "we should only set it once"); +- RegionSizeWordsMask = RegionSizeWords - 1; +- +- guarantee(RegionSizeBytesMask == 0, "we should only set it once"); +- RegionSizeBytesMask = RegionSizeBytes - 1; +- +- guarantee(RegionCount == 0, "we should only set it once"); +- RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes; +- +- guarantee(HumongousThresholdWords == 0, "we should only set it once"); +- HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100; +- HumongousThresholdWords = (size_t)align_size_down(HumongousThresholdWords, MinObjAlignment); +- assert (HumongousThresholdWords <= RegionSizeWords, "sanity"); +- +- guarantee(HumongousThresholdBytes == 0, "we should only set it once"); +- HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize; +- assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity"); +- +- // The rationale for trimming the TLAB sizes has to do with the raciness in +- // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah +- // about next free size, gets the answer for region #N, goes away for a while, then +- // tries to allocate in region #N, and fail because some other thread have claimed part +- // of the region #N, and then the freeset allocation code has to retire the region #N, +- // before moving the allocation to region #N+1. 
+- // +- // The worst case realizes when "answer" is "region size", which means it could +- // prematurely retire an entire region. Having smaller TLABs does not fix that +- // completely, but reduces the probability of too wasteful region retirement. +- // With current divisor, we will waste no more than 1/8 of region size in the worst +- // case. This also has a secondary effect on collection set selection: even under +- // the race, the regions would be at least 7/8 used, which allows relying on +- // "used" - "live" for cset selection. Otherwise, we can get the fragmented region +- // below the garbage threshold that would never be considered for collection. +- // +- // The whole thing would be mitigated if Elastic TLABs were enabled, but there +- // is no support in this JDK. +- // +- guarantee(MaxTLABSizeWords == 0, "we should only set it once"); +- MaxTLABSizeWords = MIN2(RegionSizeWords / 8, HumongousThresholdWords); +- MaxTLABSizeWords = (size_t)align_size_down(MaxTLABSizeWords, MinObjAlignment); +- +- guarantee(MaxTLABSizeBytes == 0, "we should only set it once"); +- MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize; +- assert (MaxTLABSizeBytes > MinTLABSize, "should be larger"); +- +- log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s", +- RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes)); +- log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes)); +- log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes)); +- +- return max_heap_size; +-} +- +-void ShenandoahHeapRegion::do_commit() { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) { +- report_java_out_of_memory("Unable to commit 
region"); +- } +- if (!heap->commit_bitmap_slice(this)) { +- report_java_out_of_memory("Unable to commit bitmaps for region"); +- } +- if (AlwaysPreTouch) { +- os::pretouch_memory((char*)bottom(), (char*)end()); +- } +- heap->increase_committed(ShenandoahHeapRegion::region_size_bytes()); +-} +- +-void ShenandoahHeapRegion::do_uncommit() { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) { +- report_java_out_of_memory("Unable to uncommit region"); +- } +- if (!heap->uncommit_bitmap_slice(this)) { +- report_java_out_of_memory("Unable to uncommit bitmaps for region"); +- } +- heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes()); +-} +- +-void ShenandoahHeapRegion::record_pin() { +- Atomic::add(1, &_critical_pins); +-} +- +-void ShenandoahHeapRegion::record_unpin() { +- assert(pin_count() > 0, err_msg("Region " SIZE_FORMAT " should have non-zero pins", index())); +- Atomic::add(-1, &_critical_pins); +-} +- +-size_t ShenandoahHeapRegion::pin_count() const { +- jint v = OrderAccess::load_acquire((volatile jint*)&_critical_pins); +- assert(v >= 0, "sanity"); +- return (size_t)v; +-} +- +-void ShenandoahHeapRegion::set_state(RegionState to) { +- EventShenandoahHeapRegionStateChange evt; +- if (evt.should_commit()){ +- evt.set_index((unsigned)index()); +- evt.set_start((uintptr_t)bottom()); +- evt.set_used(used()); +- evt.set_from(_state); +- evt.set_to(to); +- evt.commit(); +- } +- _state = to; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,394 +0,0 @@ +-/* +- * Copyright (c) 2013, 
2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP +- +-#include "gc_implementation/shenandoah/shenandoahAllocRequest.hpp" +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahPacer.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +- +-class VMStructs; +-class ShenandoahHeapRegionStateConstant; +- +-class ShenandoahHeapRegion { +- friend class VMStructs; +- friend class ShenandoahHeapRegionStateConstant; +-private: +- /* +- Region state is described by a state machine. Transitions are guarded by +- heap lock, which allows changing the state of several regions atomically. +- Region states can be logically aggregated in groups. +- +- "Empty": +- ................................................................. +- . . +- . . +- . 
Uncommitted <------- Committed <------------------------\ +- . | | . | +- . \---------v-----------/ . | +- . | . | +- .........................|....................................... | +- | | +- "Active": | | +- .........................|....................................... | +- . | . | +- . /-----------------^-------------------\ . | +- . | | . | +- . v v "Humongous": . | +- . Regular ---\-----\ ..................O................ . | +- . | ^ | | . | . . | +- . | | | | . *---------\ . . | +- . v | | | . v v . . | +- . Pinned Cset | . HStart <--> H/Start H/Cont . . | +- . ^ / | | . Pinned v | . . | +- . | / | | . *<--------/ . . | +- . | v | | . | . . | +- . CsetPinned | | ..................O................ . | +- . | | | . | +- . \-----\---v-------------------/ . | +- . | . | +- .........................|....................................... | +- | | +- "Trash": | | +- .........................|....................................... | +- . | . | +- . v . | +- . Trash ---------------------------------------/ +- . . +- . . +- ................................................................. +- +- Transition from "Empty" to "Active" is first allocation. It can go from {Uncommitted, Committed} +- to {Regular, "Humongous"}. The allocation may happen in Regular regions too, but not in Humongous. +- +- Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle, +- and from {Regular, "Humongous"} for immediate reclamation. The existence of Trash state allows +- quick reclamation without actual cleaning up. +- +- Transition from "Trash" to "Empty" is recycling. It cleans up the regions and corresponding metadata. +- Can be done asynchronously and in bulk. 
+- +- Note how internal transitions disallow logic bugs: +- a) No region can go Empty, unless properly reclaimed/recycled; +- b) No region can go Uncommitted, unless reclaimed/recycled first; +- c) Only Regular regions can go to CSet; +- d) Pinned cannot go Trash, thus it could never be reclaimed until unpinned; +- e) Pinned cannot go CSet, thus it never moves; +- f) Humongous cannot be used for regular allocations; +- g) Humongous cannot go CSet, thus it never moves; +- h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should +- follow associated humongous starts, not pinnable/movable by themselves); +- i) Empty cannot go Trash, avoiding useless work; +- j) ... +- */ +- +- enum RegionState { +- _empty_uncommitted, // region is empty and has memory uncommitted +- _empty_committed, // region is empty and has memory committed +- _regular, // region is for regular allocations +- _humongous_start, // region is the humongous start +- _humongous_cont, // region is the humongous continuation +- _pinned_humongous_start, // region is both humongous start and pinned +- _cset, // region is in collection set +- _pinned, // region is pinned +- _pinned_cset, // region is pinned and in cset (evac failure path) +- _trash, // region contains only trash +- _REGION_STATES_NUM // last +- }; +- +- static const char* region_state_to_string(RegionState s) { +- switch (s) { +- case _empty_uncommitted: return "Empty Uncommitted"; +- case _empty_committed: return "Empty Committed"; +- case _regular: return "Regular"; +- case _humongous_start: return "Humongous Start"; +- case _humongous_cont: return "Humongous Continuation"; +- case _pinned_humongous_start: return "Humongous Start, Pinned"; +- case _cset: return "Collection Set"; +- case _pinned: return "Pinned"; +- case _pinned_cset: return "Collection Set, Pinned"; +- case _trash: return "Trash"; +- default: +- ShouldNotReachHere(); +- return ""; +- } +- } +- +- // This method protects from 
accidental changes in enum order: +- int region_state_to_ordinal(RegionState s) const { +- switch (s) { +- case _empty_uncommitted: return 0; +- case _empty_committed: return 1; +- case _regular: return 2; +- case _humongous_start: return 3; +- case _humongous_cont: return 4; +- case _cset: return 5; +- case _pinned: return 6; +- case _trash: return 7; +- case _pinned_cset: return 8; +- case _pinned_humongous_start: return 9; +- default: +- ShouldNotReachHere(); +- return -1; +- } +- } +- +- void report_illegal_transition(const char* method); +- +-public: +- static const int region_states_num() { +- return _REGION_STATES_NUM; +- } +- +- // Allowed transitions from the outside code: +- void make_regular_allocation(); +- void make_regular_bypass(); +- void make_humongous_start(); +- void make_humongous_cont(); +- void make_humongous_start_bypass(); +- void make_humongous_cont_bypass(); +- void make_pinned(); +- void make_unpinned(); +- void make_cset(); +- void make_trash(); +- void make_trash_immediate(); +- void make_empty(); +- void make_uncommitted(); +- void make_committed_bypass(); +- +- // Individual states: +- bool is_empty_uncommitted() const { return _state == _empty_uncommitted; } +- bool is_empty_committed() const { return _state == _empty_committed; } +- bool is_regular() const { return _state == _regular; } +- bool is_humongous_continuation() const { return _state == _humongous_cont; } +- +- // Participation in logical groups: +- bool is_empty() const { return is_empty_committed() || is_empty_uncommitted(); } +- bool is_active() const { return !is_empty() && !is_trash(); } +- bool is_trash() const { return _state == _trash; } +- bool is_humongous_start() const { return _state == _humongous_start || _state == _pinned_humongous_start; } +- bool is_humongous() const { return is_humongous_start() || is_humongous_continuation(); } +- bool is_committed() const { return !is_empty_uncommitted(); } +- bool is_cset() const { return _state == _cset || _state == 
_pinned_cset; } +- bool is_pinned() const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; } +- +- // Macro-properties: +- bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; } +- bool is_stw_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); } +- +- RegionState state() const { return _state; } +- int state_ordinal() const { return region_state_to_ordinal(_state); } +- +- void record_pin(); +- void record_unpin(); +- size_t pin_count() const; +- +-private: +- static size_t RegionCount; +- static size_t RegionSizeBytes; +- static size_t RegionSizeWords; +- static size_t RegionSizeBytesShift; +- static size_t RegionSizeWordsShift; +- static size_t RegionSizeBytesMask; +- static size_t RegionSizeWordsMask; +- static size_t HumongousThresholdBytes; +- static size_t HumongousThresholdWords; +- static size_t MaxTLABSizeBytes; +- static size_t MaxTLABSizeWords; +- +- // Never updated fields +- size_t const _index; +- HeapWord* const _bottom; +- HeapWord* const _end; +- +- // Rarely updated fields +- HeapWord* _new_top; +- double _empty_time; +- +- // Seldom updated fields +- RegionState _state; +- +- // Frequently updated fields +- HeapWord* _top; +- +- size_t _tlab_allocs; +- size_t _gclab_allocs; +- +- volatile jint _live_data; +- volatile jint _critical_pins; +- +- HeapWord* volatile _update_watermark; +- +-public: +- ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed); +- +- static const size_t MIN_NUM_REGIONS = 10; +- +- // Return adjusted max heap size +- static size_t setup_sizes(size_t max_heap_size); +- +- double empty_time() { +- return _empty_time; +- } +- +- inline static size_t required_regions(size_t bytes) { +- return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift(); +- } +- +- inline static size_t region_count() { +- return 
ShenandoahHeapRegion::RegionCount; +- } +- +- inline static size_t region_size_bytes() { +- return ShenandoahHeapRegion::RegionSizeBytes; +- } +- +- inline static size_t region_size_words() { +- return ShenandoahHeapRegion::RegionSizeWords; +- } +- +- inline static size_t region_size_bytes_shift() { +- return ShenandoahHeapRegion::RegionSizeBytesShift; +- } +- +- inline static size_t region_size_words_shift() { +- return ShenandoahHeapRegion::RegionSizeWordsShift; +- } +- +- inline static size_t region_size_bytes_mask() { +- return ShenandoahHeapRegion::RegionSizeBytesMask; +- } +- +- inline static size_t region_size_words_mask() { +- return ShenandoahHeapRegion::RegionSizeWordsMask; +- } +- +- // Convert to jint with sanity checking +- inline static jint region_size_bytes_jint() { +- assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity"); +- return (jint)ShenandoahHeapRegion::RegionSizeBytes; +- } +- +- // Convert to jint with sanity checking +- inline static jint region_size_words_jint() { +- assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity"); +- return (jint)ShenandoahHeapRegion::RegionSizeWords; +- } +- +- // Convert to jint with sanity checking +- inline static jint region_size_bytes_shift_jint() { +- assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity"); +- return (jint)ShenandoahHeapRegion::RegionSizeBytesShift; +- } +- +- // Convert to jint with sanity checking +- inline static jint region_size_words_shift_jint() { +- assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity"); +- return (jint)ShenandoahHeapRegion::RegionSizeWordsShift; +- } +- +- inline static size_t humongous_threshold_bytes() { +- return ShenandoahHeapRegion::HumongousThresholdBytes; +- } +- +- inline static size_t humongous_threshold_words() { +- return ShenandoahHeapRegion::HumongousThresholdWords; +- } +- +- inline static size_t max_tlab_size_bytes() { +- return 
ShenandoahHeapRegion::MaxTLABSizeBytes; +- } +- +- inline static size_t max_tlab_size_words() { +- return ShenandoahHeapRegion::MaxTLABSizeWords; +- } +- +- inline size_t index() const { +- return _index; +- } +- +- // Allocation (return NULL if full) +- inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type); +- +- inline void clear_live_data(); +- void set_live_data(size_t s); +- +- // Increase live data for newly allocated region +- inline void increase_live_data_alloc_words(size_t s); +- +- // Increase live data for region scanned with GC +- inline void increase_live_data_gc_words(size_t s); +- +- inline bool has_live() const; +- inline size_t get_live_data_bytes() const; +- inline size_t get_live_data_words() const; +- +- inline size_t garbage() const; +- +- void print_on(outputStream* st) const; +- +- void recycle(); +- +- HeapWord* block_start(const void* p) const; +- size_t block_size(const HeapWord* p) const; +- bool block_is_obj(const HeapWord* p) const { return p < top(); } +- +- // Find humongous start region that this region belongs to +- ShenandoahHeapRegion* humongous_start_region() const; +- +- HeapWord* top() const { return _top; } +- void set_top(HeapWord* v) { _top = v; } +- +- HeapWord* new_top() const { return _new_top; } +- void set_new_top(HeapWord* v) { _new_top = v; } +- +- HeapWord* bottom() const { return _bottom; } +- HeapWord* end() const { return _end; } +- +- size_t capacity() const { return byte_size(bottom(), end()); } +- size_t used() const { return byte_size(bottom(), top()); } +- size_t free() const { return byte_size(top(), end()); } +- +- inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t); +- void reset_alloc_metadata(); +- size_t get_shared_allocs() const; +- size_t get_tlab_allocs() const; +- size_t get_gclab_allocs() const; +- +- inline HeapWord* get_update_watermark() const; +- inline void set_update_watermark(HeapWord* w); +- inline void 
set_update_watermark_at_safepoint(HeapWord* w); +- +-private: +- void do_commit(); +- void do_uncommit(); +- +- inline void internal_increase_live_data(size_t s); +- +- void set_state(RegionState to); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,136 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp" +-#include "runtime/atomic.hpp" +- +-HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Type type) { +- shenandoah_assert_heaplocked_or_safepoint(); +- +- assert(is_object_aligned((intptr_t)size), err_msg("alloc size breaks alignment: " SIZE_FORMAT, size)); +- +- HeapWord* obj = top(); +- if (pointer_delta(end(), obj) >= size) { +- make_regular_allocation(); +- adjust_alloc_metadata(type, size); +- +- HeapWord* new_top = obj + size; +- set_top(new_top); +- +- assert(is_object_aligned((intptr_t)new_top), err_msg("new top breaks alignment: " PTR_FORMAT, p2i(new_top))); +- assert(is_object_aligned((intptr_t)obj), err_msg("obj is not aligned: " PTR_FORMAT, p2i(obj))); +- +- return obj; +- } else { +- return NULL; +- } +-} +- +-inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) { +- switch (type) { +- case ShenandoahAllocRequest::_alloc_shared: +- case ShenandoahAllocRequest::_alloc_shared_gc: +- // Counted implicitly by tlab/gclab allocs +- break; +- case ShenandoahAllocRequest::_alloc_tlab: +- _tlab_allocs += size; +- break; +- case ShenandoahAllocRequest::_alloc_gclab: +- _gclab_allocs += size; +- break; +- default: +- ShouldNotReachHere(); +- } +-} +- +-void ShenandoahHeapRegion::clear_live_data() { +- OrderAccess::release_store_fence((volatile jint*)&_live_data, 0); +-} +- +-inline void ShenandoahHeapRegion::increase_live_data_alloc_words(size_t s) { +- internal_increase_live_data(s); +-} +- +-inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) { +- internal_increase_live_data(s); +- if (ShenandoahPacing) { +- 
ShenandoahHeap::heap()->pacer()->report_mark(s); +- } +-} +- +-inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { +- assert(s < (size_t)max_jint, "sanity"); +- size_t new_live_data = (size_t)(Atomic::add((jint)s, &_live_data)); +-#ifdef ASSERT +- size_t live_bytes = new_live_data * HeapWordSize; +- size_t used_bytes = used(); +- assert(live_bytes <= used_bytes, +- err_msg("can't have more live data than used: " SIZE_FORMAT ", " SIZE_FORMAT, live_bytes, used_bytes)); +-#endif +-} +- +-size_t ShenandoahHeapRegion::get_live_data_words() const { +- jint v = OrderAccess::load_acquire((volatile jint*)&_live_data); +- assert(v >= 0, "sanity"); +- return (size_t)v; +-} +- +-size_t ShenandoahHeapRegion::get_live_data_bytes() const { +- return get_live_data_words() * HeapWordSize; +-} +- +-bool ShenandoahHeapRegion::has_live() const { +- return get_live_data_words() != 0; +-} +- +-size_t ShenandoahHeapRegion::garbage() const { +- assert(used() >= get_live_data_bytes(), err_msg("Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT, +- get_live_data_bytes(), used())); +- size_t result = used() - get_live_data_bytes(); +- return result; +-} +- +-inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const { +- HeapWord* watermark = (HeapWord*)OrderAccess::load_ptr_acquire(&_update_watermark); +- assert(bottom() <= watermark && watermark <= top(), "within bounds"); +- return watermark; +-} +- +-inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) { +- assert(bottom() <= w && w <= top(), "within bounds"); +- OrderAccess::release_store_ptr(&_update_watermark, w); +-} +- +-// Fast version that avoids synchronization, only to be used at safepoints. 
+-inline void ShenandoahHeapRegion::set_update_watermark_at_safepoint(HeapWord* w) { +- assert(bottom() <= w && w <= top(), "within bounds"); +- assert(SafepointSynchronize::is_at_safepoint(), "Should be at Shenandoah safepoint"); +- _update_watermark = w; +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,140 +0,0 @@ +-/* +- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "runtime/atomic.hpp" +-#include "utilities/copy.hpp" +- +-ShenandoahHeapRegionSetIterator::ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set) : +- _set(set), _heap(ShenandoahHeap::heap()), _current_index(0) {} +- +-void ShenandoahHeapRegionSetIterator::reset(const ShenandoahHeapRegionSet* const set) { +- _set = set; +- _current_index = 0; +-} +- +-ShenandoahHeapRegionSet::ShenandoahHeapRegionSet() : +- _heap(ShenandoahHeap::heap()), +- _map_size(_heap->num_regions()), +- _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()), +- _set_map(NEW_C_HEAP_ARRAY(jbyte, _map_size, mtGC)), +- // Bias set map's base address for fast test if an oop is in set +- _biased_set_map(_set_map - ((uintx)_heap->base() >> _region_size_bytes_shift)), +- _region_count(0) +-{ +- // Use 1-byte data type +- STATIC_ASSERT(sizeof(jbyte) == 1); +- +- // Initialize cset map +- Copy::zero_to_bytes(_set_map, _map_size); +-} +- +-ShenandoahHeapRegionSet::~ShenandoahHeapRegionSet() { +- FREE_C_HEAP_ARRAY(jbyte, _set_map, mtGC); +-} +- +-void ShenandoahHeapRegionSet::add_region(ShenandoahHeapRegion* r) { +- assert(!is_in(r), "Already in collection set"); +- _set_map[r->index()] = 1; +- _region_count++; +-} +- +-bool ShenandoahHeapRegionSet::add_region_check_for_duplicates(ShenandoahHeapRegion* r) { +- if (!is_in(r)) { +- add_region(r); +- return true; +- } else { +- return false; +- } +-} +- +-void ShenandoahHeapRegionSet::remove_region(ShenandoahHeapRegion* r) { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- assert(Thread::current()->is_VM_thread(), "Must be VMThread"); +- assert(is_in(r), 
"Not in region set"); +- _set_map[r->index()] = 0; +- _region_count --; +-} +- +-void ShenandoahHeapRegionSet::clear() { +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); +- Copy::zero_to_bytes(_set_map, _map_size); +- +- _region_count = 0; +-} +- +-ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::claim_next() { +- size_t num_regions = _heap->num_regions(); +- if (_current_index >= (jint)num_regions) { +- return NULL; +- } +- +- jint saved_current = _current_index; +- size_t index = (size_t)saved_current; +- +- while(index < num_regions) { +- if (_set->is_in(index)) { +- jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current); +- assert(cur >= (jint)saved_current, "Must move forward"); +- if (cur == saved_current) { +- assert(_set->is_in(index), "Invariant"); +- return _heap->get_region(index); +- } else { +- index = (size_t)cur; +- saved_current = cur; +- } +- } else { +- index ++; +- } +- } +- return NULL; +-} +- +-ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() { +- size_t num_regions = _heap->num_regions(); +- for (size_t index = (size_t)_current_index; index < num_regions; index ++) { +- if (_set->is_in(index)) { +- _current_index = (jint)(index + 1); +- return _heap->get_region(index); +- } +- } +- +- return NULL; +-} +- +-void ShenandoahHeapRegionSet::print_on(outputStream* out) const { +- out->print_cr("Region Set : " SIZE_FORMAT "", count()); +- +- debug_only(size_t regions = 0;) +- for (size_t index = 0; index < _heap->num_regions(); index ++) { +- if (is_in(index)) { +- _heap->get_region(index)->print_on(out); +- debug_only(regions ++;) +- } +- } +- assert(regions == count(), "Must match"); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp 2023-04-19 
05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,99 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP +- +-#include "memory/allocation.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +- +-class ShenandoahHeapRegionSet; +- +-class ShenandoahHeapRegionSetIterator : public StackObj { +-private: +- const ShenandoahHeapRegionSet* _set; +- ShenandoahHeap* const _heap; +- +- shenandoah_padding(0); +- volatile jint _current_index; +- shenandoah_padding(1); +- +- // No implicit copying: iterators should be passed by reference to capture the state +- ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSetIterator& that); +- ShenandoahHeapRegionSetIterator& operator=(const ShenandoahHeapRegionSetIterator& o); +- +-public: +- ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set); +- +- // Reset existing iterator to new set +- void reset(const ShenandoahHeapRegionSet* const set); +- +- // MT version +- ShenandoahHeapRegion* claim_next(); +- +- // Single-thread version +- ShenandoahHeapRegion* next(); +-}; +- +-class ShenandoahHeapRegionSet : public CHeapObj { +- friend class ShenandoahHeap; +-private: +- ShenandoahHeap* const _heap; +- size_t const _map_size; +- size_t const _region_size_bytes_shift; +- jbyte* const _set_map; +- // Bias set map's base address for fast test if an oop is in set +- jbyte* const _biased_set_map; +- size_t _region_count; +- +-public: +- ShenandoahHeapRegionSet(); +- ~ShenandoahHeapRegionSet(); +- +- // Add region to set +- void add_region(ShenandoahHeapRegion* r); +- bool add_region_check_for_duplicates(ShenandoahHeapRegion* r); +- +- // Remove region from set +- void remove_region(ShenandoahHeapRegion* r); +- +- size_t count() const { return _region_count; } +- bool is_empty() const { return _region_count == 0; } +- +- inline bool 
is_in(ShenandoahHeapRegion* r) const; +- inline bool is_in(size_t region_idx) const; +- inline bool is_in(oop p) const; +- +- void print_on(outputStream* out) const; +- +- void clear(); +- +-private: +- jbyte* biased_map_address() const { +- return _biased_set_map; +- } +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,50 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +- +-bool ShenandoahHeapRegionSet::is_in(size_t region_idx) const { +- assert(region_idx < _heap->num_regions(), "Sanity"); +- return _set_map[region_idx] == 1; +-} +- +-bool ShenandoahHeapRegionSet::is_in(ShenandoahHeapRegion* r) const { +- return is_in(r->index()); +-} +- +-bool ShenandoahHeapRegionSet::is_in(oop p) const { +- shenandoah_assert_in_heap(NULL, p); +- uintx index = ((uintx)(void*) p) >> _region_size_bytes_shift; +- // no need to subtract the bottom of the heap from p, +- // _biased_set_map is biased +- return _biased_set_map[index] == 1; +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPREGIONSET_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,71 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahJfrSupport.hpp" +-#include "jfr/jfrEvents.hpp" +-#if INCLUDE_JFR +-#include "jfr/metadata/jfrSerializer.hpp" +-#endif +- +-#if INCLUDE_JFR +- +-class ShenandoahHeapRegionStateConstant : public JfrSerializer { +- friend class ShenandoahHeapRegion; +-public: +- virtual void serialize(JfrCheckpointWriter& writer) { +- static const u4 nof_entries = ShenandoahHeapRegion::region_states_num(); +- writer.write_count(nof_entries); +- for (u4 i = 0; i < nof_entries; ++i) { +- writer.write_key(i); +- writer.write(ShenandoahHeapRegion::region_state_to_string((ShenandoahHeapRegion::RegionState)i)); +- } +- } +-}; +- +-void ShenandoahJFRSupport::register_jfr_type_serializers() { +- JfrSerializer::register_serializer(TYPE_SHENANDOAHHEAPREGIONSTATE, +- false, +- true, +- new ShenandoahHeapRegionStateConstant()); +-} +-#endif +- +-class ShenandoahDumpHeapRegionInfoClosure : public ShenandoahHeapRegionClosure { +-public: +- virtual void heap_region_do(ShenandoahHeapRegion* r) { +- EventShenandoahHeapRegionInformation evt; +- 
evt.set_index((unsigned)r->index()); +- evt.set_state((u8)r->state()); +- evt.set_start((uintptr_t)r->bottom()); +- evt.set_used(r->used()); +- evt.commit(); +- } +-}; +- +-void VM_ShenandoahSendHeapRegionInfoEvents::doit() { +- ShenandoahDumpHeapRegionInfoClosure c; +- ShenandoahHeap::heap()->heap_region_iterate(&c); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahJfrSupport.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,40 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP +- +-#include "runtime/vm_operations.hpp" +- +-class VM_ShenandoahSendHeapRegionInfoEvents : public VM_Operation { +-public: +- virtual void doit(); +- virtual VMOp_Type type() const { return VMOp_HeapIterateOperation; } +-}; +- +-class ShenandoahJFRSupport { +-public: +- static void register_jfr_type_serializers(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHJFRSUPPORT_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLock.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLock.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLock.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLock.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,89 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPLOCK_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPLOCK_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "memory/allocation.hpp" +-#include "runtime/safepoint.hpp" +-#include "runtime/thread.hpp" +- +-class ShenandoahLock { +-private: +- enum LockState { unlocked = 0, locked = 1 }; +- +- shenandoah_padding(0); +- volatile int _state; +- shenandoah_padding(1); +- volatile Thread* _owner; +- shenandoah_padding(2); +- +-public: +- ShenandoahLock() : _state(unlocked), _owner(NULL) {}; +- +- void lock() { +-#ifdef ASSERT +- assert(_owner != Thread::current(), "reentrant locking attempt, would deadlock"); +-#endif +- Thread::SpinAcquire(&_state, "Shenandoah Heap Lock"); +-#ifdef ASSERT +- assert(_state == locked, "must be locked"); +- assert(_owner == NULL, "must not be owned"); +- _owner = Thread::current(); +-#endif +- } +- +- void unlock() { +-#ifdef ASSERT +- assert (_owner == Thread::current(), "sanity"); +- _owner = NULL; +-#endif +- Thread::SpinRelease(&_state); +- } +- +- bool owned_by_self() { +-#ifdef ASSERT +- return _state == locked && _owner == Thread::current(); +-#else +- ShouldNotReachHere(); +- return false; +-#endif +- } +-}; +- +-class ShenandoahLocker : public StackObj { +-private: +- ShenandoahLock* _lock; +-public: +- ShenandoahLocker(ShenandoahLock* lock) { +- _lock = lock; +- _lock->lock(); +- } +- +- ~ShenandoahLocker() { +- _lock->unlock(); +- } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAPLOCK_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,62 +0,0 @@ +-/* +- * Copyright 
(c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include // for va_list and friends +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "utilities/ostream.hpp" +- +-void ShenandoahLogger::handle_warning(const char* format, ...) { +- va_list ap; +- va_start(ap, format); +- handle_generic(format, ap); +- va_end(ap); +-} +- +-void ShenandoahLogger::handle_trace(const char* format, ...) { +- va_list ap; +- va_start(ap, format); +- handle_generic(format, ap); +- va_end(ap); +-} +- +-void ShenandoahLogger::handle_debug(const char* format, ...) { +- va_list ap; +- va_start(ap, format); +- handle_generic(format, ap); +- va_end(ap); +-} +- +-void ShenandoahLogger::handle_info(const char* format, ...) 
{ +- va_list ap; +- va_start(ap, format); +- handle_generic(format, ap); +- va_end(ap); +-} +- +-void ShenandoahLogger::handle_generic(const char* format, va_list ap) { +- gclog_or_tty->bol(); +- gclog_or_tty->sp(gclog_or_tty->indentation()*4); +- gclog_or_tty->vprint_cr(format, ap); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahLogging.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,59 +0,0 @@ +-/* +- * Copyright (c) 2016, 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHLOGGING_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHLOGGING_HPP +- +-#include // for va_list and friends +-#include +- +-class ShenandoahLogger { +-public: +- static void handle_trace(const char* format, ...); +- static void handle_debug(const char* format, ...); +- static void handle_info(const char* format, ...); +- static void handle_warning(const char* format, ...); +-private: +- static void handle_generic(const char* format, va_list ap); +-}; +- +-#define log_trace(...) if (ShenandoahLogTrace) ShenandoahLogger::handle_trace +-#define log_debug(...) if (ShenandoahLogDebug) ShenandoahLogger::handle_debug +-#define log_warning(...) if (ShenandoahLogWarning) ShenandoahLogger::handle_warning +- +-// With ShenandoahLogInfo, only print out the single-"gc"-tag messages. +-#define log_info(...) if (((strcmp(#__VA_ARGS__, "gc") == 0) && (ShenandoahLogInfo || PrintGC || PrintGCDetails)) || \ +- ((strcmp(#__VA_ARGS__, "gc") > 0) && (ShenandoahLogInfo || PrintGCDetails)) || \ +- ShenandoahLogDebug) \ +- ShenandoahLogger::handle_info +- +-#ifndef PRODUCT +-#define log_develop_trace(...) if (ShenandoahLogTrace) ShenandoahLogger::handle_trace +-#define log_develop_debug(...) if (ShenandoahLogDebug) ShenandoahLogger::handle_debug +-#else +-#define DUMMY_ARGUMENT_CONSUMER(...) +-#define log_develop_trace(...) DUMMY_ARGUMENT_CONSUMER +-#define log_develop_debug(...) DUMMY_ARGUMENT_CONSUMER +-#endif +- +-#endif +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,1014 +0,0 @@ +-/* +- * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "code/codeCache.hpp" +-#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp" +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shenandoah/preservedMarks.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include 
"gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoahVerifier.hpp" +-#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "memory/metaspace.hpp" +-#include "oops/oop.inline.hpp" +-#include "runtime/biasedLocking.hpp" +-#include "runtime/thread.hpp" +-#include "utilities/copy.hpp" +-#include "utilities/growableArray.hpp" +-#include "utilities/workgroup.hpp" +- +-ShenandoahMarkCompact::ShenandoahMarkCompact() : +- _gc_timer(NULL), +- _preserved_marks(new PreservedMarksSet(true)) {} +- +-void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) { +- _gc_timer = gc_timer; +-} +- +-void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (ShenandoahVerify) { +- heap->verifier()->verify_before_fullgc(); +- } +- +- if (VerifyBeforeGC) { +- Universe::verify(); +- } +- +- heap->set_full_gc_in_progress(true); +- +- assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); +- assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped"); +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre); +- heap->pre_full_gc_dump(_gc_timer); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_prepare); +- // Full GC is supposed to recover from any GC state: +- +- // a0. Remember if we have forwarded objects +- bool has_forwarded_objects = heap->has_forwarded_objects(); +- +- // a1. 
Cancel evacuation, if in progress +- if (heap->is_evacuation_in_progress()) { +- heap->set_evacuation_in_progress(false); +- } +- assert(!heap->is_evacuation_in_progress(), "sanity"); +- +- // a2. Cancel update-refs, if in progress +- if (heap->is_update_refs_in_progress()) { +- heap->set_update_refs_in_progress(false); +- } +- assert(!heap->is_update_refs_in_progress(), "sanity"); +- +- // b. Cancel concurrent mark, if in progress +- if (heap->is_concurrent_mark_in_progress()) { +- heap->concurrent_mark()->cancel(); +- heap->complete_marking(); +- } +- assert(!heap->is_concurrent_mark_in_progress(), "sanity"); +- +- // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots. +- if (has_forwarded_objects) { +- heap->concurrent_mark()->update_roots(ShenandoahPhaseTimings::full_gc_update_roots); +- } +- +- // d. Reset the bitmaps for new marking +- heap->reset_mark_bitmap(); +- assert(heap->marking_context()->is_bitmap_clear(), "sanity"); +- assert(!heap->marking_context()->is_complete(), "sanity"); +- +- // e. Abandon reference discovery and clear all discovered references. +- ReferenceProcessor *rp = heap->ref_processor(); +- rp->disable_discovery(); +- rp->abandon_partial_discovery(); +- rp->verify_no_references_recorded(); +- +- // f. Set back forwarded objects bit back, in case some steps above dropped it. +- heap->set_has_forwarded_objects(has_forwarded_objects); +- +- // g. Sync pinned region status from the CP marks +- heap->sync_pinned_region_status(); +- +- // The rest of prologue: +- BiasedLocking::preserve_marks(); +- _preserved_marks->init(heap->workers()->active_workers()); +- } +- +- heap->make_parsable(true); +- +- CodeCache::gc_prologue(); +- +- OrderAccess::fence(); +- +- phase1_mark_heap(); +- +- // Once marking is done, which may have fixed up forwarded objects, we can drop it. +- // Coming out of Full GC, we would not have any forwarded objects. 
+- // This also prevents read barrier from kicking in while adjusting pointers in phase3. +- heap->set_has_forwarded_objects(false); +- +- heap->set_full_gc_move_in_progress(true); +- +- // Setup workers for the rest +- OrderAccess::fence(); +- +- // Initialize worker slices +- ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC); +- for (uint i = 0; i < heap->max_workers(); i++) { +- worker_slices[i] = new ShenandoahHeapRegionSet(); +- } +- +- { +- // The rest of code performs region moves, where region status is undefined +- // until all phases run together. +- ShenandoahHeapLocker lock(heap->lock()); +- +- phase2_calculate_target_addresses(worker_slices); +- +- OrderAccess::fence(); +- +- phase3_update_references(); +- +- phase4_compact_objects(worker_slices); +- } +- +- { +- // Epilogue +- SharedRestorePreservedMarksTaskExecutor exec(heap->workers()); +- _preserved_marks->restore(&exec); +- BiasedLocking::restore_marks(); +- _preserved_marks->reclaim(); +- +- JvmtiExport::gc_epilogue(); +- } +- +- // Resize metaspace +- MetaspaceGC::compute_new_size(); +- +- // Free worker slices +- for (uint i = 0; i < heap->max_workers(); i++) { +- delete worker_slices[i]; +- } +- FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices, mtGC); +- +- CodeCache::gc_epilogue(); +- JvmtiExport::gc_epilogue(); +- +- heap->set_full_gc_move_in_progress(false); +- heap->set_full_gc_in_progress(false); +- +- if (ShenandoahVerify) { +- heap->verifier()->verify_after_fullgc(); +- } +- +- if (VerifyAfterGC) { +- Universe::verify(); +- } +- +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post); +- heap->post_full_gc_dump(_gc_timer); +- } +- +- if (UseTLAB) { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs); +- heap->resize_all_tlabs(); +- } +-} +- +-class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure { +-private: +- ShenandoahMarkingContext* const _ctx; +- 
+-public: +- ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} +- +- void heap_region_do(ShenandoahHeapRegion *r) { +- _ctx->capture_top_at_mark_start(r); +- r->clear_live_data(); +- } +-}; +- +-void ShenandoahMarkCompact::phase1_mark_heap() { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- GCTraceTime time("Phase 1: Mark live objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id()); +- ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark); +- +- ShenandoahPrepareForMarkClosure cl; +- heap->heap_region_iterate(&cl); +- +- ShenandoahConcurrentMark* cm = heap->concurrent_mark(); +- +- heap->set_process_references(heap->heuristics()->can_process_references()); +- heap->set_unload_classes(heap->heuristics()->can_unload_classes()); +- +- ReferenceProcessor* rp = heap->ref_processor(); +- // enable ("weak") refs discovery +- rp->enable_discovery(true /*verify_no_refs*/, true); +- rp->setup_policy(true); // forcefully purge all soft references +- rp->set_active_mt_degree(heap->workers()->active_workers()); +- +- cm->mark_roots(ShenandoahPhaseTimings::full_gc_scan_roots); +- cm->finish_mark_from_roots(/* full_gc = */ true); +- +- heap->mark_complete_marking_context(); +-} +- +-class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { +-private: +- PreservedMarks* const _preserved_marks; +- ShenandoahHeap* const _heap; +- GrowableArray& _empty_regions; +- int _empty_regions_pos; +- ShenandoahHeapRegion* _to_region; +- ShenandoahHeapRegion* _from_region; +- HeapWord* _compact_point; +- +-public: +- ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks, +- GrowableArray& empty_regions, +- ShenandoahHeapRegion* to_region) : +- _preserved_marks(preserved_marks), +- _heap(ShenandoahHeap::heap()), +- _empty_regions(empty_regions), +- _empty_regions_pos(0), +- _to_region(to_region), +- _from_region(NULL), +- _compact_point(to_region->bottom()) {} +- +- void 
set_from_region(ShenandoahHeapRegion* from_region) { +- _from_region = from_region; +- } +- +- void finish_region() { +- assert(_to_region != NULL, "should not happen"); +- _to_region->set_new_top(_compact_point); +- } +- +- bool is_compact_same_region() { +- return _from_region == _to_region; +- } +- +- int empty_regions_pos() { +- return _empty_regions_pos; +- } +- +- void do_object(oop p) { +- assert(_from_region != NULL, "must set before work"); +- assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); +- assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked"); +- +- size_t obj_size = p->size(); +- if (_compact_point + obj_size > _to_region->end()) { +- finish_region(); +- +- // Object doesn't fit. Pick next empty region and start compacting there. +- ShenandoahHeapRegion* new_to_region; +- if (_empty_regions_pos < _empty_regions.length()) { +- new_to_region = _empty_regions.at(_empty_regions_pos); +- _empty_regions_pos++; +- } else { +- // Out of empty region? Compact within the same region. 
+- new_to_region = _from_region; +- } +- +- assert(new_to_region != _to_region, "must not reuse same to-region"); +- assert(new_to_region != NULL, "must not be NULL"); +- _to_region = new_to_region; +- _compact_point = _to_region->bottom(); +- } +- +- // Object fits into current region, record new location: +- assert(_compact_point + obj_size <= _to_region->end(), "must fit"); +- shenandoah_assert_not_forwarded(NULL, p); +- _preserved_marks->push_if_necessary(p, p->mark()); +- p->forward_to(oop(_compact_point)); +- _compact_point += obj_size; +- } +-}; +- +-class ShenandoahPrepareForCompactionTask : public AbstractGangTask { +-private: +- PreservedMarksSet* const _preserved_marks; +- ShenandoahHeap* const _heap; +- ShenandoahHeapRegionSet** const _worker_slices; +- +-public: +- ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) : +- AbstractGangTask("Shenandoah Prepare For Compaction Task"), +- _preserved_marks(preserved_marks), +- _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) { +- } +- +- static bool is_candidate_region(ShenandoahHeapRegion* r) { +- // Empty region: get it into the slice to defragment the slice itself. +- // We could have skipped this without violating correctness, but we really +- // want to compact all live regions to the start of the heap, which sometimes +- // means moving them into the fully empty regions. +- if (r->is_empty()) return true; +- +- // Can move the region, and this is not the humongous region. Humongous +- // moves are special cased here, because their moves are handled separately. +- return r->is_stw_move_allowed() && !r->is_humongous(); +- } +- +- void work(uint worker_id) { +- ShenandoahHeapRegionSet* slice = _worker_slices[worker_id]; +- ShenandoahHeapRegionSetIterator it(slice); +- ShenandoahHeapRegion* from_region = it.next(); +- +- // No work? +- if (from_region == NULL) { +- return; +- } +- +- // Sliding compaction. 
Walk all regions in the slice, and compact them. +- // Remember empty regions and reuse them as needed. +- ResourceMark rm; +- +- GrowableArray empty_regions((int)_heap->num_regions()); +- +- ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region); +- +- while (from_region != NULL) { +- assert(is_candidate_region(from_region), "Sanity"); +- +- cl.set_from_region(from_region); +- if (from_region->has_live()) { +- _heap->marked_object_iterate(from_region, &cl); +- } +- +- // Compacted the region to somewhere else? From-region is empty then. +- if (!cl.is_compact_same_region()) { +- empty_regions.append(from_region); +- } +- from_region = it.next(); +- } +- cl.finish_region(); +- +- // Mark all remaining regions as empty +- for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) { +- ShenandoahHeapRegion* r = empty_regions.at(pos); +- r->set_new_top(r->bottom()); +- } +- } +-}; +- +-void ShenandoahMarkCompact::calculate_target_humongous_objects() { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- // Compute the new addresses for humongous objects. We need to do this after addresses +- // for regular objects are calculated, and we know what regions in heap suffix are +- // available for humongous moves. +- // +- // Scan the heap backwards, because we are compacting humongous regions towards the end. +- // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide +- // humongous start there. +- // +- // The complication is potential non-movable regions during the scan. If such region is +- // detected, then sliding restarts towards that non-movable region. 
+- +- size_t to_begin = heap->num_regions(); +- size_t to_end = heap->num_regions(); +- +- for (size_t c = heap->num_regions(); c > 0; c--) { +- ShenandoahHeapRegion *r = heap->get_region(c - 1); +- if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) { +- // To-region candidate: record this, and continue scan +- to_begin = r->index(); +- continue; +- } +- +- if (r->is_humongous_start() && r->is_stw_move_allowed()) { +- // From-region candidate: movable humongous region +- oop old_obj = oop(r->bottom()); +- size_t words_size = old_obj->size(); +- size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); +- +- size_t start = to_end - num_regions; +- +- if (start >= to_begin && start != r->index()) { +- // Fits into current window, and the move is non-trivial. Record the move then, and continue scan. +- _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark()); +- old_obj->forward_to(oop(heap->get_region(start)->bottom())); +- to_end = start; +- continue; +- } +- } +- +- // Failed to fit. Scan starting from current region. +- to_begin = r->index(); +- to_end = r->index(); +- } +-} +- +-class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure { +-private: +- ShenandoahHeap* const _heap; +- +-public: +- ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {} +- void heap_region_do(ShenandoahHeapRegion* r) { +- if (r->is_trash()) { +- r->recycle(); +- } +- if (r->is_cset()) { +- r->make_regular_bypass(); +- } +- if (r->is_empty_uncommitted()) { +- r->make_committed_bypass(); +- } +- assert (r->is_committed(), err_msg("only committed regions in heap now, see region " SIZE_FORMAT, r->index())); +- +- // Record current region occupancy: this communicates empty regions are free +- // to the rest of Full GC code. 
+- r->set_new_top(r->top()); +- } +-}; +- +-class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahMarkingContext* const _ctx; +- +-public: +- ShenandoahTrashImmediateGarbageClosure() : +- _heap(ShenandoahHeap::heap()), +- _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- if (r->is_humongous_start()) { +- oop humongous_obj = oop(r->bottom()); +- if (!_ctx->is_marked(humongous_obj)) { +- assert(!r->has_live(), +- err_msg("Region " SIZE_FORMAT " is not marked, should not have live", r->index())); +- _heap->trash_humongous_region_at(r); +- } else { +- assert(r->has_live(), +- err_msg("Region " SIZE_FORMAT " should have live", r->index())); +- } +- } else if (r->is_humongous_continuation()) { +- // If we hit continuation, the non-live humongous starts should have been trashed already +- assert(r->humongous_start_region()->has_live(), +- err_msg("Region " SIZE_FORMAT " should have live", r->index())); +- } else if (r->is_regular()) { +- if (!r->has_live()) { +- r->make_trash_immediate(); +- } +- } +- } +-}; +- +-void ShenandoahMarkCompact::distribute_slices(ShenandoahHeapRegionSet** worker_slices) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- uint n_workers = heap->workers()->active_workers(); +- size_t n_regions = heap->num_regions(); +- +- // What we want to accomplish: have the dense prefix of data, while still balancing +- // out the parallel work. +- // +- // Assuming the amount of work is driven by the live data that needs moving, we can slice +- // the entire heap into equal-live-sized prefix slices, and compact into them. So, each +- // thread takes all regions in its prefix subset, and then it takes some regions from +- // the tail. +- // +- // Tail region selection becomes interesting. 
+- // +- // First, we want to distribute the regions fairly between the workers, and those regions +- // might have different amount of live data. So, until we sure no workers need live data, +- // we need to only take what the worker needs. +- // +- // Second, since we slide everything to the left in each slice, the most busy regions +- // would be the ones on the left. Which means we want to have all workers have their after-tail +- // regions as close to the left as possible. +- // +- // The easiest way to do this is to distribute after-tail regions in round-robin between +- // workers that still need live data. +- // +- // Consider parallel workers A, B, C, then the target slice layout would be: +- // +- // AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA +- // +- // (.....dense-prefix.....) (.....................tail...................) +- // [all regions fully live] [left-most regions are fuller that right-most] +- // +- +- // Compute how much live data is there. This would approximate the size of dense prefix +- // we target to create. +- size_t total_live = 0; +- for (size_t idx = 0; idx < n_regions; idx++) { +- ShenandoahHeapRegion *r = heap->get_region(idx); +- if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) { +- total_live += r->get_live_data_words(); +- } +- } +- +- // Estimate the size for the dense prefix. Note that we specifically count only the +- // "full" regions, so there would be some non-full regions in the slice tail. +- size_t live_per_worker = total_live / n_workers; +- size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words(); +- size_t prefix_regions_total = prefix_regions_per_worker * n_workers; +- prefix_regions_total = MIN2(prefix_regions_total, n_regions); +- assert(prefix_regions_total <= n_regions, "Sanity"); +- +- // There might be non-candidate regions in the prefix. To compute where the tail actually +- // ends up being, we need to account those as well. 
+- size_t prefix_end = prefix_regions_total; +- for (size_t idx = 0; idx < prefix_regions_total; idx++) { +- ShenandoahHeapRegion *r = heap->get_region(idx); +- if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) { +- prefix_end++; +- } +- } +- prefix_end = MIN2(prefix_end, n_regions); +- assert(prefix_end <= n_regions, "Sanity"); +- +- // Distribute prefix regions per worker: each thread definitely gets its own same-sized +- // subset of dense prefix. +- size_t prefix_idx = 0; +- +- size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC); +- +- for (size_t wid = 0; wid < n_workers; wid++) { +- ShenandoahHeapRegionSet* slice = worker_slices[wid]; +- +- live[wid] = 0; +- size_t regs = 0; +- +- // Add all prefix regions for this worker +- while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) { +- ShenandoahHeapRegion *r = heap->get_region(prefix_idx); +- if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) { +- slice->add_region(r); +- live[wid] += r->get_live_data_words(); +- regs++; +- } +- prefix_idx++; +- } +- } +- +- // Distribute the tail among workers in round-robin fashion. +- size_t wid = n_workers - 1; +- +- for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) { +- ShenandoahHeapRegion *r = heap->get_region(tail_idx); +- if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) { +- assert(wid < n_workers, "Sanity"); +- +- size_t live_region = r->get_live_data_words(); +- +- // Select next worker that still needs live data. +- size_t old_wid = wid; +- do { +- wid++; +- if (wid == n_workers) wid = 0; +- } while (live[wid] + live_region >= live_per_worker && old_wid != wid); +- +- if (old_wid == wid) { +- // Circled back to the same worker? This means liveness data was +- // miscalculated. Bump the live_per_worker limit so that +- // everyone gets a piece of the leftover work. 
+- live_per_worker += ShenandoahHeapRegion::region_size_words(); +- } +- +- worker_slices[wid]->add_region(r); +- live[wid] += live_region; +- } +- } +- +- FREE_C_HEAP_ARRAY(size_t, live, mtGC); +- +-#ifdef ASSERT +- BitMap map(n_regions, true /* in_resource_area */); +- for (size_t wid = 0; wid < n_workers; wid++) { +- ShenandoahHeapRegionSetIterator it(worker_slices[wid]); +- ShenandoahHeapRegion* r = it.next(); +- while (r != NULL) { +- size_t idx = r->index(); +- assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), err_msg("Sanity: " SIZE_FORMAT, idx)); +- assert(!map.at(idx), err_msg("No region distributed twice: " SIZE_FORMAT, idx)); +- map.at_put(idx, true); +- r = it.next(); +- } +- } +- +- for (size_t rid = 0; rid < n_regions; rid++) { +- bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid)); +- bool is_distributed = map.at(rid); +- assert(is_distributed || !is_candidate, err_msg("All candidates are distributed: " SIZE_FORMAT, rid)); +- } +-#endif +-} +- +-void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- GCTraceTime time("Phase 2: Compute new object addresses", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id()); +- ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses); +- +- // About to figure out which regions can be compacted, make sure pinning status +- // had been updated in GC prologue. +- heap->assert_pinned_region_status(); +- +- { +- // Trash the immediately collectible regions before computing addresses +- ShenandoahTrashImmediateGarbageClosure tigcl; +- heap->heap_region_iterate(&tigcl); +- +- // Make sure regions are in good state: committed, active, clean. +- // This is needed because we are potentially sliding the data through them. 
+- ShenandoahEnsureHeapActiveClosure ecl; +- heap->heap_region_iterate(&ecl); +- } +- +- // Compute the new addresses for regular objects +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular); +- +- distribute_slices(worker_slices); +- +- ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices); +- heap->workers()->run_task(&task); +- } +- +- // Compute the new addresses for humongous objects +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong); +- calculate_target_humongous_objects(); +- } +-} +- +-class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahMarkingContext* const _ctx; +- +- template +- inline void do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (! oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- assert(_ctx->is_marked(obj), "must be marked"); +- if (obj->is_forwarded()) { +- oop forw = obj->forwardee(); +- oopDesc::encode_store_heap_oop(p, forw); +- } +- } +- } +- +-public: +- ShenandoahAdjustPointersClosure() : +- _heap(ShenandoahHeap::heap()), +- _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} +- +- void do_oop(oop* p) { do_oop_work(p); } +- void do_oop(narrowOop* p) { do_oop_work(p); } +-}; +- +-class ShenandoahAdjustPointersObjectClosure : public ObjectClosure { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahAdjustPointersClosure _cl; +- +-public: +- ShenandoahAdjustPointersObjectClosure() : +- _heap(ShenandoahHeap::heap()) { +- } +- void do_object(oop p) { +- assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); +- p->oop_iterate(&_cl); +- } +-}; +- +-class ShenandoahAdjustPointersTask : public AbstractGangTask { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahRegionIterator _regions; +- +-public: +- ShenandoahAdjustPointersTask() : +- AbstractGangTask("Shenandoah Adjust Pointers Task"), +- 
_heap(ShenandoahHeap::heap()) { +- } +- +- void work(uint worker_id) { +- ShenandoahAdjustPointersObjectClosure obj_cl; +- ShenandoahHeapRegion* r = _regions.next(); +- while (r != NULL) { +- if (!r->is_humongous_continuation() && r->has_live()) { +- _heap->marked_object_iterate(r, &obj_cl); +- } +- r = _regions.next(); +- } +- } +-}; +- +-class ShenandoahAdjustRootPointersTask : public AbstractGangTask { +-private: +- ShenandoahRootAdjuster* _rp; +- PreservedMarksSet* _preserved_marks; +-public: +- ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) : +- AbstractGangTask("Shenandoah Adjust Root Pointers Task"), +- _rp(rp), +- _preserved_marks(preserved_marks) {} +- +- void work(uint worker_id) { +- ShenandoahAdjustPointersClosure cl; +- _rp->roots_do(worker_id, &cl); +- _preserved_marks->get(worker_id)->adjust_during_full_gc(); +- } +-}; +- +-void ShenandoahMarkCompact::phase3_update_references() { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- GCTraceTime time("Phase 3: Adjust pointers", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id()); +- ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers); +- +- WorkGang* workers = heap->workers(); +- uint nworkers = workers->active_workers(); +- { +- COMPILER2_PRESENT(DerivedPointerTable::clear()); +- ShenandoahRootAdjuster rp(ShenandoahPhaseTimings::full_gc_adjust_roots); +- ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks); +- workers->run_task(&task); +- COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); +- } +- +- ShenandoahAdjustPointersTask adjust_pointers_task; +- workers->run_task(&adjust_pointers_task); +-} +- +-class ShenandoahCompactObjectsClosure : public ObjectClosure { +-private: +- ShenandoahHeap* const _heap; +- +-public: +- ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {} +- +- void do_object(oop p) { +- assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); 
+- size_t size = (size_t)p->size(); +- if (p->is_forwarded()) { +- HeapWord* compact_from = (HeapWord*) p; +- HeapWord* compact_to = (HeapWord*) p->forwardee(); +- Copy::aligned_conjoint_words(compact_from, compact_to, size); +- oop new_obj = oop(compact_to); +- new_obj->init_mark(); +- } +- } +-}; +- +-class ShenandoahCompactObjectsTask : public AbstractGangTask { +-private: +- ShenandoahHeap* const _heap; +- ShenandoahHeapRegionSet** const _worker_slices; +- +-public: +- ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) : +- AbstractGangTask("Shenandoah Compact Objects Task"), +- _heap(ShenandoahHeap::heap()), +- _worker_slices(worker_slices) { +- } +- +- void work(uint worker_id) { +- ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]); +- +- ShenandoahCompactObjectsClosure cl; +- ShenandoahHeapRegion* r = slice.next(); +- while (r != NULL) { +- assert(!r->is_humongous(), "must not get humongous regions here"); +- if (r->has_live()) { +- _heap->marked_object_iterate(r, &cl); +- } +- r->set_top(r->new_top()); +- r = slice.next(); +- } +- } +-}; +- +-class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { +-private: +- ShenandoahHeap* const _heap; +- size_t _live; +- +-public: +- ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) { +- _heap->free_set()->clear(); +- } +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- assert (!r->is_cset(), "cset regions should have been demoted already"); +- +- // Need to reset the complete-top-at-mark-start pointer here because +- // the complete marking bitmap is no longer valid. This ensures +- // size-based iteration in marked_object_iterate(). +- // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip +- // pinned regions. 
+- if (!r->is_pinned()) { +- _heap->complete_marking_context()->reset_top_at_mark_start(r); +- } +- +- size_t live = r->used(); +- +- // Make empty regions that have been allocated into regular +- if (r->is_empty() && live > 0) { +- r->make_regular_bypass(); +- } +- +- // Reclaim regular regions that became empty +- if (r->is_regular() && live == 0) { +- r->make_trash(); +- } +- +- // Recycle all trash regions +- if (r->is_trash()) { +- live = 0; +- r->recycle(); +- } +- +- r->set_live_data(live); +- r->reset_alloc_metadata(); +- _live += live; +- } +- +- size_t get_live() { +- return _live; +- } +-}; +- +-void ShenandoahMarkCompact::compact_humongous_objects() { +- // Compact humongous regions, based on their fwdptr objects. +- // +- // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases, +- // humongous regions are already compacted, and do not require further moves, which alleviates +- // sliding costs. We may consider doing this in parallel in future. 
+- +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- for (size_t c = heap->num_regions(); c > 0; c--) { +- ShenandoahHeapRegion* r = heap->get_region(c - 1); +- if (r->is_humongous_start()) { +- oop old_obj = oop(r->bottom()); +- if (!old_obj->is_forwarded()) { +- // No need to move the object, it stays at the same slot +- continue; +- } +- size_t words_size = old_obj->size(); +- size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); +- +- size_t old_start = r->index(); +- size_t old_end = old_start + num_regions - 1; +- size_t new_start = heap->heap_region_index_containing(old_obj->forwardee()); +- size_t new_end = new_start + num_regions - 1; +- assert(old_start != new_start, "must be real move"); +- assert(r->is_stw_move_allowed(), err_msg("Region " SIZE_FORMAT " should be movable", r->index())); +- +- Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(), +- heap->get_region(new_start)->bottom(), +- words_size); +- +- oop new_obj = oop(heap->get_region(new_start)->bottom()); +- new_obj->init_mark(); +- +- { +- for (size_t c = old_start; c <= old_end; c++) { +- ShenandoahHeapRegion* r = heap->get_region(c); +- r->make_regular_bypass(); +- r->set_top(r->bottom()); +- } +- +- for (size_t c = new_start; c <= new_end; c++) { +- ShenandoahHeapRegion* r = heap->get_region(c); +- if (c == new_start) { +- r->make_humongous_start_bypass(); +- } else { +- r->make_humongous_cont_bypass(); +- } +- +- // Trailing region may be non-full, record the remainder there +- size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask(); +- if ((c == new_end) && (remainder != 0)) { +- r->set_top(r->bottom() + remainder); +- } else { +- r->set_top(r->end()); +- } +- +- r->reset_alloc_metadata(); +- } +- } +- } +- } +-} +- +-// This is slightly different to ShHeap::reset_next_mark_bitmap: +-// we need to remain able to walk pinned regions. 
+-// Since pinned region do not move and don't get compacted, we will get holes with +-// unreachable objects in them (which may have pointers to unloaded Klasses and thus +-// cannot be iterated over using oop->size(). The only way to safely iterate over those is using +-// a valid marking bitmap and valid TAMS pointer. This class only resets marking +-// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions. +-class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask { +-private: +- ShenandoahRegionIterator _regions; +- +-public: +- ShenandoahMCResetCompleteBitmapTask() : +- AbstractGangTask("Parallel Reset Bitmap Task") { +- } +- +- void work(uint worker_id) { +- ShenandoahHeapRegion* region = _regions.next(); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- ShenandoahMarkingContext* const ctx = heap->complete_marking_context(); +- while (region != NULL) { +- if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) { +- ctx->clear_bitmap(region); +- } +- region = _regions.next(); +- } +- } +-}; +- +-void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- GCTraceTime time("Phase 4: Move objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id()); +- ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects); +- +- // Compact regular objects first +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular); +- ShenandoahCompactObjectsTask compact_task(worker_slices); +- heap->workers()->run_task(&compact_task); +- } +- +- // Compact humongous objects after regular object moves +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong); +- compact_humongous_objects(); +- } +- +- // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer +- // and must ensure the bitmap is in sync. 
+- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete); +- ShenandoahMCResetCompleteBitmapTask task; +- heap->workers()->run_task(&task); +- } +- +- // Bring regions in proper states after the collection, and set heap properties. +- { +- ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild); +- +- ShenandoahPostCompactClosure post_compact; +- heap->heap_region_iterate(&post_compact); +- heap->set_used(post_compact.get_live()); +- +- heap->collection_set()->clear(); +- heap->free_set()->rebuild(); +- } +- +- heap->clear_cancelled_gc(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkCompact.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,78 +0,0 @@ +-/* +- * Copyright (c) 2014, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP +- +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp" +- +-/** +- * This implements Full GC (e.g. when invoking System.gc()) using a mark-compact algorithm. +- * +- * Current implementation is parallel sliding Lisp-2-style algorithm, based on +- * "Parallel Garbage Collection for Shared Memory Multiprocessors", by Christine Flood et al. +- * http://people.csail.mit.edu/shanir/publications/dfsz2001.pdf +- * +- * It is implemented in four phases: +- * +- * 1. Mark all live objects of the heap by traversing objects starting at GC roots. +- * 2. Calculate the new location of each live object. This is done by sequentially scanning +- * the heap, keeping track of a next-location-pointer, which is then written to each +- * object's fwdptr field. +- * 3. Update all references. This is implemented by another scan of the heap, and updates +- * all references in live objects by what's stored in the target object's fwdptr. +- * 4. Compact the heap by copying all live objects to their new location. +- * +- * Parallelization is handled by assigning each GC worker the slice of the heap (the set of regions) +- * where it does sliding compaction, without interfering with other threads. 
+- */ +- +-class PreservedMarksSet; +- +-class ShenandoahMarkCompact : public CHeapObj { +- friend class ShenandoahPrepareForCompactionObjectClosure; +-private: +- GCTimer* _gc_timer; +- +- PreservedMarksSet* _preserved_marks; +- +-public: +- ShenandoahMarkCompact(); +- void initialize(GCTimer* gc_timer); +- +- void do_it(GCCause::Cause gc_cause); +- +-private: +- void phase1_mark_heap(); +- void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices); +- void phase3_update_references(); +- void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices); +- +- void distribute_slices(ShenandoahHeapRegionSet** worker_slices); +- void calculate_target_humongous_objects(); +- void compact_humongous_objects(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,82 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shared/markBitMap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.hpp" +- +-ShenandoahMarkingContext::ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions) : +- _top_bitmaps(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)), +- _top_at_mark_starts_base(NEW_C_HEAP_ARRAY(HeapWord*, num_regions, mtGC)), +- _top_at_mark_starts(_top_at_mark_starts_base - +- ((uintx) heap_region.start() >> ShenandoahHeapRegion::region_size_bytes_shift())) { +- _mark_bit_map.initialize(heap_region, bitmap_region); +-} +- +-bool ShenandoahMarkingContext::is_bitmap_clear() const { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- size_t num_regions = heap->num_regions(); +- for (size_t idx = 0; idx < num_regions; idx++) { +- ShenandoahHeapRegion* r = heap->get_region(idx); +- if (heap->is_bitmap_slice_committed(r) && !is_bitmap_clear_range(r->bottom(), r->end())) { +- return false; +- } +- } +- return true; +-} +- +-bool ShenandoahMarkingContext::is_bitmap_clear_range(HeapWord* start, HeapWord* end) const { +- return _mark_bit_map.getNextMarkedWordAddress(start, end) == end; +-} +- +-void ShenandoahMarkingContext::initialize_top_at_mark_start(ShenandoahHeapRegion* r) { +- size_t idx = r->index(); +- HeapWord *bottom = r->bottom(); +- _top_at_mark_starts_base[idx] = bottom; +- _top_bitmaps[idx] = bottom; +-} +- +-void 
ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) { +- HeapWord* bottom = r->bottom(); +- HeapWord* top_bitmap = _top_bitmaps[r->index()]; +- if (top_bitmap > bottom) { +- _mark_bit_map.clear_range_large(MemRegion(bottom, top_bitmap)); +- _top_bitmaps[r->index()] = bottom; +- } +- assert(is_bitmap_clear_range(bottom, r->end()), +- err_msg("Region " SIZE_FORMAT " should have no marks in bitmap", r->index())); +-} +- +-bool ShenandoahMarkingContext::is_complete() { +- return _is_complete.is_set(); +-} +- +-void ShenandoahMarkingContext::mark_complete() { +- _is_complete.set(); +-} +- +-void ShenandoahMarkingContext::mark_incomplete() { +- _is_complete.unset(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,81 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP +- +-#include "gc_implementation/shared/markBitMap.hpp" +-#include "memory/allocation.hpp" +-#include "memory/memRegion.hpp" +-#include "oops/oopsHierarchy.hpp" +- +-class HeapWord; +- +-/** +- * Encapsulate a marking bitmap with the top-at-mark-start and top-bitmaps array. +- */ +-class ShenandoahMarkingContext : public CHeapObj { +-private: +- MarkBitMap _mark_bit_map; +- +- HeapWord** const _top_bitmaps; +- HeapWord** const _top_at_mark_starts_base; +- HeapWord** const _top_at_mark_starts; +- +- ShenandoahSharedFlag _is_complete; +- +-public: +- ShenandoahMarkingContext(MemRegion heap_region, MemRegion bitmap_region, size_t num_regions); +- +- /* +- * Marks the object. Returns true if the object has not been marked before and has +- * been marked by this thread. Returns false if the object has already been marked, +- * or if a competing thread succeeded in marking this object. 
+- */ +- inline bool mark(oop obj); +- +- inline bool is_marked(oop obj) const; +- +- inline bool allocated_after_mark_start(oop obj) const; +- inline bool allocated_after_mark_start(HeapWord* addr) const; +- +- inline MarkBitMap* mark_bit_map(); +- +- inline HeapWord* top_at_mark_start(ShenandoahHeapRegion* r) const; +- inline void capture_top_at_mark_start(ShenandoahHeapRegion* r); +- inline void reset_top_at_mark_start(ShenandoahHeapRegion* r); +- void initialize_top_at_mark_start(ShenandoahHeapRegion* r); +- +- inline void reset_top_bitmap(ShenandoahHeapRegion *r); +- void clear_bitmap(ShenandoahHeapRegion *r); +- +- bool is_bitmap_clear() const; +- bool is_bitmap_clear_range(HeapWord* start, HeapWord* end) const; +- +- bool is_complete(); +- void mark_complete(); +- void mark_incomplete(); +- +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,87 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +- +-inline MarkBitMap* ShenandoahMarkingContext::mark_bit_map() { +- return &_mark_bit_map; +-} +- +-inline bool ShenandoahMarkingContext::mark(oop obj) { +- shenandoah_assert_not_forwarded(NULL, obj); +- HeapWord* addr = (HeapWord*) obj; +- return (! allocated_after_mark_start(addr)) && _mark_bit_map.parMark(addr); +-} +- +-inline bool ShenandoahMarkingContext::is_marked(oop obj) const { +- HeapWord* addr = (HeapWord*) obj; +- return allocated_after_mark_start(addr) || _mark_bit_map.isMarked(addr); +-} +- +-inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const { +- HeapWord* addr = cast_from_oop(obj); +- return allocated_after_mark_start(addr); +-} +- +-inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const { +- uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift(); +- HeapWord* top_at_mark_start = _top_at_mark_starts[index]; +- bool alloc_after_mark_start = addr >= top_at_mark_start; +- return alloc_after_mark_start; +-} +- +-void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRegion *r) { +- size_t idx = r->index(); +- HeapWord* old_tams = _top_at_mark_starts_base[idx]; +- HeapWord* new_tams = r->top(); +- +- assert(new_tams >= old_tams, +- err_msg("Region " SIZE_FORMAT", TAMS 
updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, +- idx, p2i(old_tams), p2i(new_tams))); +- assert(is_bitmap_clear_range(old_tams, new_tams), +- err_msg("Region " SIZE_FORMAT ", bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT, +- idx, p2i(old_tams), p2i(new_tams))); +- +- _top_at_mark_starts_base[idx] = new_tams; +- _top_bitmaps[idx] = new_tams; +-} +- +-void ShenandoahMarkingContext::reset_top_at_mark_start(ShenandoahHeapRegion* r) { +- _top_at_mark_starts_base[r->index()] = r->bottom(); +-} +- +-HeapWord* ShenandoahMarkingContext::top_at_mark_start(ShenandoahHeapRegion* r) const { +- return _top_at_mark_starts_base[r->index()]; +-} +- +-void ShenandoahMarkingContext::reset_top_bitmap(ShenandoahHeapRegion* r) { +- assert(is_bitmap_clear_range(r->bottom(), r->end()), +- err_msg("Region " SIZE_FORMAT " should have no marks in bitmap", r->index())); +- _top_bitmaps[r->index()] = r->bottom(); +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMARKINGCONTEXT_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,94 +0,0 @@ +-/* +- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahMetrics.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +- +-ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot() { +- _heap = ShenandoahHeap::heap(); +-} +- +-void ShenandoahMetricsSnapshot::snap_before() { +- _used_before = _heap->used(); +- _if_before = _heap->free_set()->internal_fragmentation(); +- _ef_before = _heap->free_set()->external_fragmentation(); +-} +-void ShenandoahMetricsSnapshot::snap_after() { +- _used_after = _heap->used(); +- _if_after = _heap->free_set()->internal_fragmentation(); +- _ef_after = _heap->free_set()->external_fragmentation(); +-} +- +-bool ShenandoahMetricsSnapshot::is_good_progress() { +- // Under the critical threshold? +- size_t free_actual = _heap->free_set()->available(); +- size_t free_expected = _heap->max_capacity() / 100 * ShenandoahCriticalFreeThreshold; +- bool prog_free = free_actual >= free_expected; +- log_info(gc, ergo)("%s progress for free space: " SIZE_FORMAT "%s, need " SIZE_FORMAT "%s", +- prog_free ? 
"Good" : "Bad", +- byte_size_in_proper_unit(free_actual), proper_unit_for_byte_size(free_actual), +- byte_size_in_proper_unit(free_expected), proper_unit_for_byte_size(free_expected)); +- if (!prog_free) { +- return false; +- } +- +- // Freed up enough? +- size_t progress_actual = (_used_before > _used_after) ? _used_before - _used_after : 0; +- size_t progress_expected = ShenandoahHeapRegion::region_size_bytes(); +- bool prog_used = progress_actual >= progress_expected; +- log_info(gc, ergo)("%s progress for used space: " SIZE_FORMAT "%s, need " SIZE_FORMAT "%s", +- prog_used ? "Good" : "Bad", +- byte_size_in_proper_unit(progress_actual), proper_unit_for_byte_size(progress_actual), +- byte_size_in_proper_unit(progress_expected), proper_unit_for_byte_size(progress_expected)); +- if (prog_used) { +- return true; +- } +- +- // Internal fragmentation is down? +- double if_actual = _if_before - _if_after; +- double if_expected = 0.01; // 1% should be enough +- bool prog_if = if_actual >= if_expected; +- log_info(gc, ergo)("%s progress for internal fragmentation: %.1f%%, need %.1f%%", +- prog_if ? "Good" : "Bad", +- if_actual * 100, if_expected * 100); +- if (prog_if) { +- return true; +- } +- +- // External fragmentation is down? +- double ef_actual = _ef_before - _ef_after; +- double ef_expected = 0.01; // 1% should be enough +- bool prog_ef = ef_actual >= ef_expected; +- log_info(gc, ergo)("%s progress for external fragmentation: %.1f%%, need %.1f%%", +- prog_ef ? "Good" : "Bad", +- ef_actual * 100, ef_expected * 100); +- if (prog_ef) { +- return true; +- } +- +- // Nothing good had happened. 
+- return false; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMetrics.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,45 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +- +-class ShenandoahMetricsSnapshot : public StackObj { +-private: +- ShenandoahHeap* _heap; +- size_t _used_before, _used_after; +- double _if_before, _if_after; +- double _ef_before, _ef_after; +- +-public: +- ShenandoahMetricsSnapshot(); +- +- void snap_before(); +- void snap_after(); +- +- bool is_good_progress(); +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHMETRICS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,97 +0,0 @@ +-/* +- * Copyright (c) 2015, 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shared/hSpaceCounters.hpp" +-#include "gc_implementation/shared/collectorCounters.hpp" +-#include "gc_implementation/shared/generationCounters.hpp" +-#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegionCounters.hpp" +- +-class ShenandoahYoungGenerationCounters : public GenerationCounters { +-public: +- ShenandoahYoungGenerationCounters() : +- GenerationCounters("Young", 0, 0, 0, (size_t)0, (size_t)0) {}; +- +- virtual void update_all() { +- // no update +- } +-}; +- +-class ShenandoahGenerationCounters : public GenerationCounters { +-private: +- ShenandoahHeap* _heap; +-public: +- ShenandoahGenerationCounters(ShenandoahHeap* heap) : +- GenerationCounters("Heap", 1, 1, heap->initial_capacity(), heap->max_capacity(), heap->capacity()), +- _heap(heap) +- {}; +- +- virtual void update_all() { +- _current_size->set_value(_heap->capacity()); +- } +-}; +- +-ShenandoahMonitoringSupport::ShenandoahMonitoringSupport(ShenandoahHeap* heap) : +- _full_counters(NULL) +-{ +- // Collection counters do not fit Shenandoah very well. +- // We record full cycles (including full STW GC) as "old". +- _full_counters = new CollectorCounters("Shenandoah full", 1); +- +- // We report young gen as unused. 
+- _young_counters = new ShenandoahYoungGenerationCounters(); +- _heap_counters = new ShenandoahGenerationCounters(heap); +- _space_counters = new HSpaceCounters("Heap", 0, heap->max_capacity(), heap->initial_capacity(), _heap_counters); +- +- _heap_region_counters = new ShenandoahHeapRegionCounters(); +-} +- +-CollectorCounters* ShenandoahMonitoringSupport::stw_collection_counters() { +- return _full_counters; +-} +- +-CollectorCounters* ShenandoahMonitoringSupport::full_stw_collection_counters() { +- return _full_counters; +-} +- +-CollectorCounters* ShenandoahMonitoringSupport::concurrent_collection_counters() { +- return _full_counters; +-} +- +-void ShenandoahMonitoringSupport::update_counters() { +- MemoryService::track_memory_usage(); +- +- if (UsePerfData) { +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- size_t used = heap->used(); +- size_t capacity = heap->max_capacity(); +- _heap_counters->update_all(); +- _space_counters->update_all(capacity, used); +- _heap_region_counters->update(); +- +- MetaspaceCounters::update_performance_counters(); +- CompressedClassSpaceCounters::update_performance_counters(); +- } +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,54 +0,0 @@ +-/* +- * Copyright (c) 2015, 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP +- +-#include "memory/allocation.hpp" +- +-class GenerationCounters; +-class HSpaceCounters; +-class ShenandoahHeap; +-class CollectorCounters; +-class ShenandoahHeapRegionCounters; +- +-class ShenandoahMonitoringSupport : public CHeapObj { +-private: +- CollectorCounters* _full_counters; +- +- GenerationCounters* _young_counters; +- GenerationCounters* _heap_counters; +- +- HSpaceCounters* _space_counters; +- +- ShenandoahHeapRegionCounters* _heap_region_counters; +- +-public: +- ShenandoahMonitoringSupport(ShenandoahHeap* heap); +- CollectorCounters* stw_collection_counters(); +- CollectorCounters* full_stw_collection_counters(); +- CollectorCounters* concurrent_collection_counters(); +- void update_counters(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHMONITORINGSUPPORT_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,194 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahNumberSeq.hpp" +-#include "memory/allocation.inline.hpp" +-#include "runtime/atomic.hpp" +- +-HdrSeq::HdrSeq() { +- _hdr = NEW_C_HEAP_ARRAY(int*, MagBuckets, mtInternal); +- for (int c = 0; c < MagBuckets; c++) { +- _hdr[c] = NULL; +- } +-} +- +-HdrSeq::~HdrSeq() { +- for (int c = 0; c < MagBuckets; c++) { +- int* sub = _hdr[c]; +- if (sub != NULL) { +- FREE_C_HEAP_ARRAY(int, sub, mtInternal); +- } +- } +- FREE_C_HEAP_ARRAY(int*, _hdr, mtInternal); +-} +- +-void HdrSeq::add(double val) { +- if (val < 0) { +- assert (false, err_msg("value (%8.2f) is not negative", val)); +- val = 0; +- } +- +- NumberSeq::add(val); +- +- double v = val; +- int mag; +- if (v > 0) { +- mag = 0; +- while (v > 1) { +- mag++; +- v /= 10; +- } +- while (v < 0.1) { +- mag--; +- v *= 10; +- } +- } else { +- mag = MagMinimum; +- } +- +- int bucket = -MagMinimum + mag; +- int sub_bucket = (int) (v * ValBuckets); +- +- // Defensively saturate for product bits: +- if (bucket < 0) { +- assert (false, err_msg("bucket index (%d) underflow for value (%8.2f)", bucket, val)); +- bucket = 0; +- } +- +- if (bucket >= MagBuckets) { +- assert (false, err_msg("bucket index (%d) overflow for value (%8.2f)", bucket, val)); +- bucket = MagBuckets - 1; +- } +- +- if (sub_bucket < 0) { +- assert (false, err_msg("sub-bucket index (%d) underflow for value (%8.2f)", sub_bucket, val)); +- sub_bucket = 0; +- } +- +- if (sub_bucket >= ValBuckets) { +- assert (false, err_msg("sub-bucket index (%d) overflow for value (%8.2f)", sub_bucket, val)); +- sub_bucket = ValBuckets - 1; +- } +- +- int* b = _hdr[bucket]; +- if (b == NULL) { +- b = NEW_C_HEAP_ARRAY(int, ValBuckets, mtInternal); +- for (int c = 0; c < ValBuckets; c++) { +- b[c] = 0; +- } +- _hdr[bucket] = b; +- } +- b[sub_bucket]++; +-} +- +-double HdrSeq::percentile(double level) const { +- // target should be non-zero to find the first sample +- int target = MAX2(1, 
(int) (level * num() / 100)); +- int cnt = 0; +- for (int mag = 0; mag < MagBuckets; mag++) { +- if (_hdr[mag] != NULL) { +- for (int val = 0; val < ValBuckets; val++) { +- cnt += _hdr[mag][val]; +- if (cnt >= target) { +- return pow(10.0, MagMinimum + mag) * val / ValBuckets; +- } +- } +- } +- } +- return maximum(); +-} +- +-BinaryMagnitudeSeq::BinaryMagnitudeSeq() { +- _mags = NEW_C_HEAP_ARRAY(jlong, BitsPerJavaLong, mtInternal); +- clear(); +-} +- +-BinaryMagnitudeSeq::~BinaryMagnitudeSeq() { +- FREE_C_HEAP_ARRAY(size_t, _mags, mtInternal); +-} +- +-void BinaryMagnitudeSeq::clear() { +- for (int c = 0; c < BitsPerJavaLong; c++) { +- _mags[c] = 0; +- } +- _sum = 0; +-} +- +-void BinaryMagnitudeSeq::add(size_t val) { +- Atomic::add(val, &_sum); +- +- int mag = log2_intptr(val) + 1; +- +- // Defensively saturate for product bits: +- if (mag < 0) { +- assert (false, err_msg("bucket index (%d) underflow for value (" SIZE_FORMAT ")", mag, val)); +- mag = 0; +- } +- +- if (mag >= BitsPerJavaLong) { +- assert (false, err_msg("bucket index (%d) overflow for value (" SIZE_FORMAT ")", mag, val)); +- mag = BitsPerJavaLong - 1; +- } +- +- Atomic::add(1, &_mags[mag]); +-} +- +-size_t BinaryMagnitudeSeq::level(int level) const { +- if (0 <= level && level < BitsPerJavaLong) { +- return _mags[level]; +- } else { +- return 0; +- } +-} +- +-size_t BinaryMagnitudeSeq::num() const { +- int r = 0; +- for (int c = 0; c < BitsPerJavaLong; c++) { +- r += _mags[c]; +- } +- return r; +-} +- +-size_t BinaryMagnitudeSeq::sum() const { +- return _sum; +-} +- +-int BinaryMagnitudeSeq::min_level() const { +- for (int c = 0; c < BitsPerJavaLong; c++) { +- if (_mags[c] != 0) { +- return c; +- } +- } +- return BitsPerJavaLong - 1; +-} +- +-int BinaryMagnitudeSeq::max_level() const { +- for (int c = BitsPerJavaLong - 1; c > 0; c--) { +- if (_mags[c] != 0) { +- return c; +- } +- } +- return 0; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.hpp 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahNumberSeq.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,75 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP +- +-#include "utilities/numberSeq.hpp" +- +-// HDR sequence stores the low-resolution high-dynamic-range values. +-// It does so by maintaining the double array, where first array defines +-// the magnitude of the value being stored, and the second array maintains +-// the low resolution histogram within that magnitude. For example, storing +-// 4.352819 * 10^3 increments the bucket _hdr[3][435]. This allows for +-// memory efficient storage of huge amount of samples. +-// +-// Accepts positive numbers only. 
+-class HdrSeq: public NumberSeq { +-private: +- enum PrivateConstants { +- ValBuckets = 512, +- MagBuckets = 24, +- MagMinimum = -12 +- }; +- int** _hdr; +- +-public: +- HdrSeq(); +- ~HdrSeq(); +- +- virtual void add(double val); +- double percentile(double level) const; +-}; +- +-// Binary magnitude sequence stores the power-of-two histogram. +-// It has very low memory requirements, and is thread-safe. When accuracy +-// is not needed, it is preferred over HdrSeq. +-class BinaryMagnitudeSeq { +-private: +- jlong _sum; +- jlong* _mags; +- +-public: +- BinaryMagnitudeSeq(); +- ~BinaryMagnitudeSeq(); +- +- void add(size_t val); +- size_t num() const; +- size_t level(int level) const; +- size_t sum() const; +- int min_level() const; +- int max_level() const; +- void clear(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHNUMBERSEQ_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,205 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP +- +-#include "memory/iterator.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp" +- +-class ShenandoahHeap; +-class ShenandoahStrDedupQueue; +-class ShenandoahMarkingContext; +-class OopClosure; +- +-enum UpdateRefsMode { +- NONE, // No reference updating +- RESOLVE, // Only a read-barrier (no reference updating) +- SIMPLE, // Reference updating using simple store +- CONCURRENT // Reference updating using CAS +-}; +- +-enum StringDedupMode { +- NO_DEDUP, // Do not do anything for String deduplication +- ENQUEUE_DEDUP // Enqueue candidate Strings for deduplication +-}; +- +-class ShenandoahMarkRefsSuperClosure : public MetadataAwareOopClosure { +-private: +- ShenandoahObjToScanQueue* _queue; +- ShenandoahStrDedupQueue* _dedup_queue; +- ShenandoahHeap* _heap; +- ShenandoahMarkingContext* const _mark_context; +- +-public: +- ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp); +- ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp); +- +- template +- void work(T *p); +-}; +- +-class ShenandoahMarkUpdateRefsClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkUpdateRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void 
do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return false; } +- virtual bool do_metadata() { return false; } +-}; +- +-class ShenandoahMarkUpdateRefsDedupClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkUpdateRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, dq, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return false; } +- virtual bool do_metadata() { return false; } +-}; +- +-class ShenandoahMarkUpdateRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkUpdateRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return true; } +- virtual bool do_metadata() { return true; } +-}; +- +-class ShenandoahMarkUpdateRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkUpdateRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, dq, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return true; } +- virtual bool do_metadata() { return true; } +-}; +- +-class ShenandoahMarkRefsClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, rp) {}; +- +- template +- inline void do_oop_nv(T* 
p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return false; } +- virtual bool do_metadata() { return false; } +-}; +- +-class ShenandoahMarkRefsDedupClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, dq, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return false; } +- virtual bool do_metadata() { return false; } +-}; +- +-class ShenandoahMarkResolveRefsClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkResolveRefsClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return false; } +- virtual bool do_metadata() { return false; } +-}; +- +-class ShenandoahMarkResolveRefsDedupClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkResolveRefsDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, dq, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return false; } +- virtual bool do_metadata() { return false; } +-}; +- +-class ShenandoahMarkRefsMetadataClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkRefsMetadataClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) : +- 
ShenandoahMarkRefsSuperClosure(q, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return true; } +- virtual bool do_metadata() { return true; } +-}; +- +-class ShenandoahMarkRefsMetadataDedupClosure : public ShenandoahMarkRefsSuperClosure { +-public: +- ShenandoahMarkRefsMetadataDedupClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) : +- ShenandoahMarkRefsSuperClosure(q, dq, rp) {}; +- +- template +- inline void do_oop_nv(T* p) { work(p); } +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +- inline bool do_metadata_nv() { return true; } +- virtual bool do_metadata() { return true; } +-}; +- +-class ShenandoahUpdateHeapRefsClosure : public ExtendedOopClosure { +-private: +- ShenandoahHeap* _heap; +-public: +- ShenandoahUpdateHeapRefsClosure(); +- +- template +- void do_oop_nv(T* p); +- +- virtual void do_oop(narrowOop* p) { do_oop_nv(p); } +- virtual void do_oop(oop* p) { do_oop_nv(p); } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,40 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp" +- +-template +-inline void ShenandoahMarkRefsSuperClosure::work(T *p) { +- ShenandoahConcurrentMark::mark_through_ref(p, _heap, _queue, _mark_context, _dedup_queue); +-} +- +-template +-inline void ShenandoahUpdateHeapRefsClosure::do_oop_nv(T* p) { +- _heap->maybe_update_with_forwarded(p); +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOOPCLOSURES_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,355 +0,0 @@ +-/* +- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahPacer.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "runtime/mutexLocker.hpp" +- +-/* +- * In normal concurrent cycle, we have to pace the application to let GC finish. +- * +- * Here, we do not know how large would be the collection set, and what are the +- * relative performances of the each stage in the concurrent cycle, and so we have to +- * make some assumptions. +- * +- * For concurrent mark, there is no clear notion of progress. The moderately accurate +- * and easy to get metric is the amount of live objects the mark had encountered. But, +- * that does directly correlate with the used heap, because the heap might be fully +- * dead or fully alive. 
We cannot assume either of the extremes: we would either allow +- * application to run out of memory if we assume heap is fully dead but it is not, and, +- * conversely, we would pacify application excessively if we assume heap is fully alive +- * but it is not. So we need to guesstimate the particular expected value for heap liveness. +- * The best way to do this is apparently recording the past history. +- * +- * For concurrent evac and update-refs, we are walking the heap per-region, and so the +- * notion of progress is clear: we get reported the "used" size from the processed regions +- * and use the global heap-used as the baseline. +- * +- * The allocatable space when GC is running is "free" at the start of phase, but the +- * accounted budget is based on "used". So, we need to adjust the tax knowing that. +- */ +- +-void ShenandoahPacer::setup_for_mark() { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- size_t live = update_and_get_progress_history(); +- size_t free = _heap->free_set()->available(); +- +- size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; +- size_t taxable = free - non_taxable; +- +- double tax = 1.0 * live / taxable; // base tax for available free space +- tax *= 1; // mark can succeed with immediate garbage, claim all available space +- tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap +- +- restart_with(non_taxable, tax); +- +- log_info(gc, ergo)("Pacer for Mark. 
Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " +- "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", +- byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), +- byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), +- byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), +- tax); +-} +- +-void ShenandoahPacer::setup_for_evac() { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- size_t used = _heap->collection_set()->used(); +- size_t free = _heap->free_set()->available(); +- +- size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; +- size_t taxable = free - non_taxable; +- +- double tax = 1.0 * used / taxable; // base tax for available free space +- tax *= 2; // evac is followed by update-refs, claim 1/2 of remaining free +- tax = MAX2(1, tax); // never allocate more than GC processes during the phase +- tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap +- +- restart_with(non_taxable, tax); +- +- log_info(gc, ergo)("Pacer for Evacuation. 
Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " +- "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", +- byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), +- byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), +- byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), +- tax); +-} +- +-void ShenandoahPacer::setup_for_updaterefs() { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- size_t used = _heap->used(); +- size_t free = _heap->free_set()->available(); +- +- size_t non_taxable = free * ShenandoahPacingCycleSlack / 100; +- size_t taxable = free - non_taxable; +- +- double tax = 1.0 * used / taxable; // base tax for available free space +- tax *= 1; // update-refs is the last phase, claim the remaining free +- tax = MAX2(1, tax); // never allocate more than GC processes during the phase +- tax *= ShenandoahPacingSurcharge; // additional surcharge to help unclutter heap +- +- restart_with(non_taxable, tax); +- +- log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " +- "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", +- byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), +- byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), +- byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), +- tax); +-} +- +-/* +- * In idle phase, we have to pace the application to let control thread react with GC start. +- * +- * Here, we have rendezvous with concurrent thread that adds up the budget as it acknowledges +- * it had seen recent allocations. It will naturally pace the allocations if control thread is +- * not catching up. To bootstrap this feedback cycle, we need to start with some initial budget +- * for applications to allocate at. 
+- */ +- +-void ShenandoahPacer::setup_for_idle() { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- size_t initial = _heap->max_capacity() / 100 * ShenandoahPacingIdleSlack; +- double tax = 1; +- +- restart_with(initial, tax); +- +- log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", +- byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), +- tax); +-} +- +-/* +- * There is no useful notion of progress for these operations. To avoid stalling +- * the allocators unnecessarily, allow them to run unimpeded. +- */ +- +-void ShenandoahPacer::setup_for_preclean() { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- size_t initial = _heap->max_capacity(); +- restart_with(initial, 1.0); +- +- log_info(gc, ergo)("Pacer for Precleaning. Non-Taxable: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial)); +-} +- +-void ShenandoahPacer::setup_for_reset() { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- size_t initial = _heap->max_capacity(); +- restart_with(initial, 1.0); +- +- log_info(gc, ergo)("Pacer for Reset. 
Non-Taxable: " SIZE_FORMAT "%s", +- byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial)); +-} +- +-size_t ShenandoahPacer::update_and_get_progress_history() { +- if (_progress == -1) { +- // First initialization, report some prior +- Atomic::store_ptr((intptr_t)PACING_PROGRESS_ZERO, &_progress); +- return (size_t) (_heap->max_capacity() * 0.1); +- } else { +- // Record history, and reply historical data +- _progress_history->add(_progress); +- Atomic::store_ptr((intptr_t)PACING_PROGRESS_ZERO, &_progress); +- return (size_t) (_progress_history->avg() * HeapWordSize); +- } +-} +- +-void ShenandoahPacer::restart_with(jlong non_taxable_bytes, jdouble tax_rate) { +- STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); +- { +- intptr_t initial = (size_t) (non_taxable_bytes * tax_rate) >> LogHeapWordSize; +- intptr_t cur; +- do { +- cur = OrderAccess::load_acquire(&_budget); +- } while (Atomic::cmpxchg_ptr(initial, &_budget, cur) != cur); +- } +- +- OrderAccess::release_store(&_tax_rate, tax_rate); +- +- { +- intptr_t cur, val; +- do { +- cur = OrderAccess::load_acquire(&_epoch); +- val = cur + 1; +- } while (Atomic::cmpxchg_ptr(val, &_epoch, cur) != cur); +- } +- +- // Shake up stalled waiters after budget update. +- _need_notify_waiters.try_set(); +-} +- +-bool ShenandoahPacer::claim_for_alloc(size_t words, bool force) { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- intptr_t tax = MAX2(1, (intptr_t)(words * OrderAccess::load_acquire(&_tax_rate))); +- +- intptr_t cur = 0; +- intptr_t new_val = 0; +- do { +- cur = OrderAccess::load_acquire(&_budget); +- if (cur < tax && !force) { +- // Progress depleted, alas. 
+- return false; +- } +- new_val = cur - tax; +- } while (Atomic::cmpxchg_ptr(new_val, &_budget, cur) != cur); +- return true; +-} +- +-void ShenandoahPacer::unpace_for_alloc(intptr_t epoch, size_t words) { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- if (_epoch != epoch) { +- // Stale ticket, no need to unpace. +- return; +- } +- +- size_t tax = MAX2(1, words * OrderAccess::load_acquire(&_tax_rate)); +- add_budget(tax); +-} +- +-intptr_t ShenandoahPacer::epoch() { +- return OrderAccess::load_acquire(&_epoch); +-} +- +-void ShenandoahPacer::pace_for_alloc(size_t words) { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- +- // Fast path: try to allocate right away +- bool claimed = claim_for_alloc(words, false); +- if (claimed) { +- return; +- } +- +- // Forcefully claim the budget: it may go negative at this point, and +- // GC should replenish for this and subsequent allocations. After this claim, +- // we would wait a bit until our claim is matched by additional progress, +- // or the time budget depletes. +- claimed = claim_for_alloc(words, true); +- assert(claimed, "Should always succeed"); +- +- // Threads that are attaching should not block at all: they are not +- // fully initialized yet. Blocking them would be awkward. +- // This is probably the path that allocates the thread oop itself. +- if (JavaThread::current()->is_attaching_via_jni()) { +- return; +- } +- +- double start = os::elapsedTime(); +- +- size_t max_ms = ShenandoahPacingMaxDelay; +- size_t total_ms = 0; +- +- while (true) { +- // We could instead assist GC, but this would suffice for now. +- size_t cur_ms = (max_ms > total_ms) ? (max_ms - total_ms) : 1; +- wait(cur_ms); +- +- double end = os::elapsedTime(); +- total_ms = (size_t)((end - start) * 1000); +- +- if (total_ms > max_ms || OrderAccess::load_ptr_acquire(&_budget) >= 0) { +- // Exiting if either: +- // a) Spent local time budget to wait for enough GC progress. 
+- // Breaking out and allocating anyway, which may mean we outpace GC, +- // and start Degenerated GC cycle. +- // b) The budget had been replenished, which means our claim is satisfied. +- JavaThread::current()->add_paced_time(end - start); +- break; +- } +- } +-} +- +-void ShenandoahPacer::wait(size_t time_ms) { +- // Perform timed wait. It works like like sleep(), except without modifying +- // the thread interruptible status. MonitorLocker also checks for safepoints. +- assert(time_ms > 0, "Should not call this with zero argument, as it would stall until notify"); +- assert(time_ms <= LONG_MAX, "Sanity"); +- MonitorLockerEx locker(_wait_monitor); +- _wait_monitor->wait(!Mutex::_no_safepoint_check_flag, (long)time_ms); +-} +- +-void ShenandoahPacer::notify_waiters() { +- if (_need_notify_waiters.try_unset()) { +- MonitorLockerEx locker(_wait_monitor); +- _wait_monitor->notify_all(); +- } +-} +- +-void ShenandoahPacer::flush_stats_to_cycle() { +- MutexLocker lock(Threads_lock); +- +- double sum = 0; +- for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) { +- sum += t->paced_time(); +- } +- ShenandoahHeap::heap()->phase_timings()->record_phase_time(ShenandoahPhaseTimings::pacing, sum); +-} +- +-void ShenandoahPacer::print_cycle_on(outputStream* out) { +- MutexLocker lock(Threads_lock); +- +- double now = os::elapsedTime(); +- double total = now - _last_time; +- _last_time = now; +- +- out->cr(); +- out->print_cr("Allocation pacing accrued:"); +- +- size_t threads_total = 0; +- size_t threads_nz = 0; +- double sum = 0; +- for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) { +- double d = t->paced_time(); +- if (d > 0) { +- threads_nz++; +- sum += d; +- out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): %s", +- d * 1000, total * 1000, d/total*100, t->name()); +- } +- threads_total++; +- t->reset_paced_time(); +- } +- out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): ", +- sum * 1000, total * 1000, sum/total*100); +- +- if (threads_total > 0) { +- 
out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): ", +- sum / threads_total * 1000, total * 1000, sum / threads_total / total * 100); +- } +- if (threads_nz > 0) { +- out->print_cr(" %5.0f of %5.0f ms (%5.1f%%): ", +- sum / threads_nz * 1000, total * 1000, sum / threads_nz / total * 100); +- } +- out->cr(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,108 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP +- +-#include "gc_implementation/shenandoah/shenandoahNumberSeq.hpp" +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "memory/allocation.hpp" +- +-class ShenandoahHeap; +- +-#define PACING_PROGRESS_UNINIT (-1) +-#define PACING_PROGRESS_ZERO ( 0) +- +-/** +- * ShenandoahPacer provides allocation pacing mechanism. +- * +- * Currently it implements simple tax-and-spend pacing policy: GC threads provide +- * credit, allocating thread spend the credit, or stall when credit is not available. +- */ +-class ShenandoahPacer : public CHeapObj { +-private: +- ShenandoahHeap* _heap; +- double _last_time; +- TruncatedSeq* _progress_history; +- Monitor* _wait_monitor; +- ShenandoahSharedFlag _need_notify_waiters; +- +- volatile intptr_t _epoch; +- volatile jdouble _tax_rate; +- +- shenandoah_padding(0); +- volatile intptr_t _budget; +- shenandoah_padding(1); +- volatile intptr_t _progress; +- shenandoah_padding(2); +- +-public: +- ShenandoahPacer(ShenandoahHeap* heap) : +- _heap(heap), +- _last_time(os::elapsedTime()), +- _progress_history(new TruncatedSeq(5)), +- _wait_monitor(new Monitor(Mutex::leaf, "_wait_monitor", true)), +- _epoch(0), +- _tax_rate(1), +- _budget(0), +- _progress(PACING_PROGRESS_UNINIT) { +- } +- +- void setup_for_idle(); +- void setup_for_mark(); +- void setup_for_evac(); +- void setup_for_updaterefs(); +- +- void setup_for_reset(); +- void setup_for_preclean(); +- +- inline void report_mark(size_t words); +- inline void report_evac(size_t words); +- inline void report_updaterefs(size_t words); +- +- inline void report_alloc(size_t words); +- +- bool claim_for_alloc(size_t words, bool force); +- void pace_for_alloc(size_t words); +- void unpace_for_alloc(intptr_t epoch, size_t words); +- +- void notify_waiters(); +- +- intptr_t epoch(); +- +- void flush_stats_to_cycle(); +- void print_cycle_on(outputStream* out); 
+- +-private: +- inline void report_internal(size_t words); +- inline void report_progress_internal(size_t words); +- +- inline void add_budget(size_t words); +- void restart_with(jlong non_taxable_bytes, jdouble tax_rate); +- +- size_t update_and_get_progress_history(); +- +- void wait(size_t time_ms); +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPacer.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,71 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPacer.hpp" +-#include "runtime/atomic.hpp" +- +-inline void ShenandoahPacer::report_mark(size_t words) { +- report_internal(words); +- report_progress_internal(words); +-} +- +-inline void ShenandoahPacer::report_evac(size_t words) { +- report_internal(words); +-} +- +-inline void ShenandoahPacer::report_updaterefs(size_t words) { +- report_internal(words); +-} +- +-inline void ShenandoahPacer::report_alloc(size_t words) { +- report_internal(words); +-} +- +-inline void ShenandoahPacer::report_internal(size_t words) { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- add_budget(words); +-} +- +-inline void ShenandoahPacer::report_progress_internal(size_t words) { +- assert(ShenandoahPacing, "Only be here when pacing is enabled"); +- STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); +- Atomic::add_ptr((intptr_t)words, &_progress); +-} +- +-inline void ShenandoahPacer::add_budget(size_t words) { +- STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t)); +- intptr_t inc = (intptr_t) words; +- intptr_t new_budget = Atomic::add_ptr(inc, &_budget); +- +- // Was the budget replenished beyond zero? Then all pacing claims +- // are satisfied, notify the waiters. Avoid taking any locks here, +- // as it can be called from hot paths and/or while holding other locks. 
+- if (new_budget >= 0 && (new_budget - inc) < 0) { +- _need_notify_waiters.try_set(); +- } +-} +- +-#endif //SHARE_VM_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPadding.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPadding.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPadding.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPadding.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,38 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPADDING_INLINE_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHPADDING_INLINE_HPP +- +-// 64 bytes is enough to cover all existing architectures. If we have some +-// other platforms, we would need to provide the architecture-specific +-// versions here. 
Shared code provides DEFAULT_CACHE_LINE_SIZE, which is +-// inconveniently large by default. +- +-#define SHENANDOAH_CACHE_LINE_SIZE 64 +- +-#define shenandoah_padding(id) \ +- char _pad##id[SHENANDOAH_CACHE_LINE_SIZE] +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHPADDING_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,28 +0,0 @@ +-/* +- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahParallelCleaning.hpp" +- +-Monitor* ShenandoahCodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock"); +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahParallelCleaning.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,397 +0,0 @@ +-/* +- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHPARALLELCLEANING_HPP +-#define SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHPARALLELCLEANING_HPP +- +-#include "classfile/metadataOnStackMark.hpp" +-#include "classfile/symbolTable.hpp" +-#include "code/codeCache.hpp" +-#include "gc_interface/collectedHeap.hpp" +-#include "memory/resourceArea.hpp" +-#include "utilities/workgroup.hpp" +- +-class ShenandoahStringSymbolTableUnlinkTask : public AbstractGangTask { +-private: +- BoolObjectClosure* _is_alive; +- int _initial_string_table_size; +- int _initial_symbol_table_size; +- +- bool _process_strings; +- int _strings_processed; +- int _strings_removed; +- +- bool _process_symbols; +- int _symbols_processed; +- int _symbols_removed; +- +- bool _do_in_parallel; +-public: +- ShenandoahStringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) : +- AbstractGangTask("String/Symbol Unlinking"), +- _is_alive(is_alive), +- _do_in_parallel(Universe::heap()->use_parallel_gc_threads()), +- _process_strings(process_strings), _strings_processed(0), _strings_removed(0), +- _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) { +- +- _initial_string_table_size = StringTable::the_table()->table_size(); +- _initial_symbol_table_size = SymbolTable::the_table()->table_size(); +- if (process_strings) { +- StringTable::clear_parallel_claimed_index(); +- } +- if (process_symbols) { +- SymbolTable::clear_parallel_claimed_index(); +- } +- } +- +- ~ShenandoahStringSymbolTableUnlinkTask() { +- guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size, +- err_msg("claim value " INT32_FORMAT " after unlink less than initial string table size " INT32_FORMAT, +- StringTable::parallel_claimed_index(), _initial_string_table_size)); +- guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= 
_initial_symbol_table_size, +- err_msg("claim value " INT32_FORMAT " after unlink less than initial symbol table size " INT32_FORMAT, +- SymbolTable::parallel_claimed_index(), _initial_symbol_table_size)); +- +- if (G1TraceStringSymbolTableScrubbing) { +- gclog_or_tty->print_cr("Cleaned string and symbol table, " +- "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, " +- "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", +- strings_processed(), strings_removed(), +- symbols_processed(), symbols_removed()); +- } +- } +- +- void work(uint worker_id) { +- if (_do_in_parallel) { +- int strings_processed = 0; +- int strings_removed = 0; +- int symbols_processed = 0; +- int symbols_removed = 0; +- if (_process_strings) { +- StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed); +- Atomic::add(strings_processed, &_strings_processed); +- Atomic::add(strings_removed, &_strings_removed); +- } +- if (_process_symbols) { +- SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed); +- Atomic::add(symbols_processed, &_symbols_processed); +- Atomic::add(symbols_removed, &_symbols_removed); +- } +- } else { +- if (_process_strings) { +- StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed); +- } +- if (_process_symbols) { +- SymbolTable::unlink(&_symbols_processed, &_symbols_removed); +- } +- } +- } +- +- size_t strings_processed() const { return (size_t)_strings_processed; } +- size_t strings_removed() const { return (size_t)_strings_removed; } +- +- size_t symbols_processed() const { return (size_t)_symbols_processed; } +- size_t symbols_removed() const { return (size_t)_symbols_removed; } +-}; +- +-class ShenandoahCodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC { +-private: +- static Monitor* _lock; +- +- BoolObjectClosure* const _is_alive; +- const bool _unloading_occurred; +- const uint _num_workers; +- +- // Variables used to claim nmethods. 
+- nmethod* _first_nmethod; +- volatile nmethod* _claimed_nmethod; +- +- // The list of nmethods that need to be processed by the second pass. +- volatile nmethod* _postponed_list; +- volatile uint _num_entered_barrier; +- +- public: +- ShenandoahCodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) : +- _is_alive(is_alive), +- _unloading_occurred(unloading_occurred), +- _num_workers(num_workers), +- _first_nmethod(NULL), +- _claimed_nmethod(NULL), +- _postponed_list(NULL), +- _num_entered_barrier(0) +- { +- nmethod::increase_unloading_clock(); +- _first_nmethod = CodeCache::alive_nmethod(CodeCache::first()); +- _claimed_nmethod = (volatile nmethod*)_first_nmethod; +- } +- +- ~ShenandoahCodeCacheUnloadingTask() { +- CodeCache::verify_clean_inline_caches(); +- +- CodeCache::set_needs_cache_clean(false); +- guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be"); +- +- CodeCache::verify_icholder_relocations(); +- } +- +- private: +- void add_to_postponed_list(nmethod* nm) { +- nmethod* old; +- do { +- old = (nmethod*)_postponed_list; +- nm->set_unloading_next(old); +- } while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old); +- } +- +- void clean_nmethod(nmethod* nm) { +- bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred); +- +- if (postponed) { +- // This nmethod referred to an nmethod that has not been cleaned/unloaded yet. +- add_to_postponed_list(nm); +- } +- +- // Mark that this thread has been cleaned/unloaded. +- // After this call, it will be safe to ask if this nmethod was unloaded or not. 
+- nm->set_unloading_clock(nmethod::global_unloading_clock()); +- } +- +- void clean_nmethod_postponed(nmethod* nm) { +- nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred); +- } +- +- static const int MaxClaimNmethods = 16; +- +- void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) { +- nmethod* first; +- nmethod* last; +- +- do { +- *num_claimed_nmethods = 0; +- +- first = last = (nmethod*)_claimed_nmethod; +- +- if (first != NULL) { +- for (int i = 0; i < MaxClaimNmethods; i++) { +- last = CodeCache::alive_nmethod(CodeCache::next(last)); +- +- if (last == NULL) { +- break; +- } +- +- claimed_nmethods[i] = last; +- (*num_claimed_nmethods)++; +- } +- } +- +- } while ((nmethod*)Atomic::cmpxchg_ptr(last, &_claimed_nmethod, first) != first); +- } +- +- nmethod* claim_postponed_nmethod() { +- nmethod* claim; +- nmethod* next; +- +- do { +- claim = (nmethod*)_postponed_list; +- if (claim == NULL) { +- return NULL; +- } +- +- next = claim->unloading_next(); +- +- } while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim); +- +- return claim; +- } +- +- public: +- // Mark that we're done with the first pass of nmethod cleaning. +- void barrier_mark(uint worker_id) { +- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); +- _num_entered_barrier++; +- if (_num_entered_barrier == _num_workers) { +- ml.notify_all(); +- } +- } +- +- // See if we have to wait for the other workers to +- // finish their first-pass nmethod cleaning work. +- void barrier_wait(uint worker_id) { +- if (_num_entered_barrier < _num_workers) { +- MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag); +- while (_num_entered_barrier < _num_workers) { +- ml.wait(Mutex::_no_safepoint_check_flag, 0, false); +- } +- } +- } +- +- // Cleaning and unloading of nmethods. Some work has to be postponed +- // to the second pass, when we know which nmethods survive. 
+- void work_first_pass(uint worker_id) { +- // The first nmethods is claimed by the first worker. +- if (worker_id == 0 && _first_nmethod != NULL) { +- clean_nmethod(_first_nmethod); +- _first_nmethod = NULL; +- } +- +- int num_claimed_nmethods; +- nmethod* claimed_nmethods[MaxClaimNmethods]; +- +- while (true) { +- claim_nmethods(claimed_nmethods, &num_claimed_nmethods); +- +- if (num_claimed_nmethods == 0) { +- break; +- } +- +- for (int i = 0; i < num_claimed_nmethods; i++) { +- clean_nmethod(claimed_nmethods[i]); +- } +- } +- +- // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark. +- // Need to retire the buffers now that this thread has stopped cleaning nmethods. +- MetadataOnStackMark::retire_buffer_for_thread(Thread::current()); +- } +- +- void work_second_pass(uint worker_id) { +- nmethod* nm; +- // Take care of postponed nmethods. +- while ((nm = claim_postponed_nmethod()) != NULL) { +- clean_nmethod_postponed(nm); +- } +- } +-}; +- +-class ShenandoahKlassCleaningTask : public StackObj { +- BoolObjectClosure* _is_alive; +- volatile jint _clean_klass_tree_claimed; +- ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator; +- +- public: +- ShenandoahKlassCleaningTask(BoolObjectClosure* is_alive) : +- _is_alive(is_alive), +- _clean_klass_tree_claimed(0), +- _klass_iterator() { +- } +- +- private: +- bool claim_clean_klass_tree_task() { +- if (_clean_klass_tree_claimed) { +- return false; +- } +- +- return Atomic::cmpxchg(1, (jint*)&_clean_klass_tree_claimed, 0) == 0; +- } +- +- InstanceKlass* claim_next_klass() { +- Klass* klass; +- do { +- klass =_klass_iterator.next_klass(); +- } while (klass != NULL && !klass->oop_is_instance()); +- +- return (InstanceKlass*)klass; +- } +- +-public: +- +- void clean_klass(InstanceKlass* ik) { +- ik->clean_weak_instanceklass_links(_is_alive); +- +- if (JvmtiExport::has_redefined_a_class()) { +- InstanceKlass::purge_previous_versions(ik); +- } +- } +- +- void work() { +- ResourceMark rm; +- 
+- // One worker will clean the subklass/sibling klass tree. +- if (claim_clean_klass_tree_task()) { +- Klass::clean_subklass_tree(_is_alive); +- } +- +- // All workers will help cleaning the classes, +- InstanceKlass* klass; +- while ((klass = claim_next_klass()) != NULL) { +- clean_klass(klass); +- } +- } +-}; +- +-// To minimize the remark pause times, the tasks below are done in parallel. +-class ShenandoahParallelCleaningTask : public AbstractGangTask { +-private: +- ShenandoahStringSymbolTableUnlinkTask _string_symbol_task; +- ShenandoahCodeCacheUnloadingTask _code_cache_task; +- ShenandoahKlassCleaningTask _klass_cleaning_task; +- +-public: +- // The constructor is run in the VMThread. +- ShenandoahParallelCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, uint num_workers, bool unloading_occurred) : +- AbstractGangTask("Parallel Cleaning"), +- _string_symbol_task(is_alive, process_strings, process_symbols), +- _code_cache_task(num_workers, is_alive, unloading_occurred), +- _klass_cleaning_task(is_alive) { +- } +- +- void pre_work_verification() { +- // The VM Thread will have registered Metadata during the single-threaded phase of MetadataStackOnMark. +- assert(Thread::current()->is_VM_thread() +- || !MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty"); +- } +- +- void post_work_verification() { +- assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty"); +- } +- +- // The parallel work done by all worker threads. +- void work(uint worker_id) { +- pre_work_verification(); +- +- // Do first pass of code cache cleaning. +- _code_cache_task.work_first_pass(worker_id); +- +- // Let the threads mark that the first pass is done. +- _code_cache_task.barrier_mark(worker_id); +- +- // Clean the Strings and Symbols. +- _string_symbol_task.work(worker_id); +- +- // Wait for all workers to finish the first code cache cleaning pass. 
+- _code_cache_task.barrier_wait(worker_id); +- +- // Do the second code cache cleaning work, which realize on +- // the liveness information gathered during the first pass. +- _code_cache_task.work_second_pass(worker_id); +- +- // Clean all klasses that were not unloaded. +- _klass_cleaning_task.work(); +- +- post_work_verification(); +- } +- +-}; +- +-#endif // SHARE_VM_GC_IMPLEMENTATION_SHENANDOAH_SHENANDOAHPARALLELCLEANING_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,314 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "runtime/orderAccess.hpp" +-#include "utilities/ostream.hpp" +- +-#define SHENANDOAH_PHASE_NAME_FORMAT "%-30s" +-#define SHENANDOAH_S_TIME_FORMAT "%8.3lf" +-#define SHENANDOAH_US_TIME_FORMAT "%8.0lf" +-#define SHENANDOAH_US_WORKER_TIME_FORMAT "%3.0lf" +-#define SHENANDOAH_US_WORKER_NOTIME_FORMAT "%3s" +-#define SHENANDOAH_PARALLELISM_FORMAT "%4.2lf" +- +-#define SHENANDOAH_PHASE_DECLARE_NAME(type, title) \ +- title, +- +-const char* ShenandoahPhaseTimings::_phase_names[] = { +- SHENANDOAH_PHASE_DO(SHENANDOAH_PHASE_DECLARE_NAME) +-}; +- +-#undef SHENANDOAH_PHASE_DECLARE_NAME +- +-ShenandoahPhaseTimings::ShenandoahPhaseTimings(uint max_workers) : +- _max_workers(max_workers) { +- assert(_max_workers > 0, "Must have some GC threads"); +- +- // Initialize everything to sane defaults +- for (uint i = 0; i < _num_phases; i++) { +-#define SHENANDOAH_WORKER_DATA_NULL(type, title) \ +- _worker_data[i] = NULL; +- SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_NULL) +-#undef SHENANDOAH_WORKER_DATA_NULL +- _cycle_data[i] = uninitialized(); +- } +- +- // Then punch in the worker-related data. +- // Every worker phase get a bunch of internal objects, except +- // the very first slot, which is "" and is not populated. 
+- for (uint i = 0; i < _num_phases; i++) { +- if (is_worker_phase(Phase(i))) { +- int c = 0; +-#define SHENANDOAH_WORKER_DATA_INIT(type, title) \ +- if (c++ != 0) _worker_data[i + c] = new ShenandoahWorkerData(_max_workers, title); +- SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_INIT) +-#undef SHENANDOAH_WORKER_DATA_INIT +- } +- } +- +- _policy = ShenandoahHeap::heap()->shenandoah_policy(); +- assert(_policy != NULL, "Can not be NULL"); +-} +- +-ShenandoahPhaseTimings::Phase ShenandoahPhaseTimings::worker_par_phase(Phase phase, ParPhase par_phase) { +- assert(is_worker_phase(phase), err_msg("Phase should accept worker phase times: %s", phase_name(phase))); +- Phase p = Phase(phase + 1 + par_phase); +- assert(p >= 0 && p < _num_phases, err_msg("Out of bound for: %s", phase_name(phase))); +- return p; +-} +- +-ShenandoahWorkerData* ShenandoahPhaseTimings::worker_data(Phase phase, ParPhase par_phase) { +- Phase p = worker_par_phase(phase, par_phase); +- ShenandoahWorkerData* wd = _worker_data[p]; +- assert(wd != NULL, err_msg("Counter initialized: %s", phase_name(p))); +- return wd; +-} +- +-bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) { +- assert(phase >= 0 && phase < _num_phases, "Out of bounds"); +- switch (phase) { +- case init_evac: +- case scan_roots: +- case update_roots: +- case final_update_refs_roots: +- case full_gc_scan_roots: +- case full_gc_update_roots: +- case full_gc_adjust_roots: +- case degen_gc_update_roots: +- case weak_roots: +- case full_gc_weak_roots: +- case heap_iteration_roots: +- case verifier_roots: +- return true; +- default: +- return false; +- } +-} +- +-bool ShenandoahPhaseTimings::is_root_work_phase(Phase phase) { +- switch (phase) { +- case scan_roots: +- case update_roots: +- case init_evac: +- case final_update_refs_roots: +- case degen_gc_update_roots: +- case full_gc_scan_roots: +- case full_gc_update_roots: +- case full_gc_adjust_roots: +- return true; +- default: +- return false; +- } +-} +- +-void 
ShenandoahPhaseTimings::set_cycle_data(Phase phase, double time) { +-#ifdef ASSERT +- double d = _cycle_data[phase]; +- assert(d == uninitialized(), err_msg("Should not be set yet: %s, current value: %lf", phase_name(phase), d)); +-#endif +- _cycle_data[phase] = time; +-} +- +-void ShenandoahPhaseTimings::record_phase_time(Phase phase, double time) { +- if (!_policy->is_at_shutdown()) { +- set_cycle_data(phase, time); +- } +-} +- +-void ShenandoahPhaseTimings::record_workers_start(Phase phase) { +- assert(is_worker_phase(phase), err_msg("Phase should accept worker phase times: %s", phase_name(phase))); +- +- // Special case: these phases can enter multiple times, need to reset +- // their worker data every time. +- if (phase == heap_iteration_roots) { +- for (uint i = 1; i < _num_par_phases; i++) { +- worker_data(phase, ParPhase(i))->reset(); +- } +- } +- +-#ifdef ASSERT +- for (uint i = 1; i < _num_par_phases; i++) { +- ShenandoahWorkerData* wd = worker_data(phase, ParPhase(i)); +- for (uint c = 0; c < _max_workers; c++) { +- assert(wd->get(c) == ShenandoahWorkerData::uninitialized(), +- err_msg("Should not be set: %s", phase_name(worker_par_phase(phase, ParPhase(i))))); +- } +- } +-#endif +-} +- +-void ShenandoahPhaseTimings::record_workers_end(Phase phase) { +- assert(is_worker_phase(phase), err_msg("Phase should accept worker phase times: %s", phase_name(phase))); +-} +- +-void ShenandoahPhaseTimings::flush_par_workers_to_cycle() { +- for (uint pi = 0; pi < _num_phases; pi++) { +- Phase phase = Phase(pi); +- if (is_worker_phase(phase)) { +- double s = uninitialized(); +- for (uint i = 1; i < _num_par_phases; i++) { +- ShenandoahWorkerData* wd = worker_data(phase, ParPhase(i)); +- double ws = uninitialized(); +- for (uint c = 0; c < _max_workers; c++) { +- double v = wd->get(c); +- if (v != ShenandoahWorkerData::uninitialized()) { +- if (ws == uninitialized()) { +- ws = v; +- } else { +- ws += v; +- } +- } +- } +- if (ws != uninitialized()) { +- // add to each 
line in phase +- set_cycle_data(Phase(phase + i + 1), ws); +- if (s == uninitialized()) { +- s = ws; +- } else { +- s += ws; +- } +- } +- } +- if (s != uninitialized()) { +- // add to total for phase +- set_cycle_data(Phase(phase + 1), s); +- } +- } +- } +-} +- +-void ShenandoahPhaseTimings::flush_cycle_to_global() { +- for (uint i = 0; i < _num_phases; i++) { +- if (_cycle_data[i] != uninitialized()) { +- _global_data[i].add(_cycle_data[i]); +- _cycle_data[i] = uninitialized(); +- } +- if (_worker_data[i] != NULL) { +- _worker_data[i]->reset(); +- } +- } +- OrderAccess::fence(); +-} +- +-void ShenandoahPhaseTimings::print_cycle_on(outputStream* out) const { +- out->cr(); +- out->print_cr("All times are wall-clock times, except per-root-class counters, that are sum over"); +- out->print_cr("all workers. Dividing the over the root stage time estimates parallelism."); +- out->cr(); +- for (uint i = 0; i < _num_phases; i++) { +- double v = _cycle_data[i] * 1000000.0; +- if (v > 0) { +- out->print(SHENANDOAH_PHASE_NAME_FORMAT " " SHENANDOAH_US_TIME_FORMAT " us", _phase_names[i], v); +- +- if (is_worker_phase(Phase(i))) { +- double total = _cycle_data[i + 1] * 1000000.0; +- if (total > 0) { +- out->print(", parallelism: " SHENANDOAH_PARALLELISM_FORMAT "x", total / v); +- } +- } +- +- if (_worker_data[i] != NULL) { +- out->print(", workers (us): "); +- for (uint c = 0; c < _max_workers; c++) { +- double tv = _worker_data[i]->get(c); +- if (tv != ShenandoahWorkerData::uninitialized()) { +- out->print(SHENANDOAH_US_WORKER_TIME_FORMAT ", ", tv * 1000000.0); +- } else { +- out->print(SHENANDOAH_US_WORKER_NOTIME_FORMAT ", ", "---"); +- } +- } +- } +- out->cr(); +- } +- } +-} +- +-void ShenandoahPhaseTimings::print_global_on(outputStream* out) const { +- out->cr(); +- out->print_cr("GC STATISTICS:"); +- out->print_cr(" \"(G)\" (gross) pauses include VM time: time to notify and block threads, do the pre-"); +- out->print_cr(" and post-safepoint housekeeping. 
Use -XX:+PrintSafepointStatistics to dissect."); +- out->print_cr(" \"(N)\" (net) pauses are the times spent in the actual GC code."); +- out->print_cr(" \"a\" is average time for each phase, look at levels to see if average makes sense."); +- out->print_cr(" \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum)."); +- out->cr(); +- out->print_cr(" All times are wall-clock times, except per-root-class counters, that are sum over"); +- out->print_cr(" all workers. Dividing the over the root stage time estimates parallelism."); +- out->cr(); +- +- out->print_cr(" Pacing delays are measured from entering the pacing code till exiting it. Therefore,"); +- out->print_cr(" observed pacing delays may be higher than the threshold when paced thread spent more"); +- out->print_cr(" time in the pacing code. It usually happens when thread is de-scheduled while paced,"); +- out->print_cr(" OS takes longer to unblock the thread, or JVM experiences an STW pause."); +- out->cr(); +- out->print_cr(" Higher delay would prevent application outpacing the GC, but it will hide the GC latencies"); +- out->print_cr(" from the STW pause times. 
Pacing affects the individual threads, and so it would also be"); +- out->print_cr(" invisible to the usual profiling tools, but would add up to end-to-end application latency."); +- out->print_cr(" Raise max pacing delay with care."); +- out->cr(); +- +- for (uint i = 0; i < _num_phases; i++) { +- if (_global_data[i].maximum() != 0) { +- out->print_cr(SHENANDOAH_PHASE_NAME_FORMAT " = " SHENANDOAH_S_TIME_FORMAT " s " +- "(a = " SHENANDOAH_US_TIME_FORMAT " us) " +- "(n = " INT32_FORMAT_W(5) ") (lvls, us = " +- SHENANDOAH_US_TIME_FORMAT ", " +- SHENANDOAH_US_TIME_FORMAT ", " +- SHENANDOAH_US_TIME_FORMAT ", " +- SHENANDOAH_US_TIME_FORMAT ", " +- SHENANDOAH_US_TIME_FORMAT ")", +- _phase_names[i], +- _global_data[i].sum(), +- _global_data[i].avg() * 1000000.0, +- _global_data[i].num(), +- _global_data[i].percentile(0) * 1000000.0, +- _global_data[i].percentile(25) * 1000000.0, +- _global_data[i].percentile(50) * 1000000.0, +- _global_data[i].percentile(75) * 1000000.0, +- _global_data[i].maximum() * 1000000.0 +- ); +- } +- } +-} +- +-ShenandoahWorkerTimingsTracker::ShenandoahWorkerTimingsTracker(ShenandoahPhaseTimings::Phase phase, +- ShenandoahPhaseTimings::ParPhase par_phase, uint worker_id) : +- _timings(ShenandoahHeap::heap()->phase_timings()), +- _phase(phase), _par_phase(par_phase), _worker_id(worker_id) { +- +- assert(_timings->worker_data(_phase, _par_phase)->get(_worker_id) == ShenandoahWorkerData::uninitialized(), +- err_msg("Should not be set yet: %s", ShenandoahPhaseTimings::phase_name(_timings->worker_par_phase(_phase, _par_phase)))); +- _start_time = os::elapsedTime(); +-} +- +-ShenandoahWorkerTimingsTracker::~ShenandoahWorkerTimingsTracker() { +- _timings->worker_data(_phase, _par_phase)->set(_worker_id, os::elapsedTime() - _start_time); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.hpp +--- 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahPhaseTimings.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,242 +0,0 @@ +- +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHPHASETIMEINGS_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHPHASETIMEINGS_HPP +- +-#include "memory/allocation.hpp" +-#include "gc_implementation/shenandoah/shenandoahNumberSeq.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerDataArray.inline.hpp" +- +-class ShenandoahCollectorPolicy; +-class outputStream; +- +-#define SHENANDOAH_PAR_PHASE_DO(CNT_PREFIX, DESC_PREFIX, f) \ +- f(CNT_PREFIX ## TotalWork, DESC_PREFIX "") \ +- f(CNT_PREFIX ## ThreadRoots, DESC_PREFIX "Thread Roots") \ +- f(CNT_PREFIX ## CodeCacheRoots, DESC_PREFIX "Code Cache Roots") \ +- f(CNT_PREFIX ## UniverseRoots, DESC_PREFIX "Universe Roots") \ +- f(CNT_PREFIX ## JNIRoots, DESC_PREFIX "JNI Handles Roots") \ +- f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \ +- f(CNT_PREFIX ## JFRWeakRoots, DESC_PREFIX "JFR Weak Roots") \ +- f(CNT_PREFIX ## JNIWeakRoots, DESC_PREFIX "JNI Weak Roots") \ +- f(CNT_PREFIX ## StringTableRoots, DESC_PREFIX "String Table Roots") \ +- f(CNT_PREFIX ## ResolvedMethodTableRoots, DESC_PREFIX "Resolved Table Roots") \ +- f(CNT_PREFIX ## VMGlobalRoots, DESC_PREFIX "VM Global Roots") \ +- f(CNT_PREFIX ## VMWeakRoots, DESC_PREFIX "VM Weak Roots") \ +- f(CNT_PREFIX ## ObjectSynchronizerRoots, DESC_PREFIX "Synchronizer Roots") \ +- f(CNT_PREFIX ## FlatProfilerRoots, DESC_PREFIX "Flat Profiler Roots") \ +- f(CNT_PREFIX ## ManagementRoots, DESC_PREFIX "Management Roots") \ +- f(CNT_PREFIX ## SystemDictionaryRoots, DESC_PREFIX "System Dict Roots") \ +- f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \ +- f(CNT_PREFIX ## JVMTIRoots, DESC_PREFIX "JVMTI Roots") \ +- f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table Roots") \ +- f(CNT_PREFIX ## StringDedupQueueRoots, DESC_PREFIX "Dedup Queue Roots") \ +- f(CNT_PREFIX ## StringDedupThreadRoots, DESC_PREFIX "Dedup Thread Roots") \ +- f(CNT_PREFIX ## FinishQueues, 
DESC_PREFIX "Finish Queues") \ +- // end +- +-#define SHENANDOAH_PHASE_DO(f) \ +- f(conc_reset, "Concurrent Reset") \ +- \ +- f(init_mark_gross, "Pause Init Mark (G)") \ +- f(init_mark, "Pause Init Mark (N)") \ +- f(accumulate_stats, " Accumulate Stats") \ +- f(make_parsable, " Make Parsable") \ +- f(init_update_region_states, " Update Region States") \ +- f(scan_roots, " Scan Roots") \ +- SHENANDOAH_PAR_PHASE_DO(scan_, " S: ", f) \ +- f(resize_tlabs, " Resize TLABs") \ +- \ +- f(conc_mark, "Concurrent Marking") \ +- f(conc_preclean, "Concurrent Precleaning") \ +- \ +- f(final_mark_gross, "Pause Final Mark (G)") \ +- f(final_mark, "Pause Final Mark (N)") \ +- f(update_roots, " Update Roots") \ +- SHENANDOAH_PAR_PHASE_DO(update_, " U: ", f) \ +- f(finish_queues, " Finish Queues") \ +- f(weakrefs, " Weak References") \ +- f(weakrefs_process, " Process") \ +- f(weakrefs_enqueue, " Enqueue") \ +- f(weak_roots, " Weak Roots") \ +- SHENANDOAH_PAR_PHASE_DO(weak_roots_, " WR: ", f) \ +- f(purge, " System Purge") \ +- f(purge_class_unload, " Unload Classes") \ +- f(purge_par, " Parallel Cleanup") \ +- f(purge_metadata, " Deallocate Metadata") \ +- f(purge_cldg, " CLDG") \ +- f(purge_string_dedup, " String Dedup") \ +- f(final_update_region_states, " Update Region States") \ +- f(retire_tlabs, " Retire TLABs") \ +- f(choose_cset, " Choose Collection Set") \ +- f(final_rebuild_freeset, " Rebuild Free Set") \ +- f(init_evac, " Initial Evacuation") \ +- SHENANDOAH_PAR_PHASE_DO(evac_, " E: ", f) \ +- \ +- f(conc_cleanup_early, "Concurrent Cleanup") \ +- f(conc_evac, "Concurrent Evacuation") \ +- \ +- f(init_update_refs_gross, "Pause Init Update Refs (G)") \ +- f(init_update_refs, "Pause Init Update Refs (N)") \ +- f(init_update_refs_retire_gclabs, " Retire GCLABs") \ +- f(init_update_refs_prepare, " Prepare") \ +- \ +- f(conc_update_refs, "Concurrent Update Refs") \ +- \ +- f(final_update_refs_gross, "Pause Final Update Refs (G)") \ +- f(final_update_refs, "Pause Final Update 
Refs (N)") \ +- f(final_update_refs_finish_work, " Finish Work") \ +- f(final_update_refs_roots, " Update Roots") \ +- SHENANDOAH_PAR_PHASE_DO(final_update_, " UR: ", f) \ +- f(final_update_refs_update_region_states, " Update Region States") \ +- f(final_update_refs_trash_cset, " Trash Collection Set") \ +- f(final_update_refs_rebuild_freeset, " Rebuild Free Set") \ +- \ +- f(conc_cleanup_complete, "Concurrent Cleanup") \ +- \ +- f(degen_gc_gross, "Pause Degenerated GC (G)") \ +- f(degen_gc, "Pause Degenerated GC (N)") \ +- f(degen_gc_update_roots, " Degen Update Roots") \ +- SHENANDOAH_PAR_PHASE_DO(degen_gc_update_, " DU: ", f) \ +- \ +- f(full_gc_gross, "Pause Full GC (G)") \ +- f(full_gc, "Pause Full GC (N)") \ +- f(full_gc_heapdump_pre, " Pre Heap Dump") \ +- f(full_gc_prepare, " Prepare") \ +- f(full_gc_update_roots, " Update Roots") \ +- SHENANDOAH_PAR_PHASE_DO(full_gc_update_roots_, " FU: ", f) \ +- f(full_gc_scan_roots, " Scan Roots") \ +- SHENANDOAH_PAR_PHASE_DO(full_gc_scan_roots_, " FS: ", f) \ +- f(full_gc_mark, " Mark") \ +- f(full_gc_mark_finish_queues, " Finish Queues") \ +- f(full_gc_weakrefs, " Weak References") \ +- f(full_gc_weakrefs_process, " Process") \ +- f(full_gc_weakrefs_enqueue, " Enqueue") \ +- f(full_gc_weak_roots, " Weak Roots") \ +- SHENANDOAH_PAR_PHASE_DO(full_gc_weak_roots_, " WR: ", f) \ +- f(full_gc_purge, " System Purge") \ +- f(full_gc_purge_class_unload, " Unload Classes") \ +- f(full_gc_purge_par, " Parallel Cleanup") \ +- f(full_gc_purge_metadata, " Deallocate Metadata") \ +- f(full_gc_purge_cldg, " CLDG") \ +- f(full_gc_purge_string_dedup, " String Dedup") \ +- f(full_gc_calculate_addresses, " Calculate Addresses") \ +- f(full_gc_calculate_addresses_regular, " Regular Objects") \ +- f(full_gc_calculate_addresses_humong, " Humongous Objects") \ +- f(full_gc_adjust_pointers, " Adjust Pointers") \ +- f(full_gc_adjust_roots, " Adjust Roots") \ +- SHENANDOAH_PAR_PHASE_DO(full_gc_adjust_roots_, " FA: ", f) \ +- 
f(full_gc_copy_objects, " Copy Objects") \ +- f(full_gc_copy_objects_regular, " Regular Objects") \ +- f(full_gc_copy_objects_humong, " Humongous Objects") \ +- f(full_gc_copy_objects_reset_complete, " Reset Complete Bitmap") \ +- f(full_gc_copy_objects_rebuild, " Rebuild Region Sets") \ +- f(full_gc_resize_tlabs, " Resize TLABs") \ +- f(full_gc_heapdump_post, " Post Heap Dump") \ +- \ +- /* Longer concurrent phases at the end */ \ +- \ +- f(conc_uncommit, "Concurrent Uncommit") \ +- f(pacing, "Pacing") \ +- \ +- f(heap_iteration_roots, "Heap Iteration") \ +- SHENANDOAH_PAR_PHASE_DO(heap_iteration_roots_, " HI: ", f) \ +- f(verifier_roots, "Verifier") \ +- SHENANDOAH_PAR_PHASE_DO(verifier_roots_, " V: ", f) \ +- // end +- +-typedef ShenandoahWorkerDataArray ShenandoahWorkerData; +- +-class ShenandoahPhaseTimings : public CHeapObj { +- friend class ShenandoahGCPhase; +- friend class ShenandoahWorkerTimingsTracker; +-public: +-#define SHENANDOAH_PHASE_DECLARE_ENUM(type, title) type, +- enum Phase { +- SHENANDOAH_PHASE_DO(SHENANDOAH_PHASE_DECLARE_ENUM) +- _num_phases, +- _invalid_phase = _num_phases +- }; +- +- enum ParPhase { +- SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_PHASE_DECLARE_ENUM) +- _num_par_phases +- }; +- +-#undef SHENANDOAH_PHASE_DECLARE_ENUM +- +-private: +- uint _max_workers; +- double _cycle_data[_num_phases]; +- HdrSeq _global_data[_num_phases]; +- static const char* _phase_names[_num_phases]; +- +- ShenandoahWorkerData* _worker_data[_num_phases]; +- ShenandoahCollectorPolicy* _policy; +- +- static bool is_worker_phase(Phase phase); +- static bool is_root_work_phase(Phase phase); +- +- ShenandoahWorkerData* worker_data(Phase phase, ParPhase par_phase); +- Phase worker_par_phase(Phase phase, ParPhase par_phase); +- +- void set_cycle_data(Phase phase, double time); +- static double uninitialized() { return -1; } +- +-public: +- ShenandoahPhaseTimings(uint max_workers); +- +- void record_phase_time(Phase phase, double time); +- +- void 
record_workers_start(Phase phase); +- void record_workers_end(Phase phase); +- +- void flush_par_workers_to_cycle(); +- void flush_cycle_to_global(); +- +- static const char* phase_name(Phase phase) { +- assert(phase >= 0 && phase < _num_phases, "Out of bound"); +- return _phase_names[phase]; +- } +- +- void print_cycle_on(outputStream* out) const; +- void print_global_on(outputStream* out) const; +-}; +- +-class ShenandoahWorkerTimingsTracker : public StackObj { +-private: +- ShenandoahPhaseTimings* const _timings; +- ShenandoahPhaseTimings::Phase const _phase; +- ShenandoahPhaseTimings::ParPhase const _par_phase; +- uint const _worker_id; +- +- double _start_time; +-public: +- ShenandoahWorkerTimingsTracker(ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase, uint worker_id); +- ~ShenandoahWorkerTimingsTracker(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHGCPHASETIMEINGS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,343 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "classfile/classLoaderData.hpp" +-#include "classfile/systemDictionary.hpp" +-#include "code/codeCache.hpp" +-#include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" +-#include "gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "memory/allocation.inline.hpp" +-#include "memory/resourceArea.hpp" +-#include "runtime/fprofiler.hpp" +-#include "runtime/thread.hpp" +-#include "services/management.hpp" +- +-#if INCLUDE_JFR +-#include "jfr/leakprofiler/leakProfiler.hpp" +-#endif +- +-ShenandoahSerialRoot::ShenandoahSerialRoot(ShenandoahSerialRoot::OopsDo oops_do, ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase) : +- _claimed(0), _oops_do(oops_do), _phase(phase), _par_phase(par_phase) { +-} +- +-void ShenandoahSerialRoot::oops_do(OopClosure* cl, uint worker_id) { +- if (_claimed == 0 && Atomic::cmpxchg(1, &_claimed, 0) == 0) { +- ShenandoahWorkerTimingsTracker timer(_phase, _par_phase, worker_id); +- _oops_do(cl); +- } +-} +- +-static void universe_oops_do(OopClosure* closure) { 
+- Universe::oops_do(closure); +-} +- +-ShenandoahSerialRoots::ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase) : +- _phase(phase), +- _universe_roots(&universe_oops_do, phase, ShenandoahPhaseTimings::UniverseRoots), +- _management_roots(&Management::oops_do, phase, ShenandoahPhaseTimings::ManagementRoots), +- _jvmti_roots(&JvmtiExport::oops_do, phase, ShenandoahPhaseTimings::JVMTIRoots), +- _jni_handle_roots(&JNIHandles::oops_do, phase, ShenandoahPhaseTimings::JNIRoots), +- _flat_profiler_roots(&FlatProfiler::oops_do, phase, ShenandoahPhaseTimings::FlatProfilerRoots) { +-} +- +-void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) { +- _universe_roots.oops_do(cl, worker_id); +- _management_roots.oops_do(cl, worker_id); +- _jvmti_roots.oops_do(cl, worker_id); +- _jni_handle_roots.oops_do(cl, worker_id); +- _flat_profiler_roots.oops_do(cl, worker_id); +- +- { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ObjectSynchronizerRoots, worker_id); +- while(_om_iterator.parallel_oops_do(cl)); +- } +-} +- +-ShenandoahSystemDictionaryRoots::ShenandoahSystemDictionaryRoots(ShenandoahPhaseTimings::Phase phase) : +- _phase(phase), _claimed(0) { +-} +- +-void ShenandoahSystemDictionaryRoots::strong_oops_do(OopClosure* oops, uint worker_id) { +- if (_claimed == 0 && Atomic::cmpxchg(1, &_claimed, 0) == 0) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id); +- SystemDictionary::roots_oops_do(oops, NULL); +- } +-} +- +-void ShenandoahSystemDictionaryRoots::oops_do(OopClosure* oops, uint worker_id) { +- if (_claimed == 0 && Atomic::cmpxchg(1, &_claimed, 0) == 0) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::SystemDictionaryRoots, worker_id); +- SystemDictionary::roots_oops_do(oops, oops); +- } +-} +- +-ShenandoahStringTableRoots::ShenandoahStringTableRoots(ShenandoahPhaseTimings::Phase phase) : +- _phase(phase) +-{} +- +-void 
ShenandoahStringTableRoots::oops_do(OopClosure* oops, uint worker_id) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::StringTableRoots, worker_id); +- StringTable::possibly_parallel_oops_do_shenandoah(oops); +-} +- +-ShenandoahThreadRoots::ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase) : +- _phase(phase) { +- ShenandoahHeap* const heap = ShenandoahHeap::heap(); +- heap->set_par_threads(heap->workers()->active_workers()); +-} +- +-void ShenandoahThreadRoots::oops_do(OopClosure* oops_cl, CLDClosure* cld_cl, CodeBlobClosure* code_cl, uint worker_id) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id); +- ResourceMark rm; +- Threads::possibly_parallel_oops_do(oops_cl, cld_cl, code_cl); +-} +- +-ShenandoahWeakRoot::ShenandoahWeakRoot(ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase, ShenandoahWeakRoot::WeakOopsDo oops_do) : +- _phase(phase), _par_phase(par_phase), _claimed(0), _weak_oops_do(oops_do) { +-} +- +-void ShenandoahWeakRoot::oops_do(OopClosure* keep_alive, uint worker_id) { +- AlwaysTrueClosure always_true; +- weak_oops_do(&always_true, keep_alive, worker_id); +-} +- +-void ShenandoahWeakRoot::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) { +- if (_claimed == 0 && Atomic::cmpxchg(1, &_claimed, 0) == 0) { +- ShenandoahWorkerTimingsTracker t(_phase, _par_phase, worker_id); +- _weak_oops_do(is_alive, keep_alive); +- } +-} +- +-ShenandoahWeakRoots::ShenandoahWeakRoots(ShenandoahPhaseTimings::Phase phase) : +-#if INCLUDE_JFR +- _jfr_weak_roots(phase, ShenandoahPhaseTimings::JFRWeakRoots, &LeakProfiler::oops_do), +-#endif // INCLUDE_JFR +- _jni_weak_roots(phase, ShenandoahPhaseTimings::JNIWeakRoots, &JNIHandles::weak_oops_do) { +-} +- +-void ShenandoahWeakRoots::oops_do(OopClosure* keep_alive, uint worker_id) { +- JFR_ONLY(_jfr_weak_roots.oops_do(keep_alive, worker_id);) +- _jni_weak_roots.oops_do(keep_alive, 
worker_id); +-} +- +-void ShenandoahWeakRoots::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) { +- JFR_ONLY(_jfr_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);) +- _jni_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id); +-} +- +-ShenandoahStringDedupRoots::ShenandoahStringDedupRoots(ShenandoahPhaseTimings::Phase phase) : _phase(phase) { +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::clear_claimed(); +- } +-} +- +-void ShenandoahStringDedupRoots::oops_do(OopClosure* oops, uint worker_id) { +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::parallel_oops_do(_phase, oops, worker_id); +- } +-} +- +-ShenandoahClassLoaderDataRoots::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase) : +- _phase(phase) { +- ClassLoaderDataGraph::clear_claimed_marks(); +-} +- +-void ShenandoahClassLoaderDataRoots::cld_do(CLDClosure* clds, uint worker_id) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id); +- ClassLoaderDataGraph::roots_cld_do(clds, clds); +-} +- +-void ShenandoahClassLoaderDataRoots::always_strong_cld_do(CLDClosure* clds, uint worker_id) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id); +- ClassLoaderDataGraph::always_strong_cld_do(clds); +-} +- +-ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) : +- _srs(ShenandoahHeap::heap()), +- _heap(ShenandoahHeap::heap()), +- _phase(phase) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint"); +- _heap->phase_timings()->record_workers_start(_phase); +-} +- +-ShenandoahRootProcessor::~ShenandoahRootProcessor() { +- assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint"); +- _heap->phase_timings()->record_workers_end(_phase); +-} +- +-ShenandoahRootEvacuator::ShenandoahRootEvacuator(ShenandoahPhaseTimings::Phase phase) : +- ShenandoahRootProcessor(phase), +- 
_serial_roots(phase), +- _dict_roots(phase), +- _cld_roots(phase), +- _thread_roots(phase), +- _weak_roots(phase), +- _dedup_roots(phase), +- _string_table_roots(phase), +- _code_roots(phase) +-{} +- +-ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() : +- ShenandoahRootProcessor(ShenandoahPhaseTimings::heap_iteration_roots), +- _serial_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _dict_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _thread_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _weak_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _string_table_roots(ShenandoahPhaseTimings::heap_iteration_roots), +- _code_roots(ShenandoahPhaseTimings::heap_iteration_roots) +-{} +- +- void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) { +- assert(Thread::current()->is_VM_thread(), "Only by VM thread"); +- // Must use _claim_none to avoid interfering with concurrent CLDG iteration +- CLDToOopClosure clds(oops, false /* must claim */); +- MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations); +- ResourceMark rm; +- +- _serial_roots.oops_do(oops, 0); +- _dict_roots.oops_do(oops, 0); +- _cld_roots.cld_do(&clds, 0); +- _thread_roots.oops_do(oops, NULL, NULL, 0); +- _code_roots.code_blobs_do(&code, 0); +- +- _weak_roots.oops_do(oops, 0); +- _string_table_roots.oops_do(oops, 0); +- _dedup_roots.oops_do(oops, 0); +- } +- +-void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) { +- { +- // Evacuate the PLL here so that the SurrogateLockerThread doesn't +- // have to. SurrogateLockerThread can execute write barrier in VMOperation +- // prolog. If the SLT runs into OOM during that evacuation, the VMOperation +- // may deadlock. Doing this evacuation the first thing makes that critical +- // OOM less likely to happen. 
It is a bit excessive to perform WB by all +- // threads, but this guarantees the very first evacuation would be the PLL. +- // +- // This pre-evac can still silently fail with OOME here, and PLL would not +- // get evacuated. This would mean next VMOperation would try to evac PLL in +- // SLT thread. We make additional effort to recover from that OOME in SLT, +- // see ShenandoahHeap::oom_during_evacuation(). It seems to be the lesser evil +- // to do there, because we cannot trigger Full GC right here, when we are +- // in another VMOperation. +- +- ShenandoahHeap* const heap = ShenandoahHeap::heap(); +- assert(heap->is_evacuation_in_progress(), "only when evacuating"); +- HeapWord* pll_addr = java_lang_ref_Reference::pending_list_lock_addr(); +- oop pll; +- if (UseCompressedOops) { +- pll = oopDesc::load_decode_heap_oop((narrowOop *)pll_addr); +- } else { +- pll = oopDesc::load_decode_heap_oop((oop*)pll_addr); +- } +- if (!oopDesc::is_null(pll) && heap->in_collection_set(pll)) { +- oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(pll); +- if (pll == fwd) { +- Thread *t = Thread::current(); +- heap->evacuate_object(pll, t); +- } +- } +- } +- +- MarkingCodeBlobClosure blobsCl(oops, CodeBlobToOopClosure::FixRelocations); +- CLDToOopClosure clds(oops); +- +- _serial_roots.oops_do(oops, worker_id); +- _dict_roots.oops_do(oops, worker_id); +- _thread_roots.oops_do(oops, NULL, NULL, worker_id); +- _cld_roots.cld_do(&clds, worker_id); +- _code_roots.code_blobs_do(&blobsCl, worker_id); +- +- _weak_roots.oops_do(oops, worker_id); +- _dedup_roots.oops_do(oops, worker_id); +- _string_table_roots.oops_do(oops, worker_id); +-} +- +-ShenandoahRootUpdater::ShenandoahRootUpdater(ShenandoahPhaseTimings::Phase phase) : +- ShenandoahRootProcessor(phase), +- _serial_roots(phase), +- _dict_roots(phase), +- _cld_roots(phase), +- _thread_roots(phase), +- _weak_roots(phase), +- _dedup_roots(phase), +- _string_table_roots(phase), +- _code_roots(phase) +-{} +- +-void 
ShenandoahRootUpdater::roots_do(uint worker_id, BoolObjectClosure* is_alive, OopClosure* keep_alive) { +- CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations); +- CLDToOopClosure clds(keep_alive); +- +- _serial_roots.oops_do(keep_alive, worker_id); +- _dict_roots.oops_do(keep_alive, worker_id); +- _thread_roots.oops_do(keep_alive, &clds, NULL, worker_id); +- _cld_roots.cld_do(&clds, worker_id); +- +- _code_roots.code_blobs_do(&update_blobs, worker_id); +- +- _weak_roots.weak_oops_do(is_alive, keep_alive, worker_id); +- _dedup_roots.oops_do(keep_alive, worker_id); +- _string_table_roots.oops_do(keep_alive, worker_id); +-} +- +- +-ShenandoahRootAdjuster::ShenandoahRootAdjuster(ShenandoahPhaseTimings::Phase phase) : +- ShenandoahRootProcessor(phase), +- _serial_roots(phase), +- _dict_roots(phase), +- _cld_roots(phase), +- _thread_roots(phase), +- _weak_roots(phase), +- _dedup_roots(phase), +- _string_table_roots(phase), +- _code_roots(phase) +-{ +- assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "Full GC only"); +-} +- +-void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) { +- CodeBlobToOopClosure adjust_code_closure(oops, CodeBlobToOopClosure::FixRelocations); +- CLDToOopClosure adjust_cld_closure(oops); +- +- _serial_roots.oops_do(oops, worker_id); +- _dict_roots.oops_do(oops, worker_id); +- _thread_roots.oops_do(oops, NULL, NULL, worker_id); +- _cld_roots.always_strong_cld_do(&adjust_cld_closure, worker_id); +- _code_roots.code_blobs_do(&adjust_code_closure, worker_id); +- +- _weak_roots.oops_do(oops, worker_id); +- _dedup_roots.oops_do(oops, worker_id); +- _string_table_roots.oops_do(oops, worker_id); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.hpp 2023-04-19 05:53:03.000000000 
+0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,262 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP +- +-#include "code/codeCache.hpp" +-#include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp" +-#include "memory/allocation.hpp" +-#include "memory/iterator.hpp" +-#include "memory/sharedHeap.hpp" +-#include "utilities/macros.hpp" +-#include "utilities/workgroup.hpp" +- +- +-class ShenandoahSerialRoot { +-public: +- typedef void (*OopsDo)(OopClosure*); +-private: +- volatile jint _claimed; +- const OopsDo _oops_do; +- const ShenandoahPhaseTimings::Phase _phase; +- const ShenandoahPhaseTimings::ParPhase _par_phase; +- +-public: +- ShenandoahSerialRoot(OopsDo oops_do, +- ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase); +- void oops_do(OopClosure* cl, uint worker_id); +-}; +- +-class ShenandoahSerialRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +- ShenandoahSerialRoot _universe_roots; +- ShenandoahSerialRoot _management_roots; +- ShenandoahSerialRoot _jvmti_roots; +- ShenandoahSerialRoot _jni_handle_roots; +- ShenandoahSerialRoot _flat_profiler_roots; +- ShenandoahSynchronizerIterator _om_iterator; +-public: +- ShenandoahSerialRoots(ShenandoahPhaseTimings::Phase phase); +- void oops_do(OopClosure* cl, uint worker_id); +-}; +- +-class ShenandoahSystemDictionaryRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +- volatile int _claimed; +-public: +- ShenandoahSystemDictionaryRoots(ShenandoahPhaseTimings::Phase phase); +- void strong_oops_do(OopClosure* oops, uint worker_id); +- void oops_do(OopClosure* oops, uint worker_id); +-}; +- +-class ShenandoahStringTableRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +-public: +- 
ShenandoahStringTableRoots(ShenandoahPhaseTimings::Phase phase); +- void oops_do(OopClosure* oops, uint worker_id); +-}; +- +-class ShenandoahThreadRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +-public: +- ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase); +- void oops_do(OopClosure* oops_cl, CLDClosure* cld_cl, CodeBlobClosure* code_cl, uint worker_id); +-}; +- +-class ShenandoahWeakRoot { +-public: +- typedef void (*WeakOopsDo)(BoolObjectClosure*, OopClosure*); +-private: +- const ShenandoahPhaseTimings::Phase _phase; +- const ShenandoahPhaseTimings::ParPhase _par_phase; +- volatile int _claimed; +- const WeakOopsDo _weak_oops_do; +- +-public: +- ShenandoahWeakRoot(ShenandoahPhaseTimings::Phase phase, ShenandoahPhaseTimings::ParPhase par_phase, WeakOopsDo oops_do); +- void oops_do(OopClosure* keep_alive, uint worker_id); +- void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id); +-}; +- +-class ShenandoahWeakRoots { +-private: +- JFR_ONLY(ShenandoahWeakRoot _jfr_weak_roots;) +- ShenandoahWeakRoot _jni_weak_roots; +-public: +- ShenandoahWeakRoots(ShenandoahPhaseTimings::Phase phase); +- void oops_do(OopClosure* keep_alive, uint worker_id); +- void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id); +-}; +- +-class ShenandoahStringDedupRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +-public: +- ShenandoahStringDedupRoots(ShenandoahPhaseTimings::Phase phase); +- void oops_do(OopClosure* oops, uint worker_id); +-}; +- +-template +-class ShenandoahCodeCacheRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +- ITR _coderoots_iterator; +-public: +- ShenandoahCodeCacheRoots(ShenandoahPhaseTimings::Phase phase); +- void code_blobs_do(CodeBlobClosure* blob_cl, uint worker_id); +-}; +- +-class ShenandoahClassLoaderDataRoots { +-private: +- const ShenandoahPhaseTimings::Phase _phase; +-public: +- 
ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase); +- +- void always_strong_cld_do(CLDClosure* clds, uint worker_id); +- void cld_do(CLDClosure* clds, uint worker_id); +-}; +- +-class ShenandoahRootProcessor : public StackObj { +-private: +- SharedHeap::StrongRootsScope _srs; +- ShenandoahHeap* const _heap; +- const ShenandoahPhaseTimings::Phase _phase; +-public: +- ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase); +- ~ShenandoahRootProcessor(); +- +- ShenandoahHeap* heap() const { return _heap; } +-}; +- +-template +-class ShenandoahRootScanner : public ShenandoahRootProcessor { +-private: +- ShenandoahSerialRoots _serial_roots; +- ShenandoahSystemDictionaryRoots _dict_roots; +- ShenandoahClassLoaderDataRoots _cld_roots; +- ShenandoahThreadRoots _thread_roots; +- ShenandoahWeakRoots _weak_roots; +- ShenandoahStringDedupRoots _dedup_roots; +- ShenandoahStringTableRoots _string_table_roots; +- ShenandoahCodeCacheRoots _code_roots; +-public: +- ShenandoahRootScanner(ShenandoahPhaseTimings::Phase phase); +- +- // Apply oops, clds and blobs to all strongly reachable roots in the system, +- // during class unloading cycle +- void strong_roots_do(uint worker_id, OopClosure* cl); +- void strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code); +- +- // Apply oops, clds and blobs to all strongly reachable roots and weakly reachable +- // roots when class unloading is disabled during this cycle +- void roots_do(uint worker_id, OopClosure* cl); +- void roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code); +-}; +- +-typedef ShenandoahRootScanner ShenandoahAllRootScanner; +-typedef ShenandoahRootScanner ShenandoahCSetRootScanner; +- +-// This scanner is only for SH::object_iteration() and only supports single-threaded +-// root scanning +-class ShenandoahHeapIterationRootScanner : public ShenandoahRootProcessor { +-private: +- ShenandoahSerialRoots _serial_roots; +- 
ShenandoahSystemDictionaryRoots _dict_roots; +- ShenandoahThreadRoots _thread_roots; +- ShenandoahClassLoaderDataRoots _cld_roots; +- ShenandoahWeakRoots _weak_roots; +- ShenandoahStringDedupRoots _dedup_roots; +- ShenandoahStringTableRoots _string_table_roots; +- ShenandoahCodeCacheRoots _code_roots; +- +-public: +- ShenandoahHeapIterationRootScanner(); +- +- void roots_do(OopClosure* cl); +-}; +- +-// Evacuate all roots at a safepoint +-class ShenandoahRootEvacuator : public ShenandoahRootProcessor { +-private: +- ShenandoahSerialRoots _serial_roots; +- ShenandoahSystemDictionaryRoots _dict_roots; +- ShenandoahClassLoaderDataRoots _cld_roots; +- ShenandoahThreadRoots _thread_roots; +- ShenandoahWeakRoots _weak_roots; +- ShenandoahStringDedupRoots _dedup_roots; +- ShenandoahStringTableRoots _string_table_roots; +- ShenandoahCodeCacheRoots +- _code_roots; +- +-public: +- ShenandoahRootEvacuator(ShenandoahPhaseTimings::Phase phase); +- +- void roots_do(uint worker_id, OopClosure* oops); +-}; +- +-// Update all roots at a safepoint +-class ShenandoahRootUpdater : public ShenandoahRootProcessor { +-private: +- ShenandoahSerialRoots _serial_roots; +- ShenandoahSystemDictionaryRoots _dict_roots; +- ShenandoahClassLoaderDataRoots _cld_roots; +- ShenandoahThreadRoots _thread_roots; +- ShenandoahWeakRoots _weak_roots; +- ShenandoahStringDedupRoots _dedup_roots; +- ShenandoahStringTableRoots _string_table_roots; +- ShenandoahCodeCacheRoots +- _code_roots; +- +-public: +- ShenandoahRootUpdater(ShenandoahPhaseTimings::Phase phase); +- void roots_do(uint worker_id, BoolObjectClosure* is_alive, OopClosure* keep_alive); +-}; +- +-// Adjuster all roots at a safepoint during full gc +-class ShenandoahRootAdjuster : public ShenandoahRootProcessor { +-private: +- ShenandoahSerialRoots _serial_roots; +- ShenandoahSystemDictionaryRoots _dict_roots; +- ShenandoahClassLoaderDataRoots _cld_roots; +- ShenandoahThreadRoots _thread_roots; +- ShenandoahWeakRoots _weak_roots; +- 
ShenandoahStringDedupRoots _dedup_roots; +- ShenandoahStringTableRoots _string_table_roots; +- ShenandoahCodeCacheRoots +- _code_roots; +- +-public: +- ShenandoahRootAdjuster(ShenandoahPhaseTimings::Phase phase); +- +- void roots_do(uint worker_id, OopClosure* oops); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootProcessor.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,96 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "memory/resourceArea.hpp" +-#include "runtime/safepoint.hpp" +- +-template +-ShenandoahCodeCacheRoots::ShenandoahCodeCacheRoots(ShenandoahPhaseTimings::Phase phase) : +- _phase(phase) +-{} +- +-template +-void ShenandoahCodeCacheRoots::code_blobs_do(CodeBlobClosure* blob_cl, uint worker_id) { +- ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id); +- _coderoots_iterator.possibly_parallel_blobs_do(blob_cl); +-} +- +-template +-ShenandoahRootScanner::ShenandoahRootScanner(ShenandoahPhaseTimings::Phase phase) : +- ShenandoahRootProcessor(phase), +- _serial_roots(phase), +- _dict_roots(phase), +- _cld_roots(phase), +- _thread_roots(phase), +- _weak_roots(phase), +- _dedup_roots(phase), +- _string_table_roots(phase), +- _code_roots(phase) +-{ } +- +-template +-void ShenandoahRootScanner::roots_do(uint worker_id, OopClosure* oops) { +- CLDToOopClosure clds_cl(oops); +- MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations); +- roots_do(worker_id, oops, &clds_cl, &blobs_cl); +-} +- +-template +-void ShenandoahRootScanner::strong_roots_do(uint worker_id, OopClosure* oops) { +- CLDToOopClosure clds_cl(oops); +- MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations); +- strong_roots_do(worker_id, oops, &clds_cl, &blobs_cl); +-} +- +-template +-void ShenandoahRootScanner::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code) { +- assert(!ShenandoahHeap::heap()->unload_classes(), +- "No class unloading"); +- ResourceMark rm; +- _serial_roots.oops_do(oops, worker_id); +- _dict_roots.oops_do(oops, worker_id); 
+- _thread_roots.oops_do(oops, clds, code, worker_id); +- _cld_roots.cld_do(clds, worker_id); +- _weak_roots.oops_do(oops, worker_id); +- _string_table_roots.oops_do(oops, worker_id); +- _dedup_roots.oops_do(oops, worker_id); +-} +- +-template +-void ShenandoahRootScanner::strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code) { +- assert(ShenandoahHeap::heap()->unload_classes(), "Should be used during class unloading"); +- ResourceMark rm; +- AlwaysTrueClosure always_true; +- +- _serial_roots.oops_do(oops, worker_id); +- _dict_roots.strong_oops_do(oops, worker_id); +- _cld_roots.always_strong_cld_do(clds, worker_id); +- _thread_roots.oops_do(oops, clds, code, worker_id); +-} +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,139 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +- +-#include "precompiled.hpp" +- +- +-#include "classfile/classLoaderData.hpp" +-#include "classfile/systemDictionary.hpp" +-#include "code/codeCache.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootVerifier.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" +-#include "memory/universe.hpp" +-#include "runtime/fprofiler.hpp" +-#include "runtime/thread.hpp" +-#include "services/management.hpp" +-#include "utilities/debug.hpp" +- +-ShenandoahRootVerifier::ShenandoahRootVerifier() : _types(AllRoots) { +-} +- +-void ShenandoahRootVerifier::excludes(RootTypes types) { +- _types = static_cast(static_cast(_types) & (~static_cast(types))); +-} +- +-bool ShenandoahRootVerifier::verify(RootTypes type) const { +- return (_types & type) != 0; +-} +- +-void ShenandoahRootVerifier::oops_do(OopClosure* oops) { +- CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations); +- if (verify(CodeRoots)) { +- CodeCache::blobs_do(&blobs); +- } +- +- if (verify(CLDGRoots)) { +- CLDToOopClosure clds(oops, false /* must_claim */); +- ClassLoaderDataGraph::cld_do(&clds); +- } +- +- if (verify(SerialRoots)) { +- Universe::oops_do(oops); +- FlatProfiler::oops_do(oops); +- Management::oops_do(oops); +- JvmtiExport::oops_do(oops); +- JNIHandles::oops_do(oops); +- ObjectSynchronizer::oops_do(oops); +- SystemDictionary::oops_do(oops); +- StringTable::oops_do(oops); +- } +- +- if (verify(WeakRoots)) { +- 
AlwaysTrueClosure always_true; +- JNIHandles::weak_oops_do(&always_true, oops); +- } +- +- if (ShenandoahStringDedup::is_enabled() && verify(StringDedupRoots)) { +- ShenandoahStringDedup::oops_do_slow(oops); +- } +- +- if (verify(ThreadRoots)) { +- // Do thread roots the last. This allows verification code to find +- // any broken objects from those special roots first, not the accidental +- // dangling reference from the thread root. +- CLDToOopClosure clds(oops, false /* must_claim */); +- Threads::possibly_parallel_oops_do(oops, &clds, &blobs); +- } +-} +- +-void ShenandoahRootVerifier::roots_do(OopClosure* oops) { +- CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations); +- CodeCache::blobs_do(&blobs); +- +- CLDToOopClosure clds(oops, false /* must_claim */); +- ClassLoaderDataGraph::cld_do(&clds); +- +- Universe::oops_do(oops); +- Management::oops_do(oops); +- JvmtiExport::oops_do(oops); +- JNIHandles::oops_do(oops); +- ObjectSynchronizer::oops_do(oops); +- SystemDictionary::oops_do(oops); +- FlatProfiler::oops_do(oops); +- StringTable::oops_do(oops); +- +- JNIHandles::weak_oops_do(oops); +- StringTable::oops_do(oops); +- +- if (ShenandoahStringDedup::is_enabled()) { +- ShenandoahStringDedup::oops_do_slow(oops); +- } +- +- // Do thread roots the last. This allows verification code to find +- // any broken objects from those special roots first, not the accidental +- // dangling reference from the thread root. 
+- Threads::possibly_parallel_oops_do(oops, &clds, &blobs); +-} +- +-void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) { +- CodeBlobToOopClosure blobs(oops, !CodeBlobToOopClosure::FixRelocations); +- +- CLDToOopClosure clds(oops, false /* must_claim */); +- ClassLoaderDataGraph::roots_cld_do(&clds, NULL); +- +- Universe::oops_do(oops); +- Management::oops_do(oops); +- JvmtiExport::oops_do(oops); +- JNIHandles::oops_do(oops); +- ObjectSynchronizer::oops_do(oops); +- SystemDictionary::oops_do(oops); +- FlatProfiler::oops_do(oops); +- +- // Do thread roots the last. This allows verification code to find +- // any broken objects from those special roots first, not the accidental +- // dangling reference from the thread root. +- Threads::possibly_parallel_oops_do(oops, &clds, &blobs); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRootVerifier.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,58 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP +-#define SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP +- +-#include "memory/allocation.hpp" +-#include "memory/iterator.hpp" +- +-class ShenandoahRootVerifier : public StackObj { +-public: +- enum RootTypes { +- SerialRoots = 1 << 0, +- ThreadRoots = 1 << 1, +- CodeRoots = 1 << 2, +- CLDGRoots = 1 << 3, +- WeakRoots = 1 << 4, +- StringDedupRoots = 1 << 5, +- AllRoots = (SerialRoots | ThreadRoots | CodeRoots | CLDGRoots | WeakRoots | StringDedupRoots) +- }; +- +-private: +- RootTypes _types; +- +-public: +- ShenandoahRootVerifier(); +- +- void excludes(RootTypes types); +- void oops_do(OopClosure* cl); +- +- // Used to seed ShenandoahVerifier, do not honor root type filter +- void roots_do(OopClosure* cl); +- void strong_roots_do(OopClosure* cl); +-private: +- bool verify(RootTypes type) const; +-}; +- +-#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,69 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" +-#include "runtime/interfaceSupport.hpp" +-#include "oops/oop.inline.hpp" +- +-void ShenandoahRuntime::arraycopy_barrier_oop_entry(oop* src, oop* dst, size_t length) { +- ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); +- bs->arraycopy_barrier(src, dst, length); +-} +- +-void ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length) { +- ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); +- bs->arraycopy_barrier(src, dst, length); +-} +- +-// Shenandoah pre write barrier slowpath +-JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread)) +- assert(orig != NULL, "should be optimized out"); +- shenandoah_assert_correct(NULL, orig); +- // store the original value that was in the 
field reference +- assert(thread->satb_mark_queue().is_active(), "Shouldn't be here otherwise"); +- thread->satb_mark_queue().enqueue_known_active(orig); +-JRT_END +- +-JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier(oopDesc* src, oop* load_addr)) +- return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr); +-JRT_END +- +-JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_narrow(oopDesc* src, narrowOop* load_addr)) +- return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr); +-JRT_END +- +-IRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_interpreter(oopDesc* src)) +- oop result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(oop(src)); +- return (oopDesc*) result; +-IRT_END +- +-// Shenandoah clone barrier: makes sure that references point to to-space +-// in cloned objects. +-JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* src)) +- oop s = oop(src); +- shenandoah_assert_correct(NULL, s); +- ShenandoahBarrierSet::barrier_set()->clone_barrier_runtime(s); +-JRT_END +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahRuntime.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,48 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP +- +-#include "memory/allocation.hpp" +-#include "oops/oopsHierarchy.hpp" +- +-class HeapWord; +-class JavaThread; +-class oopDesc; +- +-class ShenandoahRuntime : public AllStatic { +-public: +- static void arraycopy_barrier_oop_entry(oop* src, oop* dst, size_t length); +- static void arraycopy_barrier_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length); +- +- static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread); +- +- static oopDesc* load_reference_barrier(oopDesc* src, oop* load_addr); +- static oopDesc* load_reference_barrier_narrow(oopDesc* src, narrowOop* load_addr); +- static oopDesc* load_reference_barrier_interpreter(oopDesc* src); +- +- static void shenandoah_clone_barrier(oopDesc* src); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSharedVariables.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSharedVariables.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSharedVariables.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSharedVariables.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,247 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDFLAG_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDFLAG_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "memory/allocation.hpp" +-#include "runtime/orderAccess.hpp" +- +-typedef jbyte ShenandoahSharedValue; +- +-typedef struct ShenandoahSharedFlag { +- enum { +- UNSET = 0, +- SET = 1 +- }; +- +- shenandoah_padding(0); +- volatile ShenandoahSharedValue value; +- shenandoah_padding(1); +- +- ShenandoahSharedFlag() { +- // Needed for cooperation with generated code. 
+- STATIC_ASSERT(sizeof(ShenandoahSharedValue) == 1); +- +- unset(); +- } +- +- void set() { +- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET); +- } +- +- void unset() { +- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET); +- } +- +- bool is_set() const { +- return OrderAccess::load_acquire((volatile ShenandoahSharedValue*) &value) == SET; +- } +- +- bool is_unset() const { +- return OrderAccess::load_acquire((volatile ShenandoahSharedValue*) &value) == UNSET; +- } +- +- void set_cond(bool val) { +- if (val) { +- set(); +- } else { +- unset(); +- } +- } +- +- bool try_set() { +- if (is_set()) { +- return false; +- } +- ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)SET, &value, (ShenandoahSharedValue)UNSET); +- return old == UNSET; // success +- } +- +- bool try_unset() { +- if (!is_set()) { +- return false; +- } +- ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)UNSET, &value, (ShenandoahSharedValue)SET); +- return old == SET; // success +- } +- +- volatile ShenandoahSharedValue* addr_of() { +- return &value; +- } +- +-private: +- volatile ShenandoahSharedValue* operator&() { +- fatal("Use addr_of() instead"); +- return NULL; +- } +- +- bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator!=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator> (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator>=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator< (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator<=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- +-} ShenandoahSharedFlag; +- +-typedef struct ShenandoahSharedBitmap { +- shenandoah_padding(0); +- volatile ShenandoahSharedValue value; +- shenandoah_padding(1); +- +- 
ShenandoahSharedBitmap() { +- clear(); +- } +- +- void set(uint mask) { +- assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); +- ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; +- while (true) { +- ShenandoahSharedValue ov = OrderAccess::load_acquire(&value); +- if ((ov & mask_val) != 0) { +- // already set +- return; +- } +- +- ShenandoahSharedValue nv = ov | mask_val; +- if (Atomic::cmpxchg(nv, &value, ov) == ov) { +- // successfully set +- return; +- } +- } +- } +- +- void unset(uint mask) { +- assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); +- ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; +- while (true) { +- ShenandoahSharedValue ov = OrderAccess::load_acquire(&value); +- if ((ov & mask_val) == 0) { +- // already unset +- return; +- } +- +- ShenandoahSharedValue nv = ov & ~mask_val; +- if (Atomic::cmpxchg(nv, &value, ov) == ov) { +- // successfully unset +- return; +- } +- } +- } +- +- void clear() { +- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0); +- } +- +- bool is_set(uint mask) const { +- return !is_unset(mask); +- } +- +- bool is_unset(uint mask) const { +- assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); +- return (OrderAccess::load_acquire((volatile ShenandoahSharedValue*)&value) & (ShenandoahSharedValue) mask) == 0; +- } +- +- bool is_clear() const { +- return (OrderAccess::load_acquire((volatile ShenandoahSharedValue*)&value)) == 0; +- } +- +- void set_cond(uint mask, bool val) { +- if (val) { +- set(mask); +- } else { +- unset(mask); +- } +- } +- +- volatile ShenandoahSharedValue* addr_of() { +- return &value; +- } +- +- ShenandoahSharedValue raw_value() { +- return value; +- } +- +-private: +- volatile ShenandoahSharedValue* operator&() { +- fatal("Use addr_of() instead"); +- return NULL; +- } +- +- bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator!=(ShenandoahSharedFlag& 
other) { fatal("Use is_set() instead"); return false; } +- bool operator> (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator>=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator< (ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- bool operator<=(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; } +- +-} ShenandoahSharedBitmap; +- +-template +-struct ShenandoahSharedEnumFlag { +- shenandoah_padding(0); +- volatile ShenandoahSharedValue value; +- shenandoah_padding(1); +- +- ShenandoahSharedEnumFlag() { +- value = 0; +- } +- +- void set(T v) { +- assert (v >= 0, "sanity"); +- assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); +- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v); +- } +- +- T get() const { +- return (T)OrderAccess::load_acquire((volatile ShenandoahSharedValue*) &value); +- } +- +- T cmpxchg(T new_value, T expected) { +- assert (new_value >= 0, "sanity"); +- assert (new_value < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); +- return (T)Atomic::cmpxchg((ShenandoahSharedValue)new_value, &value, (ShenandoahSharedValue)expected); +- } +- +- volatile ShenandoahSharedValue* addr_of() { +- return &value; +- } +- +-private: +- volatile T* operator&() { +- fatal("Use addr_of() instead"); +- return NULL; +- } +- +- bool operator==(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; } +- bool operator!=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; } +- bool operator> (ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; } +- bool operator>=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; } +- bool operator< (ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; } +- bool operator<=(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); 
return false; } +- +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSHAREDFLAG_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,49 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAH_SPECIALIZED_OOP_CLOSURES_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAH_SPECIALIZED_OOP_CLOSURES_HPP +- +-class ShenandoahMarkUpdateRefsClosure; +-class ShenandoahMarkUpdateRefsDedupClosure; +-class ShenandoahMarkUpdateRefsMetadataClosure; +-class ShenandoahMarkUpdateRefsMetadataDedupClosure; +-class ShenandoahMarkRefsClosure; +-class ShenandoahMarkRefsDedupClosure; +-class ShenandoahMarkRefsMetadataClosure; +-class ShenandoahMarkRefsMetadataDedupClosure; +-class ShenandoahUpdateHeapRefsClosure; +- +-#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(f) \ +- f(ShenandoahMarkUpdateRefsClosure,_nv) \ +- f(ShenandoahMarkUpdateRefsMetadataClosure,_nv) \ +- f(ShenandoahMarkRefsClosure,_nv) \ +- f(ShenandoahMarkRefsMetadataClosure,_nv) \ +- f(ShenandoahUpdateHeapRefsClosure,_nv) \ +- f(ShenandoahMarkUpdateRefsDedupClosure,_nv) \ +- f(ShenandoahMarkUpdateRefsMetadataDedupClosure,_nv) \ +- f(ShenandoahMarkRefsDedupClosure,_nv) \ +- f(ShenandoahMarkRefsMetadataDedupClosure,_nv) +- +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAH_SPECIALIZED_OOP_CLOSURES_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,193 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" +-#include "memory/allocation.inline.hpp" +-#include "runtime/atomic.hpp" +- +-ShenandoahStrDedupQueue::ShenandoahStrDedupQueue(ShenandoahStrDedupQueueSet* queue_set, uint num) : +- _queue_set(queue_set), _current_list(NULL), _queue_num(num) { +- assert(num < _queue_set->num_queues(), "Not valid queue number"); +-} +- +-ShenandoahStrDedupQueue::~ShenandoahStrDedupQueue() { +- if (_current_list != NULL) { +- delete _current_list; +- } +-} +- +-void ShenandoahStrDedupQueue::oops_do(OopClosure* cl) { +- if (_current_list != NULL) { +- _current_list->oops_do(cl); +- } +-} +- +-ShenandoahStrDedupQueueSet::ShenandoahStrDedupQueueSet(uint n) : +- _num_queues(n), _free_list(NULL), _num_free_queues(0), _terminated(false), _claimed(0) { +- _lock = new Monitor(Mutex::leaf, "ShenandoahStrDedupQueueLock", false); +- +- _local_queues = NEW_C_HEAP_ARRAY(ShenandoahStrDedupQueue*, num_queues(), mtGC); +- _outgoing_work_list = NEW_C_HEAP_ARRAY(QueueChunkedList*, num_queues(), mtGC); +- +- for (uint index 
= 0; index < num_queues(); index ++) { +- _local_queues[index] = new ShenandoahStrDedupQueue(this, index); +- _outgoing_work_list[index] = NULL; +- } +-} +- +-ShenandoahStrDedupQueueSet::~ShenandoahStrDedupQueueSet() { +- QueueChunkedList* q; +- QueueChunkedList* tmp; +- +- for (uint index = 0; index < num_queues_nv(); index ++) { +- if (_local_queues[index] != NULL) { +- delete _local_queues[index]; +- } +- +- q = _outgoing_work_list[index]; +- while (q != NULL) { +- tmp = q; +- q = q->next(); +- delete tmp; +- } +- } +- +- q = _free_list; +- while (q != NULL) { +- tmp = q; +- q = tmp->next(); +- delete tmp; +- } +- +- FREE_C_HEAP_ARRAY(ShenandoahStrDedupQueue*, _local_queues, mtGC); +- FREE_C_HEAP_ARRAY(QueueChunkedList*, _outgoing_work_list, mtGC); +- +- delete _lock; +-} +- +-size_t ShenandoahStrDedupQueueSet::claim() { +- size_t index = (size_t)Atomic::add(1, (volatile jint*)&_claimed) - 1; +- return index; +-} +- +-void ShenandoahStrDedupQueueSet::parallel_oops_do(OopClosure* cl) { +- assert(cl != NULL, "No closure"); +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- size_t claimed_index; +- while ((claimed_index = claim()) < num_queues()) { +- queue_at(claimed_index)->oops_do(cl); +- QueueChunkedList* head = _outgoing_work_list[claimed_index]; +- while (head != NULL) { +- head->oops_do(cl); +- head = head->next(); +- } +- } +-} +- +-void ShenandoahStrDedupQueueSet::oops_do_slow(OopClosure* cl) { +- assert(cl != NULL, "No closure"); +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- for (size_t index = 0; index < num_queues(); index ++) { +- queue_at(index)->oops_do(cl); +- QueueChunkedList* head = _outgoing_work_list[index]; +- while (head != NULL) { +- head->oops_do(cl); +- head = head->next(); +- } +- } +-} +- +-void ShenandoahStrDedupQueueSet::terminate() { +- MonitorLockerEx locker(_lock, Mutex::_no_safepoint_check_flag); +- _terminated = true; +- locker.notify_all(); +-} +- +-void 
ShenandoahStrDedupQueueSet::release_chunked_list(QueueChunkedList* q) { +- assert(q != NULL, "null queue"); +- MutexLockerEx locker(lock(), Mutex::_no_safepoint_check_flag); +- if (_num_free_queues >= 2 * num_queues()) { +- delete q; +- } else { +- q->set_next(_free_list); +- _free_list = q; +- _num_free_queues ++; +- } +-} +- +-QueueChunkedList* ShenandoahStrDedupQueueSet::allocate_no_lock() { +- assert_lock_strong(lock()); +- +- if (_free_list != NULL) { +- QueueChunkedList* q = _free_list; +- _free_list = _free_list->next(); +- _num_free_queues --; +- q->reset(); +- return q; +- } else { +- return new QueueChunkedList(); +- } +-} +- +-QueueChunkedList* ShenandoahStrDedupQueueSet::allocate_chunked_list() { +- MutexLockerEx locker(_lock, Mutex::_no_safepoint_check_flag); +- return allocate_no_lock(); +-} +- +-QueueChunkedList* ShenandoahStrDedupQueueSet::push_and_get_atomic(QueueChunkedList* q, uint queue_num) { +- QueueChunkedList* head = _outgoing_work_list[queue_num]; +- QueueChunkedList* result; +- q->set_next(head); +- while ((result = (QueueChunkedList*)Atomic::cmpxchg_ptr(q, &_outgoing_work_list[queue_num], head)) != head) { +- head = result; +- q->set_next(head); +- } +- +- { +- MutexLockerEx locker(lock(), Mutex::_no_safepoint_check_flag); +- q = allocate_no_lock(); +- lock()->notify(); +- } +- return q; +-} +- +-QueueChunkedList* ShenandoahStrDedupQueueSet::remove_work_list_atomic(uint queue_num) { +- assert(queue_num < num_queues(), "Invalid queue number"); +- +- QueueChunkedList* list = _outgoing_work_list[queue_num]; +- QueueChunkedList* result; +- while ((result = (QueueChunkedList*)Atomic::cmpxchg_ptr((QueueChunkedList*)NULL, &_outgoing_work_list[queue_num], list)) != list) { +- list = result; +- } +- +- return list; +-} +- +-void ShenandoahStrDedupQueueSet::parallel_cleanup() { +- ShenandoahStrDedupQueueCleanupClosure cl; +- parallel_oops_do(&cl); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.hpp 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,159 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "memory/iterator.hpp" +-#include "oops/oop.hpp" +-#include "runtime/mutex.hpp" +- +-template +-class ShenandoahStrDedupChunkedList : public CHeapObj { +-private: +- oop _oops[SIZE]; +- ShenandoahStrDedupChunkedList* _next; +- uint _index; +- +-public: +- ShenandoahStrDedupChunkedList() : _next(NULL), _index(0) { } +- +- inline bool is_full() const { return _index == SIZE; } +- inline bool is_empty() const { return _index == 0; } +- inline void push(oop obj) { assert(!is_full(), "List is full"); _oops[_index ++] = obj; } +- inline oop pop() { assert(!is_empty(), "List is empty"); return _oops[--_index]; } +- inline size_t size() const { return _index; } +- inline void reset() { +- _index = 0; +- _next = NULL; +- } +- +- void set_next(ShenandoahStrDedupChunkedList* q) { _next = q; } +- ShenandoahStrDedupChunkedList* next() const { return _next; } +- +- void oops_do(OopClosure* cl) { +- assert(cl != NULL, "null closure"); +- for (uint index = 0; index < size(); index ++) { +- cl->do_oop(&_oops[index]); +- } +- } +-}; +- +-class ShenandoahStrDedupQueueSet; +- +-typedef ShenandoahStrDedupChunkedList<64> QueueChunkedList; +- +-class ShenandoahStrDedupQueue : public CHeapObj { +-private: +- ShenandoahStrDedupQueueSet* _queue_set; +- QueueChunkedList* _current_list; +- uint _queue_num; +- +-public: +- ShenandoahStrDedupQueue(ShenandoahStrDedupQueueSet* queue_set, uint num); +- ~ShenandoahStrDedupQueue(); +- +- uint queue_num() const { return _queue_num; } +- inline void push(oop java_string); +- void oops_do(OopClosure* cl); +-}; +- +-class ShenandoahStrDedupThread; +- +-class ShenandoahStrDedupQueueSet : public CHeapObj { +- friend class ShenandoahStrDedupQueue; +- friend class ShenandoahStrDedupThread; +- +-private: +- ShenandoahStrDedupQueue** 
_local_queues; +- uint _num_queues; +- QueueChunkedList* volatile * _outgoing_work_list; +- +- QueueChunkedList* _free_list; +- uint _num_free_queues; +- +- Monitor* _lock; +- +- bool _terminated; +- +- volatile size_t _claimed; +- +-public: +- ShenandoahStrDedupQueueSet(uint n); +- ~ShenandoahStrDedupQueueSet(); +- +- uint num_queues() const { return num_queues_nv(); } +- +- ShenandoahStrDedupQueue* queue_at(size_t index) { +- assert(index < num_queues(), "Index out of bound"); +- return _local_queues[index]; +- } +- +- void clear_claimed() { _claimed = 0; } +- void parallel_cleanup(); +- void parallel_oops_do(OopClosure* cl); +- +- // For verification only +- void oops_do_slow(OopClosure* cl); +- +- void terminate(); +- bool has_terminated() { +- return _terminated; +- } +- +-private: +- inline uint num_queues_nv() const { return _num_queues; } +- +- void release_chunked_list(QueueChunkedList* l); +- +- QueueChunkedList* allocate_chunked_list(); +- QueueChunkedList* allocate_no_lock(); +- +- // Atomic publish and retrieve outgoing work list. +- // We don't have ABA problem, since there is only one dedup thread. 
+- QueueChunkedList* push_and_get_atomic(QueueChunkedList* q, uint queue_num); +- QueueChunkedList* remove_work_list_atomic(uint queue_num); +- +- Monitor* lock() const { return _lock; } +- +- size_t claim(); +-}; +- +-class ShenandoahStrDedupQueueCleanupClosure : public OopClosure { +-private: +- ShenandoahHeap* _heap; +- ShenandoahMarkingContext* const _mark_context; +- +- template +- inline void do_oop_work(T* p); +-public: +- ShenandoahStrDedupQueueCleanupClosure() : _heap(ShenandoahHeap::heap()), +- _mark_context(ShenandoahHeap::heap()->marking_context()) { +- } +- +- inline void do_oop(oop* p) { do_oop_work(p); } +- inline void do_oop(narrowOop* p) { do_oop_work(p); } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,53 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupQueue.hpp" +- +-void ShenandoahStrDedupQueue::push(oop java_string) { +- if (_current_list == NULL) { +- _current_list = _queue_set->allocate_chunked_list(); +- } else if (_current_list->is_full()) { +- _current_list = _queue_set->push_and_get_atomic(_current_list, queue_num()); +- } +- +- assert(_current_list != NULL && !_current_list->is_full(), "Sanity"); +- _current_list->push(java_string); +-} +- +-template +-void ShenandoahStrDedupQueueCleanupClosure::do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (! 
oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- assert(_heap->is_in(obj), "Must be in the heap"); +- if (!_mark_context->is_marked(obj)) { +- oopDesc::encode_store_heap_oop(p, oop()); +- } +- } +-} +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPQUEUE_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,504 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "classfile/altHashing.hpp" +-#include "classfile/javaClasses.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupTable.hpp" +-#include "memory/allocation.hpp" +-#include "runtime/atomic.hpp" +-#include "runtime/safepoint.hpp" +-#include "runtime/vmThread.hpp" +- +-const size_t ShenandoahStrDedupTable::_min_size = (1 << 10); // 1024 +-const size_t ShenandoahStrDedupTable::_max_size = (1 << 24); // 16777216 +-const double ShenandoahStrDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load +-const double ShenandoahStrDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load +-const double ShenandoahStrDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size +-const uintx ShenandoahStrDedupTable::_rehash_multiple = 60; // Hash bucket has 60 times more collisions than expected +-const uintx ShenandoahStrDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor); +- +-bool ShenandoahStrDedupEntry::cas_set_next(ShenandoahStrDedupEntry* next) { +- return Atomic::cmpxchg_ptr(next, &_next, (ShenandoahStrDedupEntry*)NULL) == NULL; +-} +- +-ShenandoahStrDedupTable::ShenandoahStrDedupTable(size_t size, jint hash_seed) : +- _size(size), _hash_seed(hash_seed), _entries(0), _claimed(0), _partition_size(0), +- _rehash_needed(false), _shrink_threshold((uintx)(size * _shrink_load_factor)), +- _grow_threshold((uintx)(size * _grow_load_factor)) +-{ +- assert(size >= _min_size && size <= _max_size, "Invalid table size"); +- _buckets = NEW_C_HEAP_ARRAY(ShenandoahStrDedupEntry* volatile, size, mtGC); +- for (size_t index = 0; index < size; index ++) { +- _buckets[index] = NULL; +- } +-} +- 
+-ShenandoahStrDedupTable::~ShenandoahStrDedupTable() { +- for (size_t index = 0; index < size(); index ++) { +- ShenandoahStrDedupEntry* volatile head = bucket(index); +- ShenandoahStrDedupEntry* volatile tmp; +- while (head != NULL) { +- tmp = head; +- head = head->next(); +- release_entry(tmp); +- } +- } +-} +- +-typeArrayOop ShenandoahStrDedupTable::lookup_or_add(typeArrayOop value, unsigned int hash, uintx& count) { +- ShenandoahStrDedupEntry* volatile* head_addr = bucket_addr(hash_to_index(hash)); +- count = 0; +- ShenandoahStrDedupEntry* new_entry = NULL; +- if (*head_addr == NULL) { +- new_entry = allocate_entry(value, hash); +- if (Atomic::cmpxchg_ptr(new_entry, head_addr, (ShenandoahStrDedupEntry*)NULL) == NULL) { +- Atomic::inc((volatile jint*)&_entries); +- return value; +- } +- } +- +- ShenandoahStrDedupEntry* volatile head = *head_addr; +- assert(head != NULL, "Should not be null"); +- +- while (head != NULL) { +- if (head->equals(value, hash)) { +- if (new_entry != NULL) release_entry(new_entry); +- return head->obj(); +- } else if (head->next() == NULL) { +- if (new_entry == NULL) new_entry = allocate_entry(value, hash); +- if (head->cas_set_next(new_entry)) { +- Atomic::inc((volatile jint*)&_entries); +- return value; +- } +- } +- +- count ++; +- head = head->next(); +- assert(head != NULL, "Should not be null"); +- } +- +- // Should have found existing one or added new one +- ShouldNotReachHere(); +- return NULL; +-} +- +-void ShenandoahStrDedupTable::add(ShenandoahStrDedupEntry* entry) { +- assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint"); +- assert(!use_java_hash(), "Only used when rehashing the table"); +- unsigned int hash = alt_hash_code(entry->obj()); +- entry->set_hash(hash); +- +- ShenandoahStrDedupEntry* volatile* head_addr = bucket_addr(hash_to_index(hash)); +- if (*head_addr == NULL) { +- if (Atomic::cmpxchg_ptr(entry, head_addr, (ShenandoahStrDedupEntry*)NULL) == NULL) { +- return; +- } +- } +- +- 
ShenandoahStrDedupEntry* volatile head = *head_addr; +- assert(head != NULL, "Should not be null"); +- +- while (head != NULL) { +- if (head->next() == NULL && (head->cas_set_next(entry))) { +- return; +- } +- +- head = head->next(); +- // Some one beats us +- assert(head != NULL, "Should not be null"); +- } +-} +- +-bool ShenandoahStrDedupTable::deduplicate(oop java_string) { +- assert(java_lang_String::is_instance(java_string), "Must be a string"); +- +- typeArrayOop value = java_lang_String::value(java_string); +- if (value == NULL) { +- return false; +- } +- +- unsigned int hash = hash_code(java_string, value); +- +- uintx count = 0; +- typeArrayOop existing_value = lookup_or_add(value, hash, count); +- assert(existing_value != NULL, "Must have found or added"); +- if (count > _rehash_threshold) { +- _rehash_needed = true; +- } +- +- if (existing_value == value) { +- return false; +- } +- +- // Enqueue the reference to make sure it is kept alive. Concurrent mark might +- // otherwise declare it dead if there are no other strong references to this object. 
+- ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); +- bs->keep_alive_barrier(existing_value); +- +- // Existing value found, deduplicate string +- java_lang_String::set_value(java_string, typeArrayOop(existing_value)); +- return true; +-} +- +-void ShenandoahStrDedupTable::clear_claimed() { +- _claimed = 0; +- _partition_size = size() / (ShenandoahHeap::heap()->max_workers() * 4); +- _partition_size = MAX2(_partition_size, size_t(1)); +-} +- +-size_t ShenandoahStrDedupTable::claim() { +- return (size_t)Atomic::add((jint)_partition_size, (volatile jint*)&_claimed) - _partition_size; +-} +- +-void ShenandoahStrDedupTable::parallel_oops_do(OopClosure* cl) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- +- size_t index; +- size_t end_index; +- do { +- index = claim(); +- end_index = MIN2(index + partition_size(), size()); +- +- for (; index < end_index; index ++) { +- ShenandoahStrDedupEntry* volatile p = bucket(index); +- while (p != NULL) { +- p->do_oop(cl); +- p = p->next(); +- } +- } +- } while (index < size()); +-} +- +-void ShenandoahStrDedupTable::oops_do_slow(OopClosure* cl) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- for (size_t index = 0; index < size(); index ++) { +- ShenandoahStrDedupEntry* volatile p = bucket(index); +- while (p != NULL) { +- p->do_oop(cl); +- p = p->next(); +- } +- } +-} +- +-ShenandoahStrDedupEntry* ShenandoahStrDedupTable::allocate_entry(typeArrayOop value, unsigned int hash) { +- ShenandoahStrDedupEntry* entry = new ShenandoahStrDedupEntry(); +- entry->set_hash(hash); +- entry->set_obj(value); +- return entry; +-} +- +-void ShenandoahStrDedupTable::release_entry(ShenandoahStrDedupEntry* entry) { +- assert(entry != NULL, "null entry"); +- delete entry; +-} +- +-unsigned int ShenandoahStrDedupTable::hash_code(oop java_string, typeArrayOop value) { +- if (use_java_hash()) { +- unsigned int hash = java_lang_String::hash(java_string); +- if (hash == 
0) { +- hash = java_hash_code(value); +- java_lang_String::set_hash(java_string, hash); +- } +- return hash; +- } else { +- return alt_hash_code(value); +- } +-} +- +-unsigned int ShenandoahStrDedupTable::java_hash_code(typeArrayOop value) { +- assert(use_java_hash(), "Must use java hash code"); +- int length = value->length(); +- const jchar* data = (jchar*)value->base(T_CHAR); +- return java_lang_String::hash_code(data, length); +-} +- +-unsigned int ShenandoahStrDedupTable::alt_hash_code(typeArrayOop value) { +- assert(hash_seed() != 0, "Must have hash seed"); +- int length = value->length(); +- const jchar* data = (jchar*)value->base(T_CHAR); +- return AltHashing::halfsiphash_32(hash_seed(), (const uint16_t*)data, length); +-} +- +-void ShenandoahStrDedupTable::print_statistics(outputStream* out) const { +- out->print_cr("ShenandoahStrDedupTable: buckets: " SIZE_FORMAT " entries: " SIZE_FORMAT, +- size(), _entries); +-} +- +-#ifdef ASSERT +-void ShenandoahStrDedupTable::verify() { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- assert(Thread::current() == VMThread::vm_thread(), "only by vm thread"); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- size_t num_entries = 0; +- +- for (size_t index = 0; index < size(); index ++) { +- ShenandoahStrDedupEntry* volatile head = bucket(index); +- while (head != NULL) { +- assert(heap->marking_context()->is_marked(head->obj()), "Must be marked"); +- +- if (use_java_hash()) { +- assert(head->hash() == java_hash_code(head->obj()), "Wrong hash code"); +- } else { +- assert(head->hash() == alt_hash_code(head->obj()), "Wrong alt hash code"); +- } +- +- assert(index == hash_to_index(head->hash()), "Wrong bucket"); +- num_entries ++; +- head = head->next(); +- } +- } +- assert(num_entries == _entries, "The number of entries does not match"); +-} +- +-#endif +- +-ShenandoahStrDedupTableCleanupTask::ShenandoahStrDedupTableCleanupTask() : +- 
_mark_context(ShenandoahHeap::heap()->marking_context()) { +-} +- +-bool ShenandoahStrDedupTableCleanupTask::is_alive(oop obj) const { +- return _mark_context->is_marked(obj); +-} +- +-ShenandoahStrDedupTableUnlinkTask::ShenandoahStrDedupTableUnlinkTask(ShenandoahStrDedupTable* const table) : +- _table(table) { +- log_debug(gc, stringdedup)("Cleanup StringDedup table"); +- table->clear_claimed(); +-} +- +-void ShenandoahStrDedupTableUnlinkTask::do_parallel_cleanup() { +- ShenandoahStrDedupTable* const table = _table; +- size_t partition = table->partition_size(); +- size_t removed = 0; +- size_t table_end = table->size(); +- +- size_t index; +- size_t end_index; +- do { +- index = table->claim(); +- end_index = MIN2(index + partition, table_end); +- for (; index < end_index; index ++) { +- ShenandoahStrDedupEntry* volatile* head_addr = table->bucket_addr(index); +- ShenandoahStrDedupEntry* volatile head; +- while (*head_addr != NULL) { +- head = *head_addr; +- if (!is_alive(head->obj())) { +- *head_addr = head->next(); +- table->release_entry(head); +- removed ++; +- } else { +- head_addr = head->next_addr(); +- } +- } +- } +- } while (index < table_end); +- +- Atomic::add(-((jlong)removed), (volatile jlong*)&table->_entries); +-} +- +-ShenandoahStrDedupTableRemapTask::ShenandoahStrDedupTableRemapTask(ShenandoahStrDedupTable* const src, +- ShenandoahStrDedupTable* const dest) : +- _src_table(src), _dest_table(dest) { +- src->clear_claimed(); +-} +- +-ShenandoahStrDedupTableRehashTask::ShenandoahStrDedupTableRehashTask( +- ShenandoahStrDedupTable* const src, ShenandoahStrDedupTable* const dest) : +- ShenandoahStrDedupTableRemapTask(src, dest) { +- log_debug(gc, stringdedup)("Rehash StringDedup table"); +-} +- +-void ShenandoahStrDedupTableRehashTask::do_parallel_cleanup() { +- size_t partition = src_table()->partition_size(); +- +- size_t added = 0; +- size_t table_end = src_table()->size(); +- size_t index; +- size_t end_index; +- do { +- index = 
src_table()->claim(); +- end_index = MIN2(index + partition, table_end); +- for (; index < end_index; index ++) { +- ShenandoahStrDedupEntry* volatile * head_addr = src_table()->bucket_addr(index); +- ShenandoahStrDedupEntry* volatile head = *head_addr; +- *head_addr = NULL; +- +- ShenandoahStrDedupEntry* tmp; +- while(head != NULL) { +- tmp = head; +- head = head->next(); +- tmp->set_next(NULL); +- if (is_alive(tmp->obj())) { +- dest_table()->add(tmp); +- added ++; +- } else { +- src_table()->release_entry(tmp); +- } +- } +- } +- } while (index < table_end); +- +- Atomic::add((jlong)added, (volatile jlong*)&dest_table()->_entries); +-} +- +-ShenandoahStrDedupShrinkTableTask::ShenandoahStrDedupShrinkTableTask( +- ShenandoahStrDedupTable* const src, ShenandoahStrDedupTable* const dest) : +- ShenandoahStrDedupTableRemapTask(src, dest) { +- assert(is_power_of_2(src->size()), "Source table size must be a power of 2"); +- assert(is_power_of_2(dest->size()), "Destination table size must be a power of 2"); +- assert(src->size() / dest->size() == 2, "Shrink in half"); +- log_debug(gc, stringdedup)("Shrink StringDedup table"); +-} +- +-void ShenandoahStrDedupShrinkTableTask::do_parallel_cleanup() { +- size_t partition = src_table()->partition_size(); +- size_t transferred = 0; +- +- size_t half_size = src_table()->size() / 2; +- // Only scan first half of table. 
+- // To shrink the table in half, we merge buckets at index and (index + half_size) +- size_t table_end = src_table()->size() / 2; +- +- size_t index; +- size_t end_index; +- do { +- index = src_table()->claim(); +- end_index = MIN2(index + partition, table_end); +- for (; index < end_index; index ++) { +- ShenandoahStrDedupEntry* volatile * src_head_addr = src_table()->bucket_addr(index); +- ShenandoahStrDedupEntry* volatile * dest_head_addr = dest_table()->bucket_addr(index); +- ShenandoahStrDedupEntry* volatile src_head = *src_head_addr; +- *src_head_addr = NULL; +- // transfer entries at index +- transferred += transfer_bucket(src_head, dest_head_addr); +- +- // transfer entries at index + half_size +- src_head_addr = src_table()->bucket_addr(index + half_size); +- src_head = *src_head_addr; +- *src_head_addr = NULL; +- transferred += transfer_bucket(src_head, dest_head_addr); +- } +- } while (index < table_end); +- +- Atomic::add((jlong)transferred, (volatile jlong*)&dest_table()->_entries); +-} +- +-size_t ShenandoahStrDedupShrinkTableTask::transfer_bucket(ShenandoahStrDedupEntry* volatile src, +- ShenandoahStrDedupEntry* volatile * dest) { +- ShenandoahStrDedupEntry* tmp; +- size_t transferred = 0; +- +- while (src != NULL) { +- tmp = src; +- src = src->next(); +- tmp->set_next(NULL); +- if (is_alive(tmp->obj())) { +- if (*dest != NULL) { +- tmp->set_next(*dest); +- } +- *dest = tmp; +- transferred ++; +- } else { +- src_table()->release_entry(tmp); +- } +- } +- +- return transferred; +-} +- +-ShenandoahStrDedupExpandTableTask::ShenandoahStrDedupExpandTableTask( +- ShenandoahStrDedupTable* const src, ShenandoahStrDedupTable* const dest) : +- ShenandoahStrDedupTableRemapTask(src, dest) { +- assert(is_power_of_2(src->size()), "Source table size must be a power of 2"); +- assert(is_power_of_2(dest->size()), "Destination table size must be a power of 2"); +- assert(dest->size() == 2 * src->size(), "Double the size"); +- +- log_debug(gc, stringdedup)("Expand 
StringDedup table"); +- +- int n = exact_log2_long(src->size()); +- _bit_mask = nth_bit(n); +-} +- +-void ShenandoahStrDedupExpandTableTask::do_parallel_cleanup() { +- size_t partition = src_table()->partition_size(); +- size_t table_end = src_table()->size(); +- +- size_t transferred = 0; +- size_t index; +- size_t end_index; +- do { +- index = src_table()->claim(); +- end_index = MIN2(index + partition, table_end); +- for (; index < end_index; index ++) { +- // split current source bucket into bucket[index] and bucket[index + half_size] +- // in destination table +- ShenandoahStrDedupEntry* volatile * src_head_addr = src_table()->bucket_addr(index); +- ShenandoahStrDedupEntry* volatile src_head = *src_head_addr; +- ShenandoahStrDedupEntry* volatile * dest_low_addr = dest_table()->bucket_addr(index); +- ShenandoahStrDedupEntry* volatile * dest_high_addr = dest_table()->bucket_addr(index + src_table()->size()); +- *src_head_addr = NULL; +- +- transferred += split_bucket(src_head, dest_low_addr, dest_high_addr); +- } +- } while (index < table_end); +- Atomic::add((jlong)transferred, (volatile jlong*)&dest_table()->_entries); +-} +- +-size_t ShenandoahStrDedupExpandTableTask::split_bucket(ShenandoahStrDedupEntry* volatile src, +- ShenandoahStrDedupEntry* volatile * dest_low, +- ShenandoahStrDedupEntry* volatile * dest_high) { +- size_t transferred = 0; +- +- ShenandoahStrDedupEntry* volatile tmp; +- ShenandoahStrDedupEntry* volatile * target; +- while (src != NULL) { +- tmp = src; +- src = src->next(); +- +- if (is_alive(tmp->obj())) { +- tmp->set_next(NULL); +- unsigned int hash = tmp->hash(); +- if ((hash & _bit_mask) == 0) { +- target = dest_low; +- } else { +- target = dest_high; +- } +- +- if (*target != NULL) { +- tmp->set_next(*target); +- } +- +- *target = tmp; +- transferred ++; +- } else { +- src_table()->release_entry(tmp); +- } +- } +- return transferred; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.hpp 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupTable.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,284 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAHSTRDEDUPTABLE_HPP +-#define SHARE_VM_GC_SHENANDOAHSTRDEDUPTABLE_HPP +- +-#include "utilities/ostream.hpp" +- +-class ShenandoahStrDedupEntry : public CHeapObj { +-private: +- ShenandoahStrDedupEntry* volatile _next; +- unsigned int _hash; +- typeArrayOop _obj; +- +-public: +- ShenandoahStrDedupEntry() : _next(NULL), _hash(0), _obj(NULL) { +- } +- +- ShenandoahStrDedupEntry* volatile next() { +- return _next; +- } +- +- ShenandoahStrDedupEntry* volatile* next_addr() { +- return &_next; +- } +- +- void set_next(ShenandoahStrDedupEntry* next) { +- _next = next; +- } +- +- bool cas_set_next(ShenandoahStrDedupEntry* next); +- +- unsigned int hash() const { +- return _hash; +- } +- +- void set_hash(unsigned int hash) { +- _hash = hash; +- } +- +- typeArrayOop obj() const { +- return _obj; +- } +- +- typeArrayOop* obj_addr() { +- return &_obj; +- } +- +- void set_obj(typeArrayOop obj) { +- _obj = obj; +- } +- +- bool equals(typeArrayOop value, unsigned int hash) const { +- return (hash == this->hash() && +- equals(value, obj())); +- } +- +- void do_oop(OopClosure* cl) { +- oop* p = (oop*)obj_addr(); +- cl->do_oop(p); +- } +- +-private: +- static bool equals(typeArrayOop value1, typeArrayOop value2) { +- return (value1 == value2 || +- (value1->length() == value2->length() && +- (!memcmp(value1->base(T_CHAR), +- value2->base(T_CHAR), +- value1->length() * sizeof(jchar))))); +- } +-}; +- +-/* ShenandoahStringDedupTable: +- * - Lookup and add are lock free +- * - Cleanup, resize and rehash are at safepoints +- */ +-class ShenandoahStrDedupTable : public CHeapObj { +- friend class ShenandoahStrDedupTableUnlinkTask; +- friend class ShenandoahStrDedupTableRehashTask; +- friend class ShenandoahStrDedupShrinkTableTask; +- friend class ShenandoahStrDedupExpandTableTask; +- +-private: +- ShenandoahStrDedupEntry* volatile * _buckets; +- size_t _size; +- volatile size_t _entries; +- +- uintx _shrink_threshold; +- uintx _grow_threshold; +- 
bool _rehash_needed; +- +- // The hash seed also dictates which hash function to use. A +- // zero hash seed means we will use the Java compatible hash +- // function (which doesn't use a seed), and a non-zero hash +- // seed means we use the murmur3 hash function. +- jint _hash_seed; +- +- // Constants governing table resize/rehash/cache. +- static const size_t _min_size; +- static const size_t _max_size; +- static const double _grow_load_factor; +- static const double _shrink_load_factor; +- static const uintx _rehash_multiple; +- static const uintx _rehash_threshold; +- static const double _max_cache_factor; +- +- volatile size_t _claimed; +- size_t _partition_size; +- +-public: +- ShenandoahStrDedupTable(size_t size = _min_size, jint hash_seed = 0); +- ~ShenandoahStrDedupTable(); +- +- jint hash_seed() const { return _hash_seed; } +- size_t size() const { return _size; } +- bool need_rehash() const { return _rehash_needed; } +- bool need_expand() const { return _entries >= _grow_threshold && size() < max_size(); } +- bool need_shrink() const { return _entries <= _shrink_threshold && size() > min_size(); } +- +- // parallel scanning the table +- void clear_claimed(); +- size_t claim(); +- void parallel_oops_do(OopClosure* cl); +- +- // For verification only +- void oops_do_slow(OopClosure* cl); +- +- bool deduplicate(oop java_string); +- +- // Returns an existing character array in the table, or inserts a new +- // table entry if no matching character array exists. +- typeArrayOop lookup_or_add(typeArrayOop value, unsigned int hash, uintx& count); +- +- void print_statistics(outputStream* out) const; +- +- static size_t min_size() { return _min_size; } +- static size_t max_size() { return _max_size; } +- +- void verify() PRODUCT_RETURN; +- +-private: +- inline bool use_java_hash() { +- return _hash_seed == 0; +- } +- +- // Returns the hash bucket index for the given hash code. 
+- size_t hash_to_index(unsigned int hash) { +- return (size_t)hash & (size() - 1); +- } +- +- ShenandoahStrDedupEntry* volatile * bucket_addr(size_t index) const { +- assert(index < size(), "Index out of bound"); +- return &_buckets[index]; +- } +- +- // Returns the hash bucket at the given index. +- ShenandoahStrDedupEntry* volatile bucket(size_t index) const { +- assert(index < size(), "Index out of bound"); +- return _buckets[index]; +- } +- +- size_t partition_size() const { return _partition_size; } +- +- ShenandoahStrDedupEntry* allocate_entry(typeArrayOop value, unsigned int hash); +- void release_entry(ShenandoahStrDedupEntry* entry); +- +- unsigned int hash_code(oop java_string, typeArrayOop value); +- unsigned int java_hash_code(typeArrayOop value); +- unsigned int alt_hash_code(typeArrayOop value); +- +- // Adds a new table entry to the given hash bucket. +- void add(ShenandoahStrDedupEntry* entry); +- +- // Clean up a bucket, return number of entries removed +- size_t cleanup_bucket(size_t index); +-}; +- +-class ShenandoahHeap; +- +-class ShenandoahStrDedupTableCleanupTask : public CHeapObj { +-private: +- ShenandoahMarkingContext* const _mark_context; +- +-public: +- ShenandoahStrDedupTableCleanupTask(); +- virtual ~ShenandoahStrDedupTableCleanupTask() {}; +- virtual void do_parallel_cleanup() = 0; +- +-protected: +- bool is_alive(oop obj) const; +-}; +- +-// Cleanup current string dedup table, remove all dead entries +-class ShenandoahStrDedupTableUnlinkTask : public ShenandoahStrDedupTableCleanupTask { +-private: +- ShenandoahStrDedupTable* const _table; +- +-public: +- ShenandoahStrDedupTableUnlinkTask(ShenandoahStrDedupTable* const table); +- void do_parallel_cleanup(); +-}; +- +-// The task transfers live entries from source table to destination table +-class ShenandoahStrDedupTableRemapTask : public ShenandoahStrDedupTableCleanupTask { +-protected: +- ShenandoahStrDedupTable* const _src_table; +- ShenandoahStrDedupTable* const _dest_table; +- 
+-public: +- ShenandoahStrDedupTableRemapTask(ShenandoahStrDedupTable* const src, +- ShenandoahStrDedupTable* const dest); +-protected: +- ShenandoahStrDedupTable* const src_table() const { return _src_table; } +- ShenandoahStrDedupTable* const dest_table() const { return _dest_table; } +-}; +- +-// The task rehashes live entries from source table to destination table. +-// Source and destination tables are not necessary the same size. +-class ShenandoahStrDedupTableRehashTask : public ShenandoahStrDedupTableRemapTask { +-public: +- ShenandoahStrDedupTableRehashTask(ShenandoahStrDedupTable* const src, +- ShenandoahStrDedupTable* const dest); +- void do_parallel_cleanup(); +-}; +- +-/* The task remaps live entries from source table into destination table of +- * the half size. +- * Hash function should *not* be changed during shrinking of the table, +- * so we can merge buckets from source table into destination table. +- * bucket [index ] and bucket [index + half_table_size] -> bucket [index] +- */ +-class ShenandoahStrDedupShrinkTableTask : public ShenandoahStrDedupTableRemapTask { +-public: +- ShenandoahStrDedupShrinkTableTask(ShenandoahStrDedupTable* const src, +- ShenandoahStrDedupTable* const dest); +- void do_parallel_cleanup(); +- +-protected: +- size_t transfer_bucket(ShenandoahStrDedupEntry* volatile src, +- ShenandoahStrDedupEntry* volatile * dest); +-}; +- +-/* The task remaps live entries from source table into destination table of +- * twice the size. +- * Hash function should *not* be changed during shrinking of the table, +- * so we can split buckets from source table into destination table. 
+- * bucket [index ] -> bucket [index] or bucket [index + half_table_size] +- */ +-class ShenandoahStrDedupExpandTableTask : public ShenandoahStrDedupTableRemapTask { +-private: +- int _bit_mask; +- +-public: +- ShenandoahStrDedupExpandTableTask(ShenandoahStrDedupTable* const src, +- ShenandoahStrDedupTable* const dest); +- void do_parallel_cleanup(); +- +-protected: +- size_t split_bucket(ShenandoahStrDedupEntry* volatile src, +- ShenandoahStrDedupEntry* volatile * dest_low, +- ShenandoahStrDedupEntry* volatile * dest_high); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAHSTRDEDUPTABLE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,199 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shared/suspendibleThreadSet.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +- +-ShenandoahStrDedupThread::ShenandoahStrDedupThread(ShenandoahStrDedupQueueSet* queues) : +- ConcurrentGCThread(), _queues(queues), _claimed(0) { +- size_t num_queues = queues->num_queues(); +- _work_list = NEW_C_HEAP_ARRAY(QueueChunkedList*, num_queues, mtGC); +- for (size_t index = 0; index < num_queues; index ++) { +- _work_list[index] = NULL; +- } +- +- set_name("%s", "ShenandoahStringDedupTherad"); +- create_and_start(); +-} +- +-ShenandoahStrDedupThread::~ShenandoahStrDedupThread() { +- ShouldNotReachHere(); +-} +- +-void ShenandoahStrDedupThread::run() { +- initialize_in_thread(); +- wait_for_universe_init(); +- +- for (;;) { +- ShenandoahStrDedupStats stats; +- +- assert(is_work_list_empty(), "Work list must be empty"); +- // Queue has been shutdown +- if (!poll(&stats)) { +- assert(queues()->has_terminated(), "Must be terminated"); +- break; +- } +- +- // Include thread in safepoints +- SuspendibleThreadSetJoiner sts_join; +- // Process the queue +- for (uint queue_index = 0; queue_index < queues()->num_queues(); queue_index ++) { +- QueueChunkedList* cur_list = _work_list[queue_index]; +- +- while (cur_list != NULL) { +- stats.mark_exec(); +- +- while (!cur_list->is_empty()) { +- oop java_string = cur_list->pop(); +- stats.inc_inspected(); +- 
assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must not at Shenandoah safepoint"); +- +- if (oopDesc::is_null(java_string) || +- !ShenandoahStringDedup::is_candidate(java_string)) { +- stats.inc_skipped(); +- } else { +- if (ShenandoahStringDedup::deduplicate(java_string, false /* update counter */)) { +- stats.inc_deduped(); +- } else { +- stats.inc_known(); +- } +- } +- +- // Safepoint this thread if needed +- if (sts_join.should_yield()) { +- stats.mark_block(); +- sts_join.yield(); +- stats.mark_unblock(); +- } +- } +- +- // Advance list only after processed. Otherwise, we may miss scanning +- // during safepoints +- _work_list[queue_index] = cur_list->next(); +- queues()->release_chunked_list(cur_list); +- cur_list = _work_list[queue_index]; +- } +- } +- +- stats.mark_done(); +- +- ShenandoahStringDedup::dedup_stats().update(stats); +- +- if (ShenandoahLogDebug) { +- stats.print_statistics(tty); +- } +- } +- +- if (ShenandoahLogDebug) { +- ShenandoahStringDedup::print_statistics(tty); +- } +-} +- +-void ShenandoahStrDedupThread::stop() { +- queues()->terminate(); +-} +- +-void ShenandoahStrDedupThread::parallel_oops_do(OopClosure* cl) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- size_t claimed_index; +- while ((claimed_index = claim()) < queues()->num_queues()) { +- QueueChunkedList* q = _work_list[claimed_index]; +- while (q != NULL) { +- q->oops_do(cl); +- q = q->next(); +- } +- } +-} +- +-void ShenandoahStrDedupThread::oops_do_slow(OopClosure* cl) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- for (size_t index = 0; index < queues()->num_queues(); index ++) { +- QueueChunkedList* q = _work_list[index]; +- while (q != NULL) { +- q->oops_do(cl); +- q = q->next(); +- } +- } +-} +- +-bool ShenandoahStrDedupThread::is_work_list_empty() const { +- assert(Thread::current() == this, "Only from dedup thread"); +- for (uint index = 0; index < queues()->num_queues(); index ++) { +- 
if (_work_list[index] != NULL) return false; +- } +- return true; +-} +- +-void ShenandoahStrDedupThread::parallel_cleanup() { +- ShenandoahStrDedupQueueCleanupClosure cl; +- parallel_oops_do(&cl); +-} +- +-bool ShenandoahStrDedupThread::poll(ShenandoahStrDedupStats* stats) { +- assert(is_work_list_empty(), "Only poll when work list is empty"); +- +- while (!_queues->has_terminated()) { +- { +- bool has_work = false; +- stats->mark_exec(); +- // Include thread in safepoints +- SuspendibleThreadSetJoiner sts_join; +- +- for (uint index = 0; index < queues()->num_queues(); index ++) { +- assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Not at Shenandoah Safepoint"); +- _work_list[index] = queues()->remove_work_list_atomic(index); +- if (_work_list[index] != NULL) { +- has_work = true; +- } +- +- // Safepoint this thread if needed +- if (sts_join.should_yield()) { +- stats->mark_block(); +- sts_join.yield(); +- stats->mark_unblock(); +- } +- } +- +- if (has_work) return true; +- } +- +- { +- stats->mark_idle(); +- MonitorLockerEx locker(queues()->lock(), Monitor::_no_safepoint_check_flag); +- locker.wait(Mutex::_no_safepoint_check_flag); +- } +- } +- return false; +-} +- +-size_t ShenandoahStrDedupThread::claim() { +- size_t index = (size_t)Atomic::add(1, (volatile jint*)&_claimed) - 1; +- return index; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStrDedupThread.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,64 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPTHREAD_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPTHREAD_HPP +- +-#include "gc_implementation/shared/concurrentGCThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupQueue.hpp" +-#include "memory/iterator.hpp" +- +-class ShenandoahStrDedupStats; +- +-class ShenandoahStrDedupThread: public ConcurrentGCThread { +-private: +- ShenandoahStrDedupQueueSet* _queues; +- QueueChunkedList** _work_list; +- volatile size_t _claimed; +- +-public: +- ShenandoahStrDedupThread(ShenandoahStrDedupQueueSet* queues); +- ~ShenandoahStrDedupThread(); +- +- void clear_claimed() { _claimed = 0; } +- void parallel_oops_do(OopClosure* cl); +- void parallel_cleanup(); +- +- // For verification only +- void oops_do_slow(OopClosure* cl); +- +- virtual void run(); +- virtual void stop(); +- +-private: +- bool poll(ShenandoahStrDedupStats* stats); +- bool is_work_list_empty() const; +- +- ShenandoahStrDedupQueueSet* queues() const { +- return _queues; +- } +- +- size_t 
claim(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUPTHREAD_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,338 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "classfile/altHashing.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectionSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupQueue.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupTable.hpp" +-#include "gc_implementation/shenandoah/shenandoahStrDedupThread.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "runtime/os.hpp" +-#include "utilities/workgroup.hpp" +- +-ShenandoahStrDedupQueueSet* ShenandoahStringDedup::_queues = NULL; +-ShenandoahStrDedupTable* ShenandoahStringDedup::_table = NULL; +-ShenandoahStrDedupThread* ShenandoahStringDedup::_thread = NULL; +-ShenandoahStrDedupStats ShenandoahStringDedup::_stats; +-bool ShenandoahStringDedup::_enabled = false; +- +-void ShenandoahStringDedup::initialize() { +- if (UseStringDeduplication) { +- _queues = new ShenandoahStrDedupQueueSet(ShenandoahHeap::heap()->max_workers()); +- _table = new ShenandoahStrDedupTable(); +- _thread = new ShenandoahStrDedupThread(_queues); +- _enabled = true; +- } +-} +- +-/* Enqueue candidates for deduplication. +- * The method should only be called by GC worker threads, during concurrent marking phase. 
+- */ +-void ShenandoahStringDedup::enqueue_candidate(oop java_string, ShenandoahStrDedupQueue* q) { +- assert(Thread::current()->is_Worker_thread(), "Only be GC worker thread"); +- +- if (java_string->age() <= StringDeduplicationAgeThreshold) { +- const markOop mark = java_string->mark(); +- +- // Having/had displaced header, too risk to deal with them, skip +- if (mark == markOopDesc::INFLATING() || mark->has_displaced_mark_helper()) { +- return; +- } +- +- // Increase string age and enqueue it when it rearches age threshold +- markOop new_mark = mark->incr_age(); +- if (mark == java_string->cas_set_mark(new_mark, mark)) { +- if (mark->age() == StringDeduplicationAgeThreshold) { +- q->push(java_string); +- } +- } +- } +-} +- +-// Deduplicate a string, return true if it is deduplicated. +-bool ShenandoahStringDedup::deduplicate(oop java_string, bool update_counter) { +- assert(is_candidate(java_string), "Not a candidate"); +- assert(_table != NULL, "Shenandoah Dedup table not initialized"); +- bool deduped = _table->deduplicate(java_string); +- +- if (update_counter) { +- dedup_stats().atomic_inc_inspected(1); +- if (deduped) { +- dedup_stats().atomic_inc_skipped(1); +- } else { +- dedup_stats().atomic_inc_known(1); +- } +- } +- return deduped; +-} +- +-ShenandoahStrDedupQueue* ShenandoahStringDedup::queue(uint worker_id) { +- assert(_queues != NULL, "QueueSet not initialized"); +- return _queues->queue_at(worker_id); +-} +- +-void ShenandoahStringDedup::threads_do(ThreadClosure* tc) { +- assert(_thread != NULL, "Shenandoah Dedup Thread not initialized"); +- tc->do_thread(_thread); +-} +- +-void ShenandoahStringDedup::parallel_oops_do(ShenandoahPhaseTimings::Phase phase, OopClosure* cl, uint worker_id) { +- { +- ShenandoahWorkerTimingsTracker x(phase, ShenandoahPhaseTimings::StringDedupQueueRoots, worker_id); +- _queues->parallel_oops_do(cl); +- } +- +- { +- ShenandoahWorkerTimingsTracker x(phase, ShenandoahPhaseTimings::StringDedupTableRoots, worker_id); +- 
_table->parallel_oops_do(cl); +- } +- +- { +- ShenandoahWorkerTimingsTracker x(phase, ShenandoahPhaseTimings::StringDedupThreadRoots, worker_id); +- _thread->parallel_oops_do(cl); +- } +-} +- +-void ShenandoahStringDedup::oops_do_slow(OopClosure* cl) { +- _queues->oops_do_slow(cl); +- _table->oops_do_slow(cl); +- _thread->oops_do_slow(cl); +-} +- +-class ShenandoahStrDedupCleanupTask : public AbstractGangTask { +-private: +- ShenandoahStrDedupQueueSet* _queues; +- ShenandoahStrDedupThread* _thread; +- ShenandoahStrDedupTable** _table; +- ShenandoahStrDedupTable* _dest_table; +- +- ShenandoahStrDedupTableCleanupTask* _dedup_table_cleanup_task; +- +-public: +- ShenandoahStrDedupCleanupTask(ShenandoahStrDedupQueueSet* qset, +- ShenandoahStrDedupThread* thread, ShenandoahStrDedupTable** table) +- : AbstractGangTask("Shenandoah dedup cleanup task"), +- _queues(qset), _table(table), _thread(thread), _dest_table(NULL) { +- +- ShenandoahStrDedupTable* the_table = *table; +- bool rehash = the_table->need_rehash(); +- size_t table_size = the_table->size(); +- if (the_table->need_expand()) { +- table_size *= 2; +- table_size = MIN2(table_size, ShenandoahStrDedupTable::max_size()); +- } else if (the_table->need_shrink()) { +- table_size /= 2; +- table_size = MAX2(table_size, ShenandoahStrDedupTable::min_size()); +- } +- +- if (rehash) { +- _dest_table = new ShenandoahStrDedupTable(table_size, AltHashing::compute_seed()); +- _dedup_table_cleanup_task = new ShenandoahStrDedupTableRehashTask(the_table, _dest_table); +- ShenandoahStringDedup::dedup_stats().inc_table_rehashed(); +- } else if (the_table->need_expand()) { +- _dest_table = new ShenandoahStrDedupTable(table_size, the_table->hash_seed()); +- _dedup_table_cleanup_task = new ShenandoahStrDedupExpandTableTask(the_table, _dest_table); +- ShenandoahStringDedup::dedup_stats().inc_table_expanded(); +- } else if (the_table->need_shrink()) { +- _dest_table = new ShenandoahStrDedupTable(table_size, the_table->hash_seed()); +- 
_dedup_table_cleanup_task = new ShenandoahStrDedupShrinkTableTask(the_table, _dest_table); +- ShenandoahStringDedup::dedup_stats().inc_table_shrinked(); +- } else { +- _dedup_table_cleanup_task = new ShenandoahStrDedupTableUnlinkTask(the_table); +- } +- } +- +- ~ShenandoahStrDedupCleanupTask() { +- assert(_dedup_table_cleanup_task != NULL, "Should not be null"); +- delete _dedup_table_cleanup_task; +- +- // Install new table +- if (_dest_table != NULL) { +- delete *_table; +- *_table = _dest_table; +- } +- +- (*_table)->verify(); +- } +- +- void work(uint worker_id) { +- _queues->parallel_cleanup(); +- _thread->parallel_cleanup(); +- _dedup_table_cleanup_task->do_parallel_cleanup(); +- } +-}; +- +-void ShenandoahStringDedup::parallel_cleanup() { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- log_debug(gc, stringdedup)("String dedup cleanup"); +- ShenandoahStringDedup::clear_claimed(); +- ShenandoahStrDedupCleanupTask task(_queues, _thread, &_table); +- ShenandoahHeap::heap()->workers()->run_task(&task); +-} +- +-void ShenandoahStringDedup::stop() { +- assert(ShenandoahStringDedup::is_enabled(), "Must be enabled"); +- assert(_thread != NULL, "Not dedup thread"); +- _thread->stop(); +-} +- +-void ShenandoahStringDedup::clear_claimed() { +- assert(is_enabled(), "Must be enabled"); +- _queues->clear_claimed(); +- _table->clear_claimed(); +- _thread->clear_claimed(); +-} +- +-void ShenandoahStringDedup::print_statistics(outputStream* out) { +- assert(is_enabled(), "Must be enabled"); +- +- out->print_cr("Shenandoah String Dedup Statistics:"); +- dedup_stats().print_statistics(out); +- _table->print_statistics(out); +-} +- +-ShenandoahStrDedupStats::ShenandoahStrDedupStats() : +- _inspected(0), _deduped(0), _skipped(0), _known(0), _idle(0), _exec(0), _block(0), +- _idle_elapsed(0), _exec_elapsed(0), _block_elapsed(0), +- _start_phase(0), _start_concurrent(0), _end_concurrent(0), +- _table_expanded_count(0), _table_shrinked_count(0), 
_table_rehashed_count(0) { +-} +- +-void ShenandoahStrDedupStats::atomic_inc_inspected(size_t count) { +- Atomic::add((jlong)count, (volatile jlong*)&_inspected); +-} +- +-void ShenandoahStrDedupStats::atomic_inc_skipped(size_t count) { +- Atomic::add((jlong)count, (volatile jlong*)&_skipped); +-} +- +-void ShenandoahStrDedupStats::atomic_inc_deduped(size_t count) { +- Atomic::add((jlong)count, (volatile jlong*)&_deduped); +-} +- +-void ShenandoahStrDedupStats::atomic_inc_known(size_t count) { +- Atomic::add((jlong)count, (volatile jlong*)&_known); +-} +- +-void ShenandoahStrDedupStats::mark_idle() { +- assert_thread(); +- _start_phase = os::elapsedTime(); +- _idle++; +-} +- +-void ShenandoahStrDedupStats::mark_exec() { +- assert_thread(); +- double now = os::elapsedTime(); +- _idle_elapsed = now - _start_phase; +- _start_phase = now; +- _start_concurrent = now; +- _exec++; +-} +- +-void ShenandoahStrDedupStats::mark_block() { +- assert_thread(); +- double now = os::elapsedTime(); +- _exec_elapsed += now - _start_phase; +- _start_phase = now; +- _block++; +-} +- +-void ShenandoahStrDedupStats::mark_unblock() { +- assert_thread(); +- double now = os::elapsedTime(); +- _block_elapsed += now - _start_phase; +- _start_phase = now; +-} +- +-void ShenandoahStrDedupStats::mark_done() { +- assert_thread(); +- double now = os::elapsedTime(); +- _exec_elapsed += now - _start_phase; +- _end_concurrent = now; +-} +- +-void ShenandoahStrDedupStats::update(const ShenandoahStrDedupStats& sts) { +- assert_thread(); +- // Counters +- atomic_inc_inspected(sts._inspected); +- atomic_inc_deduped(sts._deduped); +- atomic_inc_skipped(sts._skipped); +- atomic_inc_known(sts._known); +- +- _idle += sts._idle; +- _exec += sts._exec; +- _block += sts._block; +- +- // Time spent by the deduplication thread in different phases +- _idle_elapsed += sts._idle_elapsed; +- _exec_elapsed += sts._exec_elapsed; +- _block_elapsed += sts._block_elapsed; +-} +- +-void 
ShenandoahStrDedupStats::inc_table_expanded() { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- assert(Thread::current() == VMThread::vm_thread(), "Only by VM thread"); +- _table_expanded_count ++; +-} +- +-void ShenandoahStrDedupStats::inc_table_shrinked() { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- assert(Thread::current() == VMThread::vm_thread(), "Only by VM thread"); +- _table_shrinked_count ++; +-} +- +-void ShenandoahStrDedupStats::inc_table_rehashed() { +- assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint"); +- assert(Thread::current() == VMThread::vm_thread(), "Only by VM thread"); +- _table_rehashed_count ++; +-} +- +-void ShenandoahStrDedupStats::print_statistics(outputStream* out) const { +- out->print_cr(" Inspected: " SIZE_FORMAT_W(12), _inspected); +- out->print_cr(" Skipped: " SIZE_FORMAT_W(12), _skipped); +- out->print_cr(" Deduped: " SIZE_FORMAT_W(12), _deduped); +- out->print_cr(" Known: " SIZE_FORMAT_W(12), _known); +- out->cr(); +- out->print_cr(" Idle: " STRDEDUP_TIME_FORMAT_MS " Exec: " STRDEDUP_TIME_FORMAT_MS " Block: " STRDEDUP_TIME_FORMAT_MS, +- STRDEDUP_TIME_PARAM_MS(_idle_elapsed), STRDEDUP_TIME_PARAM_MS(_exec_elapsed), STRDEDUP_TIME_PARAM_MS(_block_elapsed)); +- if (_table_expanded_count != 0 || _table_shrinked_count != 0 || _table_rehashed_count != 0) { +- out->print_cr(" Table expanded: " SIZE_FORMAT " shrinked: " SIZE_FORMAT " rehashed: " SIZE_FORMAT, +- _table_expanded_count, _table_shrinked_count, _table_rehashed_count); +- } +- +- out->cr(); +-} +- +-#ifdef ASSERT +-void ShenandoahStrDedupStats::assert_thread() { +- assert(Thread::current() == ShenandoahStringDedup::_thread, "Can only be done by dedup thread"); +-} +- +-#endif +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.hpp +--- 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahStringDedup.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,150 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP +- +-#include "classfile/javaClasses.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +-#include "memory/iterator.hpp" +-#include "utilities/ostream.hpp" +- +-#define STRDEDUP_TIME_FORMAT_MS "%.3fms" +-#define STRDEDUP_TIME_PARAM_MS(time) ((time) * MILLIUNITS) +- +-class ShenandoahStrDedupStats VALUE_OBJ_CLASS_SPEC { +-private: +- // Counters +- volatile size_t _inspected; +- volatile size_t _deduped; +- volatile size_t _skipped; +- volatile size_t _known; +- +- size_t _idle; +- size_t _exec; +- size_t _block; +- +- // Time spent by the deduplication thread in different phases +- double _start_concurrent; +- double _end_concurrent; +- double _start_phase; +- double _idle_elapsed; +- double _exec_elapsed; +- double _block_elapsed; +- +- size_t _table_expanded_count; +- size_t _table_shrinked_count; +- size_t _table_rehashed_count; +- +-public: +- ShenandoahStrDedupStats(); +- +- void inc_inspected() { assert_thread(); _inspected ++; } +- void inc_skipped() { assert_thread(); _skipped ++; } +- void inc_known() { assert_thread(); _known ++; } +- void inc_deduped() { +- assert_thread(); +- _deduped ++; +- } +- +- void atomic_inc_inspected(size_t count); +- void atomic_inc_deduped(size_t count); +- void atomic_inc_skipped(size_t count); +- void atomic_inc_known(size_t count); +- +- void mark_idle(); +- void mark_exec(); +- void mark_block(); +- void mark_unblock(); +- void mark_done(); +- +- void inc_table_expanded(); +- void inc_table_shrinked(); +- void inc_table_rehashed(); +- +- void update(const ShenandoahStrDedupStats& sts); +- +- void print_statistics(outputStream* out) const; +- +-private: +- void assert_thread() PRODUCT_RETURN; +-}; +- +-class ShenandoahStrDedupQueue; +-class ShenandoahStrDedupQueueSet; +-class ShenandoahStrDedupTable; +-class ShenandoahStrDedupThread; +- +-class ShenandoahStringDedup : 
AllStatic { +- friend class ShenandoahStrDedupStats; +- +-private: +- static ShenandoahStrDedupQueueSet* _queues; +- static ShenandoahStrDedupTable* _table; +- static ShenandoahStrDedupThread* _thread; +- static bool _enabled; +- static ShenandoahStrDedupStats _stats; +- +-public: +- // Initialize string deduplication. +- static void initialize(); +- +- static bool is_enabled() { return _enabled; } +- +- // Enqueue a string to worker's local string dedup queue +- static void enqueue_candidate(oop java_string, ShenandoahStrDedupQueue* q); +- +- // Get string dedup queue associated to specific worker id +- static ShenandoahStrDedupQueue* queue(uint worker_id); +- +- // Deduplicate a string, the call is lock-free +- static bool deduplicate(oop java_string, bool update_counter = true); +- +- // Parallel scan string dedup queues/table +- static void clear_claimed(); +- +- static void parallel_oops_do(ShenandoahPhaseTimings::Phase phase, OopClosure* cl, uint worker_id); +- +- // For verification only +- static void oops_do_slow(OopClosure* cl); +- +- static void threads_do(ThreadClosure* tc); +- +- static void print_worker_threads_on(outputStream* out) { } +- +- static ShenandoahStrDedupStats& dedup_stats() { return _stats; } +- +- // Parallel cleanup string dedup queues/table +- static void parallel_cleanup(); +- +- static void stop(); +- +- static inline bool is_candidate(oop obj) { +- return java_lang_String::is_instance(obj) && +- java_lang_String::value(obj) != NULL; +- } +- +- static void print_statistics(outputStream* out); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSTRINGDEDUP_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,79 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp" +-#include "runtime/atomic.hpp" +-#include "runtime/thread.hpp" +-#include "runtime/objectMonitor.hpp" +-#include "runtime/objectMonitor.inline.hpp" +-#include "runtime/synchronizer.hpp" +-#include "runtime/safepoint.hpp" +- +-#define CHAINMARKER (cast_to_oop(-1)) +- +-// ParallelObjectSynchronizerIterator implementation +-ShenandoahSynchronizerIterator::ShenandoahSynchronizerIterator() +- : _cur(ObjectSynchronizer::gBlockList) { +- assert(SafepointSynchronize::is_at_safepoint(), "Must at safepoint"); +-} +- +-// Get the next block in the block list. 
+-static inline ObjectMonitor* next(ObjectMonitor* block) { +- assert(block->object() == CHAINMARKER, "must be a block header"); +- block = block->FreeNext ; +- assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); +- return block; +-} +- +-ObjectMonitor* ShenandoahSynchronizerIterator::claim() { +- ObjectMonitor* my_cur = _cur; +- +- while (true) { +- if (my_cur == NULL) return NULL; +- ObjectMonitor* next_block = next(my_cur); +- ObjectMonitor* cas_result = (ObjectMonitor*) Atomic::cmpxchg_ptr(next_block, &_cur, my_cur); +- if (my_cur == cas_result) { +- // We succeeded. +- return my_cur; +- } else { +- // We failed. Retry with offending CAS result. +- my_cur = cas_result; +- } +- } +-} +- +-bool ShenandoahSynchronizerIterator::parallel_oops_do(OopClosure* f) { +- ObjectMonitor* block = claim(); +- if (block != NULL) { +- for (int i = 1; i < ObjectSynchronizer::_BLOCKSIZE; i++) { +- ObjectMonitor* mid = &block[i]; +- if (mid->object() != NULL) { +- f->do_oop((oop*) mid->object_addr()); +- } +- } +- return true; +- } +- return false; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahSynchronizerIterator.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,42 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHSYNCHRONIZERITERATOR_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHSYNCHRONIZERITERATOR_HPP +- +-#include "utilities/globalDefinitions.hpp" +- +-class ObjectMonitor; +-class OopClosure; +- +-class ShenandoahSynchronizerIterator VALUE_OBJ_CLASS_SPEC { +-private: +- ObjectMonitor* volatile _cur; +- ObjectMonitor* claim(); +- +-public: +- ShenandoahSynchronizerIterator(); +- bool parallel_oops_do(OopClosure* f); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHSYNCHRONIZERITERATOR_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,227 +0,0 @@ +-/* +- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp" +- +-void ShenandoahObjToScanQueueSet::clear() { +- uint size = GenericTaskQueueSet::size(); +- for (uint index = 0; index < size; index ++) { +- ShenandoahObjToScanQueue* q = queue(index); +- assert(q != NULL, "Sanity"); +- q->clear(); +- } +-} +- +-bool ShenandoahObjToScanQueueSet::is_empty() { +- uint size = GenericTaskQueueSet::size(); +- for (uint index = 0; index < size; index ++) { +- ShenandoahObjToScanQueue* q = queue(index); +- assert(q != NULL, "Sanity"); +- if (!q->is_empty()) { +- return false; +- } +- } +- return true; +-} +- +-bool ShenandoahTaskTerminator::offer_termination(ShenandoahTerminatorTerminator* terminator) { +- assert(_n_threads > 0, "Initialization is incorrect"); +- assert(_offered_termination < _n_threads, "Invariant"); +- assert(_blocker != NULL, "Invariant"); +- +- // single worker, done +- if (_n_threads == 1) { +- return true; +- } +- +- _blocker->lock_without_safepoint_check(); +- // all arrived, done +- if (++ _offered_termination == _n_threads) { +- 
_blocker->notify_all(); +- _blocker->unlock(); +- return true; +- } +- +- Thread* the_thread = Thread::current(); +- while (true) { +- if (_spin_master == NULL) { +- _spin_master = the_thread; +- +- _blocker->unlock(); +- +- if (do_spin_master_work(terminator)) { +- assert(_offered_termination == _n_threads, "termination condition"); +- return true; +- } else { +- _blocker->lock_without_safepoint_check(); +- } +- } else { +- _blocker->wait(true, WorkStealingSleepMillis); +- +- if (_offered_termination == _n_threads) { +- _blocker->unlock(); +- return true; +- } +- } +- +- if (peek_in_queue_set() || (terminator != NULL && terminator->should_exit_termination())) { +- _offered_termination --; +- _blocker->unlock(); +- return false; +- } +- } +-} +- +-#if TASKQUEUE_STATS +-void ShenandoahObjToScanQueueSet::print_taskqueue_stats_hdr(outputStream* const st) { +- st->print_raw_cr("GC Task Stats"); +- st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr(); +- st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); +-} +- +-void ShenandoahObjToScanQueueSet::print_taskqueue_stats() { +- if (! ShenandoahLogTrace) { +- return; +- } +- ResourceMark rm; +- outputStream* st = gclog_or_tty; +- print_taskqueue_stats_hdr(st); +- +- TaskQueueStats totals; +- const uint n = size(); +- for (uint i = 0; i < n; ++i) { +- st->print(UINT32_FORMAT_W(3), i); +- queue(i)->stats.print(st); +- st->cr(); +- totals += queue(i)->stats; +- } +- st->print("tot "); totals.print(st); st->cr(); +- DEBUG_ONLY(totals.verify()); +-} +- +-void ShenandoahObjToScanQueueSet::reset_taskqueue_stats() { +- const uint n = size(); +- for (uint i = 0; i < n; ++i) { +- queue(i)->stats.reset(); +- } +-} +-#endif // TASKQUEUE_STATS +- +-bool ShenandoahTaskTerminator::do_spin_master_work(ShenandoahTerminatorTerminator* terminator) { +- uint yield_count = 0; +- // Number of hard spin loops done since last yield +- uint hard_spin_count = 0; +- // Number of iterations in the hard spin loop. 
+- uint hard_spin_limit = WorkStealingHardSpins; +- +- // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done. +- // If it is greater than 0, then start with a small number +- // of spins and increase number with each turn at spinning until +- // the count of hard spins exceeds WorkStealingSpinToYieldRatio. +- // Then do a yield() call and start spinning afresh. +- if (WorkStealingSpinToYieldRatio > 0) { +- hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio; +- hard_spin_limit = MAX2(hard_spin_limit, 1U); +- } +- // Remember the initial spin limit. +- uint hard_spin_start = hard_spin_limit; +- +- // Loop waiting for all threads to offer termination or +- // more work. +- while (true) { +- // Look for more work. +- // Periodically sleep() instead of yield() to give threads +- // waiting on the cores the chance to grab this code +- if (yield_count <= WorkStealingYieldsBeforeSleep) { +- // Do a yield or hardspin. For purposes of deciding whether +- // to sleep, count this as a yield. +- yield_count++; +- +- // Periodically call yield() instead spinning +- // After WorkStealingSpinToYieldRatio spins, do a yield() call +- // and reset the counts and starting limit. +- if (hard_spin_count > WorkStealingSpinToYieldRatio) { +- yield(); +- hard_spin_count = 0; +- hard_spin_limit = hard_spin_start; +-#ifdef TRACESPINNING +- _total_yields++; +-#endif +- } else { +- // Hard spin this time +- // Increase the hard spinning period but only up to a limit. 
+- hard_spin_limit = MIN2(2*hard_spin_limit, +- (uint) WorkStealingHardSpins); +- for (uint j = 0; j < hard_spin_limit; j++) { +- SpinPause(); +- } +- hard_spin_count++; +-#ifdef TRACESPINNING +- _total_spins++; +-#endif +- } +- } else { +- log_develop_trace(gc, task)("ShenanddoahTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields", +- p2i(Thread::current()), yield_count); +- yield_count = 0; +- +- MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag); // no safepoint check +- _spin_master = NULL; +- locker.wait(Mutex::_no_safepoint_check_flag, WorkStealingSleepMillis); +- if (_spin_master == NULL) { +- _spin_master = Thread::current(); +- } else { +- return false; +- } +- } +- +-#ifdef TRACESPINNING +- _total_peeks++; +-#endif +- size_t tasks = tasks_in_queue_set(); +- if (tasks > 0 || (terminator != NULL && terminator->should_exit_termination())) { +- MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag); // no safepoint check +- +- if ((int) tasks >= _offered_termination - 1) { +- locker.notify_all(); +- } else { +- for (; tasks > 1; tasks --) { +- locker.notify(); +- } +- } +- _spin_master = NULL; +- return false; +- } else if (_offered_termination == _n_threads) { +- return true; +- } +- } +-} +- +-bool ShenandoahTerminatorTerminator::should_exit_termination() { +- return _heap->cancelled_gc(); +-} +- +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,420 +0,0 @@ +-/* +- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahPadding.hpp" +-#include "memory/padded.hpp" +-#include "utilities/taskqueue.hpp" +-#include "runtime/mutex.hpp" +-#include "utilities/debug.hpp" +- +-class ShenandoahHeap; +-class Thread; +- +-template +-class BufferedOverflowTaskQueue: public OverflowTaskQueue +-{ +-public: +- typedef OverflowTaskQueue taskqueue_t; +- +- BufferedOverflowTaskQueue() : _buf_empty(true) {}; +- +- TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;) +- +- // Push task t into the queue. Returns true on success. +- inline bool push(E t); +- +- // Attempt to pop from the queue. Returns true on success. 
+- inline bool pop(E &t); +- +- inline void clear(); +- +- inline bool is_empty() const { +- return _buf_empty && taskqueue_t::is_empty(); +- } +- +-private: +- bool _buf_empty; +- E _elem; +-}; +- +-#ifdef _MSC_VER +-#pragma warning(push) +-// warning C4522: multiple assignment operators specified +-#pragma warning(disable:4522) +-#endif +- +-// ShenandoahMarkTask +-// +-// Encodes both regular oops, and the array oops plus chunking data for parallel array processing. +-// The design goal is to make the regular oop ops very fast, because that would be the prevailing +-// case. On the other hand, it should not block parallel array processing from efficiently dividing +-// the array work. +-// +-// The idea is to steal the bits from the 64-bit oop to encode array data, if needed. For the +-// proper divide-and-conquer strategies, we want to encode the "blocking" data. It turns out, the +-// most efficient way to do this is to encode the array block as (chunk * 2^pow), where it is assumed +-// that the block has the size of 2^pow. This requires for pow to have only 5 bits (2^32) to encode +-// all possible arrays. +-// +-// |---------oop---------|-pow-|--chunk---| +-// 0 49 54 64 +-// +-// By definition, chunk == 0 means "no chunk", i.e. chunking starts from 1. +-// +-// This encoding gives a few interesting benefits: +-// +-// a) Encoding/decoding regular oops is very simple, because the upper bits are zero in that task: +-// +-// |---------oop---------|00000|0000000000| // no chunk data +-// +-// This helps the most ubiquitous path. The initialization amounts to putting the oop into the word +-// with zero padding. Testing for "chunkedness" is testing for zero with chunk mask. +-// +-// b) Splitting tasks for divide-and-conquer is possible. Suppose we have chunk that covers +-// interval [ (C-1)*2^P; C*2^P ). 
We can then split it into two chunks: +-// <2*C - 1, P-1>, that covers interval [ (2*C - 2)*2^(P-1); (2*C - 1)*2^(P-1) ) +-// <2*C, P-1>, that covers interval [ (2*C - 1)*2^(P-1); 2*C*2^(P-1) ) +-// +-// Observe that the union of these two intervals is: +-// [ (2*C - 2)*2^(P-1); 2*C*2^(P-1) ) +-// +-// ...which is the original interval: +-// [ (C-1)*2^P; C*2^P ) +-// +-// c) The divide-and-conquer strategy could even start with chunk <1, round-log2-len(arr)>, and split +-// down in the parallel threads, which alleviates the upfront (serial) splitting costs. +-// +-// Encoding limitations caused by current bitscales mean: +-// 10 bits for chunk: max 1024 blocks per array +-// 5 bits for power: max 2^32 array +-// 49 bits for oop: max 512 TB of addressable space +-// +-// Stealing bits from oop trims down the addressable space. Stealing too few bits for chunk ID limits +-// potential parallelism. Stealing too few bits for pow limits the maximum array size that can be handled. +-// In future, these might be rebalanced to favor one degree of freedom against another. For example, +-// if/when Arrays 2.0 bring 2^64-sized arrays, we might need to steal another bit for power. We could regain +-// some bits back if chunks are counted in ObjArrayMarkingStride units. +-// +-// There is also a fallback version that uses plain fields, when we don't have enough space to steal the +-// bits from the native pointer. It is useful to debug the optimized version. +-// +- +-#ifdef _MSC_VER +-#pragma warning(push) +-// warning C4522: multiple assignment operators specified +-#pragma warning( disable:4522 ) +-#endif +- +-#ifdef _LP64 +-#define SHENANDOAH_OPTIMIZED_MARKTASK 1 +-#else +-#define SHENANDOAH_OPTIMIZED_MARKTASK 0 +-#endif +- +-#if SHENANDOAH_OPTIMIZED_MARKTASK +-class ShenandoahMarkTask +-{ +-private: +- // Everything is encoded into this field... 
+- uintptr_t _obj; +- +- // ...with these: +- static const uint8_t chunk_bits = 10; +- static const uint8_t pow_bits = 5; +- static const uint8_t oop_bits = sizeof(uintptr_t)*8 - chunk_bits - pow_bits; +- +- static const uint8_t oop_shift = 0; +- static const uint8_t pow_shift = oop_bits; +- static const uint8_t chunk_shift = oop_bits + pow_bits; +- +- static const uintptr_t oop_extract_mask = right_n_bits(oop_bits); +- static const uintptr_t chunk_pow_extract_mask = ~right_n_bits(oop_bits); +- +- static const int chunk_range_mask = right_n_bits(chunk_bits); +- static const int pow_range_mask = right_n_bits(pow_bits); +- +- inline oop decode_oop(uintptr_t val) const { +- STATIC_ASSERT(oop_shift == 0); +- return cast_to_oop(val & oop_extract_mask); +- } +- +- inline bool decode_not_chunked(uintptr_t val) const { +- // No need to shift for a comparison to zero +- return (val & chunk_pow_extract_mask) == 0; +- } +- +- inline int decode_chunk(uintptr_t val) const { +- return (int) ((val >> chunk_shift) & chunk_range_mask); +- } +- +- inline int decode_pow(uintptr_t val) const { +- return (int) ((val >> pow_shift) & pow_range_mask); +- } +- +- inline uintptr_t encode_oop(oop obj) const { +- STATIC_ASSERT(oop_shift == 0); +- return cast_from_oop(obj); +- } +- +- inline uintptr_t encode_chunk(int chunk) const { +- return ((uintptr_t) chunk) << chunk_shift; +- } +- +- inline uintptr_t encode_pow(int pow) const { +- return ((uintptr_t) pow) << pow_shift; +- } +- +-public: +- ShenandoahMarkTask(oop o = NULL) { +- uintptr_t enc = encode_oop(o); +- assert(decode_oop(enc) == o, err_msg("oop encoding should work: " PTR_FORMAT, p2i(o))); +- assert(decode_not_chunked(enc), "task should not be chunked"); +- _obj = enc; +- } +- +- ShenandoahMarkTask(oop o, int chunk, int pow) { +- uintptr_t enc_oop = encode_oop(o); +- uintptr_t enc_chunk = encode_chunk(chunk); +- uintptr_t enc_pow = encode_pow(pow); +- uintptr_t enc = enc_oop | enc_chunk | enc_pow; +- assert(decode_oop(enc) == o, 
err_msg("oop encoding should work: " PTR_FORMAT, p2i(o))); +- assert(decode_chunk(enc) == chunk, err_msg("chunk encoding should work: %d", chunk)); +- assert(decode_pow(enc) == pow, err_msg("pow encoding should work: %d", pow)); +- assert(!decode_not_chunked(enc), "task should be chunked"); +- _obj = enc; +- } +- +- ShenandoahMarkTask(const ShenandoahMarkTask& t): _obj(t._obj) { } +- +- ShenandoahMarkTask& operator =(const ShenandoahMarkTask& t) { +- _obj = t._obj; +- return *this; +- } +- +- volatile ShenandoahMarkTask& +- operator =(const volatile ShenandoahMarkTask& t) volatile { +- (void) const_cast(_obj = t._obj); +- return *this; +- } +- +-public: +- inline oop obj() const { return decode_oop(_obj); } +- inline int chunk() const { return decode_chunk(_obj); } +- inline int pow() const { return decode_pow(_obj); } +- +- inline bool is_not_chunked() const { return decode_not_chunked(_obj); } +- +- DEBUG_ONLY(bool is_valid() const;) // Tasks to be pushed/popped must be valid. +- +- static uintptr_t max_addressable() { +- return nth_bit(oop_bits); +- } +- +- static int chunk_size() { +- return nth_bit(chunk_bits); +- } +-}; +-#else +-class ShenandoahMarkTask +-{ +-private: +- static const uint8_t chunk_bits = 10; +- static const uint8_t pow_bits = 5; +- +- static const int chunk_max = nth_bit(chunk_bits) - 1; +- static const int pow_max = nth_bit(pow_bits) - 1; +- +- oop _obj; +- int _chunk; +- int _pow; +- +-public: +- ShenandoahMarkTask(oop o = NULL, int chunk = 0, int pow = 0): +- _obj(o), _chunk(chunk), _pow(pow) { +- assert(0 <= chunk && chunk < chunk_max, err_msg("chunk is sane: %d", chunk)); +- assert(0 <= pow && pow < pow_max, err_msg("pow is sane: %d", pow)); +- } +- +- ShenandoahMarkTask(const ShenandoahMarkTask& t): _obj(t._obj), _chunk(t._chunk), _pow(t._pow) { } +- +- ShenandoahMarkTask& operator =(const ShenandoahMarkTask& t) { +- _obj = t._obj; +- _chunk = t._chunk; +- _pow = t._pow; +- return *this; +- } +- +- volatile ShenandoahMarkTask& +- 
operator =(const volatile ShenandoahMarkTask& t) volatile { +- (void)const_cast(_obj = t._obj); +- _chunk = t._chunk; +- _pow = t._pow; +- return *this; +- } +- +- inline oop obj() const { return _obj; } +- inline int chunk() const { return _chunk; } +- inline int pow() const { return _pow; } +- inline bool is_not_chunked() const { return _chunk == 0; } +- +- DEBUG_ONLY(bool is_valid() const;) // Tasks to be pushed/popped must be valid. +- +- static size_t max_addressable() { +- return sizeof(oop); +- } +- +- static int chunk_size() { +- return nth_bit(chunk_bits); +- } +-}; +-#endif // SHENANDOAH_OPTIMIZED_MARKTASK +- +-#ifdef _MSC_VER +-#pragma warning(pop) +-#endif +- +-typedef BufferedOverflowTaskQueue ShenandoahBufferedOverflowTaskQueue; +-typedef Padded ShenandoahObjToScanQueue; +- +-template +-class ParallelClaimableQueueSet: public GenericTaskQueueSet { +-private: +- shenandoah_padding(0); +- volatile jint _claimed_index; +- shenandoah_padding(1); +- +- debug_only(uint _reserved; ) +- +-public: +- using GenericTaskQueueSet::size; +- +-public: +- ParallelClaimableQueueSet(int n) : GenericTaskQueueSet(n), _claimed_index(0) { +- debug_only(_reserved = 0; ) +- } +- +- void clear_claimed() { _claimed_index = 0; } +- T* claim_next(); +- +- // reserve queues that not for parallel claiming +- void reserve(uint n) { +- assert(n <= size(), "Sanity"); +- _claimed_index = (jint)n; +- debug_only(_reserved = n;) +- } +- +- debug_only(uint get_reserved() const { return (uint)_reserved; }) +-}; +- +-template +-T* ParallelClaimableQueueSet::claim_next() { +- jint size = (jint)GenericTaskQueueSet::size(); +- +- if (_claimed_index >= size) { +- return NULL; +- } +- +- jint index = Atomic::add(1, &_claimed_index); +- +- if (index <= size) { +- return GenericTaskQueueSet::queue((uint)index - 1); +- } else { +- return NULL; +- } +-} +- +-class ShenandoahObjToScanQueueSet: public ParallelClaimableQueueSet { +-public: +- ShenandoahObjToScanQueueSet(int n) : 
ParallelClaimableQueueSet(n) {} +- +- bool is_empty(); +- void clear(); +- +-#if TASKQUEUE_STATS +- static void print_taskqueue_stats_hdr(outputStream* const st); +- void print_taskqueue_stats(); +- void reset_taskqueue_stats(); +-#endif // TASKQUEUE_STATS +-}; +- +-class ShenandoahTerminatorTerminator : public TerminatorTerminator { +-private: +- ShenandoahHeap* const _heap; +-public: +- ShenandoahTerminatorTerminator(ShenandoahHeap* const heap) : _heap(heap) { } +- // return true, terminates immediately, even if there's remaining work left +- virtual bool should_exit_termination(); +-}; +- +-/* +- * This is an enhanced implementation of Google's work stealing +- * protocol, which is described in the paper: +- * Understanding and improving JVM GC work stealing at the data center scale +- * (http://dl.acm.org/citation.cfm?id=2926706) +- * +- * Instead of a dedicated spin-master, our implementation will let spin-master to relinquish +- * the role before it goes to sleep/wait, so allows newly arrived thread to compete for the role. +- * The intention of above enhancement, is to reduce spin-master's latency on detecting new tasks +- * for stealing and termination condition. 
+- */ +- +-class ShenandoahTaskTerminator: public ParallelTaskTerminator { +-private: +- Monitor* _blocker; +- Thread* _spin_master; +- +-public: +- ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) : +- ParallelTaskTerminator(n_threads, queue_set), _spin_master(NULL) { +- _blocker = new Monitor(Mutex::leaf, "ShenandoahTaskTerminator", false); +- } +- +- ~ShenandoahTaskTerminator() { +- assert(_blocker != NULL, "Can not be NULL"); +- delete _blocker; +- } +- +- bool offer_termination(ShenandoahTerminatorTerminator* terminator); +- bool offer_termination() { return offer_termination((ShenandoahTerminatorTerminator*)NULL); } +- +-private: +- bool offer_termination(TerminatorTerminator* terminator) { +- ShouldNotReachHere(); +- return false; +- } +- +-private: +- size_t tasks_in_queue_set() { return _queue_set->tasks(); } +- +- /* +- * Perform spin-master task. +- * return true if termination condition is detected +- * otherwise, return false +- */ +- bool do_spin_master_work(ShenandoahTerminatorTerminator* terminator); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,66 +0,0 @@ +-/* +- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "utilities/stack.inline.hpp" +- +-template +-bool BufferedOverflowTaskQueue::pop(E &t) { +- if (!_buf_empty) { +- t = _elem; +- _buf_empty = true; +- return true; +- } +- +- if (taskqueue_t::pop_local(t)) { +- return true; +- } +- +- return taskqueue_t::pop_overflow(t); +-} +- +-template +-inline bool BufferedOverflowTaskQueue::push(E t) { +- if (_buf_empty) { +- _elem = t; +- _buf_empty = false; +- } else { +- bool pushed = taskqueue_t::push(_elem); +- assert(pushed, "overflow queue should always succeed pushing"); +- _elem = t; +- } +- return true; +-} +- +-template +-void BufferedOverflowTaskQueue::clear() { +- _buf_empty = true; +- taskqueue_t::set_empty(); +- taskqueue_t::overflow_stack()->clear(); +-} +- +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.cpp 
2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,154 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "gc_implementation/shenandoah/heuristics/shenandoahHeuristics.hpp" +-#include "gc_interface/gcCause.hpp" +-#include "gc_implementation/shared/gcTimer.hpp" +-#include "gc_implementation/shared/gcWhen.hpp" +-#include "gc_implementation/shared/gcTrace.hpp" +-#include "utilities/debug.hpp" +- +-ShenandoahPhaseTimings::Phase ShenandoahGCPhase::_current_phase = ShenandoahPhaseTimings::_invalid_phase; +- +-ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) : +- _heap(ShenandoahHeap::heap()), +- _timer(ShenandoahHeap::heap()->gc_timer()), +- _tracer(ShenandoahHeap::heap()->tracer()) { +- +- assert(!ShenandoahGCPhase::is_current_phase_valid(), "No current GC phase"); +- +- _heap->set_gc_cause(cause); +- _timer->register_gc_start(); +- _tracer->report_gc_start(cause, _timer->gc_start()); +- _heap->trace_heap(GCWhen::BeforeGC, _tracer); +- +- _heap->shenandoah_policy()->record_cycle_start(); +- _heap->heuristics()->record_cycle_start(); +- _trace_cycle.initialize(false, cause, +- /* allMemoryPoolsAffected */ true, +- /* recordGCBeginTime = */ true, +- /* recordPreGCUsage = */ true, +- /* recordPeakUsage = */ true, +- /* recordPostGCUsage = */ true, +- /* recordAccumulatedGCTime = */ true, +- /* recordGCEndTime = */ true, +- /* countCollection = */ true +- ); +-} +- +-ShenandoahGCSession::~ShenandoahGCSession() { +- ShenandoahHeap::heap()->heuristics()->record_cycle_end(); +- _tracer->report_gc_end(_timer->gc_end(), _timer->time_partitions()); +- _timer->register_gc_end(); +- +- assert(!ShenandoahGCPhase::is_current_phase_valid(), "No current GC phase"); +- 
_heap->set_gc_cause(GCCause::_no_gc); +-} +- +-ShenandoahGCPauseMark::ShenandoahGCPauseMark(SvcGCMarker::reason_type type) : +- _heap(ShenandoahHeap::heap()), _svc_gc_mark(type), _is_gc_active_mark() { +- // FIXME: It seems that JMC throws away level 0 events, which are the Shenandoah +- // pause events. Create this pseudo level 0 event to push real events to level 1. +- _heap->gc_timer()->register_gc_phase_start("Shenandoah", Ticks::now()); +- _trace_pause.initialize(true, _heap->gc_cause(), +- /* allMemoryPoolsAffected */ true, +- /* recordGCBeginTime = */ true, +- /* recordPreGCUsage = */ false, +- /* recordPeakUsage = */ false, +- /* recordPostGCUsage = */ false, +- /* recordAccumulatedGCTime = */ true, +- /* recordGCEndTime = */ true, +- /* countCollection = */ true +- ); +-} +- +-ShenandoahGCPauseMark::~ShenandoahGCPauseMark() { +- _heap->gc_timer()->register_gc_phase_end(Ticks::now()); +-} +- +-ShenandoahGCPhase::ShenandoahGCPhase(const ShenandoahPhaseTimings::Phase phase) : +- _timings(ShenandoahHeap::heap()->phase_timings()), _phase(phase) { +- assert(Thread::current()->is_VM_thread() || +- Thread::current()->is_ConcurrentGC_thread(), +- "Must be set by these threads"); +- _parent_phase = _current_phase; +- _current_phase = phase; +- _start = os::elapsedTime(); +-} +- +-ShenandoahGCPhase::~ShenandoahGCPhase() { +- _timings->record_phase_time(_phase, os::elapsedTime() - _start); +- _current_phase = _parent_phase; +-} +- +-bool ShenandoahGCPhase::is_current_phase_valid() { +- return _current_phase < ShenandoahPhaseTimings::_num_phases; +-} +- +-ShenandoahGCWorkerPhase::ShenandoahGCWorkerPhase(const ShenandoahPhaseTimings::Phase phase) : +- _timings(ShenandoahHeap::heap()->phase_timings()), _phase(phase) { +- _timings->record_workers_start(_phase); +-} +- +-ShenandoahGCWorkerPhase::~ShenandoahGCWorkerPhase() { +- _timings->record_workers_end(_phase); +-} +- +-ShenandoahWorkerSession::ShenandoahWorkerSession(uint worker_id) : _worker_id(worker_id) { +- Thread* 
thr = Thread::current(); +- assert(thr->worker_id() == INVALID_WORKER_ID, "Already set"); +- thr->set_worker_id(worker_id); +-} +- +-ShenandoahConcurrentWorkerSession::~ShenandoahConcurrentWorkerSession() { +- // Do nothing. Per-worker events are not supported in this JDK. +-} +- +-ShenandoahParallelWorkerSession::~ShenandoahParallelWorkerSession() { +- // Do nothing. Per-worker events are not supported in this JDK. +-} +- +-ShenandoahWorkerSession::~ShenandoahWorkerSession() { +-#ifdef ASSERT +- Thread* thr = Thread::current(); +- assert(thr->worker_id() != INVALID_WORKER_ID, "Must be set"); +- thr->set_worker_id(INVALID_WORKER_ID); +-#endif +-} +- +-size_t ShenandoahUtils::round_up_power_of_2(size_t value) { +- assert(value != 0, "Invalid value"); +- +- if (is_power_of_2(value)) { +- return value; +- } +- +- return (size_t)1 << (log2_intptr(value) + 1); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahUtils.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,154 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAHUTILS_HPP +-#define SHARE_VM_GC_SHENANDOAHUTILS_HPP +- +-#include "runtime/vmThread.hpp" +-#include "gc_interface/gcCause.hpp" +-#include "gc_implementation/shared/isGCActiveMark.hpp" +-#include "gc_implementation/shared/vmGCOperations.hpp" +-#include "memory/allocation.hpp" +-#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp" +- +-class GCTimer; +-class GCTracer; +- +-class ShenandoahGCSession : public StackObj { +-private: +- ShenandoahHeap* const _heap; +- GCTimer* const _timer; +- GCTracer* const _tracer; +- +- TraceMemoryManagerStats _trace_cycle; +-public: +- ShenandoahGCSession(GCCause::Cause cause); +- ~ShenandoahGCSession(); +-}; +- +-class ShenandoahGCPhase : public StackObj { +-private: +- static ShenandoahPhaseTimings::Phase _current_phase; +- +- ShenandoahPhaseTimings* const _timings; +- const ShenandoahPhaseTimings::Phase _phase; +- ShenandoahPhaseTimings::Phase _parent_phase; +- double _start; +- +-public: +- ShenandoahGCPhase(ShenandoahPhaseTimings::Phase phase); +- ~ShenandoahGCPhase(); +- +- static ShenandoahPhaseTimings::Phase current_phase() { return _current_phase; } +- +- static bool is_current_phase_valid(); +-}; +- +-class ShenandoahGCWorkerPhase : public StackObj { +-private: +- ShenandoahPhaseTimings* const _timings; +- const ShenandoahPhaseTimings::Phase _phase; +-public: +- ShenandoahGCWorkerPhase(ShenandoahPhaseTimings::Phase phase); +- ~ShenandoahGCWorkerPhase(); +-}; +- +-// Aggregates all the things that should happen before/after the pause. 
+-class ShenandoahGCPauseMark : public StackObj { +-private: +- ShenandoahHeap* const _heap; +- const SvcGCMarker _svc_gc_mark; +- const IsGCActiveMark _is_gc_active_mark; +- TraceMemoryManagerStats _trace_pause; +-public: +- ShenandoahGCPauseMark(SvcGCMarker::reason_type type); +- ~ShenandoahGCPauseMark(); +-}; +- +-class ShenandoahSafepoint : public AllStatic { +-public: +- // Check if Shenandoah GC safepoint is in progress. This is nominally +- // equivalent to calling SafepointSynchronize::is_at_safepoint(), but +- // it also checks the Shenandoah specifics, when it can. +- static inline bool is_at_shenandoah_safepoint() { +- if (!SafepointSynchronize::is_at_safepoint()) return false; +- +- Thread* const thr = Thread::current(); +- // Shenandoah GC specific safepoints are scheduled by control thread. +- // So if we are enter here from control thread, then we are definitely not +- // at Shenandoah safepoint, but at something else. +- if (thr == ShenandoahHeap::heap()->control_thread()) return false; +- +- // This is not VM thread, cannot see what VM thread is doing, +- // so pretend this is a proper Shenandoah safepoint +- if (!thr->is_VM_thread()) return true; +- +- // Otherwise check we are at proper operation type +- VM_Operation* vm_op = VMThread::vm_operation(); +- if (vm_op == NULL) return false; +- +- VM_Operation::VMOp_Type type = vm_op->type(); +- return type == VM_Operation::VMOp_ShenandoahInitMark || +- type == VM_Operation::VMOp_ShenandoahFinalMarkStartEvac || +- type == VM_Operation::VMOp_ShenandoahInitUpdateRefs || +- type == VM_Operation::VMOp_ShenandoahFinalUpdateRefs || +- type == VM_Operation::VMOp_ShenandoahFullGC || +- type == VM_Operation::VMOp_ShenandoahDegeneratedGC; +- } +-}; +- +-class ShenandoahWorkerSession : public StackObj { +- static const uint INVALID_WORKER_ID = uint(-1); +-protected: +- uint _worker_id; +- +- ShenandoahWorkerSession(uint worker_id); +- ~ShenandoahWorkerSession(); +- +-public: +- static inline uint worker_id() { 
+- Thread* thr = Thread::current(); +- uint id = thr->worker_id(); +- assert(id != INVALID_WORKER_ID, "Worker session has not been created"); +- return id; +- } +-}; +- +-class ShenandoahConcurrentWorkerSession : public ShenandoahWorkerSession { +-public: +- ShenandoahConcurrentWorkerSession(uint worker_id) : ShenandoahWorkerSession(worker_id) { } +- ~ShenandoahConcurrentWorkerSession(); +-}; +- +-class ShenandoahParallelWorkerSession : public ShenandoahWorkerSession { +-public: +- ShenandoahParallelWorkerSession(uint worker_id) : ShenandoahWorkerSession(worker_id) { } +- ~ShenandoahParallelWorkerSession(); +-}; +- +-class ShenandoahUtils { +-public: +- static size_t round_up_power_of_2(size_t value); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAHUTILS_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,962 +0,0 @@ +-/* +- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahAsserts.hpp" +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +-#include "gc_implementation/shenandoah/shenandoahVerifier.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "memory/allocation.hpp" +-#include "memory/resourceArea.hpp" +- +-// Avoid name collision on verify_oop (defined in macroAssembler_arm.hpp) +-#ifdef verify_oop +-#undef verify_oop +-#endif +- +-class ShenandoahVerifyOopClosure : public ExtendedOopClosure { +-private: +- const char* _phase; +- ShenandoahVerifier::VerifyOptions _options; +- ShenandoahVerifierStack* _stack; +- ShenandoahHeap* _heap; +- MarkBitMap* _map; +- ShenandoahLivenessData* _ld; +- void* _interior_loc; +- oop _loc; +- +-public: +- ShenandoahVerifyOopClosure(ShenandoahVerifierStack* stack, MarkBitMap* map, ShenandoahLivenessData* ld, +- const char* phase, ShenandoahVerifier::VerifyOptions options) : +- _phase(phase), +- _options(options), +- _stack(stack), +- _heap(ShenandoahHeap::heap()), +- _map(map), +- _ld(ld), +- _interior_loc(NULL), +- _loc(NULL) { } +- +-private: +- void verify(ShenandoahAsserts::SafeLevel level, oop obj, bool test, const char* label) { +- if (!test) { +- ShenandoahAsserts::print_failure(level, 
obj, _interior_loc, _loc, _phase, label, __FILE__, __LINE__); +- } +- } +- +- template +- void do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- +- // Single threaded verification can use faster non-atomic stack and bitmap +- // methods. +- // +- // For performance reasons, only fully verify non-marked field values. +- // We are here when the host object for *p is already marked. +- +- HeapWord* addr = (HeapWord*) obj; +- if (_map->parMark(addr)) { +- verify_oop_at(p, obj); +- _stack->push(ShenandoahVerifierTask(obj)); +- } +- } +- } +- +- void verify_oop(oop obj) { +- // Perform consistency checks with gradually decreasing safety level. This guarantees +- // that failure report would not try to touch something that was not yet verified to be +- // safe to process. +- +- verify(ShenandoahAsserts::_safe_unknown, obj, _heap->is_in(obj), +- "oop must be in heap"); +- verify(ShenandoahAsserts::_safe_unknown, obj, check_obj_alignment(obj), +- "oop must be aligned"); +- +- ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj); +- Klass* obj_klass = obj->klass_or_null(); +- +- // Verify that obj is not in dead space: +- { +- // Do this before touching obj->size() +- verify(ShenandoahAsserts::_safe_unknown, obj, obj_klass != NULL, +- "Object klass pointer should not be NULL"); +- verify(ShenandoahAsserts::_safe_unknown, obj, Metaspace::contains(obj_klass), +- "Object klass pointer must go to metaspace"); +- +- HeapWord *obj_addr = (HeapWord *) obj; +- verify(ShenandoahAsserts::_safe_unknown, obj, obj_addr < obj_reg->top(), +- "Object start should be within the region"); +- +- if (!obj_reg->is_humongous()) { +- verify(ShenandoahAsserts::_safe_unknown, obj, (obj_addr + obj->size()) <= obj_reg->top(), +- "Object end should be within the region"); +- } else { +- size_t humongous_start = obj_reg->index(); +- size_t humongous_end = humongous_start + (obj->size() >> 
ShenandoahHeapRegion::region_size_words_shift()); +- for (size_t idx = humongous_start + 1; idx < humongous_end; idx++) { +- verify(ShenandoahAsserts::_safe_unknown, obj, _heap->get_region(idx)->is_humongous_continuation(), +- "Humongous object is in continuation that fits it"); +- } +- } +- +- // ------------ obj is safe at this point -------------- +- +- verify(ShenandoahAsserts::_safe_oop, obj, obj_reg->is_active(), +- "Object should be in active region"); +- +- switch (_options._verify_liveness) { +- case ShenandoahVerifier::_verify_liveness_disable: +- // skip +- break; +- case ShenandoahVerifier::_verify_liveness_complete: +- Atomic::add((uint) obj->size(), &_ld[obj_reg->index()]); +- // fallthrough for fast failure for un-live regions: +- case ShenandoahVerifier::_verify_liveness_conservative: +- verify(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(), +- "Object must belong to region with live data"); +- break; +- default: +- assert(false, "Unhandled liveness verification"); +- } +- } +- +- oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); +- +- ShenandoahHeapRegion* fwd_reg = NULL; +- +- if (obj != fwd) { +- verify(ShenandoahAsserts::_safe_oop, obj, _heap->is_in(fwd), +- "Forwardee must be in heap"); +- verify(ShenandoahAsserts::_safe_oop, obj, !oopDesc::is_null(fwd), +- "Forwardee is set"); +- verify(ShenandoahAsserts::_safe_oop, obj, check_obj_alignment(fwd), +- "Forwardee must be aligned"); +- +- // Do this before touching fwd->size() +- Klass* fwd_klass = fwd->klass_or_null(); +- verify(ShenandoahAsserts::_safe_oop, obj, fwd_klass != NULL, +- "Forwardee klass pointer should not be NULL"); +- verify(ShenandoahAsserts::_safe_oop, obj, Metaspace::contains(fwd_klass), +- "Forwardee klass pointer must go to metaspace"); +- verify(ShenandoahAsserts::_safe_oop, obj, obj_klass == fwd_klass, +- "Forwardee klass pointer must go to metaspace"); +- +- fwd_reg = _heap->heap_region_containing(fwd); +- +- // Verify that forwardee is not in 
the dead space: +- verify(ShenandoahAsserts::_safe_oop, obj, !fwd_reg->is_humongous(), +- "Should have no humongous forwardees"); +- +- HeapWord *fwd_addr = (HeapWord *) fwd; +- verify(ShenandoahAsserts::_safe_oop, obj, fwd_addr < fwd_reg->top(), +- "Forwardee start should be within the region"); +- verify(ShenandoahAsserts::_safe_oop, obj, (fwd_addr + fwd->size()) <= fwd_reg->top(), +- "Forwardee end should be within the region"); +- +- oop fwd2 = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); +- verify(ShenandoahAsserts::_safe_oop, obj, fwd == fwd2, +- "Double forwarding"); +- } else { +- fwd_reg = obj_reg; +- } +- +- // ------------ obj and fwd are safe at this point -------------- +- +- switch (_options._verify_marked) { +- case ShenandoahVerifier::_verify_marked_disable: +- // skip +- break; +- case ShenandoahVerifier::_verify_marked_incomplete: +- verify(ShenandoahAsserts::_safe_all, obj, _heap->marking_context()->is_marked(obj), +- "Must be marked in incomplete bitmap"); +- case ShenandoahVerifier::_verify_marked_complete: +- verify(ShenandoahAsserts::_safe_all, obj, _heap->complete_marking_context()->is_marked(obj), +- "Must be marked in complete bitmap"); +- break; +- default: +- assert(false, "Unhandled mark verification"); +- } +- +- switch (_options._verify_forwarded) { +- case ShenandoahVerifier::_verify_forwarded_disable: +- // skip +- break; +- case ShenandoahVerifier::_verify_forwarded_none: { +- verify(ShenandoahAsserts::_safe_all, obj, obj == fwd, +- "Should not be forwarded"); +- break; +- } +- case ShenandoahVerifier::_verify_forwarded_allow: { +- if (obj != fwd) { +- verify(ShenandoahAsserts::_safe_all, obj, obj_reg != fwd_reg, +- "Forwardee should be in another region"); +- } +- break; +- } +- default: +- assert(false, "Unhandled forwarding verification"); +- } +- +- switch (_options._verify_cset) { +- case ShenandoahVerifier::_verify_cset_disable: +- // skip +- break; +- case ShenandoahVerifier::_verify_cset_none: +- 
verify(ShenandoahAsserts::_safe_all, obj, !_heap->in_collection_set(obj), +- "Should not have references to collection set"); +- break; +- case ShenandoahVerifier::_verify_cset_forwarded: +- if (_heap->in_collection_set(obj)) { +- verify(ShenandoahAsserts::_safe_all, obj, obj != fwd, +- "Object in collection set, should have forwardee"); +- } +- break; +- default: +- assert(false, "Unhandled cset verification"); +- } +- } +- +-public: +- /** +- * Verify object with known interior reference. +- * @param p interior reference where the object is referenced from; can be off-heap +- * @param obj verified object +- */ +- template +- void verify_oop_at(T* p, oop obj) { +- _interior_loc = p; +- verify_oop(obj); +- _interior_loc = NULL; +- } +- +- /** +- * Verify object without known interior reference. +- * Useful when picking up the object at known offset in heap, +- * but without knowing what objects reference it. +- * @param obj verified object +- */ +- void verify_oop_standalone(oop obj) { +- _interior_loc = NULL; +- verify_oop(obj); +- _interior_loc = NULL; +- } +- +- /** +- * Verify oop fields from this object. +- * @param obj host object for verified fields +- */ +- void verify_oops_from(oop obj) { +- _loc = obj; +- obj->oop_iterate(this); +- _loc = NULL; +- } +- +- void do_oop(oop* p) { do_oop_work(p); } +- void do_oop(narrowOop* p) { do_oop_work(p); } +-}; +- +-class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure { +-private: +- size_t _used, _committed, _garbage; +-public: +- ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0) {}; +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- _used += r->used(); +- _garbage += r->garbage(); +- _committed += r->is_committed() ? 
ShenandoahHeapRegion::region_size_bytes() : 0; +- } +- +- size_t used() { return _used; } +- size_t committed() { return _committed; } +- size_t garbage() { return _garbage; } +-}; +- +-class ShenandoahVerifyHeapRegionClosure : public ShenandoahHeapRegionClosure { +-private: +- ShenandoahHeap* _heap; +- const char* _phase; +- ShenandoahVerifier::VerifyRegions _regions; +-public: +- ShenandoahVerifyHeapRegionClosure(const char* phase, ShenandoahVerifier::VerifyRegions regions) : +- _heap(ShenandoahHeap::heap()), +- _phase(phase), +- _regions(regions) {}; +- +- void print_failure(ShenandoahHeapRegion* r, const char* label) { +- ResourceMark rm; +- +- ShenandoahMessageBuffer msg("Shenandoah verification failed; %s: %s\n\n", _phase, label); +- +- stringStream ss; +- r->print_on(&ss); +- msg.append("%s", ss.as_string()); +- +- report_vm_error(__FILE__, __LINE__, msg.buffer()); +- } +- +- void verify(ShenandoahHeapRegion* r, bool test, const char* msg) { +- if (!test) { +- print_failure(r, msg); +- } +- } +- +- void heap_region_do(ShenandoahHeapRegion* r) { +- switch (_regions) { +- case ShenandoahVerifier::_verify_regions_disable: +- break; +- case ShenandoahVerifier::_verify_regions_notrash: +- verify(r, !r->is_trash(), +- "Should not have trash regions"); +- break; +- case ShenandoahVerifier::_verify_regions_nocset: +- verify(r, !r->is_cset(), +- "Should not have cset regions"); +- break; +- case ShenandoahVerifier::_verify_regions_notrash_nocset: +- verify(r, !r->is_trash(), +- "Should not have trash regions"); +- verify(r, !r->is_cset(), +- "Should not have cset regions"); +- break; +- default: +- ShouldNotReachHere(); +- } +- +- verify(r, r->capacity() == ShenandoahHeapRegion::region_size_bytes(), +- "Capacity should match region size"); +- +- verify(r, r->bottom() <= r->top(), +- "Region top should not be less than bottom"); +- +- verify(r, r->bottom() <= _heap->marking_context()->top_at_mark_start(r), +- "Region TAMS should not be less than bottom"); +- +- 
verify(r, _heap->marking_context()->top_at_mark_start(r) <= r->top(), +- "Complete TAMS should not be larger than top"); +- +- verify(r, r->get_live_data_bytes() <= r->capacity(), +- "Live data cannot be larger than capacity"); +- +- verify(r, r->garbage() <= r->capacity(), +- "Garbage cannot be larger than capacity"); +- +- verify(r, r->used() <= r->capacity(), +- "Used cannot be larger than capacity"); +- +- verify(r, r->get_shared_allocs() <= r->capacity(), +- "Shared alloc count should not be larger than capacity"); +- +- verify(r, r->get_tlab_allocs() <= r->capacity(), +- "TLAB alloc count should not be larger than capacity"); +- +- verify(r, r->get_gclab_allocs() <= r->capacity(), +- "GCLAB alloc count should not be larger than capacity"); +- +- verify(r, r->get_shared_allocs() + r->get_tlab_allocs() + r->get_gclab_allocs() == r->used(), +- "Accurate accounting: shared + TLAB + GCLAB = used"); +- +- verify(r, !r->is_empty() || !r->has_live(), +- "Empty regions should not have live data"); +- +- verify(r, r->is_cset() == _heap->collection_set()->is_in(r), +- "Transitional: region flags and collection set agree"); +- } +-}; +- +-class ShenandoahVerifierReachableTask : public AbstractGangTask { +-private: +- const char* _label; +- ShenandoahRootVerifier* _verifier; +- ShenandoahVerifier::VerifyOptions _options; +- ShenandoahHeap* _heap; +- ShenandoahLivenessData* _ld; +- MarkBitMap* _bitmap; +- volatile jlong _processed; +- +-public: +- ShenandoahVerifierReachableTask(MarkBitMap* bitmap, +- ShenandoahLivenessData* ld, +- ShenandoahRootVerifier* verifier, +- const char* label, +- ShenandoahVerifier::VerifyOptions options) : +- AbstractGangTask("Shenandoah Parallel Verifier Reachable Task"), +- _label(label), +- _verifier(verifier), +- _options(options), +- _heap(ShenandoahHeap::heap()), +- _ld(ld), +- _bitmap(bitmap), +- _processed(0) {}; +- +- size_t processed() { +- return (size_t) _processed; +- } +- +- virtual void work(uint worker_id) { +- ResourceMark rm; 
+- ShenandoahVerifierStack stack; +- +- // On level 2, we need to only check the roots once. +- // On level 3, we want to check the roots, and seed the local stack. +- // It is a lesser evil to accept multiple root scans at level 3, because +- // extended parallelism would buy us out. +- if (((ShenandoahVerifyLevel == 2) && (worker_id == 0)) +- || (ShenandoahVerifyLevel >= 3)) { +- ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, +- ShenandoahMessageBuffer("Shenandoah verification failed; %s, Roots", _label), +- _options); +- if (_heap->unload_classes()) { +- _verifier->strong_roots_do(&cl); +- } else { +- _verifier->roots_do(&cl); +- } +- } +- +- jlong processed = 0; +- +- if (ShenandoahVerifyLevel >= 3) { +- ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, +- ShenandoahMessageBuffer("Shenandoah verification failed; %s, Reachable", _label), +- _options); +- while (!stack.is_empty()) { +- processed++; +- ShenandoahVerifierTask task = stack.pop(); +- cl.verify_oops_from(task.obj()); +- } +- } +- +- Atomic::add(processed, &_processed); +- } +-}; +- +-class ShenandoahVerifierMarkedRegionTask : public AbstractGangTask { +-private: +- const char* _label; +- ShenandoahVerifier::VerifyOptions _options; +- ShenandoahHeap *_heap; +- ShenandoahLivenessData* _ld; +- MarkBitMap* _bitmap; +- volatile jint _claimed; +- volatile jlong _processed; +- +-public: +- ShenandoahVerifierMarkedRegionTask(MarkBitMap* bitmap, +- ShenandoahLivenessData* ld, +- const char* label, +- ShenandoahVerifier::VerifyOptions options) : +- AbstractGangTask("Shenandoah Parallel Verifier Marked Region"), +- _label(label), +- _options(options), +- _heap(ShenandoahHeap::heap()), +- _bitmap(bitmap), +- _ld(ld), +- _claimed(0), +- _processed(0) {}; +- +- size_t processed() { +- return (size_t) _processed; +- } +- +- virtual void work(uint worker_id) { +- ShenandoahVerifierStack stack; +- ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, +- ShenandoahMessageBuffer("Shenandoah verification failed; %s, 
Marked", _label), +- _options); +- assert((size_t)max_jint >= _heap->num_regions(), "Too many regions"); +- while (true) { +- size_t v = (size_t) (Atomic::add(1, &_claimed) - 1); +- if (v < _heap->num_regions()) { +- ShenandoahHeapRegion* r = _heap->get_region(v); +- if (!r->is_humongous() && !r->is_trash()) { +- work_regular(r, stack, cl); +- } else if (r->is_humongous_start()) { +- work_humongous(r, stack, cl); +- } +- } else { +- break; +- } +- } +- } +- +- virtual void work_humongous(ShenandoahHeapRegion *r, ShenandoahVerifierStack& stack, ShenandoahVerifyOopClosure& cl) { +- jlong processed = 0; +- HeapWord* obj = r->bottom(); +- if (_heap->complete_marking_context()->is_marked((oop)obj)) { +- verify_and_follow(obj, stack, cl, &processed); +- } +- Atomic::add(processed, &_processed); +- } +- +- virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) { +- jlong processed = 0; +- MarkBitMap* mark_bit_map = _heap->complete_marking_context()->mark_bit_map(); +- HeapWord* tams = _heap->complete_marking_context()->top_at_mark_start(r); +- +- // Bitmaps, before TAMS +- if (tams > r->bottom()) { +- HeapWord* start = r->bottom(); +- HeapWord* addr = mark_bit_map->getNextMarkedWordAddress(start, tams); +- +- while (addr < tams) { +- verify_and_follow(addr, stack, cl, &processed); +- addr += 1; +- if (addr < tams) { +- addr = mark_bit_map->getNextMarkedWordAddress(addr, tams); +- } +- } +- } +- +- // Size-based, after TAMS +- { +- HeapWord* limit = r->top(); +- HeapWord* addr = tams; +- +- while (addr < limit) { +- verify_and_follow(addr, stack, cl, &processed); +- addr += oop(addr)->size(); +- } +- } +- +- Atomic::add(processed, &_processed); +- } +- +- void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, jlong *processed) { +- if (!_bitmap->parMark(addr)) return; +- +- // Verify the object itself: +- oop obj = oop(addr); +- cl.verify_oop_standalone(obj); +- +- 
// Verify everything reachable from that object too, hopefully realizing +- // everything was already marked, and never touching further: +- cl.verify_oops_from(obj); +- (*processed)++; +- +- while (!stack.is_empty()) { +- ShenandoahVerifierTask task = stack.pop(); +- cl.verify_oops_from(task.obj()); +- (*processed)++; +- } +- } +-}; +- +-class VerifyThreadGCState : public ThreadClosure { +-private: +- const char* const _label; +- char const _expected; +- +-public: +- VerifyThreadGCState(const char* label, char expected) : _label(label), _expected(expected) {} +- void do_thread(Thread* t) { +- assert(t->is_Java_thread(), "sanity"); +- char actual = ((JavaThread*)t)->gc_state(); +- if (actual != _expected) { +- fatal(err_msg("%s: Thread %s: expected gc-state %d, actual %d", _label, t->name(), _expected, actual)); +- } +- } +-}; +- +-class ShenandoahGCStateResetter : public StackObj { +-private: +- ShenandoahHeap* const _heap; +- char _gc_state; +- +-public: +- ShenandoahGCStateResetter() : _heap(ShenandoahHeap::heap()) { +- _gc_state = _heap->gc_state(); +- _heap->_gc_state.clear(); +- } +- +- ~ShenandoahGCStateResetter() { +- _heap->_gc_state.set(_gc_state); +- assert(_heap->gc_state() == _gc_state, "Should be restored"); +- } +-}; +- +-void ShenandoahVerifier::verify_at_safepoint(const char *label, +- VerifyForwarded forwarded, VerifyMarked marked, +- VerifyCollectionSet cset, +- VerifyLiveness liveness, VerifyRegions regions, +- VerifyGCState gcstate) { +- guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens"); +- guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize"); +- +- // Avoid side-effect of changing workers' active thread count, but bypass concurrent/parallel protocol check +- ShenandoahPushWorkerScope verify_worker_scope(_heap->workers(), _heap->max_workers(), false /*bypass check*/); +- +- log_info(gc,start)("Verify %s, Level " INTX_FORMAT, label, 
ShenandoahVerifyLevel); +- +- // GC state checks +- { +- char expected = -1; +- bool enabled; +- switch (gcstate) { +- case _verify_gcstate_disable: +- enabled = false; +- break; +- case _verify_gcstate_forwarded: +- enabled = true; +- expected = ShenandoahHeap::HAS_FORWARDED; +- break; +- case _verify_gcstate_evacuation: +- enabled = true; +- expected = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION; +- break; +- case _verify_gcstate_stable: +- enabled = true; +- expected = ShenandoahHeap::STABLE; +- break; +- default: +- enabled = false; +- assert(false, "Unhandled gc-state verification"); +- } +- +- if (enabled) { +- char actual = _heap->gc_state(); +- if (actual != expected) { +- fatal(err_msg("%s: Global gc-state: expected %d, actual %d", label, expected, actual)); +- } +- +- VerifyThreadGCState vtgcs(label, expected); +- Threads::java_threads_do(&vtgcs); +- } +- } +- +- // Deactivate barriers temporarily: Verifier wants plain heap accesses +- ShenandoahGCStateResetter resetter; +- +- // Heap size checks +- { +- ShenandoahHeapLocker lock(_heap->lock()); +- +- ShenandoahCalculateRegionStatsClosure cl; +- _heap->heap_region_iterate(&cl); +- size_t heap_used = _heap->used(); +- guarantee(cl.used() == heap_used, +- err_msg("%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s", +- label, +- byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), +- byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used()))); +- +- size_t heap_committed = _heap->committed(); +- guarantee(cl.committed() == heap_committed, +- err_msg("%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s", +- label, +- byte_size_in_proper_unit(heap_committed), proper_unit_for_byte_size(heap_committed), +- byte_size_in_proper_unit(cl.committed()), proper_unit_for_byte_size(cl.committed()))); +- } +- +- // Internal heap region checks +- if 
(ShenandoahVerifyLevel >= 1) { +- ShenandoahVerifyHeapRegionClosure cl(label, regions); +- _heap->heap_region_iterate(&cl); +- } +- +- OrderAccess::fence(); +- _heap->make_parsable(false); +- +- // Allocate temporary bitmap for storing marking wavefront: +- _verification_bit_map->clear(); +- +- // Allocate temporary array for storing liveness data +- ShenandoahLivenessData* ld = NEW_C_HEAP_ARRAY(ShenandoahLivenessData, _heap->num_regions(), mtGC); +- Copy::fill_to_bytes((void*)ld, _heap->num_regions()*sizeof(ShenandoahLivenessData), 0); +- +- const VerifyOptions& options = ShenandoahVerifier::VerifyOptions(forwarded, marked, cset, liveness, regions, gcstate); +- +- // Steps 1-2. Scan root set to get initial reachable set. Finish walking the reachable heap. +- // This verifies what application can see, since it only cares about reachable objects. +- size_t count_reachable = 0; +- if (ShenandoahVerifyLevel >= 2) { +- ShenandoahRootVerifier verifier; +- +- ShenandoahVerifierReachableTask task(_verification_bit_map, ld, &verifier, label, options); +- _heap->workers()->run_task(&task); +- count_reachable = task.processed(); +- } +- +- // Step 3. Walk marked objects. Marked objects might be unreachable. This verifies what collector, +- // not the application, can see during the region scans. There is no reason to process the objects +- // that were already verified, e.g. those marked in verification bitmap. There is interaction with TAMS: +- // before TAMS, we verify the bitmaps, if available; after TAMS, we walk until the top(). 
It mimics +- // what marked_object_iterate is doing, without calling into that optimized (and possibly incorrect) +- // version +- +- size_t count_marked = 0; +- if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete) { +- guarantee(_heap->marking_context()->is_complete(), "Marking context should be complete"); +- ShenandoahVerifierMarkedRegionTask task(_verification_bit_map, ld, label, options); +- _heap->workers()->run_task(&task); +- count_marked = task.processed(); +- } else { +- guarantee(ShenandoahVerifyLevel < 4 || marked == _verify_marked_incomplete || marked == _verify_marked_disable, "Should be"); +- } +- +- // Step 4. Verify accumulated liveness data, if needed. Only reliable if verification level includes +- // marked objects. +- +- if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete && liveness == _verify_liveness_complete) { +- for (size_t i = 0; i < _heap->num_regions(); i++) { +- ShenandoahHeapRegion* r = _heap->get_region(i); +- +- jint verf_live = 0; +- if (r->is_humongous()) { +- // For humongous objects, test if start region is marked live, and if so, +- // all humongous regions in that chain have live data equal to their "used". 
+- jint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->index()]); +- if (start_live > 0) { +- verf_live = (jint)(r->used() / HeapWordSize); +- } +- } else { +- verf_live = OrderAccess::load_acquire(&ld[r->index()]); +- } +- +- size_t reg_live = r->get_live_data_words(); +- if (reg_live != (size_t)verf_live) { +- ResourceMark rm; +- stringStream ss; +- r->print_on(&ss); +- fatal(err_msg("%s: Live data should match: region-live = " SIZE_FORMAT ", verifier-live = " INT32_FORMAT "\n%s", +- label, reg_live, verf_live, ss.as_string())); +- } +- } +- } +- +- log_info(gc)("Verify %s, Level " INTX_FORMAT " (" SIZE_FORMAT " reachable, " SIZE_FORMAT " marked)", +- label, ShenandoahVerifyLevel, count_reachable, count_marked); +- +- FREE_C_HEAP_ARRAY(ShenandoahLivenessData, ld, mtGC); +-} +- +-void ShenandoahVerifier::verify_generic(VerifyOption vo) { +- verify_at_safepoint( +- "Generic Verification", +- _verify_forwarded_allow, // conservatively allow forwarded +- _verify_marked_disable, // do not verify marked: lots ot time wasted checking dead allocations +- _verify_cset_disable, // cset may be inconsistent +- _verify_liveness_disable, // no reliable liveness data +- _verify_regions_disable, // no reliable region data +- _verify_gcstate_disable // no data about gcstate +- ); +-} +- +-void ShenandoahVerifier::verify_before_concmark() { +- verify_at_safepoint( +- "Before Mark", +- _verify_forwarded_none, // UR should have fixed up +- _verify_marked_disable, // do not verify marked: lots ot time wasted checking dead allocations +- _verify_cset_none, // UR should have fixed this +- _verify_liveness_disable, // no reliable liveness data +- _verify_regions_notrash, // no trash regions +- _verify_gcstate_stable // there are no forwarded objects +- ); +-} +- +-void ShenandoahVerifier::verify_after_concmark() { +- verify_at_safepoint( +- "After Mark", +- _verify_forwarded_none, // no forwarded references +- _verify_marked_complete, // bitmaps as precise as we 
can get +- _verify_cset_none, // no references to cset anymore +- _verify_liveness_complete, // liveness data must be complete here +- _verify_regions_disable, // trash regions not yet recycled +- _verify_gcstate_stable // mark should have stabilized the heap +- ); +-} +- +-void ShenandoahVerifier::verify_before_evacuation() { +- verify_at_safepoint( +- "Before Evacuation", +- _verify_forwarded_none, // no forwarded references +- _verify_marked_complete, // walk over marked objects too +- _verify_cset_disable, // non-forwarded references to cset expected +- _verify_liveness_complete, // liveness data must be complete here +- _verify_regions_disable, // trash regions not yet recycled +- _verify_gcstate_stable // mark should have stabilized the heap +- ); +-} +- +-void ShenandoahVerifier::verify_during_evacuation() { +- verify_at_safepoint( +- "During Evacuation", +- _verify_forwarded_allow, // some forwarded references are allowed +- _verify_marked_disable, // walk only roots +- _verify_cset_disable, // some cset references are not forwarded yet +- _verify_liveness_disable, // liveness data might be already stale after pre-evacs +- _verify_regions_disable, // trash regions not yet recycled +- _verify_gcstate_evacuation // evacuation is in progress +- ); +-} +- +-void ShenandoahVerifier::verify_after_evacuation() { +- verify_at_safepoint( +- "After Evacuation", +- _verify_forwarded_allow, // objects are still forwarded +- _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well +- _verify_cset_forwarded, // all cset refs are fully forwarded +- _verify_liveness_disable, // no reliable liveness data anymore +- _verify_regions_notrash, // trash regions have been recycled already +- _verify_gcstate_forwarded // evacuation produced some forwarded objects +- ); +-} +- +-void ShenandoahVerifier::verify_before_updaterefs() { +- verify_at_safepoint( +- "Before Updating References", +- _verify_forwarded_allow, // forwarded references allowed +- 
_verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well +- _verify_cset_forwarded, // all cset refs are fully forwarded +- _verify_liveness_disable, // no reliable liveness data anymore +- _verify_regions_notrash, // trash regions have been recycled already +- _verify_gcstate_forwarded // evacuation should have produced some forwarded objects +- ); +-} +- +-void ShenandoahVerifier::verify_after_updaterefs() { +- verify_at_safepoint( +- "After Updating References", +- _verify_forwarded_none, // no forwarded references +- _verify_marked_complete, // bitmaps might be stale, but alloc-after-mark should be well +- _verify_cset_none, // no cset references, all updated +- _verify_liveness_disable, // no reliable liveness data anymore +- _verify_regions_nocset, // no cset regions, trash regions have appeared +- _verify_gcstate_stable // update refs had cleaned up forwarded objects +- ); +-} +- +-void ShenandoahVerifier::verify_before_fullgc() { +- verify_at_safepoint( +- "Before Full GC", +- _verify_forwarded_allow, // can have forwarded objects +- _verify_marked_disable, // do not verify marked: lots ot time wasted checking dead allocations +- _verify_cset_disable, // cset might be foobared +- _verify_liveness_disable, // no reliable liveness data anymore +- _verify_regions_disable, // no reliable region data here +- _verify_gcstate_disable // no reliable gcstate data +- ); +-} +- +-void ShenandoahVerifier::verify_after_fullgc() { +- verify_at_safepoint( +- "After Full GC", +- _verify_forwarded_none, // all objects are non-forwarded +- _verify_marked_complete, // all objects are marked in complete bitmap +- _verify_cset_none, // no cset references +- _verify_liveness_disable, // no reliable liveness data anymore +- _verify_regions_notrash_nocset, // no trash, no cset +- _verify_gcstate_stable // degenerated refs had cleaned up forwarded objects +- ); +-} +-void ShenandoahVerifier::verify_after_degenerated() { +- verify_at_safepoint( +- 
"After Degenerated GC", +- _verify_forwarded_none, // all objects are non-forwarded +- _verify_marked_complete, // all objects are marked in complete bitmap +- _verify_cset_none, // no cset references +- _verify_liveness_disable, // no reliable liveness data anymore +- _verify_regions_notrash_nocset, // no trash, no cset +- _verify_gcstate_stable // full gc cleaned up everything +- ); +-} +- +-class ShenandoahVerifyNoForwared : public OopClosure { +-private: +- template +- void do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); +- if (obj != fwd) { +- ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, +- "Verify Roots", "Should not be forwarded", __FILE__, __LINE__); +- } +- } +- } +- +-public: +- void do_oop(narrowOop* p) { do_oop_work(p); } +- void do_oop(oop* p) { do_oop_work(p); } +-}; +- +-class ShenandoahVerifyInToSpaceClosure : public OopClosure { +-private: +- template +- void do_oop_work(T* p) { +- T o = oopDesc::load_heap_oop(p); +- if (!oopDesc::is_null(o)) { +- oop obj = oopDesc::decode_heap_oop_not_null(o); +- ShenandoahHeap* heap = ShenandoahHeap::heap(); +- +- if (!heap->marking_context()->is_marked(obj)) { +- ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, +- "Verify Roots In To-Space", "Should be marked", __FILE__, __LINE__); +- } +- +- if (heap->in_collection_set(obj)) { +- ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, +- "Verify Roots In To-Space", "Should not be in collection set", __FILE__, __LINE__); +- } +- +- oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); +- if (obj != fwd) { +- ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, +- "Verify Roots In To-Space", "Should not be forwarded", __FILE__, __LINE__); +- } +- } +- } +- +-public: +- void 
do_oop(narrowOop* p) { do_oop_work(p); } +- void do_oop(oop* p) { do_oop_work(p); } +-}; +- +-void ShenandoahVerifier::verify_roots_in_to_space() { +- ShenandoahRootVerifier verifier; +- ShenandoahVerifyInToSpaceClosure cl; +- verifier.oops_do(&cl); +-} +- +-void ShenandoahVerifier::verify_roots_no_forwarded() { +- guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens"); +- ShenandoahRootVerifier verifier; +- ShenandoahVerifyNoForwared cl; +- verifier.oops_do(&cl); +-} +- +-void ShenandoahVerifier::verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types) { +- guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens"); +- ShenandoahRootVerifier verifier; +- verifier.excludes(types); +- ShenandoahVerifyNoForwared cl; +- verifier.oops_do(&cl); +-} +- +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVerifier.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,191 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP +- +-#include "gc_implementation/shared/markBitMap.hpp" +-#include "gc_implementation/shenandoah/shenandoahRootVerifier.hpp" +-#include "oops/oopsHierarchy.hpp" +-#include "memory/allocation.hpp" +-#include "utilities/stack.hpp" +- +-class ShenandoahHeap; +- +-class ShenandoahVerifierTask { +-public: +- ShenandoahVerifierTask(oop o = NULL, int idx = 0): _obj(o) { } +- ShenandoahVerifierTask(oop o, size_t idx): _obj(o) { } +- ShenandoahVerifierTask(const ShenandoahVerifierTask& t): _obj(t._obj) { } +- +- ShenandoahVerifierTask& operator =(const ShenandoahVerifierTask& t) { +- _obj = t._obj; +- return *this; +- } +- volatile ShenandoahVerifierTask& +- operator =(const volatile ShenandoahVerifierTask& t) volatile { +- (void)const_cast(_obj = t._obj); +- return *this; +- } +- +- inline oop obj() const { return _obj; } +- +-private: +- oop _obj; +-}; +- +-typedef Stack ShenandoahVerifierStack; +-typedef volatile jint ShenandoahLivenessData; +- +-class ShenandoahVerifier : public CHeapObj { +-private: +- ShenandoahHeap* _heap; +- MarkBitMap* _verification_bit_map; +-public: +- typedef enum { +- // Disable marked objects verification. +- _verify_marked_disable, +- +- // Objects should be marked in "next" bitmap. +- _verify_marked_incomplete, +- +- // Objects should be marked in "complete" bitmap. +- _verify_marked_complete +- } VerifyMarked; +- +- typedef enum { +- // Disable forwarded objects verification. 
+- _verify_forwarded_disable, +- +- // Objects should not have forwardees. +- _verify_forwarded_none, +- +- // Objects may have forwardees. +- _verify_forwarded_allow +- } VerifyForwarded; +- +- typedef enum { +- // Disable collection set verification. +- _verify_cset_disable, +- +- // Should have no references to cset. +- _verify_cset_none, +- +- // May have references to cset, all should be forwarded. +- // Note: Allowing non-forwarded references to cset is equivalent +- // to _verify_cset_disable. +- _verify_cset_forwarded +- } VerifyCollectionSet; +- +- typedef enum { +- // Disable liveness verification +- _verify_liveness_disable, +- +- // All objects should belong to live regions +- _verify_liveness_conservative, +- +- // All objects should belong to live regions, +- // and liveness data should be accurate +- _verify_liveness_complete +- } VerifyLiveness; +- +- typedef enum { +- // Disable region verification +- _verify_regions_disable, +- +- // No trash regions allowed +- _verify_regions_notrash, +- +- // No collection set regions allowed +- _verify_regions_nocset, +- +- // No trash and no cset regions allowed +- _verify_regions_notrash_nocset +- } VerifyRegions; +- +- typedef enum { +- // Disable gc-state verification +- _verify_gcstate_disable, +- +- // Nothing is in progress, no forwarded objects +- _verify_gcstate_stable, +- +- // Nothing is in progress, some objects are forwarded +- _verify_gcstate_forwarded, +- +- // Evacuation is in progress, some objects are forwarded +- _verify_gcstate_evacuation +- } VerifyGCState; +- +- struct VerifyOptions { +- VerifyForwarded _verify_forwarded; +- VerifyMarked _verify_marked; +- VerifyCollectionSet _verify_cset; +- VerifyLiveness _verify_liveness; +- VerifyRegions _verify_regions; +- VerifyGCState _verify_gcstate; +- +- VerifyOptions(VerifyForwarded verify_forwarded, +- VerifyMarked verify_marked, +- VerifyCollectionSet verify_collection_set, +- VerifyLiveness verify_liveness, +- VerifyRegions verify_regions, +- 
VerifyGCState verify_gcstate) : +- _verify_forwarded(verify_forwarded), _verify_marked(verify_marked), +- _verify_cset(verify_collection_set), +- _verify_liveness(verify_liveness), _verify_regions(verify_regions), +- _verify_gcstate(verify_gcstate) {} +- }; +- +-private: +- void verify_at_safepoint(const char *label, +- VerifyForwarded forwarded, +- VerifyMarked marked, +- VerifyCollectionSet cset, +- VerifyLiveness liveness, +- VerifyRegions regions, +- VerifyGCState gcstate); +- +-public: +- ShenandoahVerifier(ShenandoahHeap* heap, MarkBitMap* verification_bitmap) : +- _heap(heap), _verification_bit_map(verification_bitmap) {}; +- +- void verify_before_concmark(); +- void verify_after_concmark(); +- void verify_before_evacuation(); +- void verify_during_evacuation(); +- void verify_after_evacuation(); +- void verify_before_updaterefs(); +- void verify_after_updaterefs(); +- void verify_before_fullgc(); +- void verify_after_fullgc(); +- void verify_after_degenerated(); +- void verify_generic(VerifyOption option); +- +- // Roots should only contain to-space oops +- void verify_roots_in_to_space(); +- void verify_roots_no_forwarded(); +- void verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHVERIFIER_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,77 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp" +-#include "gc_implementation/shenandoah/shenandoahUtils.hpp" +- +-bool VM_ShenandoahReferenceOperation::doit_prologue() { +- if (Thread::current()->is_Java_thread()) { +- InstanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock); +- } else { +- ShenandoahHeap *sh = (ShenandoahHeap*) Universe::heap(); +- sh->acquire_pending_refs_lock(); +- } +- return true; +-} +- +-void VM_ShenandoahReferenceOperation::doit_epilogue() { +- if (Thread::current()->is_Java_thread()) { +- InstanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock); +- } else { +- ShenandoahHeap *sh = ShenandoahHeap::heap(); +- sh->release_pending_refs_lock(); +- } +-} +- +-void VM_ShenandoahInitMark::doit() { +- ShenandoahGCPauseMark mark(SvcGCMarker::OTHER); +- ShenandoahHeap::heap()->entry_init_mark(); +-} +- +-void VM_ShenandoahFinalMarkStartEvac::doit() { +- 
ShenandoahGCPauseMark mark(SvcGCMarker::OTHER); +- ShenandoahHeap::heap()->entry_final_mark(); +-} +- +-void VM_ShenandoahFullGC::doit() { +- ShenandoahGCPauseMark mark(SvcGCMarker::FULL); +- ShenandoahHeap::heap()->entry_full(_gc_cause); +-} +- +-void VM_ShenandoahInitUpdateRefs::doit() { +- ShenandoahGCPauseMark mark(SvcGCMarker::OTHER); +- ShenandoahHeap::heap()->entry_init_updaterefs(); +-} +- +-void VM_ShenandoahFinalUpdateRefs::doit() { +- ShenandoahGCPauseMark mark(SvcGCMarker::OTHER); +- ShenandoahHeap::heap()->entry_final_updaterefs(); +-} +- +-void VM_ShenandoahDegeneratedGC::doit() { +- ShenandoahGCPauseMark mark(SvcGCMarker::OTHER); +- ShenandoahHeap::heap()->entry_degenerated(_point); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahVMOperations.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,107 +0,0 @@ +-/* +- * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP +-#define SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP +- +-#include "gc_implementation/shared/vmGCOperations.hpp" +- +-// VM_operations for the Shenandoah Collector. +-// +-// VM_ShenandoahOperation +-// - VM_ShenandoahInitMark: initiate concurrent marking +-// - VM_ShenandoahReferenceOperation: +-// - VM_ShenandoahFinalMarkStartEvac: finish up concurrent marking, and start evacuation +-// - VM_ShenandoahInitUpdateRefs: initiate update references +-// - VM_ShenandoahFinalUpdateRefs: finish up update references +-// - VM_ShenandoahFullGC: do full GC +- +-class VM_ShenandoahOperation : public VM_Operation { +-public: +- VM_ShenandoahOperation() {}; +-}; +- +-class VM_ShenandoahReferenceOperation : public VM_ShenandoahOperation { +-private: +- BasicLock _pending_list_basic_lock; +-public: +- VM_ShenandoahReferenceOperation() : VM_ShenandoahOperation() {}; +- bool doit_prologue(); +- void doit_epilogue(); +-}; +- +-class VM_ShenandoahInitMark: public VM_ShenandoahOperation { +-public: +- VM_ShenandoahInitMark() : VM_ShenandoahOperation() {}; +- VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitMark; } +- const char* name() const { return "Shenandoah Init Marking"; } +- virtual void doit(); +-}; +- +-class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahReferenceOperation { +-public: +- VM_ShenandoahFinalMarkStartEvac() : VM_ShenandoahReferenceOperation() {}; +- VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalMarkStartEvac; } +- const char* name() const { return "Shenandoah Final Mark and Start Evacuation"; } +- virtual void doit(); +-}; +- +-class VM_ShenandoahDegeneratedGC: public VM_ShenandoahReferenceOperation { +-private: +- // Really the 
ShenandoahHeap::ShenandoahDegenerationPoint, but casted to int here +- // in order to avoid dependency on ShenandoahHeap +- int _point; +-public: +- VM_ShenandoahDegeneratedGC(int point) : VM_ShenandoahReferenceOperation(), _point(point) {}; +- VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahDegeneratedGC; } +- const char* name() const { return "Shenandoah Degenerated GC"; } +- virtual void doit(); +-}; +- +-class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation { +-private: +- GCCause::Cause _gc_cause; +-public: +- VM_ShenandoahFullGC(GCCause::Cause gc_cause) : VM_ShenandoahReferenceOperation(), _gc_cause(gc_cause) {}; +- VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFullGC; } +- const char* name() const { return "Shenandoah Full GC"; } +- virtual void doit(); +-}; +- +-class VM_ShenandoahInitUpdateRefs: public VM_ShenandoahOperation { +-public: +- VM_ShenandoahInitUpdateRefs() : VM_ShenandoahOperation() {}; +- VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitUpdateRefs; } +- const char* name() const { return "Shenandoah Init Update References"; } +- virtual void doit(); +-}; +- +-class VM_ShenandoahFinalUpdateRefs: public VM_ShenandoahOperation { +-public: +- VM_ShenandoahFinalUpdateRefs() : VM_ShenandoahOperation() {}; +- VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalUpdateRefs; } +- const char* name() const { return "Shenandoah Final Update References"; } +- virtual void doit(); +-}; +- +-#endif //SHARE_VM_GC_SHENANDOAH_VM_OPERATIONS_SHENANDOAH_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ 
-1,81 +0,0 @@ +-/* +- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerDataArray.inline.hpp" +-#include "utilities/ostream.hpp" +- +-template <> +-size_t ShenandoahWorkerDataArray::uninitialized() { +- return (size_t)-1; +-} +- +-template <> +-double ShenandoahWorkerDataArray::uninitialized() { +- return -1.0; +-} +- +-template <> +-void ShenandoahWorkerDataArray::WDAPrinter::summary(outputStream* out, double min, double avg, double max, double diff, double sum, bool print_sum) { +- out->print(" Min: %4.1lf, Avg: %4.1lf, Max: %4.1lf, Diff: %4.1lf", min * MILLIUNITS, avg * MILLIUNITS, max * MILLIUNITS, diff* MILLIUNITS); +- if (print_sum) { +- out->print(", Sum: %4.1lf", sum * MILLIUNITS); +- } +-} +- +-template <> +-void ShenandoahWorkerDataArray::WDAPrinter::summary(outputStream* out, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum) { +- out->print(" Min: " SIZE_FORMAT ", Avg: %4.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT, min, avg, max, diff); +- if (print_sum) { +- out->print(", Sum: " SIZE_FORMAT, sum); +- } +-} +- +-template <> +-void ShenandoahWorkerDataArray::WDAPrinter::details(const ShenandoahWorkerDataArray* phase, outputStream* out) { +- out->print("%-25s", ""); +- for (uint i = 0; i < phase->_length; ++i) { +- double value = phase->get(i); +- if (value != phase->uninitialized()) { +- out->print(" %4.1lf", phase->get(i) * 1000.0); +- } else { +- out->print(" -"); +- } +- } +- out->cr(); +-} +- +-template <> +-void ShenandoahWorkerDataArray::WDAPrinter::details(const ShenandoahWorkerDataArray* phase, outputStream* out) { +- out->print("%-25s", ""); +- for (uint i = 0; i < phase->_length; ++i) { +- size_t value = phase->get(i); +- if (value != phase->uninitialized()) { +- out->print(" " SIZE_FORMAT, phase->get(i)); +- } else { +- out->print(" -"); +- } +- } +- out->cr(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp 
afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,85 +0,0 @@ +-/* +- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_HPP +-#define SHARE_VM_GC_G1_WORKERDATAARRAY_HPP +- +-#include "memory/allocation.hpp" +-#include "utilities/debug.hpp" +- +-class outputStream; +- +-template +-class ShenandoahWorkerDataArray : public CHeapObj { +- friend class WDAPrinter; +- T* _data; +- uint _length; +- const char* _title; +- +- ShenandoahWorkerDataArray* _thread_work_items; +- +- public: +- ShenandoahWorkerDataArray(uint length, const char* title); +- ~ShenandoahWorkerDataArray(); +- +- void link_thread_work_items(ShenandoahWorkerDataArray* thread_work_items); +- void set_thread_work_item(uint worker_i, size_t value); +- ShenandoahWorkerDataArray* thread_work_items() const { +- return _thread_work_items; +- } +- +- static T uninitialized(); +- +- void set(uint worker_i, T value); +- T get(uint worker_i) const; +- +- void add(uint worker_i, T value); +- +- // The sum() and average() methods below consider uninitialized slots to be 0. +- double average() const; +- T sum() const; +- +- const char* title() const { +- return _title; +- } +- +- void reset(); +- void set_all(T value); +- +- private: +- class WDAPrinter { +- public: +- static void summary(outputStream* out, double min, double avg, double max, double diff, double sum, bool print_sum); +- static void summary(outputStream* out, size_t min, double avg, size_t max, size_t diff, size_t sum, bool print_sum); +- +- static void details(const ShenandoahWorkerDataArray* phase, outputStream* out); +- static void details(const ShenandoahWorkerDataArray* phase, outputStream* out); +- }; +- +- public: +- void print_summary_on(outputStream* out, bool print_sum = true) const; +- void print_details_on(outputStream* out) const; +-}; +- +-#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.inline.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.inline.hpp +--- 
openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerDataArray.inline.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,156 +0,0 @@ +-/* +- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP +-#define SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP +- +-#include "gc_implementation/shenandoah/shenandoahWorkerDataArray.hpp" +-#include "memory/allocation.inline.hpp" +-#include "utilities/ostream.hpp" +- +-template +-ShenandoahWorkerDataArray::ShenandoahWorkerDataArray(uint length, const char* title) : +- _title(title), +- _length(0), +- _thread_work_items(NULL) { +- assert(length > 0, "Must have some workers to store data for"); +- _length = length; +- _data = NEW_C_HEAP_ARRAY(T, _length, mtGC); +- reset(); +-} +- +-template +-void ShenandoahWorkerDataArray::set(uint worker_i, T value) { +- assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); +- assert(_data[worker_i] == uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title)); +- _data[worker_i] = value; +-} +- +-template +-T ShenandoahWorkerDataArray::get(uint worker_i) const { +- assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); +- return _data[worker_i]; +-} +- +-template +-ShenandoahWorkerDataArray::~ShenandoahWorkerDataArray() { +- FREE_C_HEAP_ARRAY(T, _data, mtGC); +-} +- +-template +-void ShenandoahWorkerDataArray::link_thread_work_items(ShenandoahWorkerDataArray* thread_work_items) { +- _thread_work_items = thread_work_items; +-} +- +-template +-void ShenandoahWorkerDataArray::set_thread_work_item(uint worker_i, size_t value) { +- assert(_thread_work_items != NULL, "No sub count"); +- _thread_work_items->set(worker_i, value); +-} +- +-template +-void ShenandoahWorkerDataArray::add(uint worker_i, T value) { +- assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); +- assert(_data[worker_i] != uninitialized(), err_msg("No data to add to for worker %d", worker_i)); +- _data[worker_i] += value; +-} +- +-template +-double ShenandoahWorkerDataArray::average() const { +- uint contributing_threads = 
0; +- for (uint i = 0; i < _length; ++i) { +- if (get(i) != uninitialized()) { +- contributing_threads++; +- } +- } +- if (contributing_threads == 0) { +- return 0.0; +- } +- return sum() / (double) contributing_threads; +-} +- +-template +-T ShenandoahWorkerDataArray::sum() const { +- T s = 0; +- for (uint i = 0; i < _length; ++i) { +- if (get(i) != uninitialized()) { +- s += get(i); +- } +- } +- return s; +-} +- +-template +-void ShenandoahWorkerDataArray::set_all(T value) { +- for (uint i = 0; i < _length; i++) { +- _data[i] = value; +- } +-} +- +-template +-void ShenandoahWorkerDataArray::print_summary_on(outputStream* out, bool print_sum) const { +- out->print("%-25s", title()); +- uint start = 0; +- while (start < _length && get(start) == uninitialized()) { +- start++; +- } +- if (start < _length) { +- T min = get(start); +- T max = min; +- T sum = 0; +- uint contributing_threads = 0; +- for (uint i = start; i < _length; ++i) { +- T value = get(i); +- if (value != uninitialized()) { +- max = MAX2(max, value); +- min = MIN2(min, value); +- sum += value; +- contributing_threads++; +- } +- } +- T diff = max - min; +- assert(contributing_threads != 0, "Must be since we found a used value for the start index"); +- double avg = sum / (double) contributing_threads; +- WDAPrinter::summary(out, min, avg, max, diff, sum, print_sum); +- out->print_cr(", Workers: %d", contributing_threads); +- } else { +- // No data for this phase. 
+- out->print_cr(" skipped"); +- } +-} +- +-template +-void ShenandoahWorkerDataArray::print_details_on(outputStream* out) const { +- WDAPrinter::details(this, out); +-} +- +-template +-void ShenandoahWorkerDataArray::reset() { +- set_all(uninitialized()); +- if (_thread_work_items != NULL) { +- _thread_work_items->reset(); +- } +-} +- +-#endif // SHARE_VM_GC_G1_WORKERDATAARRAY_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,135 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shared/adaptiveSizePolicy.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp" +-#include "runtime/thread.hpp" +- +-uint ShenandoahWorkerPolicy::_prev_par_marking = 0; +-uint ShenandoahWorkerPolicy::_prev_conc_marking = 0; +-uint ShenandoahWorkerPolicy::_prev_conc_evac = 0; +-uint ShenandoahWorkerPolicy::_prev_fullgc = 0; +-uint ShenandoahWorkerPolicy::_prev_degengc = 0; +-uint ShenandoahWorkerPolicy::_prev_conc_update_ref = 0; +-uint ShenandoahWorkerPolicy::_prev_par_update_ref = 0; +-uint ShenandoahWorkerPolicy::_prev_conc_cleanup = 0; +-uint ShenandoahWorkerPolicy::_prev_conc_reset = 0; +- +-uint ShenandoahWorkerPolicy::calc_workers_for_init_marking() { +- uint active_workers = (_prev_par_marking == 0) ? ParallelGCThreads : _prev_par_marking; +- +- _prev_par_marking = +- AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_par_marking; +-} +- +-uint ShenandoahWorkerPolicy::calc_workers_for_conc_marking() { +- uint active_workers = (_prev_conc_marking == 0) ? ConcGCThreads : _prev_conc_marking; +- _prev_conc_marking = +- AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_conc_marking; +-} +- +-// Reuse the calculation result from init marking +-uint ShenandoahWorkerPolicy::calc_workers_for_final_marking() { +- return _prev_par_marking; +-} +- +-// Calculate workers for concurrent evacuation (concurrent GC) +-uint ShenandoahWorkerPolicy::calc_workers_for_conc_evac() { +- uint active_workers = (_prev_conc_evac == 0) ? 
ConcGCThreads : _prev_conc_evac; +- _prev_conc_evac = +- AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_conc_evac; +-} +- +-// Calculate workers for parallel fullgc +-uint ShenandoahWorkerPolicy::calc_workers_for_fullgc() { +- uint active_workers = (_prev_fullgc == 0) ? ParallelGCThreads : _prev_fullgc; +- _prev_fullgc = +- AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_fullgc; +-} +- +-// Calculate workers for concurrent reference update +-uint ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref() { +- uint active_workers = (_prev_conc_update_ref == 0) ? ConcGCThreads : _prev_conc_update_ref; +- _prev_conc_update_ref = +- AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_conc_update_ref; +-} +- +-// Calculate workers for parallel reference update +-uint ShenandoahWorkerPolicy::calc_workers_for_final_update_ref() { +- uint active_workers = (_prev_par_update_ref == 0) ? ParallelGCThreads : _prev_par_update_ref; +- _prev_par_update_ref = +- AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_par_update_ref; +-} +- +-// Calculate workers for parallel degenerated gc +-uint ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated() { +- uint active_workers = (_prev_degengc == 0) ? 
ParallelGCThreads : _prev_degengc; +- _prev_degengc = +- AdaptiveSizePolicy::calc_active_workers(ParallelGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_degengc; +-} +- +-uint ShenandoahWorkerPolicy::calc_workers_for_conc_preclean() { +- // Precleaning is single-threaded +- return 1; +-} +- +-uint ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup() { +- uint active_workers = (_prev_conc_cleanup == 0) ? ConcGCThreads : _prev_conc_cleanup; +- _prev_conc_cleanup = +- AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_conc_cleanup; +-} +- +-uint ShenandoahWorkerPolicy::calc_workers_for_conc_reset() { +- uint active_workers = (_prev_conc_reset == 0) ? ConcGCThreads : _prev_conc_reset; +- _prev_conc_reset = +- AdaptiveSizePolicy::calc_active_conc_workers(ConcGCThreads, +- active_workers, +- Threads::number_of_non_daemon_threads()); +- return _prev_conc_reset; +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,76 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP +- +-#include "memory/allocation.hpp" +- +-class ShenandoahWorkerPolicy : AllStatic { +-private: +- static uint _prev_par_marking; +- static uint _prev_conc_marking; +- static uint _prev_conc_evac; +- static uint _prev_fullgc; +- static uint _prev_degengc; +- static uint _prev_conc_update_ref; +- static uint _prev_par_update_ref; +- static uint _prev_conc_cleanup; +- static uint _prev_conc_reset; +- +-public: +- // Calculate the number of workers for initial marking +- static uint calc_workers_for_init_marking(); +- +- // Calculate the number of workers for concurrent marking +- static uint calc_workers_for_conc_marking(); +- +- // Calculate the number of workers for final marking +- static uint calc_workers_for_final_marking(); +- +- // Calculate workers for concurrent evacuation (concurrent GC) +- static uint calc_workers_for_conc_evac(); +- +- // Calculate workers for parallel full gc +- static uint calc_workers_for_fullgc(); +- +- // Calculate workers for parallel degenerated gc +- static uint calc_workers_for_stw_degenerated(); +- +- // Calculate workers for concurrent reference update +- static uint calc_workers_for_conc_update_ref(); +- +- // Calculate workers for parallel/final reference update +- static uint calc_workers_for_final_update_ref(); +- +- // Calculate workers for concurrent 
precleaning +- static uint calc_workers_for_conc_preclean(); +- +- // Calculate workers for concurrent cleanup +- static uint calc_workers_for_conc_cleanup(); +- +- // Calculate workers for concurrent reset +- static uint calc_workers_for_conc_reset(); +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKERPOLICY_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.cpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.cpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,65 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include "precompiled.hpp" +- +-#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp" +-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +- +-ShenandoahWorkerScope::ShenandoahWorkerScope(ShenandoahWorkGang* workers, uint nworkers, const char* msg, bool check) : +- _n_workers(nworkers), +- _workers(workers) { +- assert(msg != NULL, "Missing message"); +- log_info(gc, task)("Using %u of %u workers for %s", +- nworkers, ShenandoahHeap::heap()->max_workers(), msg); +- +- if (check) { +- ShenandoahHeap::heap()->assert_gc_workers(nworkers); +- } +- _workers->set_active_workers(nworkers); +-} +- +-ShenandoahWorkerScope::~ShenandoahWorkerScope() { +- assert(_workers->active_workers() == _n_workers, +- "Active workers can not be changed within this scope"); +-} +- +-ShenandoahPushWorkerScope::ShenandoahPushWorkerScope(ShenandoahWorkGang* workers, uint nworkers, bool check) : +- _n_workers(nworkers), +- _old_workers(workers->active_workers()), +- _workers(workers) { +- _workers->set_active_workers(nworkers); +- +- // bypass concurrent/parallel protocol check for non-regular paths, e.g. verifier, etc. 
+- if (check) { +- ShenandoahHeap::heap()->assert_gc_workers(nworkers); +- } +-} +- +-ShenandoahPushWorkerScope::~ShenandoahPushWorkerScope() { +- assert(_workers->active_workers() == _n_workers, +- "Active workers can not be changed within this scope"); +- // Restore old worker value +- _workers->set_active_workers(_old_workers); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/shenandoahWorkGroup.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,72 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAH_WORKGROUP_HPP +-#define SHARE_VM_GC_SHENANDOAH_SHENANDOAH_WORKGROUP_HPP +- +-#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp" +-#include "utilities/workgroup.hpp" +-#include "memory/allocation.hpp" +- +-class ShenandoahWorkGang; +- +-class ShenandoahWorkerScope : public StackObj { +-private: +- uint _n_workers; +- ShenandoahWorkGang* _workers; +-public: +- ShenandoahWorkerScope(ShenandoahWorkGang* workers, uint nworkers, const char* msg, bool do_check = true); +- ~ShenandoahWorkerScope(); +-}; +- +-class ShenandoahPushWorkerScope : StackObj { +-private: +- uint _n_workers; +- uint _old_workers; +- ShenandoahWorkGang* _workers; +- +-public: +- ShenandoahPushWorkerScope(ShenandoahWorkGang* workers, uint nworkers, bool do_check = true); +- ~ShenandoahPushWorkerScope(); +-}; +- +-class ShenandoahWorkGang : public FlexibleWorkGang { +-public: +- ShenandoahWorkGang(const char* name, uint workers, +- bool are_GC_task_threads, +- bool are_ConcurrentGC_threads) : +- FlexibleWorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads) { +- } +- +- // Hide FlexibleWorkGang's implementation, avoid _active_workers == _total_workers +- // check +- void set_active_workers(uint v) { +- assert(v <= _total_workers, +- "Trying to set more workers active than there are"); +- _active_workers = MIN2(v, _total_workers); +- assert(v != 0, "Trying to set active workers to 0"); +- _active_workers = MAX2(1U, _active_workers); +- } +-}; +- +-#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHWORKGROUP_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/vmStructs_shenandoah.hpp afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/vmStructs_shenandoah.hpp +--- openjdk/hotspot/src/share/vm/gc_implementation/shenandoah/vmStructs_shenandoah.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_implementation/shenandoah/vmStructs_shenandoah.hpp 1970-01-01 08:00:00.000000000 +0800 
+@@ -1,51 +0,0 @@ +-/* +- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +-#ifndef SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP +-#define SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP +- +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" +-#include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp" +- +-#define VM_STRUCTS_SHENANDOAH(nonstatic_field, volatile_nonstatic_field, static_field) \ +- static_field(ShenandoahHeapRegion, RegionSizeBytes, size_t) \ +- nonstatic_field(ShenandoahHeap, _num_regions, size_t) \ +- nonstatic_field(ShenandoahHeap, _regions, ShenandoahHeapRegion**) \ +- volatile_nonstatic_field(ShenandoahHeap, _used, jlong) \ +- volatile_nonstatic_field(ShenandoahHeap, _committed, size_t) \ +- nonstatic_field(ShenandoahHeapRegion, _index, size_t const) \ +- nonstatic_field(ShenandoahHeapRegion, _bottom, HeapWord* const) \ +- nonstatic_field(ShenandoahHeapRegion, _top, HeapWord*) \ +- nonstatic_field(ShenandoahHeapRegion, _end, HeapWord* const) +- +-#define VM_INT_CONSTANTS_SHENANDOAH(declare_constant, declare_constant_with_value) +- +-#define VM_TYPES_SHENANDOAH(declare_type, \ +- declare_toplevel_type, \ +- declare_integer_type) \ +- declare_type(ShenandoahHeap, CollectedHeap) \ +- declare_toplevel_type(ShenandoahHeapRegion) \ +- declare_toplevel_type(ShenandoahHeap*) \ +- declare_toplevel_type(ShenandoahHeapRegion*) \ +- +-#endif // SHARE_VM_GC_SHENANDOAH_VMSTRUCTS_SHENANDOAH_HPP +diff -uNr openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.cpp afu8u/hotspot/src/share/vm/gc_interface/collectedHeap.cpp +--- openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_interface/collectedHeap.cpp 2025-05-06 10:53:45.059633671 +0800 +@@ -606,24 +606,3 @@ + err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(after_heap))); + } + #endif +- +-void CollectedHeap::shutdown() { +- // Default implementation does nothing. 
+-} +- +-void CollectedHeap::accumulate_statistics_all_gclabs() { +- // Default implementation does nothing. +-} +- +-bool CollectedHeap::supports_object_pinning() const { +- return false; +-} +- +-oop CollectedHeap::pin_object(JavaThread* thread, oop obj) { +- ShouldNotReachHere(); +- return NULL; +-} +- +-void CollectedHeap::unpin_object(JavaThread* thread, oop obj) { +- ShouldNotReachHere(); +-} +diff -uNr openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.hpp afu8u/hotspot/src/share/vm/gc_interface/collectedHeap.hpp +--- openjdk/hotspot/src/share/vm/gc_interface/collectedHeap.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_interface/collectedHeap.hpp 2025-05-06 10:53:45.059633671 +0800 +@@ -80,7 +80,6 @@ + // GenCollectedHeap + // G1CollectedHeap + // ParallelScavengeHeap +-// ShenandoahHeap + // + class CollectedHeap : public CHeapObj { + friend class VMStructs; +@@ -189,8 +188,7 @@ + SharedHeap, + GenCollectedHeap, + ParallelScavengeHeap, +- G1CollectedHeap, +- ShenandoahHeap ++ G1CollectedHeap + }; + + static inline size_t filler_array_max_size() { +@@ -608,19 +606,6 @@ + // Heap verification + virtual void verify(bool silent, VerifyOption option) = 0; + +- // Shut down all GC workers and other GC related threads. +- virtual void shutdown(); +- +- // Accumulate additional statistics from GCLABs. +- virtual void accumulate_statistics_all_gclabs(); +- +- // Support for object pinning. This is used by JNI Get*Critical() +- // and Release*Critical() family of functions. If supported, the GC +- // must guarantee that pinned objects never move. +- virtual bool supports_object_pinning() const; +- virtual oop pin_object(JavaThread* thread, oop obj); +- virtual void unpin_object(JavaThread* thread, oop obj); +- + // Non product verification and debugging. + #ifndef PRODUCT + // Support for PromotionFailureALot. 
Return true if it's time to cause a +diff -uNr openjdk/hotspot/src/share/vm/gc_interface/gcCause.cpp afu8u/hotspot/src/share/vm/gc_interface/gcCause.cpp +--- openjdk/hotspot/src/share/vm/gc_interface/gcCause.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_interface/gcCause.cpp 2025-05-06 10:53:45.059633671 +0800 +@@ -100,18 +100,6 @@ + case _g1_humongous_allocation: + return "G1 Humongous Allocation"; + +- case _shenandoah_allocation_failure_evac: +- return "Allocation Failure During Evacuation"; +- +- case _shenandoah_stop_vm: +- return "Stopping VM"; +- +- case _shenandoah_concurrent_gc: +- return "Concurrent GC"; +- +- case _shenandoah_upgrade_to_full_gc: +- return "Upgrade To Full GC"; +- + case _last_ditch_collection: + return "Last ditch collection"; + +diff -uNr openjdk/hotspot/src/share/vm/gc_interface/gcCause.hpp afu8u/hotspot/src/share/vm/gc_interface/gcCause.hpp +--- openjdk/hotspot/src/share/vm/gc_interface/gcCause.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_interface/gcCause.hpp 2025-05-06 10:53:45.059633671 +0800 +@@ -73,12 +73,6 @@ + _g1_inc_collection_pause, + _g1_humongous_allocation, + +- _shenandoah_stop_vm, +- _shenandoah_metadata_gc_clear_softrefs, +- _shenandoah_allocation_failure_evac, +- _shenandoah_concurrent_gc, +- _shenandoah_upgrade_to_full_gc, +- + _last_ditch_collection, + _last_gc_cause + }; +diff -uNr openjdk/hotspot/src/share/vm/gc_interface/gcName.hpp afu8u/hotspot/src/share/vm/gc_interface/gcName.hpp +--- openjdk/hotspot/src/share/vm/gc_interface/gcName.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/gc_interface/gcName.hpp 2025-05-06 10:53:45.059633671 +0800 +@@ -37,7 +37,6 @@ + G1New, + ConcurrentMarkSweep, + G1Old, +- Shenandoah, + GCNameEndSentinel + }; + +@@ -54,7 +53,6 @@ + case G1New: return "G1New"; + case ConcurrentMarkSweep: return "ConcurrentMarkSweep"; + case G1Old: return "G1Old"; +- case Shenandoah: return "Shenandoah"; + default: 
ShouldNotReachHere(); return NULL; + } + } +diff -uNr openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp afu8u/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp 2025-05-06 10:53:45.059633671 +0800 +@@ -42,6 +42,8 @@ + # include "interp_masm_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "interp_masm_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "interp_masm_sw64.hpp" + #endif + + // This file contains the platform-independent parts +diff -uNr openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp afu8u/hotspot/src/share/vm/interpreter/bytecode.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/bytecode.hpp 2025-05-06 10:53:45.059633671 +0800 +@@ -31,6 +31,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp afu8u/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -35,6 +35,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif +@@ -595,6 +598,9 @@ + #ifdef TARGET_ARCH_aarch64 + # include "bytecodeInterpreter_aarch64.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytecodeInterpreter_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_sparc + # include 
"bytecodeInterpreter_sparc.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp afu8u/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -46,6 +46,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytecodeInterpreter_x86.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytecodeInterpreter_sw64.inline.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytecodeInterpreter_aarch64.inline.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp afu8u/hotspot/src/share/vm/interpreter/bytecodes.cpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodes.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/bytecodes.cpp 2025-05-06 10:53:45.063633671 +0800 +@@ -29,6 +29,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp afu8u/hotspot/src/share/vm/interpreter/bytecodes.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodes.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/bytecodes.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -292,6 +292,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytecodes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytecodes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytecodes_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/bytecodeStream.hpp afu8u/hotspot/src/share/vm/interpreter/bytecodeStream.hpp +--- openjdk/hotspot/src/share/vm/interpreter/bytecodeStream.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/interpreter/bytecodeStream.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -32,6 +32,9 @@ + #ifdef TARGET_ARCH_x86 + # include "bytes_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp afu8u/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp +--- openjdk/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/cppInterpreterGenerator.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -50,6 +50,9 @@ + #ifdef TARGET_ARCH_x86 + # include "cppInterpreterGenerator_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "cppInterpreterGenerator_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "cppInterpreterGenerator_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.hpp afu8u/hotspot/src/share/vm/interpreter/cppInterpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/cppInterpreter.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/cppInterpreter.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -84,6 +84,9 @@ + #ifdef TARGET_ARCH_x86 + # include "cppInterpreter_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "cppInterpreter_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "cppInterpreter_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp afu8u/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -44,6 +44,9 @@ + #ifdef TARGET_ARCH_x86 + # include "interpreterGenerator_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# 
include "interpreterGenerator_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "interpreterGenerator_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/interpreter.hpp afu8u/hotspot/src/share/vm/interpreter/interpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreter.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/interpreter.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -148,6 +148,9 @@ + #ifdef TARGET_ARCH_x86 + # include "interpreter_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "interpreter_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "interpreter_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp afu8u/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp 2025-05-06 10:53:45.063633671 +0800 +@@ -59,6 +59,9 @@ + #ifdef TARGET_ARCH_x86 + # include "vm_version_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vm_version_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "vm_version_aarch64.hpp" + #endif +@@ -1290,7 +1293,7 @@ + // preparing the same method will be sure to see non-null entry & mirror. 
+ IRT_END + +-#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(AARCH64) ++#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(AARCH64) || defined(SW64) + IRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address)) + if (src_address == dest_address) { + return; +diff -uNr openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp afu8u/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp +--- openjdk/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -156,7 +156,7 @@ + Method* method, + intptr_t* from, intptr_t* to); + +-#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(AARCH64) ++#if defined(IA32) || defined(AMD64) || defined(ARM) || defined(AARCH64) || defined(SW64) + // Popframe support (only needed on x86, AMD64 and ARM) + static void popframe_move_outgoing_args(JavaThread* thread, void* src_address, void* dest_address); + #endif +@@ -168,6 +168,9 @@ + #ifdef TARGET_ARCH_aarch64 + # include "interpreterRT_aarch64.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "interpreterRT_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_sparc + # include "interpreterRT_sparc.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp afu8u/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp +--- openjdk/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -89,6 +89,9 @@ + #ifdef TARGET_ARCH_x86 + # include "templateInterpreterGenerator_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "templateInterpreterGenerator_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include 
"templateInterpreterGenerator_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.hpp afu8u/hotspot/src/share/vm/interpreter/templateInterpreter.hpp +--- openjdk/hotspot/src/share/vm/interpreter/templateInterpreter.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/templateInterpreter.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -190,6 +190,9 @@ + #ifdef TARGET_ARCH_x86 + # include "templateInterpreter_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "templateInterpreter_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "templateInterpreter_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp afu8u/hotspot/src/share/vm/interpreter/templateTable.hpp +--- openjdk/hotspot/src/share/vm/interpreter/templateTable.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/interpreter/templateTable.hpp 2025-05-06 10:53:45.063633671 +0800 +@@ -40,6 +40,8 @@ + # include "interp_masm_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "interp_masm_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "interp_masm_sw64.hpp" + #endif + + #ifndef CC_INTERP +@@ -367,6 +369,8 @@ + # include "templateTable_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "templateTable_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "templateTable_sw64.hpp" + #endif + + }; +diff -uNr openjdk/hotspot/src/share/vm/jfr/metadata/metadata.xml afu8u/hotspot/src/share/vm/jfr/metadata/metadata.xml +--- openjdk/hotspot/src/share/vm/jfr/metadata/metadata.xml 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/jfr/metadata/metadata.xml 2025-05-06 11:13:08.123672950 +0800 +@@ -907,27 +907,6 @@ + + + +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +- + + + +diff -uNr openjdk/hotspot/src/share/vm/jfr/periodic/jfrPeriodic.cpp afu8u/hotspot/src/share/vm/jfr/periodic/jfrPeriodic.cpp +--- 
openjdk/hotspot/src/share/vm/jfr/periodic/jfrPeriodic.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/jfr/periodic/jfrPeriodic.cpp 2025-05-06 10:53:45.071633672 +0800 +@@ -58,10 +58,6 @@ + #include "utilities/exceptions.hpp" + #include "utilities/globalDefinitions.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahJfrSupport.hpp" +-#endif +- + /** + * JfrPeriodic class + * Implementation of declarations in +@@ -562,14 +558,3 @@ + event.set_flushingEnabled(UseCodeCacheFlushing); + event.commit(); + } +- +- +-TRACE_REQUEST_FUNC(ShenandoahHeapRegionInformation) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- VM_ShenandoahSendHeapRegionInfoEvents op; +- VMThread::execute(&op); +- } +-#endif +-} +- +diff -uNr openjdk/hotspot/src/share/vm/jfr/writers/jfrEncoders.hpp afu8u/hotspot/src/share/vm/jfr/writers/jfrEncoders.hpp +--- openjdk/hotspot/src/share/vm/jfr/writers/jfrEncoders.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/jfr/writers/jfrEncoders.hpp 2025-05-06 10:53:45.075633672 +0800 +@@ -46,6 +46,9 @@ + #ifdef TARGET_ARCH_aarch64 + # include "bytes_aarch64.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + + // + // The Encoding policy prescribes a template +diff -uNr openjdk/hotspot/src/share/vm/memory/barrierSet.cpp afu8u/hotspot/src/share/vm/memory/barrierSet.cpp +--- openjdk/hotspot/src/share/vm/memory/barrierSet.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/barrierSet.cpp 2025-05-06 10:53:45.079633672 +0800 +@@ -42,33 +42,6 @@ + } + + // count is number of array elements being written +-void BarrierSet::write_ref_array(HeapWord* start, size_t count) { +- assert(count <= (size_t)max_intx, "count too large"); +- HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize)); +- // In the case of compressed oops, start and end may potentially be misaligned; +- // so we need to conservatively align the first downward (this 
is not +- // strictly necessary for current uses, but a case of good hygiene and, +- // if you will, aesthetics) and the second upward (this is essential for +- // current uses) to a HeapWord boundary, so we mark all cards overlapping +- // this write. If this evolves in the future to calling a +- // logging barrier of narrow oop granularity, like the pre-barrier for G1 +- // (mentioned here merely by way of example), we will need to change this +- // interface, so it is "exactly precise" (if i may be allowed the adverbial +- // redundancy for emphasis) and does not include narrow oop slots not +- // included in the original write interval. +- HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize); +- HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize); +- // If compressed oops were not being used, these should already be aligned +- assert(UseCompressedOops || (aligned_start == start && aligned_end == end), +- "Expected heap word alignment of start and end"); +-#if 0 +- warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT "," INTPTR_FORMAT ")\t", +- start, count, aligned_start, aligned_end); +-#endif +- write_ref_array_work(MemRegion(aligned_start, aligned_end)); +-} +- +-// count is number of array elements being written + void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) { + // simply delegate to instance method + Universe::heap()->barrier_set()->write_ref_array(start, count); +diff -uNr openjdk/hotspot/src/share/vm/memory/barrierSet.hpp afu8u/hotspot/src/share/vm/memory/barrierSet.hpp +--- openjdk/hotspot/src/share/vm/memory/barrierSet.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/barrierSet.hpp 2025-05-06 10:53:45.079633672 +0800 +@@ -40,7 +40,6 @@ + CardTableExtension, + G1SATBCT, + G1SATBCTLogging, +- ShenandoahBarrierSet, + Other, + Uninit + }; +@@ -138,7 +137,7 @@ + bool dest_uninitialized = false) {} + // Below count is the 
# array elements being written, starting + // at the address "start", which may not necessarily be HeapWord-aligned +- virtual void write_ref_array(HeapWord* start, size_t count); ++ inline void write_ref_array(HeapWord* start, size_t count); + + // Static versions, suitable for calling from generated code; + // count is # array elements being written, starting with "start", +diff -uNr openjdk/hotspot/src/share/vm/memory/barrierSet.inline.hpp afu8u/hotspot/src/share/vm/memory/barrierSet.inline.hpp +--- openjdk/hotspot/src/share/vm/memory/barrierSet.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/barrierSet.inline.hpp 2025-05-06 10:53:45.079633672 +0800 +@@ -48,6 +48,34 @@ + } + } + ++// count is number of array elements being written ++void BarrierSet::write_ref_array(HeapWord* start, size_t count) { ++ assert(count <= (size_t)max_intx, "count too large"); ++ HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize)); ++ // In the case of compressed oops, start and end may potentially be misaligned; ++ // so we need to conservatively align the first downward (this is not ++ // strictly necessary for current uses, but a case of good hygiene and, ++ // if you will, aesthetics) and the second upward (this is essential for ++ // current uses) to a HeapWord boundary, so we mark all cards overlapping ++ // this write. If this evolves in the future to calling a ++ // logging barrier of narrow oop granularity, like the pre-barrier for G1 ++ // (mentioned here merely by way of example), we will need to change this ++ // interface, so it is "exactly precise" (if i may be allowed the adverbial ++ // redundancy for emphasis) and does not include narrow oop slots not ++ // included in the original write interval. 
++ HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize); ++ HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize); ++ // If compressed oops were not being used, these should already be aligned ++ assert(UseCompressedOops || (aligned_start == start && aligned_end == end), ++ "Expected heap word alignment of start and end"); ++#if 0 ++ warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT "," INTPTR_FORMAT ")\t", ++ start, count, aligned_start, aligned_end); ++#endif ++ write_ref_array_work(MemRegion(aligned_start, aligned_end)); ++} ++ ++ + void BarrierSet::write_region(MemRegion mr) { + if (kind() == CardTableModRef) { + ((CardTableModRefBS*)this)->inline_write_region(mr); +diff -uNr openjdk/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp afu8u/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp +--- openjdk/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/binaryTreeDictionary.hpp 2025-05-06 10:53:45.079633672 +0800 +@@ -27,7 +27,6 @@ + + #include "memory/freeBlockDictionary.hpp" + #include "memory/freeList.hpp" +-#include "memory/memRegion.hpp" + + /* + * A binary tree based search structure for free blocks. 
+diff -uNr openjdk/hotspot/src/share/vm/memory/metaspace.hpp afu8u/hotspot/src/share/vm/memory/metaspace.hpp +--- openjdk/hotspot/src/share/vm/memory/metaspace.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/metaspace.hpp 2025-05-06 10:53:45.083633672 +0800 +@@ -65,7 +65,6 @@ + class MetaWord; + class Mutex; + class outputStream; +-class ShenandoahCollectorPolicy; + class SpaceManager; + class VirtualSpaceList; + +@@ -88,7 +87,6 @@ + friend class VM_CollectForMetadataAllocation; + friend class MetaspaceGC; + friend class MetaspaceAux; +- friend class ShenandoahCollectorPolicy; + + public: + enum MetadataType { +diff -uNr openjdk/hotspot/src/share/vm/memory/referenceProcessor.cpp afu8u/hotspot/src/share/vm/memory/referenceProcessor.cpp +--- openjdk/hotspot/src/share/vm/memory/referenceProcessor.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/referenceProcessor.cpp 2025-05-06 11:13:08.127672950 +0800 +@@ -1188,7 +1188,7 @@ + // Check assumption that an object is not potentially + // discovered twice except by concurrent collectors that potentially + // trace the same Reference object twice. 
+- assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC, ++ assert(UseConcMarkSweepGC || UseG1GC, + "Only possible with a concurrent marking collector"); + return true; + } +diff -uNr openjdk/hotspot/src/share/vm/memory/referenceProcessor.hpp afu8u/hotspot/src/share/vm/memory/referenceProcessor.hpp +--- openjdk/hotspot/src/share/vm/memory/referenceProcessor.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/referenceProcessor.hpp 2025-05-06 11:13:08.127672950 +0800 +@@ -210,7 +210,7 @@ + + class ReferenceProcessor : public CHeapObj { + +- public: ++ private: + size_t total_count(DiscoveredList lists[]); + + protected: +@@ -667,10 +667,6 @@ + bool marks_oops_alive() const + { return _marks_oops_alive; } + +- bool is_empty() const { +- return _ref_processor.total_count(_refs_lists) == 0; +- } +- + protected: + ReferenceProcessor& _ref_processor; + DiscoveredList* _refs_lists; +diff -uNr openjdk/hotspot/src/share/vm/memory/specialized_oop_closures.hpp afu8u/hotspot/src/share/vm/memory/specialized_oop_closures.hpp +--- openjdk/hotspot/src/share/vm/memory/specialized_oop_closures.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/specialized_oop_closures.hpp 2025-05-06 10:53:45.087633672 +0800 +@@ -29,7 +29,6 @@ + #include "utilities/macros.hpp" + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1_specialized_oop_closures.hpp" +-#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp" + #endif // INCLUDE_ALL_GCS + + // The following OopClosure types get specialized versions of +@@ -105,8 +104,7 @@ + f(Par_PushOrMarkClosure,_nv) \ + f(CMSKeepAliveClosure,_nv) \ + f(CMSInnerParMarkAndPushClosure,_nv) \ +- FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ +- SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(f) ++ FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) + #else // INCLUDE_ALL_GCS + #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) + #endif // INCLUDE_ALL_GCS +diff -uNr 
openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp afu8u/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp +--- openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/threadLocalAllocBuffer.cpp 2025-05-06 10:53:45.087633672 +0800 +@@ -53,8 +53,6 @@ + thread->tlab().initialize_statistics(); + } + +- Universe::heap()->accumulate_statistics_all_gclabs(); +- + // Publish new stats if some allocation occurred. + if (global_stats()->allocation() != 0) { + global_stats()->publish(); +@@ -70,7 +68,7 @@ + size_t used = Universe::heap()->tlab_used(thread); + + _gc_waste += (unsigned)remaining(); +- size_t total_allocated = _gclab ? thread->allocated_bytes_gclab() : thread->allocated_bytes(); ++ size_t total_allocated = thread->allocated_bytes(); + size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc; + _allocated_before_last_gc = total_allocated; + +@@ -118,11 +116,7 @@ + invariants(); + + if (retire) { +- if (_gclab) { +- myThread()->incr_allocated_bytes_gclab(used_bytes()); +- } else { +- myThread()->incr_allocated_bytes(used_bytes()); +- } ++ myThread()->incr_allocated_bytes(used_bytes()); + } + + CollectedHeap::fill_with_object(top(), hard_end(), retire); +@@ -200,9 +194,7 @@ + invariants(); + } + +-void ThreadLocalAllocBuffer::initialize(bool gclab) { +- _initialized = true; +- _gclab = gclab; ++void ThreadLocalAllocBuffer::initialize() { + initialize(NULL, // start + NULL, // top + NULL); // end +@@ -236,10 +228,7 @@ + // During jvm startup, the main thread is initialized + // before the heap is initialized. So reinitialize it now. 
+ guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread"); +- Thread::current()->tlab().initialize(false); +- if (UseShenandoahGC) { +- Thread::current()->gclab().initialize(true); +- } ++ Thread::current()->tlab().initialize(); + + if (PrintTLAB && Verbose) { + gclog_or_tty->print("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT "\n", +@@ -271,12 +260,12 @@ + double waste_percent = alloc == 0 ? 0.0 : + 100.0 * waste / alloc; + size_t tlab_used = Universe::heap()->tlab_used(thrd); +- gclog_or_tty->print("TLAB: %s %s thread: " INTPTR_FORMAT " [id: %2d]" ++ gclog_or_tty->print("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]" + " desired_size: " SIZE_FORMAT "KB" + " slow allocs: %d refill waste: " SIZE_FORMAT "B" + " alloc:%8.5f %8.0fKB refills: %d waste %4.1f%% gc: %dB" + " slow: %dB fast: %dB\n", +- tag, _gclab ? "gclab" : "tlab ", p2i(thrd), thrd->osthread()->thread_id(), ++ tag, thrd, thrd->osthread()->thread_id(), + _desired_size / (K / HeapWordSize), + _slow_allocations, _refill_waste_limit * HeapWordSize, + _allocation_fraction.average(), +@@ -300,22 +289,9 @@ + } + + Thread* ThreadLocalAllocBuffer::myThread() { +- ByteSize gclab_offset = Thread::gclab_start_offset(); +- ByteSize tlab_offset = Thread::tlab_start_offset(); +- ByteSize offs = _gclab ? gclab_offset : tlab_offset; +- Thread* thread = (Thread*)(((char *)this) + +- in_bytes(start_offset()) - in_bytes(offs)); +-#ifdef ASSERT +- assert(this == (_gclab ? &thread->gclab() : &thread->tlab()), "must be"); +-#endif +- return thread; +-} +- +-void ThreadLocalAllocBuffer::rollback(size_t size) { +- HeapWord* old_top = top(); +- if (old_top != NULL) { // Pathological case: we accept that we can't rollback. 
+- set_top(old_top - size); +- } ++ return (Thread*)(((char *)this) + ++ in_bytes(start_offset()) - ++ in_bytes(Thread::tlab_start_offset())); + } + + +diff -uNr openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp afu8u/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp +--- openjdk/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp 2025-05-06 10:53:45.087633672 +0800 +@@ -58,8 +58,8 @@ + + AdaptiveWeightedAverage _allocation_fraction; // fraction of eden allocated in tlabs + +- bool _gclab; +- bool _initialized; ++ void accumulate_statistics(); ++ void initialize_statistics(); + + void set_start(HeapWord* start) { _start = start; } + void set_end(HeapWord* end) { _end = end; } +@@ -78,6 +78,9 @@ + // Make parsable and release it. + void reset(); + ++ // Resize based on amount of allocation, etc. ++ void resize(); ++ + void invariants() const { assert(top() >= start() && top() <= end(), "invalid tlab"); } + + void initialize(HeapWord* start, HeapWord* top, HeapWord* end); +@@ -98,12 +101,10 @@ + static GlobalTLABStats* global_stats() { return _global_stats; } + + public: +- ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0), _initialized(false) { ++ ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) { + // do nothing. 
tlabs must be inited by initialize() calls + } + +- bool is_initialized() const { return _initialized; }; +- + static const size_t min_size() { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); } + static const size_t max_size() { assert(_max_size != 0, "max_size not set up"); return _max_size; } + static void set_max_size(size_t max_size) { _max_size = max_size; } +@@ -130,16 +131,6 @@ + int reserve_size = typeArrayOopDesc::header_size(T_INT); + return MAX2(reserve_size, VM_Version::reserve_for_allocation_prefetch()); + } +- +- // Resize based on amount of allocation, etc. +- void resize(); +- +- void accumulate_statistics(); +- void initialize_statistics(); +- +- // Rolls back a single allocation of the given size. +- void rollback(size_t size); +- + static size_t alignment_reserve() { return align_object_size(end_reserve()); } + static size_t alignment_reserve_in_bytes() { return alignment_reserve() * HeapWordSize; } + +@@ -167,7 +158,7 @@ + static void resize_all_tlabs(); + + void fill(HeapWord* start, HeapWord* top, size_t new_size); +- void initialize(bool gclab); ++ void initialize(); + + static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } + +diff -uNr openjdk/hotspot/src/share/vm/memory/universe.cpp afu8u/hotspot/src/share/vm/memory/universe.cpp +--- openjdk/hotspot/src/share/vm/memory/universe.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/memory/universe.cpp 2025-05-06 11:13:08.127672950 +0800 +@@ -80,8 +80,6 @@ + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/g1/g1CollectorPolicy_ext.hpp" + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp" + #endif // INCLUDE_ALL_GCS + + PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +@@ -792,15 +790,6 @@ + fatal("UseG1GC not supported in java kernel vm."); + #endif 
// INCLUDE_ALL_GCS + +- } else if (UseShenandoahGC) { +-#if INCLUDE_ALL_GCS +- ShenandoahCollectorPolicy* shcp = new ShenandoahCollectorPolicy(); +- ShenandoahHeap* sh = new ShenandoahHeap(shcp); +- Universe::_collectedHeap = sh; +-#else // INCLUDE_ALL_GCS +- fatal("UseShenandoahGC not supported in java kernel vm."); +-#endif // INCLUDE_ALL_GCS +- + } else { + GenCollectorPolicy *gc_policy; + +diff -uNr openjdk/hotspot/src/share/vm/oops/constantPool.hpp afu8u/hotspot/src/share/vm/oops/constantPool.hpp +--- openjdk/hotspot/src/share/vm/oops/constantPool.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/constantPool.hpp 2025-05-06 10:53:45.091633672 +0800 +@@ -50,6 +50,10 @@ + #ifdef TARGET_ARCH_ppc + # include "bytes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif ++ + + // A constantPool is an array containing class constants as described in the + // class file. +diff -uNr openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp afu8u/hotspot/src/share/vm/oops/instanceKlass.cpp +--- openjdk/hotspot/src/share/vm/oops/instanceKlass.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/instanceKlass.cpp 2025-05-06 11:13:08.127672950 +0800 +@@ -73,7 +73,6 @@ + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp" + #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" + #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" + #include "oops/oop.pcgc.inline.hpp" + #endif // INCLUDE_ALL_GCS + #ifdef COMPILER1 +diff -uNr openjdk/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp afu8u/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp +--- openjdk/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp 2025-05-06 10:53:45.095633672 +0800 +@@ -46,7 +46,6 @@ + #include 
"gc_implementation/parNew/parOopClosures.inline.hpp" + #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" + #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" + #include "oops/oop.pcgc.inline.hpp" + #endif // INCLUDE_ALL_GCS + +diff -uNr openjdk/hotspot/src/share/vm/oops/instanceRefKlass.cpp afu8u/hotspot/src/share/vm/oops/instanceRefKlass.cpp +--- openjdk/hotspot/src/share/vm/oops/instanceRefKlass.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/instanceRefKlass.cpp 2025-05-06 10:53:45.095633672 +0800 +@@ -42,7 +42,6 @@ + #include "gc_implementation/parNew/parOopClosures.inline.hpp" + #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" + #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" + #include "oops/oop.pcgc.inline.hpp" + #endif // INCLUDE_ALL_GCS + +diff -uNr openjdk/hotspot/src/share/vm/oops/klass.cpp afu8u/hotspot/src/share/vm/oops/klass.cpp +--- openjdk/hotspot/src/share/vm/oops/klass.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/klass.cpp 2025-05-06 10:53:45.095633672 +0800 +@@ -488,7 +488,7 @@ + // the beginning. This function is only used when we write oops into Klasses. 
+ void Klass::klass_update_barrier_set_pre(oop* p, oop v) { + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + oop obj = *p; + if (obj != NULL) { + G1SATBCardTableModRefBS::enqueue(obj); +diff -uNr openjdk/hotspot/src/share/vm/oops/klass.hpp afu8u/hotspot/src/share/vm/oops/klass.hpp +--- openjdk/hotspot/src/share/vm/oops/klass.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/klass.hpp 2025-05-06 10:53:45.095633672 +0800 +@@ -37,7 +37,6 @@ + #if INCLUDE_ALL_GCS + #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp" + #include "gc_implementation/g1/g1OopClosures.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.hpp" + #include "gc_implementation/parNew/parOopClosures.hpp" + #endif // INCLUDE_ALL_GCS + #if INCLUDE_JFR +diff -uNr openjdk/hotspot/src/share/vm/oops/objArrayKlass.cpp afu8u/hotspot/src/share/vm/oops/objArrayKlass.cpp +--- openjdk/hotspot/src/share/vm/oops/objArrayKlass.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/objArrayKlass.cpp 2025-05-06 10:53:45.099633672 +0800 +@@ -56,8 +56,6 @@ + #include "gc_implementation/parallelScavenge/psCompactionManager.hpp" + #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" + #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" +-#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp" + #include "oops/oop.pcgc.inline.hpp" + #endif // INCLUDE_ALL_GCS + +@@ -245,12 +243,6 @@ + assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt"); + assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierSet::barrier_set()->arraycopy_barrier(src, dst, length); +- } +-#endif +- + if (s == d) { + // since source and destination are equal we do not need conversion 
checks. + assert(length > 0, "sanity check"); +diff -uNr openjdk/hotspot/src/share/vm/oops/objArrayOop.hpp afu8u/hotspot/src/share/vm/oops/objArrayOop.hpp +--- openjdk/hotspot/src/share/vm/oops/objArrayOop.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/objArrayOop.hpp 2025-05-06 10:53:45.099633672 +0800 +@@ -25,13 +25,8 @@ + #ifndef SHARE_VM_OOPS_OBJARRAYOOP_HPP + #define SHARE_VM_OOPS_OBJARRAYOOP_HPP + +-#include "memory/barrierSet.hpp" + #include "oops/arrayOop.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" +-#endif +- + // An objArrayOop is an array containing oops. + // Evaluating "String arg[10]" will create an objArrayOop. + +@@ -84,20 +79,13 @@ + + // Accessing + oop obj_at(int index) const { +- oop obj; + // With UseCompressedOops decode the narrow oop in the objArray to an + // uncompressed oop. Otherwise this is simply a "*" operator. + if (UseCompressedOops) { +- obj = load_decode_heap_oop(obj_at_addr(index)); ++ return load_decode_heap_oop(obj_at_addr(index)); + } else { +- obj = load_decode_heap_oop(obj_at_addr(index)); +- } +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj); ++ return load_decode_heap_oop(obj_at_addr(index)); + } +-#endif +- return obj; + } + + void obj_at_put(int index, oop value) { +diff -uNr openjdk/hotspot/src/share/vm/oops/oop.hpp afu8u/hotspot/src/share/vm/oops/oop.hpp +--- openjdk/hotspot/src/share/vm/oops/oop.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/oop.hpp 2025-05-06 10:53:45.099633672 +0800 +@@ -213,7 +213,6 @@ + + jint int_field(int offset) const; + void int_field_put(int offset, jint contents); +- void int_field_put_raw(int offset, jint contents); + + jshort short_field(int offset) const; + void short_field_put(int offset, jshort contents); +diff -uNr openjdk/hotspot/src/share/vm/oops/oop.inline.hpp 
afu8u/hotspot/src/share/vm/oops/oop.inline.hpp +--- openjdk/hotspot/src/share/vm/oops/oop.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/oops/oop.inline.hpp 2025-05-06 10:53:45.099633672 +0800 +@@ -60,9 +60,8 @@ + #ifdef TARGET_ARCH_ppc + # include "bytes_ppc.hpp" + #endif +- +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" + #endif + + // Implementation of all inlined member functions defined in oop.hpp +@@ -321,39 +320,23 @@ + // These functions are only used to exchange oop fields in instances, + // not headers. + inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) { +- oop result; + if (UseCompressedOops) { + // encode exchange value from oop to T + narrowOop val = encode_heap_oop(exchange_value); + narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest); + // decode old from T to oop +- result = decode_heap_oop(old); ++ return decode_heap_oop(old); + } else { +- result = (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest); +- } +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- if (exchange_value != NULL) { +- ShenandoahBarrierSet::barrier_set()->storeval_barrier(exchange_value); +- } +- result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(result); ++ return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest); + } +-#endif +- return result; + } + + // In order to put or get a field out of an instance, must first check + // if the field has been compressed and uncompress it. + inline oop oopDesc::obj_field(int offset) const { +- oop obj = UseCompressedOops ? ++ return UseCompressedOops ? 
+ load_decode_heap_oop(obj_field_addr(offset)) : + load_decode_heap_oop(obj_field_addr(offset)); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj); +- } +-#endif +- return obj; + } + inline volatile oop oopDesc::obj_field_volatile(int offset) const { + volatile oop value = obj_field(offset); +@@ -412,17 +395,11 @@ + inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; } + + inline oop oopDesc::obj_field_acquire(int offset) const { +- oop obj = UseCompressedOops ? ++ return UseCompressedOops ? + decode_heap_oop((narrowOop) + OrderAccess::load_acquire(obj_field_addr(offset))) + : decode_heap_oop((oop) + OrderAccess::load_ptr_acquire(obj_field_addr(offset))); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj); +- } +-#endif +- return obj; + } + inline void oopDesc::release_obj_field_put(int offset, oop value) { + UseCompressedOops ? 
+@@ -595,11 +572,6 @@ + volatile HeapWord *dest, + oop compare_value, + bool prebarrier) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCASBarrier) { +- return ShenandoahBarrierSet::barrier_set()->oop_atomic_cmpxchg_in_heap(exchange_value, dest, compare_value); +- } +-#endif + if (UseCompressedOops) { + if (prebarrier) { + update_barrier_set_pre((narrowOop*)dest, exchange_value); +diff -uNr openjdk/hotspot/src/share/vm/opto/addnode.cpp afu8u/hotspot/src/share/vm/opto/addnode.cpp +--- openjdk/hotspot/src/share/vm/opto/addnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/addnode.cpp 2025-05-06 10:53:45.099633672 +0800 +@@ -31,9 +31,6 @@ + #include "opto/mulnode.hpp" + #include "opto/phaseX.hpp" + #include "opto/subnode.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + // Portions of code courtesy of Clifford Click + +diff -uNr openjdk/hotspot/src/share/vm/opto/buildOopMap.cpp afu8u/hotspot/src/share/vm/opto/buildOopMap.cpp +--- openjdk/hotspot/src/share/vm/opto/buildOopMap.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/buildOopMap.cpp 2025-05-06 10:53:45.099633672 +0800 +@@ -50,6 +50,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vmreg_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vmreg_sw64.inline.hpp" ++#endif + + // The functions in this file builds OopMaps after all scheduling is done. 
+ // +diff -uNr openjdk/hotspot/src/share/vm/opto/c2compiler.cpp afu8u/hotspot/src/share/vm/opto/c2compiler.cpp +--- openjdk/hotspot/src/share/vm/opto/c2compiler.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/c2compiler.cpp 2025-05-06 10:53:45.099633672 +0800 +@@ -39,6 +39,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + // register information defined by ADLC +diff -uNr openjdk/hotspot/src/share/vm/opto/c2_globals.hpp afu8u/hotspot/src/share/vm/opto/c2_globals.hpp +--- openjdk/hotspot/src/share/vm/opto/c2_globals.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/c2_globals.hpp 2025-05-06 10:53:45.099633672 +0800 +@@ -35,6 +35,9 @@ + #ifdef TARGET_ARCH_sparc + # include "c2_globals_sparc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "c2_globals_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_arm + # include "c2_globals_arm.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/opto/callnode.cpp afu8u/hotspot/src/share/vm/opto/callnode.cpp +--- openjdk/hotspot/src/share/vm/opto/callnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/callnode.cpp 2025-05-06 10:53:45.103633673 +0800 +@@ -37,9 +37,6 @@ + #include "opto/regmask.hpp" + #include "opto/rootnode.hpp" + #include "opto/runtime.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#endif + + // Portions of code courtesy of Clifford Click + +@@ -810,7 +807,7 @@ + } + + +-void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) { ++void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) { + projs->fallthrough_proj = NULL; + projs->fallthrough_catchproj = NULL; + projs->fallthrough_ioproj = NULL; +@@ -873,18 +870,17 @@ + } + } + +- // The resproj may not exist because 
the result could be ignored ++ // The resproj may not exist because the result couuld be ignored + // and the exception object may not exist if an exception handler + // swallows the exception but all the other must exist and be found. + assert(projs->fallthrough_proj != NULL, "must be found"); +- do_asserts = do_asserts && !Compile::current()->inlining_incrementally(); +- assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found"); +- assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found"); +- assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found"); +- assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found"); ++ assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found"); ++ assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj != NULL, "must be found"); ++ assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj != NULL, "must be found"); ++ assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj != NULL, "must be found"); + if (separate_io_proj) { +- assert(!do_asserts || projs->catchall_memproj != NULL, "must be found"); +- assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found"); ++ assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj != NULL, "must be found"); ++ assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj != NULL, "must be found"); + } + } + +@@ -910,6 +906,7 @@ + return SafePointNode::Ideal(phase, can_reshape); + } + ++ + //============================================================================= + uint CallJavaNode::size_of() const { return sizeof(*this); } + uint CallJavaNode::cmp( const Node &n ) const { +@@ -1001,13 +998,6 @@ + Matcher::c_calling_convention( sig_bt, parm_regs, argcnt ); + } + +-bool CallRuntimeNode::is_call_to_arraycopystub() const { +- if (_name != NULL && 
strstr(_name, "arraycopy") != 0) { +- return true; +- } +- return false; +-} +- + //============================================================================= + //------------------------------calling_convention----------------------------- + +@@ -1021,37 +1011,6 @@ + } + #endif + +-Node *CallLeafNode::Ideal(PhaseGVN *phase, bool can_reshape) { +- if (UseShenandoahGC && is_g1_wb_pre_call()) { +- uint cnt = OptoRuntime::g1_wb_pre_Type()->domain()->cnt(); +- if (req() > cnt) { +- Node* addp = in(cnt); +- if (has_only_g1_wb_pre_uses(addp)) { +- del_req(cnt); +- if (can_reshape) { +- phase->is_IterGVN()->_worklist.push(addp); +- } +- return this; +- } +- } +- } +- +- return CallNode::Ideal(phase, can_reshape); +-} +- +-bool CallLeafNode::has_only_g1_wb_pre_uses(Node* n) { +- if (UseShenandoahGC) { +- return false; +- } +- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { +- Node* u = n->fast_out(i); +- if (!u->is_g1_wb_pre_call()) { +- return false; +- } +- } +- return n->outcnt() > 0; +-} +- + //============================================================================= + + void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) { +@@ -1563,15 +1522,7 @@ + Node *n = ctrl_proj->in(0); + if (n != NULL && n->is_Unlock()) { + UnlockNode *unlock = n->as_Unlock(); +- Node* lock_obj = lock->obj_node(); +- Node* unlock_obj = unlock->obj_node(); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj); +- unlock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(unlock_obj); +- } +-#endif +- if (lock_obj->eqv_uncast(unlock_obj) && ++ if (lock->obj_node()->eqv_uncast(unlock->obj_node()) && + BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) && + !unlock->is_eliminated()) { + lock_ops.append(unlock); +@@ -1616,15 +1567,7 @@ + } + if (ctrl->is_Lock()) { + LockNode *lock = ctrl->as_Lock(); +- Node* lock_obj = lock->obj_node(); +- Node* unlock_obj = 
unlock->obj_node(); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj); +- unlock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(unlock_obj); +- } +-#endif +- if (lock_obj->eqv_uncast(unlock_obj) && ++ if (lock->obj_node()->eqv_uncast(unlock->obj_node()) && + BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) { + lock_result = lock; + } +@@ -1655,15 +1598,7 @@ + } + if (lock1_node != NULL && lock1_node->is_Lock()) { + LockNode *lock1 = lock1_node->as_Lock(); +- Node* lock_obj = lock->obj_node(); +- Node* lock1_obj = lock1->obj_node(); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- lock_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock_obj); +- lock1_obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(lock1_obj); +- } +-#endif +- if (lock_obj->eqv_uncast(lock1_obj) && ++ if (lock->obj_node()->eqv_uncast(lock1->obj_node()) && + BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) && + !lock1->is_eliminated()) { + lock_ops.append(lock1); +@@ -1859,11 +1794,6 @@ + return false; + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- obj = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(obj); +- } +-#endif + // Look for external lock for the same object. 
+ SafePointNode* sfn = this->as_SafePoint(); + JVMState* youngest_jvms = sfn->jvms(); +@@ -1874,11 +1804,6 @@ + // Loop over monitors + for (int idx = 0; idx < num_mon; idx++) { + Node* obj_node = sfn->monitor_obj(jvms, idx); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- obj_node = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(obj_node); +- } +-#endif + BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock(); + if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) { + return true; +diff -uNr openjdk/hotspot/src/share/vm/opto/callnode.hpp afu8u/hotspot/src/share/vm/opto/callnode.hpp +--- openjdk/hotspot/src/share/vm/opto/callnode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/callnode.hpp 2025-05-06 10:53:45.103633673 +0800 +@@ -624,7 +624,7 @@ + // Collect all the interesting edges from a call for use in + // replacing the call by something else. Used by macro expansion + // and the late inlining support. +- void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true); ++ void extract_projections(CallProjections* projs, bool separate_io_proj); + + virtual uint match_edge(uint idx) const; + +@@ -762,8 +762,6 @@ + virtual int Opcode() const; + virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const; + +- bool is_call_to_arraycopystub() const; +- + #ifndef PRODUCT + virtual void dump_spec(outputStream *st) const; + #endif +@@ -782,11 +780,6 @@ + } + virtual int Opcode() const; + virtual bool guaranteed_safepoint() { return false; } +- virtual bool is_g1_wb_pre_call() const { return entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre); } +- virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); +- +- static bool has_only_g1_wb_pre_uses(Node* n); +- + #ifndef PRODUCT + virtual void dump_spec(outputStream *st) const; + #endif +diff -uNr openjdk/hotspot/src/share/vm/opto/cfgnode.cpp 
afu8u/hotspot/src/share/vm/opto/cfgnode.cpp +--- openjdk/hotspot/src/share/vm/opto/cfgnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/cfgnode.cpp 2025-05-06 10:53:45.103633673 +0800 +@@ -36,10 +36,6 @@ + #include "opto/regmask.hpp" + #include "opto/runtime.hpp" + #include "opto/subnode.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + // Portions of code courtesy of Clifford Click + +@@ -592,9 +588,6 @@ + if( n->as_Phi()->is_unsafe_data_reference(in) ) + in = phase->C->top(); // replaced by top + } +- if (n->outcnt() == 0) { +- in = phase->C->top(); +- } + igvn->replace_node(n, in); + } + else if( n->is_Region() ) { // Update all incoming edges +@@ -1635,12 +1628,7 @@ + if (can_reshape && igvn != NULL) { + igvn->_worklist.push(r); + } +- // Nuke it down +- if (can_reshape) { +- set_req_X(j, top, igvn); +- } else { +- set_req(j, top); +- } ++ set_req(j, top); // Nuke it down + progress = this; // Record progress + } + } +diff -uNr openjdk/hotspot/src/share/vm/opto/cfgnode.hpp afu8u/hotspot/src/share/vm/opto/cfgnode.hpp +--- openjdk/hotspot/src/share/vm/opto/cfgnode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/cfgnode.hpp 2025-05-06 10:53:45.103633673 +0800 +@@ -364,8 +364,6 @@ + // Returns NULL is it couldn't improve the type. 
+ static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj); + +- bool is_shenandoah_marking_if(PhaseTransform *phase) const; +- + #ifndef PRODUCT + virtual void dump_spec(outputStream *st) const; + #endif +diff -uNr openjdk/hotspot/src/share/vm/opto/classes.cpp afu8u/hotspot/src/share/vm/opto/classes.cpp +--- openjdk/hotspot/src/share/vm/opto/classes.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/classes.cpp 2025-05-06 10:53:45.103633673 +0800 +@@ -39,9 +39,6 @@ + #include "opto/rootnode.hpp" + #include "opto/subnode.hpp" + #include "opto/vectornode.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + // ---------------------------------------------------------------------------- + // Build a table of virtual functions to map from Nodes to dense integer +diff -uNr openjdk/hotspot/src/share/vm/opto/classes.hpp afu8u/hotspot/src/share/vm/opto/classes.hpp +--- openjdk/hotspot/src/share/vm/opto/classes.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/classes.hpp 2025-05-06 10:53:45.103633673 +0800 +@@ -230,9 +230,6 @@ + macro(RoundFloat) + macro(SafePoint) + macro(SafePointScalarObject) +-macro(ShenandoahCompareAndSwapN) +-macro(ShenandoahCompareAndSwapP) +-macro(ShenandoahLoadReferenceBarrier) + macro(SCMemProj) + macro(SinD) + macro(SqrtD) +diff -uNr openjdk/hotspot/src/share/vm/opto/compile.cpp afu8u/hotspot/src/share/vm/opto/compile.cpp +--- openjdk/hotspot/src/share/vm/opto/compile.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/compile.cpp 2025-05-06 11:13:08.131672951 +0800 +@@ -81,11 +81,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" +-#endif +- +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" ++#elif defined 
TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + // -------------------- Compile::mach_constant_base_node ----------------------- +@@ -410,11 +407,6 @@ + if (n->outcnt() == 1 && n->has_special_unique_user()) { + record_for_igvn(n->unique_out()); + } +- if (n->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(n)) { +- for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { +- record_for_igvn(n->fast_out(i)); +- } +- } + } + // Remove useless macro and predicate opaq nodes + for (int i = C->macro_count()-1; i >= 0; i--) { +@@ -437,12 +429,6 @@ + remove_expensive_node(n); + } + } +- for (int i = C->shenandoah_barriers_count()-1; i >= 0; i--) { +- ShenandoahLoadReferenceBarrierNode* n = C->shenandoah_barrier(i); +- if (!useful.member(n)) { +- remove_shenandoah_barrier(n); +- } +- } + // clean up the late inline lists + remove_useless_late_inlines(&_string_late_inlines, useful); + remove_useless_late_inlines(&_boxing_late_inlines, useful); +@@ -786,7 +772,7 @@ + StartNode* s = new (this) StartNode(root(), tf()->domain()); + initial_gvn()->set_type_bottom(s); + init_start(s); +- if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && (UseG1GC || UseShenandoahGC)) { ++ if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) { + // With java.lang.ref.reference.get() we must go through the + // intrinsic when G1 is enabled - even when get() is the root + // method of the compile - so that, if necessary, the value in +@@ -1179,7 +1165,6 @@ + _predicate_opaqs = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); + _expensive_nodes = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); + _range_check_casts = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); +- _shenandoah_barriers = new(comp_arena()) GrowableArray(comp_arena(), 8, 0, NULL); + register_library_intrinsics(); + #ifdef ASSERT + _type_verify_symmetry = true; +@@ -2317,12 +2302,6 @@ + igvn.optimize(); + } + +-#ifdef ASSERT +- 
if (UseShenandoahGC && ShenandoahVerifyOptoBarriers) { +- ShenandoahBarrierC2Support::verify(C->root()); +- } +-#endif +- + { + NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); ) + PhaseMacroExpand mex(igvn); +@@ -2332,12 +2311,6 @@ + } + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- ShenandoahBarrierC2Support::expand(this, igvn); +- } +-#endif +- + } // (End scope of igvn; run destructor if necessary for asserts.) + + dump_inlining(); +@@ -2772,15 +2745,6 @@ + case Op_CallLeafNoFP: { + assert( n->is_Call(), "" ); + CallNode *call = n->as_Call(); +- if (UseShenandoahGC && call->is_g1_wb_pre_call()) { +- uint cnt = OptoRuntime::g1_wb_pre_Type()->domain()->cnt(); +- if (call->req() > cnt) { +- assert(call->req() == cnt+1, "only one extra input"); +- Node* addp = call->in(cnt); +- assert(!CallLeafNode::has_only_g1_wb_pre_uses(addp), "useless address computation?"); +- call->del_req(cnt); +- } +- } + // Count call sites where the FP mode bit would have to be flipped. + // Do not count uncommon runtime calls: + // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking, +@@ -2924,38 +2888,9 @@ + break; + } + +- case Op_CastPP: { +- // Remove CastPP nodes to gain more freedom during scheduling but +- // keep the dependency they encode as control or precedence edges +- // (if control is set already) on memory operations. Some CastPP +- // nodes don't have a control (don't carry a dependency): skip +- // those. 
+- if (n->in(0) != NULL) { +- ResourceMark rm; +- Unique_Node_List wq; +- wq.push(n); +- for (uint next = 0; next < wq.size(); ++next) { +- Node *m = wq.at(next); +- for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) { +- Node* use = m->fast_out(i); +- if (use->is_Mem() || use->is_EncodeNarrowPtr() || use->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- use->ensure_control_or_add_prec(n->in(0)); +- } else if (use->in(0) == NULL) { +- switch(use->Opcode()) { +- case Op_AddP: +- case Op_DecodeN: +- case Op_DecodeNKlass: +- case Op_CheckCastPP: +- case Op_CastPP: +- wq.push(use); +- break; +- } +- } +- } +- } +- } +- const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false); +- if (is_LP64 && n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) { ++#ifdef _LP64 ++ case Op_CastPP: ++ if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) { + Node* in1 = n->in(1); + const Type* t = n->bottom_type(); + Node* new_in1 = in1->clone(); +@@ -2988,15 +2923,9 @@ + if (in1->outcnt() == 0) { + in1->disconnect_inputs(NULL, this); + } +- } else { +- n->subsume_by(n->in(1), this); +- if (n->outcnt() == 0) { +- n->disconnect_inputs(NULL, this); +- } + } + break; +- } +-#ifdef _LP64 ++ + case Op_CmpP: + // Do this transformation here to preserve CmpPNode::sub() and + // other TypePtr related Ideal optimizations (for example, ptr nullness). 
+@@ -3264,9 +3193,6 @@ + n->set_req(MemBarNode::Precedent, top()); + } + break; +- case Op_ShenandoahLoadReferenceBarrier: +- assert(false, "should have been expanded already"); +- break; + default: + assert( !n->is_Call(), "" ); + assert( !n->is_Mem(), "" ); +@@ -3633,7 +3559,7 @@ + // Currently supported: + // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre()) + void Compile::verify_barriers() { +- if (UseG1GC || UseShenandoahGC) { ++ if (UseG1GC) { + // Verify G1 pre-barriers + const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()); + +@@ -4157,7 +4083,7 @@ + const Type* t_no_spec = t->remove_speculative(); + if (t_no_spec != t) { + bool in_hash = igvn.hash_delete(n); +- assert(in_hash || n->hash() == Node::NO_HASH, "node should be in igvn hash table"); ++ assert(in_hash, "node should be in igvn hash table"); + tn->set_type(t_no_spec); + igvn.hash_insert(n); + igvn._worklist.push(n); // give it a chance to go away +@@ -4252,24 +4178,3 @@ + assert(count > 0, "only positive"); + return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count); + } +- +-void Compile::shenandoah_eliminate_g1_wb_pre(Node* call, PhaseIterGVN* igvn) { +- assert(UseShenandoahGC && call->is_g1_wb_pre_call(), ""); +- Node* c = call->as_Call()->proj_out(TypeFunc::Control); +- c = c->unique_ctrl_out(); +- assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?"); +- c = c->unique_ctrl_out(); +- assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?"); +- Node* iff = c->in(1)->is_IfProj() ? c->in(1)->in(0) : c->in(2)->in(0); +- assert(iff->is_If(), "expect test"); +- if (!iff->is_shenandoah_marking_if(igvn)) { +- c = c->unique_ctrl_out(); +- assert(c->is_Region() && c->req() == 3, "where's the pre barrier control flow?"); +- iff = c->in(1)->is_IfProj() ? 
c->in(1)->in(0) : c->in(2)->in(0); +- assert(iff->is_shenandoah_marking_if(igvn), "expect marking test"); +- } +- Node* cmpx = iff->in(1)->in(1); +- igvn->replace_node(cmpx, igvn->makecon(TypeInt::CC_EQ)); +- igvn->rehash_node_delayed(call); +- call->del_req(call->req()-1); +-} +diff -uNr openjdk/hotspot/src/share/vm/opto/compile.hpp afu8u/hotspot/src/share/vm/opto/compile.hpp +--- openjdk/hotspot/src/share/vm/opto/compile.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/compile.hpp 2025-05-06 11:13:08.131672951 +0800 +@@ -69,7 +69,6 @@ + class PhaseCCP_DCE; + class RootNode; + class relocInfo; +-class ShenandoahLoadReferenceBarrierNode; + class Scope; + class StartNode; + class SafePointNode; +@@ -339,14 +338,12 @@ + GrowableArray* _predicate_opaqs; // List of Opaque1 nodes for the loop predicates. + GrowableArray* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common + GrowableArray* _range_check_casts; // List of CastII nodes with a range check dependency +- GrowableArray* _shenandoah_barriers; + ConnectionGraph* _congraph; + #ifndef PRODUCT + IdealGraphPrinter* _printer; + #endif + + +- + // Node management + uint _unique; // Counter for unique Node indices + VectorSet _dead_node_list; // Set of dead nodes +@@ -669,11 +666,9 @@ + int macro_count() const { return _macro_nodes->length(); } + int predicate_count() const { return _predicate_opaqs->length();} + int expensive_count() const { return _expensive_nodes->length(); } +- int shenandoah_barriers_count() const { return _shenandoah_barriers->length(); } + Node* macro_node(int idx) const { return _macro_nodes->at(idx); } + Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);} + Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); } +- ShenandoahLoadReferenceBarrierNode* shenandoah_barrier(int idx) const { return _shenandoah_barriers->at(idx); } + ConnectionGraph* congraph() { 
return _congraph;} + void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;} + void add_macro_node(Node * n) { +@@ -697,15 +692,6 @@ + _expensive_nodes->remove(n); + } + } +- void add_shenandoah_barrier(ShenandoahLoadReferenceBarrierNode * n) { +- assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list"); +- _shenandoah_barriers->append(n); +- } +- void remove_shenandoah_barrier(ShenandoahLoadReferenceBarrierNode * n) { +- if (_shenandoah_barriers->contains(n)) { +- _shenandoah_barriers->remove(n); +- } +- } + void add_predicate_opaq(Node * n) { + assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1"); + assert(_macro_nodes->contains(n), "should have already been in macro list"); +@@ -738,8 +724,6 @@ + // Sort expensive nodes to locate similar expensive nodes + void sort_expensive_nodes(); + +- GrowableArray* shenandoah_barriers() { return _shenandoah_barriers; } +- + // Compilation environment. + Arena* comp_arena() { return &_comp_arena; } + ciEnv* env() const { return _env; } +@@ -1239,8 +1223,6 @@ + #ifdef ASSERT + bool _type_verify_symmetry; + #endif +- +- void shenandoah_eliminate_g1_wb_pre(Node* call, PhaseIterGVN* igvn); + }; + + #endif // SHARE_VM_OPTO_COMPILE_HPP +diff -uNr openjdk/hotspot/src/share/vm/opto/connode.cpp afu8u/hotspot/src/share/vm/opto/connode.cpp +--- openjdk/hotspot/src/share/vm/opto/connode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/connode.cpp 2025-05-06 10:53:45.103633673 +0800 +@@ -544,6 +544,20 @@ + #endif + + //============================================================================= ++ ++//------------------------------Ideal_DU_postCCP------------------------------- ++// If not converting int->oop, throw away cast after constant propagation ++Node *CastPPNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { ++ const Type *t = ccp->type(in(1)); ++ if (!t->isa_oop_ptr() || ((in(1)->is_DecodeN()) && Matcher::gen_narrow_oop_implicit_null_checks())) { ++ 
return NULL; // do not transform raw pointers or narrow oops ++ } ++ return ConstraintCastNode::Ideal_DU_postCCP(ccp); ++} ++ ++ ++ ++//============================================================================= + //------------------------------Identity--------------------------------------- + // If input is already higher or equal to cast type, then this is an identity. + Node *CheckCastPPNode::Identity( PhaseTransform *phase ) { +@@ -677,6 +691,10 @@ + } + + ++Node *EncodeNarrowPtrNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { ++ return MemNode::Ideal_common_DU_postCCP(ccp, this, in(1)); ++} ++ + Node* DecodeNKlassNode::Identity(PhaseTransform* phase) { + const Type *t = phase->type( in(1) ); + if( t == Type::TOP ) return in(1); +diff -uNr openjdk/hotspot/src/share/vm/opto/connode.hpp afu8u/hotspot/src/share/vm/opto/connode.hpp +--- openjdk/hotspot/src/share/vm/opto/connode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/connode.hpp 2025-05-06 10:53:45.103633673 +0800 +@@ -281,6 +281,7 @@ + CastPPNode (Node *n, const Type *t ): ConstraintCastNode(n, t) {} + virtual int Opcode() const; + virtual uint ideal_reg() const { return Op_RegP; } ++ virtual Node *Ideal_DU_postCCP( PhaseCCP * ); + }; + + //------------------------------CheckCastPPNode-------------------------------- +@@ -298,6 +299,9 @@ + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); + virtual int Opcode() const; + virtual uint ideal_reg() const { return Op_RegP; } ++ // No longer remove CheckCast after CCP as it gives me a place to hang ++ // the proper address type - which is required to compute anti-deps. 
++ //virtual Node *Ideal_DU_postCCP( PhaseCCP * ); + }; + + +@@ -312,6 +316,7 @@ + } + public: + virtual uint ideal_reg() const { return Op_RegN; } ++ virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); + }; + + //------------------------------EncodeP-------------------------------- +diff -uNr openjdk/hotspot/src/share/vm/opto/escape.cpp afu8u/hotspot/src/share/vm/opto/escape.cpp +--- openjdk/hotspot/src/share/vm/opto/escape.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/escape.cpp 2025-05-06 10:53:45.107633673 +0800 +@@ -34,9 +34,6 @@ + #include "opto/escape.hpp" + #include "opto/phaseX.hpp" + #include "opto/rootnode.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) : + _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), +@@ -526,7 +523,7 @@ + // Pointer stores in G1 barriers looks like unsafe access. + // Ignore such stores to be able scalar replace non-escaping + // allocations. +- if ((UseG1GC || UseShenandoahGC) && adr->is_AddP()) { ++ if (UseG1GC && adr->is_AddP()) { + Node* base = get_addp_base(adr); + if (base->Opcode() == Op_LoadP && + base->in(MemNode::Address)->is_AddP()) { +@@ -568,11 +565,6 @@ + add_java_object(n, PointsToNode::ArgEscape); + break; + } +-#if INCLUDE_ALL_GCS +- case Op_ShenandoahLoadReferenceBarrier: +- add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist); +- break; +-#endif + default: + ; // Do nothing for nodes not related to EA. + } +@@ -767,12 +759,7 @@ + } + break; + } +-#if INCLUDE_ALL_GCS +- case Op_ShenandoahLoadReferenceBarrier: +- add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL); +- break; +-#endif +- default: { ++ default: { + // This method should be called only for EA specific nodes which may + // miss some edges when they were created. 
+ #ifdef ASSERT +@@ -960,8 +947,6 @@ + (call->as_CallLeaf()->_name != NULL && + (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || + strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 || +- strcmp(call->as_CallLeaf()->_name, "shenandoah_clone_barrier") == 0 || +- strcmp(call->as_CallLeaf()->_name, "shenandoah_cas_obj") == 0 || + strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || + strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || + strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || +@@ -2294,9 +2279,7 @@ + assert(opcode == Op_ConP || opcode == Op_ThreadLocal || + opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || + (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || +- (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) || +- (uncast_base->is_Phi() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || +- uncast_base->Opcode() == Op_ShenandoahLoadReferenceBarrier, "sanity"); ++ (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity"); + } + return base; + } +diff -uNr openjdk/hotspot/src/share/vm/opto/gcm.cpp afu8u/hotspot/src/share/vm/opto/gcm.cpp +--- openjdk/hotspot/src/share/vm/opto/gcm.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/gcm.cpp 2025-05-06 10:53:45.107633673 +0800 +@@ -49,6 +49,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + +@@ -116,9 +118,6 @@ + } + } + +-static bool is_dominator(Block* d, Block* n) { +- return d->dom_lca(n) == d; +-} + + //------------------------------schedule_pinned_nodes-------------------------- + // Set the basic block for Nodes pinned into blocks +@@ -141,42 +140,6 @@ + schedule_node_into_block(node, block); + } + +- // If the node has precedence edges (added when CastPP nodes are +- // removed in final_graph_reshaping), 
fix the control of the +- // node to cover the precedence edges and remove the +- // dependencies. +- Node* n = NULL; +- for (uint i = node->len()-1; i >= node->req(); i--) { +- Node* m = node->in(i); +- if (m == NULL) continue; +- // Skip the precedence edge if the test that guarded a CastPP: +- // - was optimized out during escape analysis +- // (OptimizePtrCompare): the CastPP's control isn't an end of +- // block. +- // - is moved in the branch of a dominating If: the control of +- // the CastPP is then a Region. +- if (m->is_block_proj() || m->is_block_start()) { +- node->rm_prec(i); +- if (n == NULL) { +- n = m; +- } else { +- Block* bn = get_block_for_node(n); +- Block* bm = get_block_for_node(m); +- assert(is_dominator(bn, bm) || is_dominator(bm, bn), "one must dominate the other"); +- n = is_dominator(bn, bm) ? m : n; +- } +- } +- } +- if (n != NULL) { +- assert(node->in(0), "control should have been set"); +- Block* bn = get_block_for_node(n); +- Block* bnode = get_block_for_node(node->in(0)); +- assert(is_dominator(bn, bnode) || is_dominator(bnode, bn), "one must dominate the other"); +- if (!is_dominator(bn, bnode)) { +- node->set_req(0, n); +- } +- } +- + // process all inputs that are non NULL + for (int i = node->req() - 1; i >= 0; --i) { + if (node->in(i) != NULL) { +diff -uNr openjdk/hotspot/src/share/vm/opto/graphKit.cpp afu8u/hotspot/src/share/vm/opto/graphKit.cpp +--- openjdk/hotspot/src/share/vm/opto/graphKit.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/graphKit.cpp 2025-05-06 10:53:45.107633673 +0800 +@@ -27,7 +27,6 @@ + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" + #include "gc_implementation/g1/heapRegion.hpp" + #include "gc_interface/collectedHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" + #include "memory/barrierSet.hpp" + #include "memory/cardTableModRefBS.hpp" + #include "opto/addnode.hpp" +@@ -41,11 +40,6 @@ + #include "runtime/deoptimization.hpp" + #include 
"runtime/sharedRuntime.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif +- + //----------------------------GraphKit----------------------------------------- + // Main utility constructor. + GraphKit::GraphKit(JVMState* jvms) +@@ -1541,11 +1535,7 @@ + case BarrierSet::G1SATBCTLogging: + g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt); + break; +- case BarrierSet::ShenandoahBarrierSet: +- if (ShenandoahSATBBarrier) { +- g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt); +- } +- break; ++ + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + case BarrierSet::ModRef: +@@ -1563,7 +1553,6 @@ + switch (bs->kind()) { + case BarrierSet::G1SATBCT: + case BarrierSet::G1SATBCTLogging: +- case BarrierSet::ShenandoahBarrierSet: + return true; // Can move it if no safepoint + + case BarrierSet::CardTableModRef: +@@ -1593,11 +1582,7 @@ + case BarrierSet::G1SATBCTLogging: + g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise); + break; +- case BarrierSet::ShenandoahBarrierSet: +- if (ShenandoahStoreValEnqueueBarrier) { +- g1_write_barrier_pre(false, NULL, NULL, max_juint, NULL, NULL, val, bt); +- } +- break; ++ + case BarrierSet::CardTableModRef: + case BarrierSet::CardTableExtension: + write_barrier_post(store, obj, adr, adr_idx, val, use_precise); +@@ -1722,11 +1707,6 @@ + elembt = T_OBJECT; // To satisfy switch in LoadNode::make() + } + Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (elembt == T_OBJECT || elembt == T_ARRAY)) { +- ld = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, ld); +- } +-#endif + return ld; + } + +@@ -3685,12 +3665,6 @@ + if (ptr == NULL) { // reduce dumb test in callers + return NULL; + } +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- 
ptr = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(ptr); +- } +-#endif + if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast + ptr = ptr->in(1); + if (ptr == NULL) return NULL; +@@ -3949,16 +3923,7 @@ + Node* index_adr = __ AddP(no_base, tls, __ ConX(index_offset)); + + // Now some of the values +- Node* marking; +- if (UseShenandoahGC) { +- Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(JavaThread::gc_state_offset()))); +- Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw); +- marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING)); +- assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape"); +- } else { +- assert(UseG1GC, "should be"); +- marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw); +- } ++ Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw); + + // if (!marking) + __ if_then(marking, BoolTest::ne, zero, unlikely); { +@@ -3999,15 +3964,6 @@ + + // Final sync IdealKit and GraphKit. + final_sync(ideal); +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && adr != NULL) { +- Node* c = control(); +- Node* call = c->in(1)->in(1)->in(1)->in(0); +- assert(call->is_g1_wb_pre_call(), "g1_wb_pre call expected"); +- call->add_req(adr); +- } +-#endif + } + + // +@@ -4199,11 +4155,6 @@ + int value_field_idx = C->get_alias_index(value_field_type); + Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset), + value_type, T_OBJECT, value_field_idx, MemNode::unordered); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load); +- } +-#endif + // String.value field is known to be @Stable. 
+ if (UseImplicitStableValues) { + load = cast_array_to_stable(load, value_type); +diff -uNr openjdk/hotspot/src/share/vm/opto/ifnode.cpp afu8u/hotspot/src/share/vm/opto/ifnode.cpp +--- openjdk/hotspot/src/share/vm/opto/ifnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/ifnode.cpp 2025-05-06 10:53:45.111633673 +0800 +@@ -23,7 +23,6 @@ + */ + + #include "precompiled.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" + #include "memory/allocation.inline.hpp" + #include "opto/addnode.hpp" + #include "opto/cfgnode.hpp" +@@ -610,29 +609,6 @@ + return NULL; // Dead loop? Or hit root? + } + +-bool IfNode::is_shenandoah_marking_if(PhaseTransform *phase) const { +- if (!UseShenandoahGC) { +- return false; +- } +- +- if (Opcode() != Op_If) { +- return false; +- } +- +- Node* bol = in(1); +- assert(bol->is_Bool(), ""); +- Node* cmpx = bol->in(1); +- if (bol->as_Bool()->_test._test == BoolTest::ne && +- cmpx->is_Cmp() && cmpx->in(2) == phase->intcon(0) && +- cmpx->in(1)->in(1)->is_shenandoah_state_load() && +- cmpx->in(1)->in(2)->is_Con() && +- cmpx->in(1)->in(2) == phase->intcon(ShenandoahHeap::MARKING)) { +- return true; +- } +- +- return false; +-} +- + + //------------------------------filtered_int_type-------------------------------- + // Return a possibly more restrictive type for val based on condition control flow for an if +diff -uNr openjdk/hotspot/src/share/vm/opto/lcm.cpp afu8u/hotspot/src/share/vm/opto/lcm.cpp +--- openjdk/hotspot/src/share/vm/opto/lcm.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/lcm.cpp 2025-05-06 10:53:45.111633673 +0800 +@@ -44,6 +44,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + // Optimization - Graph Style +@@ -187,6 +189,8 @@ + case Op_LoadRange: + case Op_LoadD_unaligned: + case Op_LoadL_unaligned: ++ 
assert(mach->in(2) == val, "should be address"); ++ break; + case Op_StoreB: + case Op_StoreC: + case Op_StoreCM: +diff -uNr openjdk/hotspot/src/share/vm/opto/library_call.cpp afu8u/hotspot/src/share/vm/opto/library_call.cpp +--- openjdk/hotspot/src/share/vm/opto/library_call.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/library_call.cpp 2025-05-06 11:13:08.131672951 +0800 +@@ -42,11 +42,6 @@ + #include "prims/nativeLookup.hpp" + #include "runtime/sharedRuntime.hpp" + #include "utilities/macros.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahRuntime.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + class LibraryIntrinsic : public InlineCallGenerator { + // Extend the set of intrinsics known to the runtime: +@@ -2435,7 +2430,7 @@ + // runtime filters that guard the pre-barrier code. + // Also add memory barrier for non volatile load from the referent field + // to prevent commoning of loads across safepoint. +- if (!(UseG1GC || UseShenandoahGC) && !need_mem_bar) ++ if (!UseG1GC && !need_mem_bar) + return; + + // Some compile time checks. +@@ -2692,14 +2687,6 @@ + // or Compile::must_alias will throw a diagnostic assert.) + bool need_mem_bar = (alias_type->adr_type() == TypeOopPtr::BOTTOM); + +-#if INCLUDE_ALL_GCS +- // Work around JDK-8220714 bug. This is done for Shenandoah only, until +- // the shared code fix is upstreamed and properly tested there. 
+- if (UseShenandoahGC) { +- need_mem_bar |= is_native_ptr; +- } +-#endif +- + // If we are reading the value of the referent field of a Reference + // object (either by using Unsafe directly or through reflection) + // then, if G1 is enabled, we need to record the referent in an +@@ -2759,11 +2746,6 @@ + // To be valid, unsafe loads may depend on other conditions than + // the one that guards them: pin the Load node + load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (type == T_OBJECT || type == T_ARRAY)) { +- load = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load); +- } +-#endif + // load value + switch (type) { + case T_BOOLEAN: +@@ -2817,11 +2799,6 @@ + + if (is_volatile) { + if (!is_store) { +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- load = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(load); +- } +-#endif + Node* mb = insert_mem_bar(Op_MemBarAcquire, load); + mb->as_MemBar()->set_trailing_load(); + } else { +@@ -3134,11 +3111,6 @@ + load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type())); + } + #endif +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- load_store = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, load_store); +- } +-#endif + if (can_move_pre_barrier()) { + // Don't need to load pre_val. The old value is returned by load_store. 
+ // The pre_barrier can execute after the xchg as long as no safepoint +@@ -4574,20 +4546,6 @@ + countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off))); + countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) )); + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCloneBarrier) { +- assert (src->is_AddP(), "for clone the src should be the interior ptr"); +- assert (dest->is_AddP(), "for clone the dst should be the interior ptr"); +- +- // Make sure that references in the cloned object are updated for Shenandoah. +- make_runtime_call(RC_LEAF|RC_NO_FP, +- OptoRuntime::shenandoah_clone_barrier_Type(), +- CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier), +- "shenandoah_clone_barrier", TypePtr::BOTTOM, +- src->in(AddPNode::Base)); +- } +-#endif +- + const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; + bool disjoint_bases = true; + generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases, +@@ -5324,7 +5282,7 @@ + // At this point we know we do not need type checks on oop stores. + + // Let's see if we need card marks: +- if (alloc != NULL && use_ReduceInitialCardMarks() && ! UseShenandoahGC) { ++ if (alloc != NULL && use_ReduceInitialCardMarks()) { + // If we do not need card marks, copy using the jint or jlong stub. + copy_type = LP64_ONLY(UseCompressedOops ? T_INT : T_LONG) NOT_LP64(T_INT); + assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), +@@ -6360,12 +6318,6 @@ + Node* no_ctrl = NULL; + Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered); + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- result = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, result); +- } +-#endif +- + // Use the pre-barrier to record the value in the referent field + pre_barrier(false /* do_load */, + control(), +@@ -6422,12 +6374,6 @@ + // Build the load. + MemNode::MemOrd mo = is_vol ? 
MemNode::acquire : MemNode::unordered; + Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) { +- loadedField = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, loadedField); +- } +-#endif +- + // If reference is volatile, prevent following memory ops from + // floating up past the volatile read. Also prevents commoning + // another volatile read. +diff -uNr openjdk/hotspot/src/share/vm/opto/locknode.hpp afu8u/hotspot/src/share/vm/opto/locknode.hpp +--- openjdk/hotspot/src/share/vm/opto/locknode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/locknode.hpp 2025-05-06 10:53:45.111633673 +0800 +@@ -42,6 +42,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + //------------------------------BoxLockNode------------------------------------ +diff -uNr openjdk/hotspot/src/share/vm/opto/loopnode.cpp afu8u/hotspot/src/share/vm/opto/loopnode.cpp +--- openjdk/hotspot/src/share/vm/opto/loopnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/loopnode.cpp 2025-05-06 11:13:08.131672951 +0800 +@@ -37,10 +37,6 @@ + #include "opto/rootnode.hpp" + #include "opto/superword.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif +- + //============================================================================= + //------------------------------is_loop_iv------------------------------------- + // Determine if a node is Counted loop induction variable. 
+@@ -2360,12 +2356,6 @@ + C->set_major_progress(); + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && !C->major_progress()) { +- ShenandoahBarrierC2Support::pin_and_expand(this); +- } +-#endif +- + // Cleanup any modified bits + _igvn.optimize(); + +@@ -3357,10 +3347,7 @@ + } + while(worklist.size() != 0 && LCA != early) { + Node* s = worklist.pop(); +- if (s->is_Load() || +- (UseShenandoahGC && +- (s->is_ShenandoahBarrier() || s->Opcode() == Op_SafePoint || +- (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)))) { ++ if (s->is_Load()) { + continue; + } else if (s->is_MergeMem()) { + for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) { +@@ -3592,9 +3579,6 @@ + case Op_AryEq: + pinned = false; + } +- if (UseShenandoahGC && n->is_CMove()) { +- pinned = false; +- } + if( pinned ) { + IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); + if( !chosen_loop->_child ) // Inner loop? +@@ -3649,35 +3633,8 @@ + // which can inhibit range check elimination. + if (least != early) { + Node* ctrl_out = least->unique_ctrl_out(); +- if (UseShenandoahGC && ctrl_out && ctrl_out->is_Loop() && ++ if (ctrl_out && ctrl_out->is_CountedLoop() && + least == ctrl_out->in(LoopNode::EntryControl)) { +- // Move the node above predicates as far up as possible so a +- // following pass of loop predication doesn't hoist a predicate +- // that depends on it above that node. 
+- Node* new_ctrl = least; +- for (;;) { +- if (!new_ctrl->is_Proj()) { +- break; +- } +- CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); +- if (call == NULL) { +- break; +- } +- int req = call->uncommon_trap_request(); +- Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); +- if (trap_reason != Deoptimization::Reason_loop_limit_check && +- trap_reason != Deoptimization::Reason_predicate) { +- break; +- } +- Node* c = new_ctrl->in(0)->in(0); +- if (is_dominator(c, early) && c != early) { +- break; +- } +- new_ctrl = c; +- } +- least = new_ctrl; +- } else if (ctrl_out && ctrl_out->is_CountedLoop() && +- least == ctrl_out->in(LoopNode::EntryControl)) { + Node* least_dom = idom(least); + if (get_loop(least_dom)->is_member(get_loop(least))) { + least = least_dom; +@@ -3863,7 +3820,6 @@ + } + } + } +-#endif + + // Collect a R-P-O for the whole CFG. + // Result list is in post-order (scan backwards for RPO) +@@ -3886,6 +3842,7 @@ + } + } + } ++#endif + + + //============================================================================= +diff -uNr openjdk/hotspot/src/share/vm/opto/loopnode.hpp afu8u/hotspot/src/share/vm/opto/loopnode.hpp +--- openjdk/hotspot/src/share/vm/opto/loopnode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/loopnode.hpp 2025-05-06 11:13:08.131672951 +0800 +@@ -571,12 +571,10 @@ + // Mark as post visited + void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; } + +-public: + // Set/get control node out. Set lower bit to distinguish from IdealLoopTree + // Returns true if "n" is a data node, false if it's a control node. 
+ bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; } + +-private: + // clear out dead code after build_loop_late + Node_List _deadlist; + +@@ -752,7 +750,6 @@ + } + return n; + } +-public: + Node *idom(Node* d) const { + uint didx = d->_idx; + Node *n = idom_no_update(d); +@@ -765,8 +762,6 @@ + return _dom_depth[d->_idx]; + } + void set_idom(Node* d, Node* n, uint dom_depth); +- +-private: + // Locally compute IDOM using dom_lca call + Node *compute_idom( Node *region ) const; + // Recompute dom_depth +@@ -1050,7 +1045,6 @@ + Node *split_thru_region( Node *n, Node *region ); + // Split Node 'n' through merge point if there is enough win. + Node *split_thru_phi( Node *n, Node *region, int policy ); +- + // Found an If getting its condition-code input from a Phi in the + // same block. Split thru the Region. + void do_split_if( Node *iff ); +@@ -1085,11 +1079,11 @@ + #ifdef ASSERT + void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA); + #endif +- void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const; + + #ifndef PRODUCT + void dump( ) const; + void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const; ++ void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const; + void verify() const; // Major slow :-) + void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const; + IdealLoopTree *get_loop_idx(Node* n) const { +@@ -1101,9 +1095,6 @@ + static int _loop_invokes; // Count of PhaseIdealLoop invokes + static int _loop_work; // Sum of PhaseIdealLoop x _unique + #endif +- +- PhaseIterGVN& igvn() { return _igvn; } +- IdealLoopTree* ltree_root() const { return _ltree_root; } + }; + + inline Node* IdealLoopTree::tail() { +diff -uNr openjdk/hotspot/src/share/vm/opto/loopopts.cpp afu8u/hotspot/src/share/vm/opto/loopopts.cpp +--- openjdk/hotspot/src/share/vm/opto/loopopts.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/opto/loopopts.cpp 2025-05-06 10:53:45.115633673 +0800 +@@ -32,9 +32,6 @@ + #include "opto/mulnode.hpp" + #include "opto/rootnode.hpp" + #include "opto/subnode.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + //============================================================================= + //------------------------------split_thru_phi--------------------------------- +@@ -120,7 +117,6 @@ + // otherwise it will be not updated during igvn->transform since + // igvn->type(x) is set to x->Value() already. + x->raise_bottom_type(t); +- if (x->Opcode() != Op_ShenandoahLoadReferenceBarrier) { + Node *y = x->Identity(&_igvn); + if (y != x) { + wins++; +@@ -137,9 +133,6 @@ + _igvn._worklist.push(x); + } + } +- } else { +- _igvn._worklist.push(x); +- } + } + if (x != the_clone && the_clone != NULL) + _igvn.remove_dead_node(the_clone); +@@ -311,8 +304,7 @@ + get_ctrl(m->in(2)) != n_ctrl && + get_ctrl(m->in(3)) != n_ctrl) { + // Move the AddP up to dominating point +- Node* c = find_non_split_ctrl(idom(n_ctrl)); +- set_ctrl_and_loop(m, c); ++ set_ctrl_and_loop(m, find_non_split_ctrl(idom(n_ctrl))); + continue; + } + return NULL; +@@ -752,11 +744,6 @@ + !phi->in(LoopNode::LoopBackControl)->is_Load()) + C->set_major_progress(); + +- // Moved a barrier around the loop, 'en-registering' something. 
+- if (n_blk->is_Loop() && n->Opcode() == Op_ShenandoahLoadReferenceBarrier && +- phi->in(LoopNode::LoopBackControl)->Opcode() != Op_ShenandoahLoadReferenceBarrier) +- C->set_major_progress(); +- + return phi; + } + +diff -uNr openjdk/hotspot/src/share/vm/opto/loopPredicate.cpp afu8u/hotspot/src/share/vm/opto/loopPredicate.cpp +--- openjdk/hotspot/src/share/vm/opto/loopPredicate.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/loopPredicate.cpp 2025-05-06 10:53:45.111633673 +0800 +@@ -409,9 +409,6 @@ + if (_lpt->is_invariant(n)) { // known invariant + _invariant.set(n->_idx); + } else if (!n->is_CFG()) { +- if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- return; +- } + Node *n_ctrl = _phase->ctrl_or_self(n); + Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG + if (_phase->is_dominator(n_ctrl, u_ctrl)) { +diff -uNr openjdk/hotspot/src/share/vm/opto/machnode.cpp afu8u/hotspot/src/share/vm/opto/machnode.cpp +--- openjdk/hotspot/src/share/vm/opto/machnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/machnode.cpp 2025-05-06 10:53:45.115633673 +0800 +@@ -26,9 +26,6 @@ + #include "gc_interface/collectedHeap.hpp" + #include "opto/machnode.hpp" + #include "opto/regalloc.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + //============================================================================= + // Return the value requested +@@ -788,12 +785,6 @@ + return &jvms_for_throw; + } + +-uint MachMemBarNode::size_of() const { return sizeof(*this); } +- +-const TypePtr *MachMemBarNode::adr_type() const { +- return _adr_type; +-} +- + //============================================================================= + #ifndef PRODUCT + void labelOper::int_format(PhaseRegAlloc *ra, const MachNode *node, outputStream *st) const { +diff -uNr openjdk/hotspot/src/share/vm/opto/machnode.hpp afu8u/hotspot/src/share/vm/opto/machnode.hpp +--- 
openjdk/hotspot/src/share/vm/opto/machnode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/machnode.hpp 2025-05-06 10:53:45.115633673 +0800 +@@ -904,19 +904,6 @@ + virtual JVMState* jvms() const; + }; + +-class MachMemBarNode : public MachNode { +- virtual uint size_of() const; // Size is bigger +-public: +- const TypePtr* _adr_type; // memory effects of call or return +- MachMemBarNode() : MachNode() { +- init_class_id(Class_MachMemBar); +- _adr_type = TypePtr::BOTTOM; // the default: all of memory +- } +- +- void set_adr_type(const TypePtr* atp) { _adr_type = atp; } +- virtual const TypePtr *adr_type() const; +-}; +- + + //------------------------------MachTempNode----------------------------------- + // Node used by the adlc to construct inputs to represent temporary registers +diff -uNr openjdk/hotspot/src/share/vm/opto/macro.cpp afu8u/hotspot/src/share/vm/opto/macro.cpp +--- openjdk/hotspot/src/share/vm/opto/macro.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/macro.cpp 2025-05-06 10:53:45.115633673 +0800 +@@ -41,11 +41,6 @@ + #include "opto/subnode.hpp" + #include "opto/type.hpp" + #include "runtime/sharedRuntime.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + + // +@@ -449,13 +444,7 @@ + if (val == mem) { + values.at_put(j, mem); + } else if (val->is_Store()) { +- Node* n = val->in(MemNode::ValueIn); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- n = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(n); +- } +-#endif +- values.at_put(j, n); ++ values.at_put(j, val->in(MemNode::ValueIn)); + } else if(val->is_Proj() && val->in(0) == alloc) { + values.at_put(j, _igvn.zerocon(ft)); + } else if (val->is_Phi()) { +@@ -557,13 +546,7 @@ + // hit a sentinel, return appropriate 0 value + return 
_igvn.zerocon(ft); + } else if (mem->is_Store()) { +- Node* n = mem->in(MemNode::ValueIn); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- n = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(n); +- } +-#endif +- return n; ++ return mem->in(MemNode::ValueIn); + } else if (mem->is_Phi()) { + // attempt to produce a Phi reflecting the values on the input paths of the Phi + Node_Stack value_phis(a, 8); +@@ -630,8 +613,7 @@ + for (DUIterator_Fast kmax, k = use->fast_outs(kmax); + k < kmax && can_eliminate; k++) { + Node* n = use->fast_out(k); +- if (!n->is_Store() && n->Opcode() != Op_CastP2X && +- (!UseShenandoahGC || !n->is_g1_wb_pre_call())) { ++ if (!n->is_Store() && n->Opcode() != Op_CastP2X) { + DEBUG_ONLY(disq_node = n;) + if (n->is_Load() || n->is_LoadStore()) { + NOT_PRODUCT(fail_eliminate = "Field load";) +@@ -902,14 +884,11 @@ + } + #endif + _igvn.replace_node(n, n->in(MemNode::Memory)); +- } else if (UseShenandoahGC && n->is_g1_wb_pre_call()) { +- C->shenandoah_eliminate_g1_wb_pre(n, &_igvn); + } else { + eliminate_card_mark(n); + } + k -= (oc2 - use->outcnt()); + } +- _igvn.remove_dead_node(use); + } else { + eliminate_card_mark(use); + } +diff -uNr openjdk/hotspot/src/share/vm/opto/matcher.cpp afu8u/hotspot/src/share/vm/opto/matcher.cpp +--- openjdk/hotspot/src/share/vm/opto/matcher.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/matcher.cpp 2025-05-06 11:13:08.135672951 +0800 +@@ -52,9 +52,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" +-#endif +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + OptoReg::Name OptoReg::c_frame_pointer; +@@ -1027,9 +1026,6 @@ + m = n->is_SafePoint() ? 
match_sfpt(n->as_SafePoint()):match_tree(n); + if (C->failing()) return NULL; + if (m == NULL) { Matcher::soft_match_failure(); return NULL; } +- if (n->is_MemBar() && UseShenandoahGC) { +- m->as_MachMemBar()->set_adr_type(n->adr_type()); +- } + } else { // Nothing the matcher cares about + if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) { // Projections? + // Convert to machine-dependent projection +@@ -1074,15 +1070,6 @@ + mstack.push(m, Visit, n, -1); + } + +- // Handle precedence edges for interior nodes +- for (i = n->len()-1; (uint)i >= n->req(); i--) { +- Node *m = n->in(i); +- if (m == NULL || C->node_arena()->contains(m)) continue; +- n->rm_prec(i); +- // set -1 to call add_prec() instead of set_req() during Step1 +- mstack.push(m, Visit, n, -1); +- } +- + // For constant debug info, I'd rather have unmatched constants. + int cnt = n->req(); + JVMState* jvms = n->jvms(); +@@ -1773,14 +1760,6 @@ + return ex; + } + +-void Matcher::handle_precedence_edges(Node* n, MachNode *mach) { +- for (uint i = n->req(); i < n->len(); i++) { +- if (n->in(i) != NULL) { +- mach->add_prec(n->in(i)); +- } +- } +-} +- + void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) { + // 'op' is what I am expecting to receive + int op = _leftOp[rule]; +@@ -1815,8 +1794,6 @@ + + + uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) { +- handle_precedence_edges(s->_leaf, mach); +- + if( s->_leaf->is_Load() ) { + Node *mem2 = s->_leaf->in(MemNode::Memory); + assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" 
); +@@ -1899,9 +1876,6 @@ + mem = s->_leaf->in(MemNode::Memory); + debug_only(_mem_node = s->_leaf;) + } +- +- handle_precedence_edges(s->_leaf, mach); +- + if( s->_leaf->in(0) && s->_leaf->req() > 1) { + if( !mach->in(0) ) + mach->set_req(0,s->_leaf->in(0)); +diff -uNr openjdk/hotspot/src/share/vm/opto/matcher.hpp afu8u/hotspot/src/share/vm/opto/matcher.hpp +--- openjdk/hotspot/src/share/vm/opto/matcher.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/matcher.hpp 2025-05-06 10:53:45.115633673 +0800 +@@ -124,8 +124,6 @@ + // Mach node for ConP #NULL + MachNode* _mach_null; + +- void handle_precedence_edges(Node* n, MachNode *mach); +- + public: + int LabelRootDepth; + // Convert ideal machine register to a register mask for spill-loads +diff -uNr openjdk/hotspot/src/share/vm/opto/memnode.cpp afu8u/hotspot/src/share/vm/opto/memnode.cpp +--- openjdk/hotspot/src/share/vm/opto/memnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/memnode.cpp 2025-05-06 10:53:45.119633673 +0800 +@@ -38,11 +38,6 @@ + #include "opto/mulnode.hpp" + #include "opto/phaseX.hpp" + #include "opto/regmask.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahForwarding.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + // Portions of code courtesy of Clifford Click + +@@ -677,6 +672,216 @@ + } + } + ++//------------------------adr_phi_is_loop_invariant---------------------------- ++// A helper function for Ideal_DU_postCCP to check if a Phi in a counted ++// loop is loop invariant. Make a quick traversal of Phi and associated ++// CastPP nodes, looking to see if they are a closed group within the loop. ++bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) { ++ // The idea is that the phi-nest must boil down to only CastPP nodes ++ // with the same data. 
This implies that any path into the loop already ++ // includes such a CastPP, and so the original cast, whatever its input, ++ // must be covered by an equivalent cast, with an earlier control input. ++ ResourceMark rm; ++ ++ // The loop entry input of the phi should be the unique dominating ++ // node for every Phi/CastPP in the loop. ++ Unique_Node_List closure; ++ closure.push(adr_phi->in(LoopNode::EntryControl)); ++ ++ // Add the phi node and the cast to the worklist. ++ Unique_Node_List worklist; ++ worklist.push(adr_phi); ++ if( cast != NULL ){ ++ if( !cast->is_ConstraintCast() ) return false; ++ worklist.push(cast); ++ } ++ ++ // Begin recursive walk of phi nodes. ++ while( worklist.size() ){ ++ // Take a node off the worklist ++ Node *n = worklist.pop(); ++ if( !closure.member(n) ){ ++ // Add it to the closure. ++ closure.push(n); ++ // Make a sanity check to ensure we don't waste too much time here. ++ if( closure.size() > 20) return false; ++ // This node is OK if: ++ // - it is a cast of an identical value ++ // - or it is a phi node (then we add its inputs to the worklist) ++ // Otherwise, the node is not OK, and we presume the cast is not invariant ++ if( n->is_ConstraintCast() ){ ++ worklist.push(n->in(1)); ++ } else if( n->is_Phi() ) { ++ for( uint i = 1; i < n->req(); i++ ) { ++ worklist.push(n->in(i)); ++ } ++ } else { ++ return false; ++ } ++ } ++ } ++ ++ // Quit when the worklist is empty, and we've found no offending nodes. ++ return true; ++} ++ ++//------------------------------Ideal_DU_postCCP------------------------------- ++// Find any cast-away of null-ness and keep its control. Null cast-aways are ++// going away in this pass and we need to make this memory op depend on the ++// gating null check. ++Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { ++ return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address)); ++} ++ ++// I tried to leave the CastPP's in. 
This makes the graph more accurate in ++// some sense; we get to keep around the knowledge that an oop is not-null ++// after some test. Alas, the CastPP's interfere with GVN (some values are ++// the regular oop, some are the CastPP of the oop, all merge at Phi's which ++// cannot collapse, etc). This cost us 10% on SpecJVM, even when I removed ++// some of the more trivial cases in the optimizer. Removing more useless ++// Phi's started allowing Loads to illegally float above null checks. I gave ++// up on this approach. CNC 10/20/2000 ++// This static method may be called not from MemNode (EncodePNode calls it). ++// Only the control edge of the node 'n' might be updated. ++Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) { ++ Node *skipped_cast = NULL; ++ // Need a null check? Regular static accesses do not because they are ++ // from constant addresses. Array ops are gated by the range check (which ++ // always includes a NULL check). Just check field ops. ++ if( n->in(MemNode::Control) == NULL ) { ++ // Scan upwards for the highest location we can place this memory op. ++ while( true ) { ++ switch( adr->Opcode() ) { ++ ++ case Op_AddP: // No change to NULL-ness, so peek thru AddP's ++ adr = adr->in(AddPNode::Base); ++ continue; ++ ++ case Op_DecodeN: // No change to NULL-ness, so peek thru ++ case Op_DecodeNKlass: ++ adr = adr->in(1); ++ continue; ++ ++ case Op_EncodeP: ++ case Op_EncodePKlass: ++ // EncodeP node's control edge could be set by this method ++ // when EncodeP node depends on CastPP node. ++ // ++ // Use its control edge for memory op because EncodeP may go away ++ // later when it is folded with following or preceding DecodeN node. ++ if (adr->in(0) == NULL) { ++ // Keep looking for cast nodes. ++ adr = adr->in(1); ++ continue; ++ } ++ ccp->hash_delete(n); ++ n->set_req(MemNode::Control, adr->in(0)); ++ ccp->hash_insert(n); ++ return n; ++ ++ case Op_CastPP: ++ // If the CastPP is useless, just peek on through it. 
++ if( ccp->type(adr) == ccp->type(adr->in(1)) ) { ++ // Remember the cast that we've peeked though. If we peek ++ // through more than one, then we end up remembering the highest ++ // one, that is, if in a loop, the one closest to the top. ++ skipped_cast = adr; ++ adr = adr->in(1); ++ continue; ++ } ++ // CastPP is going away in this pass! We need this memory op to be ++ // control-dependent on the test that is guarding the CastPP. ++ ccp->hash_delete(n); ++ n->set_req(MemNode::Control, adr->in(0)); ++ ccp->hash_insert(n); ++ return n; ++ ++ case Op_Phi: ++ // Attempt to float above a Phi to some dominating point. ++ if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) { ++ // If we've already peeked through a Cast (which could have set the ++ // control), we can't float above a Phi, because the skipped Cast ++ // may not be loop invariant. ++ if (adr_phi_is_loop_invariant(adr, skipped_cast)) { ++ adr = adr->in(1); ++ continue; ++ } ++ } ++ ++ // Intentional fallthrough! ++ ++ // No obvious dominating point. The mem op is pinned below the Phi ++ // by the Phi itself. If the Phi goes away (no true value is merged) ++ // then the mem op can float, but not indefinitely. It must be pinned ++ // behind the controls leading to the Phi. ++ case Op_CheckCastPP: ++ // These usually stick around to change address type, however a ++ // useless one can be elided and we still need to pick up a control edge ++ if (adr->in(0) == NULL) { ++ // This CheckCastPP node has NO control and is likely useless. But we ++ // need check further up the ancestor chain for a control input to keep ++ // the node in place. 4959717. ++ skipped_cast = adr; ++ adr = adr->in(1); ++ continue; ++ } ++ ccp->hash_delete(n); ++ n->set_req(MemNode::Control, adr->in(0)); ++ ccp->hash_insert(n); ++ return n; ++ ++ // List of "safe" opcodes; those that implicitly block the memory ++ // op below any null check. 
++ case Op_CastX2P: // no null checks on native pointers ++ case Op_Parm: // 'this' pointer is not null ++ case Op_LoadP: // Loading from within a klass ++ case Op_LoadN: // Loading from within a klass ++ case Op_LoadKlass: // Loading from within a klass ++ case Op_LoadNKlass: // Loading from within a klass ++ case Op_ConP: // Loading from a klass ++ case Op_ConN: // Loading from a klass ++ case Op_ConNKlass: // Loading from a klass ++ case Op_CreateEx: // Sucking up the guts of an exception oop ++ case Op_Con: // Reading from TLS ++ case Op_CMoveP: // CMoveP is pinned ++ case Op_CMoveN: // CMoveN is pinned ++ break; // No progress ++ ++ case Op_Proj: // Direct call to an allocation routine ++ case Op_SCMemProj: // Memory state from store conditional ops ++#ifdef ASSERT ++ { ++ assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value"); ++ const Node* call = adr->in(0); ++ if (call->is_CallJava()) { ++ const CallJavaNode* call_java = call->as_CallJava(); ++ const TypeTuple *r = call_java->tf()->range(); ++ assert(r->cnt() > TypeFunc::Parms, "must return value"); ++ const Type* ret_type = r->field_at(TypeFunc::Parms); ++ assert(ret_type && ret_type->isa_ptr(), "must return pointer"); ++ // We further presume that this is one of ++ // new_instance_Java, new_array_Java, or ++ // the like, but do not assert for this. ++ } else if (call->is_Allocate()) { ++ // similar case to new_instance_Java, etc. ++ } else if (!call->is_CallLeaf()) { ++ // Projections from fetch_oop (OSR) are allowed as well. ++ ShouldNotReachHere(); ++ } ++ } ++#endif ++ break; ++ default: ++ ShouldNotReachHere(); ++ } ++ break; ++ } ++ } ++ ++ return NULL; // No progress ++} ++ ++ + //============================================================================= + // Should LoadNode::Ideal() attempt to remove control edges? 
+ bool LoadNode::can_remove_control() const { +@@ -901,11 +1106,6 @@ + (tp != NULL) && tp->is_ptr_to_boxed_value()) { + intptr_t ignore = 0; + Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- base = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(base); +- } +-#endif + if (base != NULL && base->is_Proj() && + base->as_Proj()->_con == TypeFunc::Parms && + base->in(0)->is_CallStaticJava() && +@@ -955,41 +1155,8 @@ + if (!phase->type(value)->higher_equal(phase->type(this))) + return this; + } +- PhaseIterGVN* igvn = phase->is_IterGVN(); +- if (UseShenandoahGC && +- igvn != NULL && +- value->is_Phi() && +- value->req() > 2 && +- value->in(1) != NULL && +- value->in(1)->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- if (igvn->_worklist.member(value) || +- igvn->_worklist.member(value->in(0)) || +- (value->in(0)->in(1) != NULL && +- value->in(0)->in(1)->is_IfProj() && +- (igvn->_worklist.member(value->in(0)->in(1)) || +- (value->in(0)->in(1)->in(0) != NULL && +- igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) { +- igvn->_worklist.push(this); +- return this; +- } +- } + // (This works even when value is a Con, but LoadNode::Value + // usually runs first, producing the singleton type of the Con.) +- // TODO!! +- if (false && UseShenandoahGC) { +- Node* value_no_barrier = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(value->Opcode() == Op_EncodeP ? value->in(1) : value); +- if (value->Opcode() == Op_EncodeP) { +- if (value_no_barrier != value->in(1)) { +- Node* encode = value->clone(); +- encode->set_req(1, value_no_barrier); +- encode = phase->transform(encode); +- return encode; +- } +- } else { +- return value_no_barrier; +- } +- } +- + return value; + } + +@@ -1528,10 +1695,9 @@ + const bool off_beyond_header = ((uint)off >= (uint)min_base_off); + + // Try to constant-fold a stable array element. 
+- if (FoldStableValues && ary->is_stable()) { ++ if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) { + // Make sure the reference is not into the header and the offset is constant +- ciObject* aobj = ary->const_oop(); +- if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) { ++ if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) { + const Type* con_type = fold_stable_ary_elem(ary, off, memory_type()); + if (con_type != NULL) { + return con_type; +diff -uNr openjdk/hotspot/src/share/vm/opto/memnode.hpp afu8u/hotspot/src/share/vm/opto/memnode.hpp +--- openjdk/hotspot/src/share/vm/opto/memnode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/memnode.hpp 2025-05-06 10:53:45.119633673 +0800 +@@ -89,6 +89,10 @@ + // This one should probably be a phase-specific function: + static bool all_controls_dominate(Node* dom, Node* sub); + ++ // Find any cast-away of null-ness and keep its control. ++ static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ); ++ virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); ++ + virtual const class TypePtr *adr_type() const; // returns bottom_type of address + + // Shared code for Ideal methods: +@@ -107,12 +111,6 @@ + #endif + } + +-#ifdef ASSERT +- void set_raw_adr_type(const TypePtr *t) { +- _adr_type = t; +- } +-#endif +- + // Map a load or store opcode to its corresponding store opcode. + // (Return -1 if unknown.) 
+ virtual int store_Opcode() const { return -1; } +@@ -260,22 +258,6 @@ + // Helper function to allow a raw load without control edge for some cases + static bool is_immutable_value(Node* adr); + #endif +- +- virtual bool is_g1_marking_load() const { +- const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()); +- return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal +- && in(2)->in(3)->is_Con() +- && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset; +- } +- +- virtual bool is_shenandoah_state_load() const { +- if (!UseShenandoahGC) return false; +- const int state_offset = in_bytes(JavaThread::gc_state_offset()); +- return in(2)->is_AddP() && in(2)->in(2)->Opcode() == Op_ThreadLocal +- && in(2)->in(3)->is_Con() +- && in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == state_offset; +- } +- + protected: + const Type* load_array_final_field(const TypeKlassPtr *tkls, + ciKlass* klass) const; +@@ -809,9 +791,6 @@ + virtual const Type *bottom_type() const { return _type; } + virtual uint ideal_reg() const; + virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address +- void set_adr_type(const TypePtr *t) { +- _adr_type = t; +- } + + bool result_not_used() const; + MemBarNode* trailing_membar() const; +diff -uNr openjdk/hotspot/src/share/vm/opto/mulnode.cpp afu8u/hotspot/src/share/vm/opto/mulnode.cpp +--- openjdk/hotspot/src/share/vm/opto/mulnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/mulnode.cpp 2025-05-06 10:53:45.119633673 +0800 +@@ -30,10 +30,6 @@ + #include "opto/mulnode.hpp" + #include "opto/phaseX.hpp" + #include "opto/subnode.hpp" +-#include "utilities/macros.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + // Portions of code courtesy of Clifford Click + +@@ -477,15 +473,6 @@ + Node *load = in(1); + uint lop = load->Opcode(); + 
+-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahBarrierC2Support::is_gc_state_load(load)) { +- // Do not touch the load+mask, we would match the whole sequence exactly. +- // Converting the load to LoadUB/LoadUS would mismatch and waste a register +- // on the barrier fastpath. +- return NULL; +- } +-#endif +- + // Masking bits off of a Character? Hi bits are already zero. + if( lop == Op_LoadUS && + (mask & 0xFFFF0000) ) // Can we make a smaller mask? +diff -uNr openjdk/hotspot/src/share/vm/opto/multnode.cpp afu8u/hotspot/src/share/vm/opto/multnode.cpp +--- openjdk/hotspot/src/share/vm/opto/multnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/multnode.cpp 2025-05-06 10:53:45.119633673 +0800 +@@ -32,9 +32,6 @@ + #include "opto/phaseX.hpp" + #include "opto/regmask.hpp" + #include "opto/type.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + //============================================================================= + //------------------------------MultiNode-------------------------------------- +@@ -151,62 +148,59 @@ + } + + //-------------------------------is_uncommon_trap_proj---------------------------- +-// Return uncommon trap call node if proj is for "proj->[region->..]call_uct" +-// NULL otherwise +-CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) { ++// Return true if proj is the form of "proj->[region->..]call_uct" ++bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) { + int path_limit = 10; + Node* out = this; + for (int ct = 0; ct < path_limit; ct++) { + out = out->unique_ctrl_out(); + if (out == NULL) +- return NULL; ++ return false; + if (out->is_CallStaticJava()) { +- CallStaticJavaNode* call = out->as_CallStaticJava(); +- int req = call->uncommon_trap_request(); ++ int req = out->as_CallStaticJava()->uncommon_trap_request(); + if (req != 0) { + Deoptimization::DeoptReason trap_reason = 
Deoptimization::trap_request_reason(req); + if (trap_reason == reason || reason == Deoptimization::Reason_none) { +- return call; ++ return true; + } + } +- return NULL; // don't do further after call ++ return false; // don't do further after call + } + if (out->Opcode() != Op_Region) +- return NULL; ++ return false; + } +- return NULL; ++ return false; + } + + //-------------------------------is_uncommon_trap_if_pattern------------------------- +-// Return uncommon trap call node for "if(test)-> proj -> ... +-// | +-// V +-// other_proj->[region->..]call_uct" +-// NULL otherwise ++// Return true for "if(test)-> proj -> ... ++// | ++// V ++// other_proj->[region->..]call_uct" ++// + // "must_reason_predicate" means the uct reason must be Reason_predicate +-CallStaticJavaNode* ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) { ++bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) { + Node *in0 = in(0); +- if (!in0->is_If()) return NULL; ++ if (!in0->is_If()) return false; + // Variation of a dead If node. +- if (in0->outcnt() < 2) return NULL; ++ if (in0->outcnt() < 2) return false; + IfNode* iff = in0->as_If(); + + // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate + if (reason != Deoptimization::Reason_none) { + if (iff->in(1)->Opcode() != Op_Conv2B || + iff->in(1)->in(1)->Opcode() != Op_Opaque1) { +- return NULL; ++ return false; + } + } + + ProjNode* other_proj = iff->proj_out(1-_con); + if (other_proj == NULL) // Should never happen, but make Parfait happy. 
+- return NULL; +- CallStaticJavaNode* call = other_proj->is_uncommon_trap_proj(reason); +- if (call != NULL) { ++ return false; ++ if (other_proj->is_uncommon_trap_proj(reason)) { + assert(reason == Deoptimization::Reason_none || + Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); +- return call; ++ return true; + } +- return NULL; ++ return false; + } +diff -uNr openjdk/hotspot/src/share/vm/opto/multnode.hpp afu8u/hotspot/src/share/vm/opto/multnode.hpp +--- openjdk/hotspot/src/share/vm/opto/multnode.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/multnode.hpp 2025-05-06 10:53:45.119633673 +0800 +@@ -89,15 +89,13 @@ + virtual void dump_spec(outputStream *st) const; + #endif + +- // Return uncommon trap call node if proj is for "proj->[region->..]call_uct" +- // NULL otherwise +- CallStaticJavaNode* is_uncommon_trap_proj(Deoptimization::DeoptReason reason); +- // Return uncommon trap call node for "if(test)-> proj -> ... +- // | +- // V +- // other_proj->[region->..]call_uct" +- // NULL otherwise +- CallStaticJavaNode* is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason); ++ // Return true if proj is for "proj->[region->..]call_uct" ++ bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason); ++ // Return true for "if(test)-> proj -> ... 
++ // | ++ // V ++ // other_proj->[region->..]call_uct" ++ bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason); + }; + + #endif // SHARE_VM_OPTO_MULTNODE_HPP +diff -uNr openjdk/hotspot/src/share/vm/opto/node.cpp afu8u/hotspot/src/share/vm/opto/node.cpp +--- openjdk/hotspot/src/share/vm/opto/node.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/node.cpp 2025-05-06 10:53:45.119633673 +0800 +@@ -35,9 +35,6 @@ + #include "opto/regmask.hpp" + #include "opto/type.hpp" + #include "utilities/copy.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + class RegMask; + // #include "phase.hpp" +@@ -532,10 +529,6 @@ + C->add_macro_node(n); + if (is_expensive()) + C->add_expensive_node(n); +- +- if (Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- C->add_shenandoah_barrier(reinterpret_cast(n)); +- } + // If the cloned node is a range check dependent CastII, add it to the list. + CastIINode* cast = n->isa_CastII(); + if (cast != NULL && cast->has_range_check()) { +@@ -669,9 +662,6 @@ + if (is_expensive()) { + compile->remove_expensive_node(this); + } +- if (Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- compile->remove_shenandoah_barrier(reinterpret_cast(this)); +- } + CastIINode* cast = isa_CastII(); + if (cast != NULL && cast->has_range_check()) { + compile->remove_range_check_cast(cast); +@@ -987,11 +977,6 @@ + return (Node*) p; + } + +-// Return true if the current node has an out that matches opcode. +-bool Node::has_out_with(int opcode) { +- return (find_out_with(opcode) != NULL); +-} +- + //------------------------------add_prec--------------------------------------- + // Add a new precedence input. Precedence inputs are unordered, with + // duplicates removed and NULLs packed down at the end. 
+@@ -1398,9 +1383,6 @@ + if (dead->is_expensive()) { + igvn->C->remove_expensive_node(dead); + } +- if (dead->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- igvn->C->remove_shenandoah_barrier(reinterpret_cast(dead)); +- } + CastIINode* cast = dead->isa_CastII(); + if (cast != NULL && cast->has_range_check()) { + igvn->C->remove_range_check_cast(cast); +@@ -1423,8 +1405,6 @@ + // The restriction (outcnt() <= 2) is the same as in set_req_X() + // and remove_globally_dead_node(). + igvn->add_users_to_worklist( n ); +- } else if (n->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(n)) { +- igvn->add_users_to_worklist(n); + } + } + } +@@ -1452,6 +1432,12 @@ + return false; + } + ++//------------------------------Ideal_DU_postCCP------------------------------- ++// Idealize graph, using DU info. Must clone result into new-space ++Node *Node::Ideal_DU_postCCP( PhaseCCP * ) { ++ return NULL; // Default to no change ++} ++ + //------------------------------hash------------------------------------------- + // Hash function over Nodes. 
+ uint Node::hash() const { +@@ -2140,14 +2126,6 @@ + return found; + } + +-void Node::ensure_control_or_add_prec(Node* c) { +- if (in(0) == NULL) { +- set_req(0, c); +- } else if (in(0) != c) { +- add_prec(c); +- } +-} +- + //============================================================================= + //------------------------------yank------------------------------------------- + // Find and remove +diff -uNr openjdk/hotspot/src/share/vm/opto/node.hpp afu8u/hotspot/src/share/vm/opto/node.hpp +--- openjdk/hotspot/src/share/vm/opto/node.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/node.hpp 2025-05-06 10:53:45.119633673 +0800 +@@ -71,7 +71,6 @@ + class FastLockNode; + class FastUnlockNode; + class IfNode; +-class IfProjNode; + class IfFalseNode; + class IfTrueNode; + class InitializeNode; +@@ -101,7 +100,6 @@ + class MachSpillCopyNode; + class MachTempNode; + class MachMergeNode; +-class MachMemBarNode; + class Matcher; + class MemBarNode; + class MemBarStoreStoreNode; +@@ -133,7 +131,6 @@ + class RootNode; + class SafePointNode; + class SafePointScalarObjectNode; +-class ShenandoahBarrierNode; + class StartNode; + class State; + class StoreNode; +@@ -456,8 +453,6 @@ + bool eqv_uncast(const Node* n) const { + return (this->uncast() == n->uncast()); + } +- // Return true if the current node has an out that matches opcode. +- bool has_out_with(int opcode); + + // Find out of current node that matches opcode. 
+ Node* find_out_with(int opcode); +@@ -641,7 +636,6 @@ + DEFINE_CLASS_ID(MachConstantBase, Mach, 4) + DEFINE_CLASS_ID(MachConstant, Mach, 5) + DEFINE_CLASS_ID(MachMerge, Mach, 6) +- DEFINE_CLASS_ID(MachMemBar, Mach, 7) + + DEFINE_CLASS_ID(Type, Node, 2) + DEFINE_CLASS_ID(Phi, Type, 0) +@@ -656,14 +650,12 @@ + DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) + DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) + DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) +- DEFINE_CLASS_ID(ShenandoahBarrier, Type, 7) + + DEFINE_CLASS_ID(Proj, Node, 3) + DEFINE_CLASS_ID(CatchProj, Proj, 0) + DEFINE_CLASS_ID(JumpProj, Proj, 1) +- DEFINE_CLASS_ID(IfProj, Proj, 2) +- DEFINE_CLASS_ID(IfTrue, IfProj, 0) +- DEFINE_CLASS_ID(IfFalse, IfProj, 1) ++ DEFINE_CLASS_ID(IfTrue, Proj, 2) ++ DEFINE_CLASS_ID(IfFalse, Proj, 3) + DEFINE_CLASS_ID(Parm, Proj, 4) + DEFINE_CLASS_ID(MachProj, Proj, 5) + +@@ -789,7 +781,6 @@ + DEFINE_CLASS_QUERY(FastLock) + DEFINE_CLASS_QUERY(FastUnlock) + DEFINE_CLASS_QUERY(If) +- DEFINE_CLASS_QUERY(IfProj) + DEFINE_CLASS_QUERY(IfFalse) + DEFINE_CLASS_QUERY(IfTrue) + DEFINE_CLASS_QUERY(Initialize) +@@ -817,7 +808,6 @@ + DEFINE_CLASS_QUERY(MachSafePoint) + DEFINE_CLASS_QUERY(MachSpillCopy) + DEFINE_CLASS_QUERY(MachTemp) +- DEFINE_CLASS_QUERY(MachMemBar) + DEFINE_CLASS_QUERY(MachMerge) + DEFINE_CLASS_QUERY(Mem) + DEFINE_CLASS_QUERY(MemBar) +@@ -834,7 +824,6 @@ + DEFINE_CLASS_QUERY(Root) + DEFINE_CLASS_QUERY(SafePoint) + DEFINE_CLASS_QUERY(SafePointScalarObject) +- DEFINE_CLASS_QUERY(ShenandoahBarrier) + DEFINE_CLASS_QUERY(Start) + DEFINE_CLASS_QUERY(Store) + DEFINE_CLASS_QUERY(Sub) +@@ -933,14 +922,13 @@ + // Check if 'this' node dominates or equal to 'sub'. 
+ bool dominates(Node* sub, Node_List &nlist); + +- virtual bool is_g1_wb_pre_call() const { return false; } +- virtual bool is_shenandoah_state_load() const { return false; } +- virtual bool is_shenandoah_marking_if(PhaseTransform *phase) const { return false; } +- + protected: + bool remove_dead_region(PhaseGVN *phase, bool can_reshape); + public: + ++ // Idealize graph, using DU info. Done after constant propagation ++ virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); ++ + // See if there is valid pipeline info + static const Pipeline *pipeline_class(); + virtual const Pipeline *pipeline() const; +@@ -974,9 +962,6 @@ + // Return the unique control out if only one. Null if none or more than one. + Node* unique_ctrl_out(); + +- // Set control or add control as precedence edge +- void ensure_control_or_add_prec(Node* c); +- + //----------------- Code Generation + + // Ideal register class for Matching. Zero means unmatched instruction +diff -uNr openjdk/hotspot/src/share/vm/opto/output.hpp afu8u/hotspot/src/share/vm/opto/output.hpp +--- openjdk/hotspot/src/share/vm/opto/output.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/output.hpp 2025-05-06 10:53:45.119633673 +0800 +@@ -41,6 +41,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + class Arena; +diff -uNr openjdk/hotspot/src/share/vm/opto/parse2.cpp afu8u/hotspot/src/share/vm/opto/parse2.cpp +--- openjdk/hotspot/src/share/vm/opto/parse2.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/parse2.cpp 2025-05-06 10:53:45.123633673 +0800 +@@ -40,10 +40,6 @@ + #include "runtime/deoptimization.hpp" + #include "runtime/sharedRuntime.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#endif +- + extern int explicit_null_checks_inserted, + explicit_null_checks_elided; + 
+@@ -55,11 +51,6 @@ + dec_sp(2); // Pop array and index + const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); + Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered); +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (elem_type == T_OBJECT || elem_type == T_ARRAY)) { +- ld = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, ld); +- } +-#endif + push(ld); + } + +diff -uNr openjdk/hotspot/src/share/vm/opto/parse3.cpp afu8u/hotspot/src/share/vm/opto/parse3.cpp +--- openjdk/hotspot/src/share/vm/opto/parse3.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/parse3.cpp 2025-05-06 10:53:45.123633673 +0800 +@@ -36,10 +36,6 @@ + #include "runtime/deoptimization.hpp" + #include "runtime/handles.inline.hpp" + +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#endif +- + //============================================================================= + // Helper methods for _get* and _put* bytecodes + //============================================================================= +@@ -240,13 +236,6 @@ + MemNode::MemOrd mo = is_vol ? 
MemNode::acquire : MemNode::unordered; + Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol); + +- Node* load = ld; +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && (bt == T_OBJECT || bt == T_ARRAY)) { +- ld = ShenandoahBarrierSetC2::bsc2()->load_reference_barrier(this, ld); +- } +-#endif +- + // Adjust Java stack + if (type2size[bt] == 1) + push(ld); +@@ -285,7 +274,7 @@ + if (field->is_volatile()) { + // Memory barrier includes bogus read of value to force load BEFORE membar + assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected"); +- Node* mb = insert_mem_bar(Op_MemBarAcquire, load); ++ Node* mb = insert_mem_bar(Op_MemBarAcquire, ld); + mb->as_MemBar()->set_trailing_load(); + } + } +diff -uNr openjdk/hotspot/src/share/vm/opto/phaseX.cpp afu8u/hotspot/src/share/vm/opto/phaseX.cpp +--- openjdk/hotspot/src/share/vm/opto/phaseX.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/phaseX.cpp 2025-05-06 10:53:45.123633673 +0800 +@@ -35,9 +35,6 @@ + #include "opto/phaseX.hpp" + #include "opto/regalloc.hpp" + #include "opto/rootnode.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + //============================================================================= + #define NODE_HASH_MINIMUM_SIZE 255 +@@ -1285,9 +1282,7 @@ + i++; + } + assert(!(i < imax), "sanity"); +- } +- } else if (in->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(in)) { +- add_users_to_worklist(in); ++ } + } + if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory && + in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) { +@@ -1335,9 +1330,6 @@ + if (dead->is_expensive()) { + C->remove_expensive_node(dead); + } +- if (dead->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- C->remove_shenandoah_barrier(reinterpret_cast(dead)); +- } + CastIINode* cast = dead->isa_CastII(); + if (cast != NULL 
&& cast->has_range_check()) { + C->remove_range_check_cast(cast); +@@ -1554,13 +1546,6 @@ + Node* imem = use->as_Initialize()->proj_out(TypeFunc::Memory); + if (imem != NULL) add_users_to_worklist0(imem); + } +- +- if (use->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- Node* cmp = use->find_out_with(Op_CmpP); +- if (cmp != NULL) { +- _worklist.push(cmp); +- } +- } + } + } + +@@ -1688,25 +1673,6 @@ + } + } + } +- if (m->Opcode() == Op_ShenandoahLoadReferenceBarrier) { +- for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) { +- Node* p = m->fast_out(i2); +- if (p->Opcode() == Op_CmpP) { +- if(p->bottom_type() != type(p)) { +- worklist.push(p); +- } +- } else if (p->Opcode() == Op_AddP) { +- for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) { +- Node* q = p->fast_out(i3); +- if (q->is_Load()) { +- if(q->bottom_type() != type(q)) { +- worklist.push(q); +- } +- } +- } +- } +- } +- } + // If n is used in a counted loop exit condition then the type + // of the counted loop's Phi depends on the type of n. See + // PhiNode::Value(). +@@ -1808,6 +1774,11 @@ + _worklist.push(n); // n re-enters the hash table via the worklist + } + ++ // Idealize graph using DU info. Must clone() into new-space. ++ // DU info is generally used to show profitability, progress or safety ++ // (but generally not needed for correctness). 
++ Node *nn = n->Ideal_DU_postCCP(this); ++ + // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks + switch( n->Opcode() ) { + case Op_FastLock: // Revisit FastLocks for lock coarsening +@@ -1824,6 +1795,12 @@ + default: + break; + } ++ if( nn ) { ++ _worklist.push(n); ++ // Put users of 'n' onto worklist for second igvn transform ++ add_users_to_worklist(n); ++ return nn; ++ } + + return n; + } +@@ -1976,9 +1953,6 @@ + default: + break; + } +- if (old->Opcode() == Op_AddP && CallLeafNode::has_only_g1_wb_pre_uses(old)) { +- igvn->add_users_to_worklist(old); +- } + } + + } +diff -uNr openjdk/hotspot/src/share/vm/opto/regmask.cpp afu8u/hotspot/src/share/vm/opto/regmask.cpp +--- openjdk/hotspot/src/share/vm/opto/regmask.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/regmask.cpp 2025-05-06 10:53:45.123633673 +0800 +@@ -39,6 +39,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + #define RM_SIZE _RM_SIZE /* a constant private to the class RegMask */ +diff -uNr openjdk/hotspot/src/share/vm/opto/regmask.hpp afu8u/hotspot/src/share/vm/opto/regmask.hpp +--- openjdk/hotspot/src/share/vm/opto/regmask.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/regmask.hpp 2025-05-06 10:53:45.123633673 +0800 +@@ -42,6 +42,8 @@ + # include "adfiles/adGlobals_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/adGlobals_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/adGlobals_sw64.hpp" + #endif + + // Some fun naming (textual) substitutions: +diff -uNr openjdk/hotspot/src/share/vm/opto/runtime.cpp afu8u/hotspot/src/share/vm/opto/runtime.cpp +--- openjdk/hotspot/src/share/vm/opto/runtime.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/runtime.cpp 2025-05-06 10:53:45.123633673 +0800 +@@ -34,7 +34,6 @@ 
+ #include "compiler/compileBroker.hpp" + #include "compiler/compilerOracle.hpp" + #include "compiler/oopMap.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp" + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" + #include "gc_implementation/g1/heapRegion.hpp" + #include "gc_interface/collectedHeap.hpp" +@@ -83,6 +82,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + + +@@ -581,31 +582,6 @@ + + return TypeFunc::make(domain, range); + } +- +-const TypeFunc *OptoRuntime::shenandoah_clone_barrier_Type() { +- const Type **fields = TypeTuple::fields(1); +- fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop +- const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); +- +- // create result type (range) +- fields = TypeTuple::fields(0); +- const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields); +- +- return TypeFunc::make(domain, range); +-} +- +-const TypeFunc *OptoRuntime::shenandoah_write_barrier_Type() { +- const Type **fields = TypeTuple::fields(1); +- fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value +- const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); +- +- // create result type (range) +- fields = TypeTuple::fields(1); +- fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; +- const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); +- +- return TypeFunc::make(domain, range); +-} + + const TypeFunc *OptoRuntime::uncommon_trap_Type() { + // create input type (domain) +diff -uNr openjdk/hotspot/src/share/vm/opto/runtime.hpp afu8u/hotspot/src/share/vm/opto/runtime.hpp +--- openjdk/hotspot/src/share/vm/opto/runtime.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/runtime.hpp 2025-05-06 10:53:45.123633673 +0800 +@@ -276,8 +276,6 @@ + static const TypeFunc* multianewarrayN_Type(); 
// multianewarray + static const TypeFunc* g1_wb_pre_Type(); + static const TypeFunc* g1_wb_post_Type(); +- static const TypeFunc* shenandoah_clone_barrier_Type(); +- static const TypeFunc* shenandoah_write_barrier_Type(); + static const TypeFunc* complete_monitor_enter_Type(); + static const TypeFunc* complete_monitor_exit_Type(); + static const TypeFunc* uncommon_trap_Type(); +diff -uNr openjdk/hotspot/src/share/vm/opto/subnode.cpp afu8u/hotspot/src/share/vm/opto/subnode.cpp +--- openjdk/hotspot/src/share/vm/opto/subnode.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/subnode.cpp 2025-05-06 10:53:45.123633673 +0800 +@@ -36,10 +36,6 @@ + #include "opto/phaseX.hpp" + #include "opto/subnode.hpp" + #include "runtime/sharedRuntime.hpp" +-#if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/c2/shenandoahBarrierSetC2.hpp" +-#include "gc_implementation/shenandoah/c2/shenandoahSupport.hpp" +-#endif + + // Portions of code courtesy of Clifford Click + +@@ -853,14 +849,8 @@ + // Return the klass node for + // LoadP(AddP(foo:Klass, #java_mirror)) + // or NULL if not matching. +- +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- n = ShenandoahBarrierSetC2::bsc2()->step_over_gc_barrier(n); +- } +-#endif +- + if (n->Opcode() != Op_LoadP) return NULL; ++ + const TypeInstPtr* tp = phase->type(n)->isa_instptr(); + if (!tp || tp->klass() != phase->C->env()->Class_klass()) return NULL; + +@@ -927,17 +917,8 @@ + if (k1 && (k2 || conk2)) { + Node* lhs = k1; + Node* rhs = (k2 != NULL) ? 
k2 : conk2; +-#if INCLUDE_ALL_GCS +- PhaseIterGVN* igvn = phase->is_IterGVN(); +- if (UseShenandoahGC && igvn != NULL) { +- set_req_X(1, lhs, igvn); +- set_req_X(2, rhs, igvn); +- } else +-#endif +- { +- set_req(1, lhs); +- set_req(2, rhs); +- } ++ this->set_req(1, lhs); ++ this->set_req(2, rhs); + return this; + } + } +diff -uNr openjdk/hotspot/src/share/vm/opto/superword.cpp afu8u/hotspot/src/share/vm/opto/superword.cpp +--- openjdk/hotspot/src/share/vm/opto/superword.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/superword.cpp 2025-05-06 10:53:45.127633673 +0800 +@@ -1688,7 +1688,7 @@ + Node* n_tail = n->in(LoopNode::LoopBackControl); + if (n_tail != n->in(LoopNode::EntryControl)) { + if (!n_tail->is_Mem()) { +- assert(n_tail->is_Mem() || n_tail == n, err_msg_res("unexpected node for memory slice: %s", n_tail->Name())); ++ assert(n_tail->is_Mem(), err_msg_res("unexpected node for memory slice: %s", n_tail->Name())); + return false; // Bailout + } + _mem_slice_head.push(n); +@@ -2317,27 +2317,6 @@ + assert(!valid(), "unsafe access"); + return; + } +- if (UseShenandoahGC) { +- // Detect a Shenandoah write barrier between the pre and main loop +- // (which could break loop alignment code) +- CountedLoopNode *main_head = slp->lp()->as_CountedLoop(); +- if (main_head->is_main_loop()) { +- Node* c = main_head->in(LoopNode::EntryControl)->in(0)->in(0)->in(0); +- if (!c->is_CountedLoopEnd()) { +- // in case of a reserve copy +- c = c->in(0)->in(0); +- assert(c->is_CountedLoopEnd(), "where's the pre loop?"); +- } +- CountedLoopEndNode* pre_end = c->as_CountedLoopEnd(); +- CountedLoopNode* pre_loop = pre_end->loopnode(); +- assert(pre_loop->is_pre_loop(), "where's the pre loop?"); +- +- Node* base_c = phase()->get_ctrl(base); +- if (!phase()->is_dominator(base_c, pre_loop)) { +- return; +- } +- } +- } + for (int i = 0; i < 3; i++) { + if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) { + assert(!valid(), "too complex"); +diff -uNr 
openjdk/hotspot/src/share/vm/opto/superword.hpp afu8u/hotspot/src/share/vm/opto/superword.hpp +--- openjdk/hotspot/src/share/vm/opto/superword.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/opto/superword.hpp 2025-05-06 10:53:45.127633673 +0800 +@@ -229,7 +229,6 @@ + // -----------------------------SuperWord--------------------------------- + // Transforms scalar operations into packed (superword) operations. + class SuperWord : public ResourceObj { +- friend class SWPointer; + private: + PhaseIdealLoop* _phase; + Arena* _arena; +diff -uNr openjdk/hotspot/src/share/vm/precompiled/precompiled.hpp afu8u/hotspot/src/share/vm/precompiled/precompiled.hpp +--- openjdk/hotspot/src/share/vm/precompiled/precompiled.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/precompiled/precompiled.hpp 2025-05-06 10:53:45.127633673 +0800 +@@ -324,7 +324,6 @@ + # include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp" + # include "gc_implementation/shared/gcPolicyCounters.hpp" + # include "gc_implementation/shared/parGCAllocBuffer.hpp" +-# include "gc_implementation/shenandoah/shenandoah_globals.hpp" + #endif // INCLUDE_ALL_GCS + + #endif // !DONT_USE_PRECOMPILED_HEADER +diff -uNr openjdk/hotspot/src/share/vm/prims/jniCheck.cpp afu8u/hotspot/src/share/vm/prims/jniCheck.cpp +--- openjdk/hotspot/src/share/vm/prims/jniCheck.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jniCheck.cpp 2025-05-06 10:53:45.127633673 +0800 +@@ -55,6 +55,9 @@ + #ifdef TARGET_ARCH_ppc + # include "jniTypes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "jniTypes_sw64.hpp" ++#endif + + // Complain every extra number of unplanned local refs + #define CHECK_JNI_LOCAL_REF_CAP_WARN_THRESHOLD 32 +diff -uNr openjdk/hotspot/src/share/vm/prims/jni.cpp afu8u/hotspot/src/share/vm/prims/jni.cpp +--- openjdk/hotspot/src/share/vm/prims/jni.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jni.cpp 
2025-05-06 11:13:08.135672951 +0800 +@@ -38,7 +38,6 @@ + #include "utilities/ostream.hpp" + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +-#include "gc_implementation/shenandoah/shenandoahStringDedup.hpp" + #endif // INCLUDE_ALL_GCS + #include "memory/allocation.hpp" + #include "memory/allocation.inline.hpp" +@@ -2629,7 +2628,7 @@ + // If G1 is enabled and we are accessing the value of the referent + // field in a reference object then we need to register a non-null + // referent with the SATB barrier. +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + bool needs_barrier = false; + + if (ret != NULL && +@@ -4250,24 +4249,6 @@ + } + JNI_END + +-static oop lock_gc_or_pin_object(JavaThread* thread, jobject obj) { +- if (Universe::heap()->supports_object_pinning()) { +- const oop o = JNIHandles::resolve_non_null(obj); +- return Universe::heap()->pin_object(thread, o); +- } else { +- GC_locker::lock_critical(thread); +- return JNIHandles::resolve_non_null(obj); +- } +-} +- +-static void unlock_gc_or_unpin_object(JavaThread* thread, jobject obj) { +- if (Universe::heap()->supports_object_pinning()) { +- const oop o = JNIHandles::resolve_non_null(obj); +- return Universe::heap()->unpin_object(thread, o); +- } else { +- GC_locker::unlock_critical(thread); +- } +-} + + JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy)) + JNIWrapper("GetPrimitiveArrayCritical"); +@@ -4277,10 +4258,11 @@ + HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY( + env, array, (uintptr_t *) isCopy); + #endif /* USDT2 */ ++ GC_locker::lock_critical(thread); + if (isCopy != NULL) { + *isCopy = JNI_FALSE; + } +- oop a = lock_gc_or_pin_object(thread, array); ++ oop a = JNIHandles::resolve_non_null(array); + assert(a->is_array(), "just checking"); + BasicType type; + if (a->is_objArray()) { +@@ -4308,7 +4290,7 @@ + env, array, carray, mode); + #endif /* USDT2 */ + // The array, carray and mode 
arguments are ignored +- unlock_gc_or_unpin_object(thread, array); ++ GC_locker::unlock_critical(thread); + #ifndef USDT2 + DTRACE_PROBE(hotspot_jni, ReleasePrimitiveArrayCritical__return); + #else /* USDT2 */ +@@ -4326,48 +4308,20 @@ + HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY( + env, string, (uintptr_t *) isCopy); + #endif /* USDT2 */ +- jchar* ret; +- if (!UseShenandoahGC) { +- GC_locker::lock_critical(thread); +- if (isCopy != NULL) { +- *isCopy = JNI_FALSE; +- } +- oop s = JNIHandles::resolve_non_null(string); +- int s_len = java_lang_String::length(s); +- typeArrayOop s_value = java_lang_String::value(s); +- int s_offset = java_lang_String::offset(s); +- if (s_len > 0) { +- ret = s_value->char_at_addr(s_offset); +- } else { +- ret = (jchar*) s_value->base(T_CHAR); +- } +- } +-#if INCLUDE_ALL_GCS +- else { +- assert(UseShenandoahGC, "This path should only be taken with Shenandoah"); +- oop s = JNIHandles::resolve_non_null(string); +- if (ShenandoahStringDedup::is_enabled()) { +- typeArrayOop s_value = java_lang_String::value(s); +- int s_len = java_lang_String::length(s); +- ret = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal); // add one for zero termination +- /* JNI Specification states return NULL on OOM */ +- if (ret != NULL) { +- memcpy(ret, s_value->char_at_addr(0), s_len * sizeof(jchar)); +- ret[s_len] = 0; +- } +- if (isCopy != NULL) *isCopy = JNI_TRUE; +- } else { +- typeArrayOop s_value = java_lang_String::value(s); +- s_value = (typeArrayOop) Universe::heap()->pin_object(thread, s_value); +- ret = (jchar *) s_value->base(T_CHAR); +- if (isCopy != NULL) *isCopy = JNI_FALSE; +- } ++ GC_locker::lock_critical(thread); ++ if (isCopy != NULL) { ++ *isCopy = JNI_FALSE; + } +-#else +- else { +- ShouldNotReachHere(); ++ oop s = JNIHandles::resolve_non_null(string); ++ int s_len = java_lang_String::length(s); ++ typeArrayOop s_value = java_lang_String::value(s); ++ int s_offset = java_lang_String::offset(s); ++ const jchar* ret; ++ if (s_len > 0) { ++ 
ret = s_value->char_at_addr(s_offset); ++ } else { ++ ret = (jchar*) s_value->base(T_CHAR); + } +-#endif + #ifndef USDT2 + DTRACE_PROBE1(hotspot_jni, GetStringCritical__return, ret); + #else /* USDT2 */ +@@ -4386,28 +4340,8 @@ + HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY( + env, str, (uint16_t *) chars); + #endif /* USDT2 */ +- if (!UseShenandoahGC) { +- // The str and chars arguments are ignored +- GC_locker::unlock_critical(thread); +- } +-#if INCLUDE_ALL_GCS +- else if (ShenandoahStringDedup::is_enabled()) { +- assert(UseShenandoahGC, "This path should only be taken with Shenandoah"); +- // For copied string value, free jchar array allocated by earlier call to GetStringCritical. +- // This assumes that ReleaseStringCritical bookends GetStringCritical. +- FREE_C_HEAP_ARRAY(jchar, chars, mtInternal); +- } else { +- assert(UseShenandoahGC, "This path should only be taken with Shenandoah"); +- oop s = JNIHandles::resolve_non_null(str); +- // For not copied string value, drop the associated gc-locker/pin. 
+- typeArrayOop s_value = java_lang_String::value(s); +- Universe::heap()->unpin_object(thread, s_value); +- } +-#else +- else { +- ShouldNotReachHere(); +- } +-#endif ++ // The str and chars arguments are ignored ++ GC_locker::unlock_critical(thread); + #ifndef USDT2 + DTRACE_PROBE(hotspot_jni, ReleaseStringCritical__return); + #else /* USDT2 */ +@@ -5090,14 +5024,11 @@ + if (event.should_commit()) { + event.set_thread(JFR_THREAD_ID(jt)); + event.set_parentThread((traceid)0); +-#if INCLUDE_JFR + if (EventThreadStart::is_stacktrace_enabled()) { + jt->jfr_thread_local()->set_cached_stack_trace_id((traceid)0); + event.commit(); + jt->jfr_thread_local()->clear_cached_stack_trace(); +- } else +-#endif +- { ++ } else { + event.commit(); + } + } +diff -uNr openjdk/hotspot/src/share/vm/prims/jni_md.h afu8u/hotspot/src/share/vm/prims/jni_md.h +--- openjdk/hotspot/src/share/vm/prims/jni_md.h 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jni_md.h 2025-05-06 10:53:45.127633673 +0800 +@@ -42,6 +42,9 @@ + #ifdef TARGET_ARCH_ppc + # include "jni_ppc.h" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "jni_sw64.h" ++#endif + + + /* +diff -uNr openjdk/hotspot/src/share/vm/prims/jvm.cpp afu8u/hotspot/src/share/vm/prims/jvm.cpp +--- openjdk/hotspot/src/share/vm/prims/jvm.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jvm.cpp 2025-05-06 11:13:08.135672951 +0800 +@@ -73,7 +73,6 @@ + #include "utilities/dtrace.hpp" + #include "utilities/events.hpp" + #include "utilities/histogram.hpp" +-#include "utilities/macros.hpp" + #include "utilities/top.hpp" + #include "utilities/utf8.hpp" + #ifdef TARGET_OS_FAMILY_linux +@@ -94,7 +93,6 @@ + + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSetClone.inline.hpp" + #endif // INCLUDE_ALL_GCS + + #include +@@ -601,7 +599,7 @@ + // If G1 is enabled then we need to register a non-null referent + // with 
the SATB barrier. + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + oop referent = java_lang_ref_Reference::referent(clone); + if (referent != NULL) { + G1SATBCardTableModRefBS::enqueue(referent); +@@ -658,12 +656,6 @@ + new_obj_oop = CollectedHeap::obj_allocate(klass, size, CHECK_NULL); + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC && ShenandoahCloneBarrier) { +- ShenandoahBarrierSet::barrier_set()->clone_barrier_runtime(obj()); +- } +-#endif +- + // 4839641 (4840070): We must do an oop-atomic copy, because if another thread + // is modifying a reference field in the clonee, a non-oop-atomic copy might + // be suspended in the middle of copying the pointer and end up with parts +@@ -1170,6 +1162,7 @@ + return (jclass) JNIHandles::make_local(env, k->java_mirror()); + } + ++ + JVM_ENTRY(jclass, JVM_DefineClass(JNIEnv *env, const char *name, jobject loader, const jbyte *buf, jsize len, jobject pd)) + JVMWrapper2("JVM_DefineClass %s", name); + +diff -uNr openjdk/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp afu8u/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp +--- openjdk/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jvmtiClassFileReconstituter.cpp 2025-05-06 10:53:45.135633674 +0800 +@@ -46,6 +46,9 @@ + #ifdef TARGET_ARCH_ppc + # include "bytes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + // FIXME: add Deprecated attribute + // FIXME: fix Synthetic attribute + // FIXME: per Serguei, add error return handling for ConstantPool::copy_cpool_bytes() +diff -uNr openjdk/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp afu8u/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp +--- openjdk/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jvmtiGetLoadedClasses.cpp 2025-05-06 
10:53:45.135633674 +0800 +@@ -46,7 +46,7 @@ + // to get notified about this potential resurrection, otherwise the marking + // might not find the object. + #if INCLUDE_ALL_GCS +- if ((o != NULL) && (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier))) { ++ if (UseG1GC && o != NULL) { + G1SATBCardTableModRefBS::enqueue(o); + } + #endif +@@ -60,6 +60,7 @@ + void do_klass(Klass* k) { + // Collect all jclasses + _classStack.push((jclass) _env->jni_reference(k->java_mirror())); ++ ensure_klass_alive(k->java_mirror()); + } + + int extract(jclass* result_list) { +@@ -69,10 +70,7 @@ + + // Pop all jclasses, fill backwards + while (!_classStack.is_empty()) { +- jclass klass_handle = _classStack.pop(); +- oop klass_mirror = JNIHandles::resolve(klass_handle); +- ensure_klass_alive(klass_mirror); +- result_list[--i] = klass_handle; ++ result_list[--i] = _classStack.pop(); + } + + // Return the number of elements written +diff -uNr openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp afu8u/hotspot/src/share/vm/prims/jvmtiTagMap.cpp +--- openjdk/hotspot/src/share/vm/prims/jvmtiTagMap.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/jvmtiTagMap.cpp 2025-05-06 10:53:45.139633674 +0800 +@@ -1521,7 +1521,7 @@ + oop o = entry->object(); + assert(o != NULL && Universe::heap()->is_in_reserved(o), "sanity check"); + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) { ++ if (UseG1GC) { + // The reference in this tag map could be the only (implicitly weak) + // reference to that object. If we hand it out, we need to keep it live wrt + // SATB marking similar to other j.l.ref.Reference referents. 
+diff -uNr openjdk/hotspot/src/share/vm/prims/methodHandles.hpp afu8u/hotspot/src/share/vm/prims/methodHandles.hpp +--- openjdk/hotspot/src/share/vm/prims/methodHandles.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/methodHandles.hpp 2025-05-06 11:13:08.139672951 +0800 +@@ -198,6 +198,10 @@ + #ifdef TARGET_ARCH_ppc + # include "methodHandles_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "methodHandles_sw64.hpp" ++#endif ++ + + // Tracing + static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN; +diff -uNr openjdk/hotspot/src/share/vm/prims/unsafe.cpp afu8u/hotspot/src/share/vm/prims/unsafe.cpp +--- openjdk/hotspot/src/share/vm/prims/unsafe.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/prims/unsafe.cpp 2025-05-06 10:53:45.139633674 +0800 +@@ -27,7 +27,6 @@ + #include "utilities/macros.hpp" + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" +-#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp" + #endif // INCLUDE_ALL_GCS + #include "jfr/jfrEvents.hpp" + #include "memory/allocation.inline.hpp" +@@ -197,6 +196,7 @@ + v = *(oop*)index_oop_from_field_offset_long(p, offset); \ + } + ++ + // Get/SetObject must be special-cased, since it works with handles. 
+ + // We could be accessing the referent field in a reference +@@ -218,7 +218,7 @@ + + static void ensure_satb_referent_alive(oop o, jlong offset, oop v) { + #if INCLUDE_ALL_GCS +- if ((UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier)) && v != NULL && is_java_lang_ref_Reference_access(o, offset)) { ++ if (UseG1GC && v != NULL && is_java_lang_ref_Reference_access(o, offset)) { + G1SATBCardTableModRefBS::enqueue(v); + } + #endif +@@ -230,12 +230,6 @@ + if (obj == NULL) THROW_0(vmSymbols::java_lang_NullPointerException()); + GET_OOP_FIELD(obj, offset, v) + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- v = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(v); +- } +-#endif +- + ensure_satb_referent_alive(p, offset, v); + + return JNIHandles::make_local(env, v); +@@ -272,12 +266,6 @@ + UnsafeWrapper("Unsafe_GetObject"); + GET_OOP_FIELD(obj, offset, v) + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- v = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(v); +- } +-#endif +- + ensure_satb_referent_alive(p, offset, v); + + return JNIHandles::make_local(env, v); +@@ -306,12 +294,6 @@ + (void)const_cast(v = *(volatile oop*) addr); + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- (void)const_cast(v = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(v)); +- } +-#endif +- + ensure_satb_referent_alive(p, offset, v); + + OrderAccess::acquire(); +diff -uNr openjdk/hotspot/src/share/vm/runtime/arguments.cpp afu8u/hotspot/src/share/vm/runtime/arguments.cpp +--- openjdk/hotspot/src/share/vm/runtime/arguments.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/arguments.cpp 2025-05-06 10:53:45.143633674 +0800 +@@ -66,9 +66,6 @@ + #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" 
+-#include "gc_implementation/shenandoah/shenandoahLogging.hpp" +-#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp" + #endif // INCLUDE_ALL_GCS + + // Note: This is a special bug reporting site for the JVM +@@ -1522,7 +1519,11 @@ + if (max_heap_size <= max_heap_for_compressed_oops()) { + #if !defined(COMPILER1) || defined(TIERED) + if (FLAG_IS_DEFAULT(UseCompressedOops)) { ++#ifdef SW64 ++ FLAG_SET_ERGO(bool, UseCompressedOops, false); ++#else + FLAG_SET_ERGO(bool, UseCompressedOops, true); ++#endif + } + #endif + #ifdef _WIN64 +@@ -1545,6 +1546,7 @@ + #endif // ZERO + } + ++ + // NOTE: set_use_compressed_klass_ptrs() must be called after calling + // set_use_compressed_oops(). + void Arguments::set_use_compressed_klass_ptrs() { +@@ -1584,8 +1586,6 @@ + heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment(); + } else if (UseG1GC) { + heap_alignment = G1CollectedHeap::conservative_max_heap_alignment(); +- } else if (UseShenandoahGC) { +- heap_alignment = ShenandoahHeap::conservative_max_heap_alignment(); + } + #endif // INCLUDE_ALL_GCS + _conservative_max_heap_alignment = MAX4(heap_alignment, +@@ -1737,178 +1737,6 @@ + } + } + +-void Arguments::set_shenandoah_gc_flags() { +- +-#if !(defined AARCH64 || defined AMD64 || defined IA32) +- UNSUPPORTED_GC_OPTION(UseShenandoahGC); +-#endif +- +-#if 0 // leave this block as stepping stone for future platforms +- warning("Shenandoah GC is not fully supported on this platform:"); +- warning(" concurrent modes are not supported, only STW cycles are enabled;"); +- warning(" arch-specific barrier code is not implemented, disabling barriers;"); +- +-#if INCLUDE_ALL_GCS +- FLAG_SET_DEFAULT(ShenandoahGCHeuristics, "passive"); +- +- FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false); +- FLAG_SET_DEFAULT(ShenandoahLoadRefBarrier, false); +- FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, false); +- FLAG_SET_DEFAULT(ShenandoahCASBarrier, false); +- FLAG_SET_DEFAULT(ShenandoahCloneBarrier, false); +- +- 
FLAG_SET_DEFAULT(ShenandoahVerifyOptoBarriers, false); +-#endif +-#endif +- +-#if INCLUDE_ALL_GCS +- if (!FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) { +- if (0 > ShenandoahGarbageThreshold || ShenandoahGarbageThreshold > 100) { +- vm_exit_during_initialization("The flag -XX:ShenandoahGarbageThreshold is out of range", NULL); +- } +- } +- +- if (!FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) { +- if (0 > ShenandoahAllocationThreshold || ShenandoahAllocationThreshold > 100) { +- vm_exit_during_initialization("The flag -XX:ShenandoahAllocationThreshold is out of range", NULL); +- } +- } +- +- if (!FLAG_IS_DEFAULT(ShenandoahMinFreeThreshold)) { +- if (0 > ShenandoahMinFreeThreshold || ShenandoahMinFreeThreshold > 100) { +- vm_exit_during_initialization("The flag -XX:ShenandoahMinFreeThreshold is out of range", NULL); +- } +- } +-#endif +- +-#if INCLUDE_ALL_GCS +- if (UseLargePages && (MaxHeapSize / os::large_page_size()) < ShenandoahHeapRegion::MIN_NUM_REGIONS) { +- warning("Large pages size (" SIZE_FORMAT "K) is too large to afford page-sized regions, disabling uncommit", +- os::large_page_size() / K); +- FLAG_SET_DEFAULT(ShenandoahUncommit, false); +- } +-#endif +- +- // Enable NUMA by default. While Shenandoah is not NUMA-aware, enabling NUMA makes +- // storage allocation code NUMA-aware. +- if (FLAG_IS_DEFAULT(UseNUMA)) { +- FLAG_SET_DEFAULT(UseNUMA, true); +- } +- +- // Set up default number of concurrent threads. We want to have cycles complete fast +- // enough, but we also do not want to steal too much CPU from the concurrently running +- // application. Using 1/4 of available threads for concurrent GC seems a good +- // compromise here. 
+- bool ergo_conc = FLAG_IS_DEFAULT(ConcGCThreads); +- if (ergo_conc) { +- FLAG_SET_DEFAULT(ConcGCThreads, MAX2(1, os::initial_active_processor_count() / 4)); +- } +- +- if (ConcGCThreads == 0) { +- vm_exit_during_initialization("Shenandoah expects ConcGCThreads > 0, check -XX:ConcGCThreads=#"); +- } +- +- // Set up default number of parallel threads. We want to have decent pauses performance +- // which would use parallel threads, but we also do not want to do too many threads +- // that will overwhelm the OS scheduler. Using 1/2 of available threads seems to be a fair +- // compromise here. Due to implementation constraints, it should not be lower than +- // the number of concurrent threads. +- bool ergo_parallel = FLAG_IS_DEFAULT(ParallelGCThreads); +- if (ergo_parallel) { +- FLAG_SET_DEFAULT(ParallelGCThreads, MAX2(1, os::initial_active_processor_count() / 2)); +- } +- +- if (ParallelGCThreads == 0) { +- vm_exit_during_initialization("Shenandoah expects ParallelGCThreads > 0, check -XX:ParallelGCThreads=#"); +- } +- +- // Make sure ergonomic decisions do not break the thread count invariants. +- // This may happen when user overrides one of the flags, but not the other. +- // When that happens, we want to adjust the setting that was set ergonomically. +- if (ParallelGCThreads < ConcGCThreads) { +- if (ergo_conc && !ergo_parallel) { +- FLAG_SET_DEFAULT(ConcGCThreads, ParallelGCThreads); +- } else if (!ergo_conc && ergo_parallel) { +- FLAG_SET_DEFAULT(ParallelGCThreads, ConcGCThreads); +- } else if (ergo_conc && ergo_parallel) { +- // Should not happen, check the ergonomic computation above. Fail with relevant error. +- vm_exit_during_initialization("Shenandoah thread count ergonomic error"); +- } else { +- // User settings error, report and ask user to rectify. 
+- vm_exit_during_initialization("Shenandoah expects ConcGCThreads <= ParallelGCThreads, check -XX:ParallelGCThreads, -XX:ConcGCThreads"); +- } +- } +- +- if (FLAG_IS_DEFAULT(ParallelRefProcEnabled)) { +- FLAG_SET_DEFAULT(ParallelRefProcEnabled, true); +- } +- +-#if INCLUDE_ALL_GCS +- if (ShenandoahRegionSampling && FLAG_IS_DEFAULT(PerfDataMemorySize)) { +- // When sampling is enabled, max out the PerfData memory to get more +- // Shenandoah data in, including Matrix. +- FLAG_SET_DEFAULT(PerfDataMemorySize, 2048*K); +- } +-#endif +- +-#ifdef COMPILER2 +- // Shenandoah cares more about pause times, rather than raw throughput. +- // Enabling safepoints in counted loops makes it more responsive with +- // long loops. However, it is risky in 8u, due to bugs it brings, for +- // example JDK-8176506. Warn user about this, and proceed. +- if (UseCountedLoopSafepoints) { +- warning("Enabling -XX:UseCountedLoopSafepoints is known to cause JVM bugs. Use at your own risk."); +- } +- +-#ifdef ASSERT +- // C2 barrier verification is only reliable when all default barriers are enabled +- if (ShenandoahVerifyOptoBarriers && +- (!FLAG_IS_DEFAULT(ShenandoahSATBBarrier) || +- !FLAG_IS_DEFAULT(ShenandoahLoadRefBarrier) || +- !FLAG_IS_DEFAULT(ShenandoahStoreValEnqueueBarrier) || +- !FLAG_IS_DEFAULT(ShenandoahCASBarrier) || +- !FLAG_IS_DEFAULT(ShenandoahCloneBarrier) +- )) { +- warning("Unusual barrier configuration, disabling C2 barrier verification"); +- FLAG_SET_DEFAULT(ShenandoahVerifyOptoBarriers, false); +- } +-#else +- guarantee(!ShenandoahVerifyOptoBarriers, "Should be disabled"); +-#endif // ASSERT +-#endif // COMPILER2 +- +-#if INCLUDE_ALL_GCS +- if ((InitialHeapSize == MaxHeapSize) && ShenandoahUncommit) { +- if (PrintGC) { +- tty->print_cr("Min heap equals to max heap, disabling ShenandoahUncommit"); +- } +- FLAG_SET_DEFAULT(ShenandoahUncommit, false); +- } +- +- // If class unloading is disabled, no unloading for concurrent cycles as well. 
+- if (!ClassUnloading) { +- FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false); +- } +- +- // TLAB sizing policy makes resizing decisions before each GC cycle. It averages +- // historical data, assigning more recent data the weight according to TLABAllocationWeight. +- // Current default is good for generational collectors that run frequent young GCs. +- // With Shenandoah, GC cycles are much less frequent, so we need we need sizing policy +- // to converge faster over smaller number of resizing decisions. +- if (FLAG_IS_DEFAULT(TLABAllocationWeight)) { +- FLAG_SET_DEFAULT(TLABAllocationWeight, 90); +- } +- +- if (FLAG_IS_DEFAULT(ShenandoahSoftMaxHeapSize)) { +- FLAG_SET_DEFAULT(ShenandoahSoftMaxHeapSize, MaxHeapSize); +- } else { +- if (ShenandoahSoftMaxHeapSize > MaxHeapSize) { +- vm_exit_during_initialization("ShenandoahSoftMaxHeapSize must be less than or equal to the maximum heap size\n"); +- } +- } +-#endif +-} +- + #if !INCLUDE_ALL_GCS + #ifdef ASSERT + static bool verify_serial_gc_flags() { +@@ -1930,8 +1758,6 @@ + set_parnew_gc_flags(); + } else if (UseG1GC) { + set_g1_gc_flags(); +- } else if (UseShenandoahGC) { +- set_shenandoah_gc_flags(); + } + check_deprecated_gcs(); + check_deprecated_gc_flags(); +@@ -1952,7 +1778,6 @@ + FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false); + FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false); + FLAG_SET_CMDLINE(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false); +- FLAG_SET_CMDLINE(uintx, ShenandoahUnloadClassesFrequency, 0); + } + #else // INCLUDE_ALL_GCS + assert(verify_serial_gc_flags(), "SerialGC unset"); +@@ -2333,11 +2158,6 @@ + jio_fprintf(defaultStream::output_stream(), + "GCLogFileSize changed to minimum 8K\n"); + } +- +- // Record more information about previous cycles for improved debugging pleasure +- if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) { +- FLAG_SET_DEFAULT(LogEventsBufferEntries, 250); +- } + } + + // This function is called for -Xloggc:, it can be used +@@ 
-2433,7 +2253,6 @@ + if (UseConcMarkSweepGC || UseParNewGC) i++; + if (UseParallelGC || UseParallelOldGC) i++; + if (UseG1GC) i++; +- if (UseShenandoahGC) i++; + if (i > 1) { + jio_fprintf(defaultStream::error_stream(), + "Conflicting collector combinations in option list; " +@@ -2821,11 +2640,11 @@ + "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K, + min_code_cache_size/K); + status = false; +- } else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) { +- // Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported. ++ } else if (ReservedCodeCacheSize > 2*G) { ++ // Code cache size larger than MAXINT is not supported. + jio_fprintf(defaultStream::error_stream(), + "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M, +- CODE_CACHE_SIZE_LIMIT/M); ++ (2*G)/M); + status = false; + } + +diff -uNr openjdk/hotspot/src/share/vm/runtime/arguments.hpp afu8u/hotspot/src/share/vm/runtime/arguments.hpp +--- openjdk/hotspot/src/share/vm/runtime/arguments.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/arguments.hpp 2025-05-06 10:53:45.143633674 +0800 +@@ -335,8 +335,6 @@ + static void set_parallel_gc_flags(); + // Garbage-First (UseG1GC) + static void set_g1_gc_flags(); +- // Shenandoah GC (UseShenandoahGC) +- static void set_shenandoah_gc_flags(); + // GC ergonomics + static void set_conservative_max_heap_alignment(); + static void set_use_compressed_oops(); +@@ -615,7 +613,7 @@ + + bool Arguments::gc_selected() { + return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC || +- UseParNewGC || UseSerialGC || UseShenandoahGC; ++ UseParNewGC || UseSerialGC; + } + + #endif // SHARE_VM_RUNTIME_ARGUMENTS_HPP +diff -uNr openjdk/hotspot/src/share/vm/runtime/atomic.inline.hpp afu8u/hotspot/src/share/vm/runtime/atomic.inline.hpp +--- openjdk/hotspot/src/share/vm/runtime/atomic.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/runtime/atomic.inline.hpp 2025-05-06 10:53:45.143633674 +0800 +@@ -31,6 +31,9 @@ + #ifdef TARGET_OS_ARCH_linux_x86 + # include "atomic_linux_x86.inline.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "atomic_linux_sw64.inline.hpp" ++#endif + #ifdef TARGET_OS_ARCH_linux_sparc + # include "atomic_linux_sparc.inline.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp afu8u/hotspot/src/share/vm/runtime/deoptimization.cpp +--- openjdk/hotspot/src/share/vm/runtime/deoptimization.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/deoptimization.cpp 2025-05-06 10:53:45.143633674 +0800 +@@ -68,6 +68,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vmreg_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vmreg_sw64.inline.hpp" ++#endif + #ifdef COMPILER2 + #if defined AD_MD_HPP + # include AD_MD_HPP +@@ -83,6 +86,8 @@ + # include "adfiles/ad_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/ad_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/ad_sw64.hpp" + #endif + #endif // COMPILER2 + +diff -uNr openjdk/hotspot/src/share/vm/runtime/dtraceJSDT.hpp afu8u/hotspot/src/share/vm/runtime/dtraceJSDT.hpp +--- openjdk/hotspot/src/share/vm/runtime/dtraceJSDT.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/dtraceJSDT.hpp 2025-05-06 10:53:45.143633674 +0800 +@@ -44,6 +44,9 @@ + #ifdef TARGET_ARCH_ppc + # include "nativeInst_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++#endif + + class RegisteredProbes; + typedef jlong OpaqueProbes; +diff -uNr openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp afu8u/hotspot/src/share/vm/runtime/fieldDescriptor.hpp +--- openjdk/hotspot/src/share/vm/runtime/fieldDescriptor.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/fieldDescriptor.hpp 2025-05-06 10:53:45.143633674 +0800 +@@ -98,7 +98,6 @@ + + bool 
is_static() const { return access_flags().is_static(); } + bool is_final() const { return access_flags().is_final(); } +- bool is_stable() const { return access_flags().is_stable(); } + bool is_volatile() const { return access_flags().is_volatile(); } + bool is_transient() const { return access_flags().is_transient(); } + +diff -uNr openjdk/hotspot/src/share/vm/runtime/frame.cpp afu8u/hotspot/src/share/vm/runtime/frame.cpp +--- openjdk/hotspot/src/share/vm/runtime/frame.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/frame.cpp 2025-05-06 10:53:45.143633674 +0800 +@@ -64,6 +64,9 @@ + #ifdef TARGET_ARCH_ppc + # include "nativeInst_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++#endif + + PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC + +diff -uNr openjdk/hotspot/src/share/vm/runtime/frame.hpp afu8u/hotspot/src/share/vm/runtime/frame.hpp +--- openjdk/hotspot/src/share/vm/runtime/frame.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/frame.hpp 2025-05-06 10:53:45.143633674 +0800 +@@ -45,6 +45,8 @@ + # include "adfiles/adGlobals_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/adGlobals_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/adGlobals_sw64.hpp" + #endif + #endif // COMPILER2 + #ifdef TARGET_ARCH_zero +@@ -489,6 +491,9 @@ + #ifdef TARGET_ARCH_x86 + # include "frame_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "frame_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "frame_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/runtime/frame.inline.hpp afu8u/hotspot/src/share/vm/runtime/frame.inline.hpp +--- openjdk/hotspot/src/share/vm/runtime/frame.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/frame.inline.hpp 2025-05-06 10:53:45.143633674 +0800 +@@ -49,6 +49,9 @@ + #ifdef TARGET_ARCH_ppc + # include "jniTypes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include 
"jniTypes_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_zero + # include "entryFrame_zero.hpp" + # include "fakeStubFrame_zero.hpp" +@@ -115,6 +118,9 @@ + #ifdef TARGET_ARCH_ppc + # include "frame_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "frame_sw64.inline.hpp" ++#endif + + + #endif // SHARE_VM_RUNTIME_FRAME_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/runtime/globals.cpp afu8u/hotspot/src/share/vm/runtime/globals.cpp +--- openjdk/hotspot/src/share/vm/runtime/globals.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/globals.cpp 2025-05-06 11:13:08.139672951 +0800 +@@ -34,7 +34,6 @@ + #include "utilities/top.hpp" + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1_globals.hpp" +-#include "gc_implementation/shenandoah/shenandoah_globals.hpp" + #endif // INCLUDE_ALL_GCS + #ifdef COMPILER1 + #include "c1/c1_globals.hpp" +@@ -481,7 +480,6 @@ + RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT) + #if INCLUDE_ALL_GCS + G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT) +- SHENANDOAH_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT) + #endif // INCLUDE_ALL_GCS + #ifdef COMPILER1 + C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT) +diff -uNr 
openjdk/hotspot/src/share/vm/runtime/globals_extension.hpp afu8u/hotspot/src/share/vm/runtime/globals_extension.hpp +--- openjdk/hotspot/src/share/vm/runtime/globals_extension.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/globals_extension.hpp 2025-05-06 10:53:45.147633674 +0800 +@@ -76,7 +76,6 @@ + RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER) + #if INCLUDE_ALL_GCS + G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER) +- SHENANDOAH_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER) + #endif // INCLUDE_ALL_GCS + #ifdef COMPILER1 + C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER) +@@ -152,15 +151,6 @@ + RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_MANAGEABLE_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_PRODUCT_RW_FLAG_MEMBER_WITH_TYPE) +- SHENANDOAH_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_PRODUCT_FLAG_MEMBER_WITH_TYPE, +- RUNTIME_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE, + RUNTIME_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE, + 
RUNTIME_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE, + RUNTIME_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE, +diff -uNr openjdk/hotspot/src/share/vm/runtime/globals.hpp afu8u/hotspot/src/share/vm/runtime/globals.hpp +--- openjdk/hotspot/src/share/vm/runtime/globals.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/globals.hpp 2025-05-06 10:53:45.147633674 +0800 +@@ -55,6 +55,9 @@ + #ifdef TARGET_ARCH_ppc + # include "globals_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "globals_sw64.hpp" ++#endif + #ifdef TARGET_OS_FAMILY_linux + # include "globals_linux.hpp" + #endif +@@ -79,6 +82,9 @@ + #ifdef TARGET_OS_ARCH_linux_sparc + # include "globals_linux_sparc.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "globals_linux_sw64.hpp" ++#endif + #ifdef TARGET_OS_ARCH_linux_zero + # include "globals_linux_zero.hpp" + #endif +@@ -116,6 +122,9 @@ + #ifdef TARGET_ARCH_sparc + # include "c1_globals_sparc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "c1_globals_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_arm + # include "c1_globals_arm.hpp" + #endif +@@ -148,6 +157,9 @@ + #ifdef TARGET_ARCH_sparc + # include "c2_globals_sparc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "c2_globals_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_arm + # include "c2_globals_arm.hpp" + #endif +@@ -1434,9 +1446,6 @@ + product(bool, UseParallelOldGC, false, \ + "Use the Parallel Old garbage collector") \ + \ +- product(bool, UseShenandoahGC, false, \ +- "Use the Shenandoah garbage collector") \ +- \ + product(uintx, HeapMaximumCompactionInterval, 20, \ + "How often should we maximally compact the heap (not allowing " \ + "any dead space)") \ +@@ -3212,7 +3221,7 @@ + product(uintx, InitialHeapSize, 0, \ + "Initial heap size (in bytes); zero means use ergonomics") \ + \ +- product(uintx, MaxHeapSize, ScaleForWordSize(96*M), \ ++ product(uintx, MaxHeapSize, ScaleForWordSize(SW64_ONLY(1500) NOT_SW64(96) *M), \ + "Maximum heap size (in bytes)") \ + \ + product(uintx, 
OldSize, ScaleForWordSize(4*M), \ +diff -uNr openjdk/hotspot/src/share/vm/runtime/icache.hpp afu8u/hotspot/src/share/vm/runtime/icache.hpp +--- openjdk/hotspot/src/share/vm/runtime/icache.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/icache.hpp 2025-05-06 10:53:45.147633674 +0800 +@@ -86,7 +86,9 @@ + #ifdef TARGET_ARCH_ppc + # include "icache_ppc.hpp" + #endif +- ++#ifdef TARGET_ARCH_sw64 ++# include "icache_sw64.hpp" ++#endif + + + class ICacheStubGenerator : public StubCodeGenerator { +diff -uNr openjdk/hotspot/src/share/vm/runtime/javaCalls.hpp afu8u/hotspot/src/share/vm/runtime/javaCalls.hpp +--- openjdk/hotspot/src/share/vm/runtime/javaCalls.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/javaCalls.hpp 2025-05-06 10:53:45.147633674 +0800 +@@ -49,6 +49,9 @@ + #ifdef TARGET_ARCH_ppc + # include "jniTypes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "jniTypes_sw64.hpp" ++#endif + + // A JavaCallWrapper is constructed before each JavaCall and destructed after the call. 
+ // Its purpose is to allocate/deallocate a new handle block and to save/restore the last +diff -uNr openjdk/hotspot/src/share/vm/runtime/java.cpp afu8u/hotspot/src/share/vm/runtime/java.cpp +--- openjdk/hotspot/src/share/vm/runtime/java.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/java.cpp 2025-05-06 10:53:45.147633674 +0800 +@@ -84,6 +84,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vm_version_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vm_version_sw64.hpp" ++#endif + #if INCLUDE_ALL_GCS + #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" + #include "gc_implementation/parallelScavenge/psScavenge.hpp" +diff -uNr openjdk/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp afu8u/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp +--- openjdk/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/javaFrameAnchor.hpp 2025-05-06 10:53:45.147633674 +0800 +@@ -80,6 +80,9 @@ + #ifdef TARGET_ARCH_x86 + # include "javaFrameAnchor_x86.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "javaFrameAnchor_sw64.hpp" ++#endif + #ifdef TARGET_ARCH_aarch64 + # include "javaFrameAnchor_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/runtime/jniHandles.cpp afu8u/hotspot/src/share/vm/runtime/jniHandles.cpp +--- openjdk/hotspot/src/share/vm/runtime/jniHandles.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/jniHandles.cpp 2025-05-06 10:53:45.147633674 +0800 +@@ -116,7 +116,7 @@ + oop result = jweak_ref(handle); + result = guard_value(result); + #if INCLUDE_ALL_GCS +- if (result != NULL && (UseG1GC || (UseShenandoahGC && ShenandoahSATBBarrier))) { ++ if (result != NULL && UseG1GC) { + G1SATBCardTableModRefBS::enqueue(result); + } + #endif // INCLUDE_ALL_GCS +diff -uNr openjdk/hotspot/src/share/vm/runtime/mutexLocker.cpp afu8u/hotspot/src/share/vm/runtime/mutexLocker.cpp +--- 
openjdk/hotspot/src/share/vm/runtime/mutexLocker.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/mutexLocker.cpp 2025-05-06 10:53:45.147633674 +0800 +@@ -209,11 +209,6 @@ + def(StringDedupQueue_lock , Monitor, leaf, true ); + def(StringDedupTable_lock , Mutex , leaf, true ); + } +- if (UseShenandoahGC) { +- def(SATB_Q_FL_lock , Mutex , special, true); +- def(SATB_Q_CBL_mon , Monitor, nonleaf, true); +- def(Shared_SATB_Q_lock , Mutex, nonleaf, true); +- } + def(ParGCRareEvent_lock , Mutex , leaf , true ); + def(DerivedPointerTableGC_lock , Mutex, leaf, true ); + def(CodeCache_lock , Mutex , special, true ); +diff -uNr openjdk/hotspot/src/share/vm/runtime/objectMonitor.hpp afu8u/hotspot/src/share/vm/runtime/objectMonitor.hpp +--- openjdk/hotspot/src/share/vm/runtime/objectMonitor.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/objectMonitor.hpp 2025-05-06 10:53:45.151633674 +0800 +@@ -29,8 +29,6 @@ + #include "runtime/park.hpp" + #include "runtime/perfData.hpp" + +-class ObjectMonitor; +- + // ObjectWaiter serves as a "proxy" or surrogate thread. + // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific + // ParkEvent instead. 
Beware, however, that the JVMTI code +diff -uNr openjdk/hotspot/src/share/vm/runtime/orderAccess.inline.hpp afu8u/hotspot/src/share/vm/runtime/orderAccess.inline.hpp +--- openjdk/hotspot/src/share/vm/runtime/orderAccess.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/orderAccess.inline.hpp 2025-05-06 10:53:45.151633674 +0800 +@@ -47,6 +47,9 @@ + #ifdef TARGET_OS_ARCH_linux_ppc + # include "orderAccess_linux_ppc.inline.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "orderAccess_linux_sw64.inline.hpp" ++#endif + + // Solaris + #ifdef TARGET_OS_ARCH_solaris_x86 +diff -uNr openjdk/hotspot/src/share/vm/runtime/os.hpp afu8u/hotspot/src/share/vm/runtime/os.hpp +--- openjdk/hotspot/src/share/vm/runtime/os.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/os.hpp 2025-05-06 10:53:45.151633674 +0800 +@@ -28,6 +28,7 @@ + #include "jvmtifiles/jvmti.h" + #include "runtime/atomic.hpp" + #include "runtime/extendedPC.hpp" ++#include "runtime/handles.hpp" + #include "utilities/top.hpp" + #ifdef TARGET_OS_FAMILY_linux + # include "jvm_linux.h" +@@ -53,8 +54,6 @@ + #endif + + class AgentLibrary; +-class methodHandle; +-class instanceKlassHandle; + + // os defines the interface to operating system; this includes traditional + // OS services (time, I/O) as well as other functionality with system- +@@ -861,6 +860,9 @@ + #ifdef TARGET_OS_ARCH_linux_aarch64 + # include "os_linux_aarch64.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "os_linux_sw64.hpp" ++#endif + #ifdef TARGET_OS_ARCH_linux_sparc + # include "os_linux_sparc.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/runtime/prefetch.inline.hpp afu8u/hotspot/src/share/vm/runtime/prefetch.inline.hpp +--- openjdk/hotspot/src/share/vm/runtime/prefetch.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/prefetch.inline.hpp 2025-05-06 10:53:45.151633674 +0800 +@@ -46,6 +46,9 @@ + #ifdef 
TARGET_OS_ARCH_linux_ppc + # include "prefetch_linux_ppc.inline.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "prefetch_linux_sw64.inline.hpp" ++#endif + + // Solaris + #ifdef TARGET_OS_ARCH_solaris_x86 +diff -uNr openjdk/hotspot/src/share/vm/runtime/registerMap.hpp afu8u/hotspot/src/share/vm/runtime/registerMap.hpp +--- openjdk/hotspot/src/share/vm/runtime/registerMap.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/registerMap.hpp 2025-05-06 10:53:45.151633674 +0800 +@@ -45,6 +45,9 @@ + #ifdef TARGET_ARCH_ppc + # include "register_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "register_sw64.hpp" ++#endif + + class JavaThread; + +@@ -156,6 +159,9 @@ + #ifdef TARGET_ARCH_ppc + # include "registerMap_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "registerMap_sw64.hpp" ++#endif + + }; + +diff -uNr openjdk/hotspot/src/share/vm/runtime/relocator.hpp afu8u/hotspot/src/share/vm/runtime/relocator.hpp +--- openjdk/hotspot/src/share/vm/runtime/relocator.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/relocator.hpp 2025-05-06 10:53:45.151633674 +0800 +@@ -45,6 +45,9 @@ + #ifdef TARGET_ARCH_ppc + # include "bytes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "bytes_sw64.hpp" ++#endif + + // This code has been converted from the 1.1E java virtual machine + // Thanks to the JavaTopics group for using the code +diff -uNr openjdk/hotspot/src/share/vm/runtime/safepoint.cpp afu8u/hotspot/src/share/vm/runtime/safepoint.cpp +--- openjdk/hotspot/src/share/vm/runtime/safepoint.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/safepoint.cpp 2025-05-06 10:53:45.151633674 +0800 +@@ -78,6 +78,10 @@ + # include "nativeInst_ppc.hpp" + # include "vmreg_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++# include "vmreg_sw64.inline.hpp" ++#endif + #if INCLUDE_ALL_GCS + #include 
"gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" + #include "gc_implementation/shared/suspendibleThreadSet.hpp" +@@ -183,7 +187,7 @@ + // In the future we should investigate whether CMS can use the + // more-general mechanism below. DLD (01/05). + ConcurrentMarkSweepThread::synchronize(false); +- } else if (UseG1GC || (UseShenandoahGC && UseStringDeduplication)) { ++ } else if (UseG1GC) { + SuspendibleThreadSet::synchronize(); + } + #endif // INCLUDE_ALL_GCS +@@ -589,7 +593,7 @@ + // If there are any concurrent GC threads resume them. + if (UseConcMarkSweepGC) { + ConcurrentMarkSweepThread::desynchronize(false); +- } else if (UseG1GC || (UseShenandoahGC && UseStringDeduplication)) { ++ } else if (UseG1GC) { + SuspendibleThreadSet::desynchronize(); + } + #endif // INCLUDE_ALL_GCS +diff -uNr openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp afu8u/hotspot/src/share/vm/runtime/sharedRuntime.cpp +--- openjdk/hotspot/src/share/vm/runtime/sharedRuntime.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/sharedRuntime.cpp 2025-05-06 10:53:45.155633674 +0800 +@@ -82,6 +82,11 @@ + # include "nativeInst_ppc.hpp" + # include "vmreg_ppc.inline.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++# include "vmreg_sw64.inline.hpp" ++#endif ++ + #ifdef COMPILER1 + #include "c1/c1_Runtime1.hpp" + #endif +@@ -247,6 +252,26 @@ + JRT_END + + ++#ifdef SW64 //ZHJ20110311(LZS) ++JRT_LEAF(jint, SharedRuntime::sdiv(jint y, jint x)) ++ if (x == min_jint && y == CONST64(-1)) { ++ return x; ++ } else { ++ return x / y; ++ } ++JRT_END ++ ++ ++JRT_LEAF(jint, SharedRuntime::srem(jint y, jint x)) ++ if (x == min_jint && y == CONST64(-1)) { ++ return 0; ++ } else { ++ return x % y; ++ } ++JRT_END ++#endif ++ ++ + JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x)) + if (x == min_jlong && y == CONST64(-1)) { + return x; +@@ -2825,22 +2850,6 @@ + } + } + +-JRT_LEAF(oopDesc*, SharedRuntime::pin_object(JavaThread* thread, 
oopDesc* obj)) +- assert(Universe::heap()->supports_object_pinning(), "Why we here?"); +- assert(obj != NULL, "Should not be null"); +- oop o(obj); +- o = Universe::heap()->pin_object(thread, o); +- assert(o != NULL, "Should not be null"); +- return o; +-JRT_END +- +-JRT_LEAF(void, SharedRuntime::unpin_object(JavaThread* thread, oopDesc* obj)) +- assert(Universe::heap()->supports_object_pinning(), "Why we here?"); +- assert(obj != NULL, "Should not be null"); +- oop o(obj); +- Universe::heap()->unpin_object(thread, o); +-JRT_END +- + // ------------------------------------------------------------------------- + // Java-Java calling convention + // (what you use when Java calls Java) +diff -uNr openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp afu8u/hotspot/src/share/vm/runtime/sharedRuntime.hpp +--- openjdk/hotspot/src/share/vm/runtime/sharedRuntime.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/sharedRuntime.hpp 2025-05-06 10:53:45.155633674 +0800 +@@ -92,6 +92,11 @@ + // not have machine instructions to implement their functionality. + // Do not remove these. 
+ ++#ifdef SW64 //ZHJ20110311(LZS) ++ static jint sdiv(jint y, jint x); ++ static jint srem(jint y, jint x); ++#endif ++ + // long arithmetics + static jlong lmul(jlong y, jlong x); + static jlong ldiv(jlong y, jlong x); +@@ -145,6 +150,10 @@ + static double dsqrt(double f); + #endif + ++#ifdef SW64 ++ static unsigned int updateBytesCRC32(unsigned long crc, const unsigned char *buf_bytes, unsigned int len_ints); ++#endif ++ + // Montgomery multiplication + static void montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, + jint len, jlong inv, jint *m_ints); +@@ -490,10 +499,6 @@ + static void get_utf(oopDesc* src, address dst); + #endif // def HAVE_DTRACE_H + +- // Pin/Unpin object +- static oopDesc* pin_object(JavaThread* thread, oopDesc* obj); +- static void unpin_object(JavaThread* thread, oopDesc* obj); +- + // A compiled caller has just called the interpreter, but compiled code + // exists. Patch the caller so he no longer calls into the interpreter. + static void fixup_callers_callsite(Method* moop, address ret_pc); +@@ -591,6 +596,13 @@ + static void print_ic_miss_histogram(); + + #endif // PRODUCT ++// static void print_long(long long i); ++// static void print_int(int i); ++// static void print_float(float i); ++// static void print_double(double i); ++// static void print_str(char *str); ++// ++// static void print_reg_with_pc(char *reg_name, long i, long pc); + }; + + +diff -uNr openjdk/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp afu8u/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp +--- openjdk/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/sharedRuntimeTrig.cpp 2025-05-06 10:53:45.155633674 +0800 +@@ -534,6 +534,14 @@ + * then 3 2 + * sin(x) = x + (S1*x + (x *(r-y/2)+y)) + */ ++#if defined(SW64) ++#undef S1 ++#undef S2 ++#undef S3 ++#undef S4 ++#undef S5 ++#undef S6 ++#endif + + static const double + S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 
0x55555549 */ +diff -uNr openjdk/hotspot/src/share/vm/runtime/stackValueCollection.cpp afu8u/hotspot/src/share/vm/runtime/stackValueCollection.cpp +--- openjdk/hotspot/src/share/vm/runtime/stackValueCollection.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/stackValueCollection.cpp 2025-05-06 10:53:45.155633674 +0800 +@@ -42,6 +42,9 @@ + #ifdef TARGET_ARCH_ppc + # include "jniTypes_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "jniTypes_sw64.hpp" ++#endif + + PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC + +diff -uNr openjdk/hotspot/src/share/vm/runtime/stackValue.cpp afu8u/hotspot/src/share/vm/runtime/stackValue.cpp +--- openjdk/hotspot/src/share/vm/runtime/stackValue.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/stackValue.cpp 2025-05-06 10:53:45.155633674 +0800 +@@ -102,15 +102,8 @@ + } else { + value.noop = *(narrowOop*) value_addr; + } +- // Decode narrowoop +- oop val = oopDesc::decode_heap_oop(value.noop); +- // Deoptimization must make sure all oops have passed load barriers +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val); +- } +-#endif +- Handle h(val); // Wrap a handle around the oop ++ // Decode narrowoop and wrap a handle around the oop ++ Handle h(oopDesc::decode_heap_oop(value.noop)); + return new StackValue(h); + } + #endif +@@ -125,12 +118,6 @@ + val = (oop)NULL; + } + #endif +- // Deoptimization must make sure all oops have passed load barriers +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val); +- } +-#endif + Handle h(val); // Wrap a handle around the oop + return new StackValue(h); + } +diff -uNr openjdk/hotspot/src/share/vm/runtime/statSampler.cpp afu8u/hotspot/src/share/vm/runtime/statSampler.cpp +--- openjdk/hotspot/src/share/vm/runtime/statSampler.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ 
afu8u/hotspot/src/share/vm/runtime/statSampler.cpp 2025-05-06 10:53:45.155633674 +0800 +@@ -51,6 +51,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vm_version_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vm_version_sw64.hpp" ++#endif + + // -------------------------------------------------------- + // StatSamplerTask +diff -uNr openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp afu8u/hotspot/src/share/vm/runtime/stubRoutines.hpp +--- openjdk/hotspot/src/share/vm/runtime/stubRoutines.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/stubRoutines.hpp 2025-05-06 10:53:45.155633674 +0800 +@@ -49,6 +49,9 @@ + #ifdef TARGET_ARCH_ppc + # include "nativeInst_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "nativeInst_sw64.hpp" ++#endif + + // StubRoutines provides entry points to assembly routines used by + // compiled code and the run-time system. Platform-specific entry +@@ -116,6 +119,8 @@ + # include "stubRoutines_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "stubRoutines_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "stubRoutines_sw64.hpp" + #endif + + static jint _verify_oop_count; +diff -uNr openjdk/hotspot/src/share/vm/runtime/synchronizer.hpp afu8u/hotspot/src/share/vm/runtime/synchronizer.hpp +--- openjdk/hotspot/src/share/vm/runtime/synchronizer.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/synchronizer.hpp 2025-05-06 10:53:45.155633674 +0800 +@@ -36,11 +36,7 @@ + + class ObjectSynchronizer : AllStatic { + friend class VMStructs; +-#if INCLUDE_ALL_GCS +- friend class ShenandoahSynchronizerIterator; +-#endif +- +-public: ++ public: + typedef enum { + owner_self, + owner_none, +diff -uNr openjdk/hotspot/src/share/vm/runtime/thread.cpp afu8u/hotspot/src/share/vm/runtime/thread.cpp +--- openjdk/hotspot/src/share/vm/runtime/thread.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/thread.cpp 2025-05-06 11:13:08.143672951 
+0800 +@@ -96,7 +96,6 @@ + # include "os_bsd.inline.hpp" + #endif + #if INCLUDE_ALL_GCS +-#include "gc_implementation/shenandoah/shenandoahControlThread.hpp" + #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" + #include "gc_implementation/g1/concurrentMarkThread.inline.hpp" + #include "gc_implementation/parallelScavenge/pcTasks.hpp" +@@ -304,42 +303,8 @@ + "bug in forced alignment of thread objects"); + } + #endif /* ASSERT */ +- +- _oom_during_evac = 0; +-#if INCLUDE_ALL_GCS +- _gc_state = _gc_state_global; +- _worker_id = (uint)(-1); // Actually, ShenandoahWorkerSession::INVALID_WORKER_ID, but avoid dependencies. +- _force_satb_flush = false; +- _paced_time = 0; +-#endif +-} +- +-void Thread::set_oom_during_evac(bool oom) { +- if (oom) { +- _oom_during_evac |= 1; +- } else { +- _oom_during_evac &= ~1; +- } + } + +-bool Thread::is_oom_during_evac() const { +- return (_oom_during_evac & 1) == 1; +-} +- +-#ifdef ASSERT +-void Thread::set_evac_allowed(bool evac_allowed) { +- if (evac_allowed) { +- _oom_during_evac |= 2; +- } else { +- _oom_during_evac &= ~2; +- } +-} +- +-bool Thread::is_evac_allowed() const { +- return (_oom_during_evac & 2) == 2; +-} +-#endif +- + void Thread::initialize_thread_local_storage() { + // Note: Make sure this method only calls + // non-blocking operations. Otherwise, it might not work +@@ -1555,7 +1520,6 @@ + #if INCLUDE_ALL_GCS + SATBMarkQueueSet JavaThread::_satb_mark_queue_set; + DirtyCardQueueSet JavaThread::_dirty_card_queue_set; +-char Thread::_gc_state_global = 0; + #endif // INCLUDE_ALL_GCS + + JavaThread::JavaThread(bool is_attaching_via_jni) : +@@ -1959,12 +1923,9 @@ + // from the list of active threads. We must do this after any deferred + // card marks have been flushed (above) so that any entries that are + // added to the thread's dirty card queue as a result are not lost. 
+- if (UseG1GC || (UseShenandoahGC)) { ++ if (UseG1GC) { + flush_barrier_queues(); + } +- if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) { +- gclab().make_parsable(true); +- } + #endif // INCLUDE_ALL_GCS + + // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread +@@ -1998,27 +1959,6 @@ + // The dirty card queue should have been constructed with its + // active field set to true. + assert(dirty_queue.is_active(), "dirty card queue should be active"); +- +- _gc_state = _gc_state_global; +-} +- +-void JavaThread::set_gc_state(char in_prog) { +- _gc_state = in_prog; +-} +- +-void JavaThread::set_gc_state_all_threads(char in_prog) { +- assert_locked_or_safepoint(Threads_lock); +- _gc_state_global = in_prog; +- for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) { +- t->set_gc_state(in_prog); +- } +-} +- +-void JavaThread::set_force_satb_flush_all_threads(bool value) { +- assert_locked_or_safepoint(Threads_lock); +- for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) { +- t->set_force_satb_flush(value); +- } + } + #endif // INCLUDE_ALL_GCS + +@@ -2049,12 +1989,9 @@ + } + + #if INCLUDE_ALL_GCS +- if (UseG1GC || (UseShenandoahGC)) { ++ if (UseG1GC) { + flush_barrier_queues(); + } +- if (UseShenandoahGC && UseTLAB && gclab().is_initialized()) { +- gclab().make_parsable(true); +- } + #endif // INCLUDE_ALL_GCS + + Threads::remove(this); +@@ -3359,13 +3296,6 @@ + // All JavaThreads + #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next()) + +-void Threads::java_threads_do(ThreadClosure* tc) { +- assert_locked_or_safepoint(Threads_lock); +- ALL_JAVA_THREADS(p) { +- tc->do_thread(p); +- } +-} +- + // All JavaThreads + all non-JavaThreads (i.e., every thread in the system) + void Threads::threads_do(ThreadClosure* tc) { + assert_locked_or_safepoint(Threads_lock); +@@ -3524,6 +3454,11 @@ + + JFR_ONLY(Jfr::on_vm_init();) + ++#if (defined(SW64) && !defined ZERO) ++ /* 
2013/11/5 Jin: To be accessed in NativeGeneralJump::patch_verified_entry() */ ++ main_thread->set_handle_wrong_method_stub(SharedRuntime::get_handle_wrong_method_stub()); ++#endif ++ + // Should be done after the heap is fully created + main_thread->cache_global_variables(); + +@@ -3682,11 +3617,9 @@ + // Support for ConcurrentMarkSweep. This should be cleaned up + // and better encapsulated. The ugly nested if test would go away + // once things are properly refactored. XXX YSR +- if (UseConcMarkSweepGC || UseG1GC || UseShenandoahGC) { ++ if (UseConcMarkSweepGC || UseG1GC) { + if (UseConcMarkSweepGC) { + ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD); +- } else if (UseShenandoahGC) { +- ShenandoahControlThread::makeSurrogateLockerThread(THREAD); + } else { + ConcurrentMarkThread::makeSurrogateLockerThread(THREAD); + } +@@ -4271,8 +4204,7 @@ + bool is_par = sh->n_par_threads() > 0; + assert(!is_par || + (SharedHeap::heap()->n_par_threads() == +- SharedHeap::heap()->workers()->active_workers() +- || UseShenandoahGC), "Mismatch"); ++ SharedHeap::heap()->workers()->active_workers()), "Mismatch"); + int cp = SharedHeap::heap()->strong_roots_parity(); + ALL_JAVA_THREADS(p) { + if (p->claim_oops_do(is_par, cp)) { +diff -uNr openjdk/hotspot/src/share/vm/runtime/thread.hpp afu8u/hotspot/src/share/vm/runtime/thread.hpp +--- openjdk/hotspot/src/share/vm/runtime/thread.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/thread.hpp 2025-05-06 11:13:08.143672951 +0800 +@@ -102,16 +102,6 @@ + + class Thread: public ThreadShadow { + friend class VMStructs; +- +-#if INCLUDE_ALL_GCS +-protected: +- // Support for Shenandoah barriers. This is only accessible from JavaThread, +- // but we really want to keep this field at lower Thread offset (below first +- // 128 bytes), because that makes barrier fastpaths optimally encoded. 
+- char _gc_state; +- static char _gc_state_global; +-#endif +- + private: + // Exception handling + // (Note: _pending_exception and friends are in ThreadShadow) +@@ -267,15 +257,8 @@ + friend class GC_locker; + + ThreadLocalAllocBuffer _tlab; // Thread-local eden +- ThreadLocalAllocBuffer _gclab; // Thread-local allocation buffer for GC (e.g. evacuation) +- uint _worker_id; // Worker ID +- bool _force_satb_flush; // Force SATB flush +- double _paced_time; // Accumulated paced time +- + jlong _allocated_bytes; // Cumulative number of bytes allocated on + // the Java heap +- jlong _allocated_bytes_gclab; // Cumulative number of bytes allocated on +- // the Java heap, in GCLABs + + // Thread-local buffer used by MetadataOnStackMark. + MetadataOnStackBuffer* _metadata_on_stack_buffer; +@@ -287,8 +270,6 @@ + int _vm_operation_started_count; // VM_Operation support + int _vm_operation_completed_count; // VM_Operation support + +- char _oom_during_evac; +- + ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread + // is waiting to lock + bool _current_pending_monitor_is_from_java; // locking is from Java code +@@ -407,14 +388,6 @@ + clear_suspend_flag(_critical_native_unlock); + } + +- bool is_oom_during_evac() const; +- void set_oom_during_evac(bool oom); +- +-#ifdef ASSERT +- bool is_evac_allowed() const; +- void set_evac_allowed(bool evac_allowed); +-#endif +- + // Support for Unhandled Oop detection + #ifdef CHECK_UNHANDLED_OOPS + private: +@@ -464,40 +437,15 @@ + ThreadLocalAllocBuffer& tlab() { return _tlab; } + void initialize_tlab() { + if (UseTLAB) { +- tlab().initialize(false); +- if (UseShenandoahGC && (is_Java_thread() || is_Worker_thread())) { +- gclab().initialize(true); +- } ++ tlab().initialize(); + } + } + +- // Thread-Local GC Allocation Buffer (GCLAB) support +- ThreadLocalAllocBuffer& gclab() { +- assert (UseShenandoahGC, "Only for Shenandoah"); +- assert (!_gclab.is_initialized() || (is_Java_thread() || is_Worker_thread()), +- "Only 
Java and GC worker threads are allowed to get GCLABs"); +- return _gclab; +- } +- +- void set_worker_id(uint id) { _worker_id = id; } +- uint worker_id() { return _worker_id; } +- +- void set_force_satb_flush(bool value) { _force_satb_flush = value; } +- bool is_force_satb_flush() { return _force_satb_flush; } +- +- void add_paced_time(double v) { _paced_time += v; } +- double paced_time() { return _paced_time; } +- void reset_paced_time() { _paced_time = 0; } +- + jlong allocated_bytes() { return _allocated_bytes; } + void set_allocated_bytes(jlong value) { _allocated_bytes = value; } + void incr_allocated_bytes(jlong size) { _allocated_bytes += size; } + inline jlong cooked_allocated_bytes(); + +- jlong allocated_bytes_gclab() { return _allocated_bytes_gclab; } +- void set_allocated_bytes_gclab(jlong value) { _allocated_bytes_gclab = value; } +- void incr_allocated_bytes_gclab(jlong size) { _allocated_bytes_gclab += size; } +- + JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;) + JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS) + +@@ -682,10 +630,6 @@ + + #undef TLAB_FIELD_OFFSET + +- static ByteSize gclab_start_offset() { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::start_offset(); } +- static ByteSize gclab_top_offset() { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::top_offset(); } +- static ByteSize gclab_end_offset() { return byte_offset_of(Thread, _gclab) + ThreadLocalAllocBuffer::end_offset(); } +- + static ByteSize allocated_bytes_offset() { return byte_offset_of(Thread, _allocated_bytes ); } + + JFR_ONLY(DEFINE_THREAD_LOCAL_OFFSET_JFR;) +@@ -1110,7 +1054,7 @@ + address last_Java_pc(void) { return _anchor.last_Java_pc(); } + + // Safepoint support +-#if !(defined(PPC64) || defined(AARCH64)) ++#if !(defined(PPC64) || defined(AARCH64) || defined(SW64)) + JavaThreadState thread_state() const { return _thread_state; } + void set_thread_state(JavaThreadState s) { _thread_state = s; } + #else +@@ -1441,9 +1385,6 @@ + #if 
INCLUDE_ALL_GCS + static ByteSize satb_mark_queue_offset() { return byte_offset_of(JavaThread, _satb_mark_queue); } + static ByteSize dirty_card_queue_offset() { return byte_offset_of(JavaThread, _dirty_card_queue); } +- +- static ByteSize gc_state_offset() { return byte_offset_of(JavaThread, _gc_state); } +- + #endif // INCLUDE_ALL_GCS + + // Returns the jni environment for this thread +@@ -1741,15 +1682,6 @@ + static DirtyCardQueueSet& dirty_card_queue_set() { + return _dirty_card_queue_set; + } +- +- inline char gc_state() const; +- +-private: +- void set_gc_state(char in_prog); +- +-public: +- static void set_gc_state_all_threads(char in_prog); +- static void set_force_satb_flush_all_threads(bool value); + #endif // INCLUDE_ALL_GCS + + // This method initializes the SATB and dirty card queues before a +@@ -1782,6 +1714,9 @@ + #ifdef TARGET_OS_ARCH_linux_aarch64 + # include "thread_linux_aarch64.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "thread_linux_sw64.hpp" ++#endif + #ifdef TARGET_OS_ARCH_linux_sparc + # include "thread_linux_sparc.hpp" + #endif +@@ -1995,7 +1930,6 @@ + static bool includes(JavaThread* p); + static JavaThread* first() { return _thread_list; } + static void threads_do(ThreadClosure* tc); +- static void java_threads_do(ThreadClosure* tc); + + // Initializes the vm and creates the vm thread + static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain); +diff -uNr openjdk/hotspot/src/share/vm/runtime/thread.inline.hpp afu8u/hotspot/src/share/vm/runtime/thread.inline.hpp +--- openjdk/hotspot/src/share/vm/runtime/thread.inline.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/thread.inline.hpp 2025-05-06 10:53:45.159633674 +0800 +@@ -59,7 +59,7 @@ + return allocated_bytes; + } + +-#if defined(PPC64) || defined (AARCH64) ++#if defined(PPC64) || defined (AARCH64) | defined (SW64) + inline JavaThreadState JavaThread::thread_state() const { + return (JavaThreadState) 
OrderAccess::load_acquire((volatile jint*)&_thread_state); + } +@@ -74,10 +74,4 @@ + OrderAccess::fence(); + } + +-#if INCLUDE_ALL_GCS +-char JavaThread::gc_state() const { +- return _gc_state; +-} +-#endif +- + #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP +diff -uNr openjdk/hotspot/src/share/vm/runtime/threadLocalStorage.hpp afu8u/hotspot/src/share/vm/runtime/threadLocalStorage.hpp +--- openjdk/hotspot/src/share/vm/runtime/threadLocalStorage.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/threadLocalStorage.hpp 2025-05-06 10:53:45.159633674 +0800 +@@ -51,6 +51,9 @@ + #ifdef TARGET_OS_ARCH_linux_x86 + # include "threadLS_linux_x86.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "threadLS_linux_sw64.hpp" ++#endif + #ifdef TARGET_OS_ARCH_linux_aarch64 + # include "threadLS_linux_aarch64.hpp" + #endif +diff -uNr openjdk/hotspot/src/share/vm/runtime/vm_operations.hpp afu8u/hotspot/src/share/vm/runtime/vm_operations.hpp +--- openjdk/hotspot/src/share/vm/runtime/vm_operations.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/vm_operations.hpp 2025-05-06 10:53:45.163633675 +0800 +@@ -93,12 +93,6 @@ + template(HeapIterateOperation) \ + template(ReportJavaOutOfMemory) \ + template(JFRCheckpoint) \ +- template(ShenandoahFullGC) \ +- template(ShenandoahInitMark) \ +- template(ShenandoahFinalMarkStartEvac) \ +- template(ShenandoahInitUpdateRefs) \ +- template(ShenandoahFinalUpdateRefs) \ +- template(ShenandoahDegeneratedGC) \ + template(Exit) \ + template(LinuxDllLoad) \ + template(RotateGCLog) \ +diff -uNr openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp afu8u/hotspot/src/share/vm/runtime/vmStructs.cpp +--- openjdk/hotspot/src/share/vm/runtime/vmStructs.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/vmStructs.cpp 2025-05-06 11:13:08.143672951 +0800 +@@ -122,6 +122,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vmStructs_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# 
include "vmStructs_sw64.hpp" ++#endif + #ifdef TARGET_OS_ARCH_linux_x86 + # include "vmStructs_linux_x86.hpp" + #endif +@@ -149,6 +152,9 @@ + #ifdef TARGET_OS_ARCH_linux_ppc + # include "vmStructs_linux_ppc.hpp" + #endif ++#ifdef TARGET_OS_ARCH_linux_sw64 ++# include "vmStructs_linux_sw64.hpp" ++#endif + #ifdef TARGET_OS_ARCH_aix_ppc + # include "vmStructs_aix_ppc.hpp" + #endif +@@ -173,7 +179,6 @@ + #include "gc_implementation/parallelScavenge/psYoungGen.hpp" + #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp" + #include "gc_implementation/g1/vmStructs_g1.hpp" +-#include "gc_implementation/shenandoah/vmStructs_shenandoah.hpp" + #endif // INCLUDE_ALL_GCS + + #ifdef COMPILER2 +@@ -209,6 +214,8 @@ + # include "adfiles/adGlobals_zero.hpp" + #elif defined TARGET_ARCH_MODEL_ppc_64 + # include "adfiles/adGlobals_ppc_64.hpp" ++#elif defined TARGET_ARCH_MODEL_sw64 ++# include "adfiles/adGlobals_sw64.hpp" + #endif + #endif // COMPILER2 + +@@ -2223,7 +2230,6 @@ + declare_constant(BarrierSet::CardTableExtension) \ + declare_constant(BarrierSet::G1SATBCT) \ + declare_constant(BarrierSet::G1SATBCTLogging) \ +- declare_constant(BarrierSet::ShenandoahBarrierSet) \ + declare_constant(BarrierSet::Other) \ + \ + declare_constant(BlockOffsetSharedArray::LogN) \ +@@ -2247,7 +2253,6 @@ + declare_constant(CollectedHeap::Abstract) \ + declare_constant(CollectedHeap::SharedHeap) \ + declare_constant(CollectedHeap::GenCollectedHeap) \ +- declare_constant(CollectedHeap::ShenandoahHeap) \ + \ + declare_constant(GenCollectedHeap::max_gens) \ + \ +@@ -2925,11 +2930,6 @@ + + VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, + GENERATE_STATIC_VM_STRUCT_ENTRY) +- +- VM_STRUCTS_SHENANDOAH(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, +- GENERATE_NONSTATIC_VM_STRUCT_ENTRY, +- GENERATE_STATIC_VM_STRUCT_ENTRY) +- + #endif // INCLUDE_ALL_GCS + + VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, +@@ -2975,10 +2975,6 @@ + + VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY, + 
GENERATE_TOPLEVEL_VM_TYPE_ENTRY) +- +- VM_TYPES_SHENANDOAH(GENERATE_VM_TYPE_ENTRY, +- GENERATE_TOPLEVEL_VM_TYPE_ENTRY, +- GENERATE_INTEGER_VM_TYPE_ENTRY) + #endif // INCLUDE_ALL_GCS + + VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY, +@@ -3014,9 +3010,6 @@ + VM_INT_CONSTANTS_CMS(GENERATE_VM_INT_CONSTANT_ENTRY) + + VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY) +- +- VM_INT_CONSTANTS_SHENANDOAH(GENERATE_VM_INT_CONSTANT_ENTRY, +- GENERATE_VM_INT_CONSTANT_WITH_VALUE_ENTRY) + #endif // INCLUDE_ALL_GCS + + VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY, +@@ -3083,10 +3076,6 @@ + VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY, + CHECK_STATIC_VM_STRUCT_ENTRY); + +- +- VM_STRUCTS_SHENANDOAH(CHECK_NONSTATIC_VM_STRUCT_ENTRY, +- CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY, +- CHECK_STATIC_VM_STRUCT_ENTRY); + #endif // INCLUDE_ALL_GCS + + VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY, +@@ -3128,9 +3117,6 @@ + VM_TYPES_G1(CHECK_VM_TYPE_ENTRY, + CHECK_SINGLE_ARG_VM_TYPE_NO_OP); + +- VM_TYPES_SHENANDOAH(CHECK_VM_TYPE_ENTRY, +- CHECK_SINGLE_ARG_VM_TYPE_NO_OP, +- CHECK_SINGLE_ARG_VM_TYPE_NO_OP); + #endif // INCLUDE_ALL_GCS + + VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY, +diff -uNr openjdk/hotspot/src/share/vm/runtime/vm_version.cpp afu8u/hotspot/src/share/vm/runtime/vm_version.cpp +--- openjdk/hotspot/src/share/vm/runtime/vm_version.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/runtime/vm_version.cpp 2025-05-06 11:13:08.143672951 +0800 +@@ -44,6 +44,9 @@ + #ifdef TARGET_ARCH_ppc + # include "vm_version_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "vm_version_sw64.hpp" ++#endif + + const char* Abstract_VM_Version::_s_vm_release = Abstract_VM_Version::vm_release(); + const char* Abstract_VM_Version::_s_internal_vm_info_string = Abstract_VM_Version::internal_vm_info_string(); +@@ -197,6 +200,7 @@ + #define CPU IA32_ONLY("x86") \ + IA64_ONLY("ia64") \ + AMD64_ONLY("amd64") \ ++ SW64_ONLY("sw64") \ + AARCH64_ONLY("aarch64") \ + SPARC_ONLY("sparc") + #endif // 
ZERO +diff -uNr openjdk/hotspot/src/share/vm/services/heapDumper.cpp afu8u/hotspot/src/share/vm/services/heapDumper.cpp +--- openjdk/hotspot/src/share/vm/services/heapDumper.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/heapDumper.cpp 2025-05-06 10:53:45.163633675 +0800 +@@ -756,12 +756,6 @@ + o = oopDesc::load_decode_heap_oop((oop*)addr); + } + +-#if INCLUDE_ALL_GCS +- if (UseShenandoahGC) { +- o = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(o); +- } +-#endif +- + // reflection and sun.misc.Unsafe classes may have a reference to a + // Klass* so filter it out. + assert(o->is_oop_or_null(), "should always be an oop"); +diff -uNr openjdk/hotspot/src/share/vm/services/memoryManager.cpp afu8u/hotspot/src/share/vm/services/memoryManager.cpp +--- openjdk/hotspot/src/share/vm/services/memoryManager.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/memoryManager.cpp 2025-05-06 10:53:45.163633675 +0800 +@@ -100,14 +100,6 @@ + return (GCMemoryManager*) new G1OldGenMemoryManager(); + } + +-GCMemoryManager* MemoryManager::get_shenandoah_cycles_memory_manager() { +- return (GCMemoryManager*) new ShenandoahCyclesMemoryManager(); +-} +- +-GCMemoryManager* MemoryManager::get_shenandoah_pauses_memory_manager() { +- return (GCMemoryManager*) new ShenandoahPausesMemoryManager(); +-} +- + instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { + // Must do an acquire so as to force ordering of subsequent + // loads from anything _memory_mgr_obj points to or implies. 
+diff -uNr openjdk/hotspot/src/share/vm/services/memoryManager.hpp afu8u/hotspot/src/share/vm/services/memoryManager.hpp +--- openjdk/hotspot/src/share/vm/services/memoryManager.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/memoryManager.hpp 2025-05-06 10:53:45.167633675 +0800 +@@ -65,9 +65,7 @@ + PSScavenge, + PSMarkSweep, + G1YoungGen, +- G1OldGen, +- ShenandoahCycles, +- ShenandoahPauses ++ G1OldGen + }; + + MemoryManager(); +@@ -101,8 +99,7 @@ + static GCMemoryManager* get_psMarkSweep_memory_manager(); + static GCMemoryManager* get_g1YoungGen_memory_manager(); + static GCMemoryManager* get_g1OldGen_memory_manager(); +- static GCMemoryManager* get_shenandoah_cycles_memory_manager(); +- static GCMemoryManager* get_shenandoah_pauses_memory_manager(); ++ + }; + + class CodeCacheMemoryManager : public MemoryManager { +@@ -299,19 +296,4 @@ + const char* name() { return "G1 Old Generation"; } + }; + +-class ShenandoahCyclesMemoryManager : public GCMemoryManager { +-public: +- ShenandoahCyclesMemoryManager() : GCMemoryManager() {} +- +- MemoryManager::Name kind() { return MemoryManager::ShenandoahCycles; } +- const char* name() { return "Shenandoah Cycles"; } +-}; +- +-class ShenandoahPausesMemoryManager : public GCMemoryManager { +-public: +- ShenandoahPausesMemoryManager() : GCMemoryManager() {} +- +- MemoryManager::Name kind() { return MemoryManager::ShenandoahPauses; } +- const char* name() { return "Shenandoah Pauses"; } +-}; + #endif // SHARE_VM_SERVICES_MEMORYMANAGER_HPP +diff -uNr openjdk/hotspot/src/share/vm/services/memoryService.cpp afu8u/hotspot/src/share/vm/services/memoryService.cpp +--- openjdk/hotspot/src/share/vm/services/memoryService.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/memoryService.cpp 2025-05-06 10:53:45.167633675 +0800 +@@ -46,7 +46,6 @@ + #include "utilities/growableArray.hpp" + #include "utilities/macros.hpp" + #if INCLUDE_ALL_GCS +-#include 
"gc_implementation/shenandoah/shenandoahHeap.inline.hpp" + #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" + #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" + #include "gc_implementation/parNew/parNewGeneration.hpp" +@@ -55,7 +54,6 @@ + #include "gc_implementation/parallelScavenge/psYoungGen.hpp" + #include "services/g1MemoryPool.hpp" + #include "services/psMemoryPool.hpp" +-#include "services/shenandoahMemoryPool.hpp" + #endif // INCLUDE_ALL_GCS + + GrowableArray* MemoryService::_pools_list = +@@ -98,10 +96,6 @@ + add_g1_heap_info(G1CollectedHeap::heap()); + break; + } +- case CollectedHeap::ShenandoahHeap : { +- add_shenandoah_heap_info(ShenandoahHeap::heap()); +- break; +- } + #endif // INCLUDE_ALL_GCS + default: { + guarantee(false, "Unrecognized kind of heap"); +@@ -195,24 +189,6 @@ + add_g1YoungGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager); + add_g1OldGen_memory_pool(g1h, _major_gc_manager, _minor_gc_manager); + } +- +-void MemoryService::add_shenandoah_heap_info(ShenandoahHeap* heap) { +- assert(UseShenandoahGC, "sanity"); +- +- // We reuse the "minor/major" names, even though they make little sense +- // in Shenandoah. JDK 10+ makes this right, but not JDK 9-. 
+- _major_gc_manager = MemoryManager::get_shenandoah_pauses_memory_manager(); +- _minor_gc_manager = MemoryManager::get_shenandoah_cycles_memory_manager(); +- _managers_list->append(_major_gc_manager); +- _managers_list->append(_minor_gc_manager); +- +- ShenandoahMemoryPool* pool = new ShenandoahMemoryPool(heap); +- _pools_list->append(pool); +- +- _major_gc_manager->add_pool(pool); +- _minor_gc_manager->add_pool(pool); +-} +- + #endif // INCLUDE_ALL_GCS + + MemoryPool* MemoryService::add_gen(Generation* gen, +diff -uNr openjdk/hotspot/src/share/vm/services/memoryService.hpp afu8u/hotspot/src/share/vm/services/memoryService.hpp +--- openjdk/hotspot/src/share/vm/services/memoryService.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/memoryService.hpp 2025-05-06 10:53:45.167633675 +0800 +@@ -46,7 +46,6 @@ + class GenCollectedHeap; + class ParallelScavengeHeap; + class G1CollectedHeap; +-class ShenandoahHeap; + + // VM Monitoring and Management Support + +@@ -122,7 +121,6 @@ + static void add_gen_collected_heap_info(GenCollectedHeap* heap); + static void add_parallel_scavenge_heap_info(ParallelScavengeHeap* heap); + static void add_g1_heap_info(G1CollectedHeap* g1h); +- static void add_shenandoah_heap_info(ShenandoahHeap* heap); + + public: + static void set_universe_heap(CollectedHeap* heap); +diff -uNr openjdk/hotspot/src/share/vm/services/shenandoahMemoryPool.cpp afu8u/hotspot/src/share/vm/services/shenandoahMemoryPool.cpp +--- openjdk/hotspot/src/share/vm/services/shenandoahMemoryPool.cpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/shenandoahMemoryPool.cpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,53 +0,0 @@ +-/* +- * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "precompiled.hpp" +-#include "services/shenandoahMemoryPool.hpp" +- +-ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap) : +- CollectedMemoryPool("Shenandoah", +- MemoryPool::Heap, +- heap->initial_capacity(), +- heap->max_capacity(), +- true /* support_usage_threshold */), +- _heap(heap) {} +- +-MemoryUsage ShenandoahMemoryPool::get_memory_usage() { +- size_t initial = initial_size(); +- size_t max = max_size(); +- size_t used = used_in_bytes(); +- size_t committed = _heap->committed(); +- +- // These asserts can never fail: max is stable, and all updates to other values never overflow max. +- assert(initial <= max, err_msg("initial: " SIZE_FORMAT ", max: " SIZE_FORMAT, initial, max)); +- assert(used <= max, err_msg("used: " SIZE_FORMAT ", max: " SIZE_FORMAT, used, max)); +- assert(committed <= max, err_msg("committed: " SIZE_FORMAT ", max: " SIZE_FORMAT, committed, max)); +- assert(used <= committed, err_msg("used: " SIZE_FORMAT ", committed: " SIZE_FORMAT, used, committed)); +- +- // Committed and used are updated concurrently and independently. They can momentarily break +- // the assert below, which would also fail in downstream code. 
To avoid that, adjust values +- // to make sense under the race. See JDK-8207200. +- committed = MAX2(used, committed); +- +- return MemoryUsage(initial, used, committed, max); +-} +diff -uNr openjdk/hotspot/src/share/vm/services/shenandoahMemoryPool.hpp afu8u/hotspot/src/share/vm/services/shenandoahMemoryPool.hpp +--- openjdk/hotspot/src/share/vm/services/shenandoahMemoryPool.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/services/shenandoahMemoryPool.hpp 1970-01-01 08:00:00.000000000 +0800 +@@ -1,44 +0,0 @@ +-/* +- * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#ifndef SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP +-#define SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP +- +-#ifndef SERIALGC +-#include "gc_implementation/shenandoah/shenandoahHeap.hpp" +-#include "services/memoryPool.hpp" +-#include "services/memoryUsage.hpp" +-#endif +- +-class ShenandoahMemoryPool : public CollectedMemoryPool { +-private: +- ShenandoahHeap* _heap; +- +-public: +- ShenandoahMemoryPool(ShenandoahHeap* pool); +- MemoryUsage get_memory_usage(); +- size_t used_in_bytes() { return _heap->used(); } +- size_t max_size() const { return _heap->max_capacity(); } +-}; +- +-#endif //SHARE_VM_SERVICES_SHENANDOAHMEMORYPOOL_HPP +diff -uNr openjdk/hotspot/src/share/vm/utilities/copy.hpp afu8u/hotspot/src/share/vm/utilities/copy.hpp +--- openjdk/hotspot/src/share/vm/utilities/copy.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/utilities/copy.hpp 2025-05-06 10:53:45.171633675 +0800 +@@ -331,6 +331,27 @@ + #endif + } + ++ ++ // SAPJVM AS 2011-09-20. Template for atomic copy. ++ template static void copy_conjoint_atomic(T* from, T* to, size_t count) ++ { ++ if (from > to) { ++ while (count-- > 0) { ++ // Copy forwards ++ *to++ = *from++; ++ } ++ } else { ++ from += count - 1; ++ to += count - 1; ++ while (count-- > 0) { ++ // Copy backwards ++ *to-- = *from--; ++ } ++ } ++ } ++ ++ ++ + // Platform dependent implementations of the above methods. 
+ #ifdef TARGET_ARCH_x86 + # include "copy_x86.hpp" +@@ -350,6 +371,10 @@ + #ifdef TARGET_ARCH_ppc + # include "copy_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "copy_sw64.hpp" ++#endif ++ + + }; + +diff -uNr openjdk/hotspot/src/share/vm/utilities/globalDefinitions.hpp afu8u/hotspot/src/share/vm/utilities/globalDefinitions.hpp +--- openjdk/hotspot/src/share/vm/utilities/globalDefinitions.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/utilities/globalDefinitions.hpp 2025-05-06 10:53:45.171633675 +0800 +@@ -150,14 +150,8 @@ + #ifdef _LP64 + const int SerializePageShiftCount = 4; + #else +-#if INCLUDE_JFR && INCLUDE_ALL_GCS +-// JavaThread already has quite a few Shenandoah fields. Adding many JFR fields +-// trips sizeof(JavaThread) > 1024. Need to adjust it here. +-const int SerializePageShiftCount = 4; +-#else + const int SerializePageShiftCount = 3; + #endif +-#endif + + // An opaque struct of heap-word width, so that HeapWord* can be a generic + // pointer into the heap. 
We require that object sizes be measured in +@@ -461,6 +455,9 @@ + #ifdef TARGET_ARCH_ppc + # include "globalDefinitions_ppc.hpp" + #endif ++#ifdef TARGET_ARCH_sw64 ++# include "globalDefinitions_sw64.hpp" ++#endif + + /* + * If a platform does not support native stack walking +@@ -1438,7 +1435,6 @@ + #define UINT64_FORMAT_X "%" PRIx64 + #define INT64_FORMAT_W(width) "%" #width PRId64 + #define UINT64_FORMAT_W(width) "%" #width PRIu64 +-#define UINT64_FORMAT_X_W(width) "%" #width PRIx64 + + #define PTR64_FORMAT "0x%016" PRIx64 + +diff -uNr openjdk/hotspot/src/share/vm/utilities/macros.hpp afu8u/hotspot/src/share/vm/utilities/macros.hpp +--- openjdk/hotspot/src/share/vm/utilities/macros.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/utilities/macros.hpp 2025-05-06 11:13:08.143672951 +0800 +@@ -179,10 +179,8 @@ + #define TIERED + #endif + #define COMPILER1_PRESENT(code) code +-#define NOT_COMPILER1(code) + #else // COMPILER1 + #define COMPILER1_PRESENT(code) +-#define NOT_COMPILER1(code) code + #endif // COMPILER1 + + // COMPILER2 variant +@@ -375,6 +373,14 @@ + #define NOT_SPARC(code) code + #endif + ++#ifdef SW64 ++#define SW64_ONLY(code) code ++#define NOT_SW64(code) ++#else ++#define SW64_ONLY(code) ++#define NOT_SW64(code) code ++#endif ++ + #if defined(PPC32) || defined(PPC64) + #ifndef PPC + #define PPC +diff -uNr openjdk/hotspot/src/share/vm/utilities/taskqueue.hpp afu8u/hotspot/src/share/vm/utilities/taskqueue.hpp +--- openjdk/hotspot/src/share/vm/utilities/taskqueue.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/utilities/taskqueue.hpp 2025-05-06 10:53:45.175633675 +0800 +@@ -501,7 +501,6 @@ + public: + // Returns "true" if some TaskQueue in the set contains a task. 
+ virtual bool peek() = 0; +- virtual size_t tasks() = 0; + }; + + template class TaskQueueSetSuperImpl: public CHeapObj, public TaskQueueSetSuper { +@@ -538,9 +537,6 @@ + bool steal(uint queue_num, int* seed, E& t); + + bool peek(); +- size_t tasks(); +- +- uint size() const { return _n; } + }; + + template void +@@ -598,15 +594,6 @@ + return false; + } + +-template +-size_t GenericTaskQueueSet::tasks() { +- size_t n = 0; +- for (uint j = 0; j < _n; j++) { +- n += _queues[j]->size(); +- } +- return n; +-} +- + // When to terminate from the termination protocol. + class TerminatorTerminator: public CHeapObj { + public: +@@ -619,7 +606,7 @@ + #undef TRACESPINNING + + class ParallelTaskTerminator: public StackObj { +-protected: ++private: + int _n_threads; + TaskQueueSetSuper* _queue_set; + char _pad_before[DEFAULT_CACHE_LINE_SIZE]; +@@ -647,7 +634,7 @@ + // else is. If returns "true", all threads are terminated. If returns + // "false", available work has been observed in one of the task queues, + // so the global task is not complete. 
+- virtual bool offer_termination() { ++ bool offer_termination() { + return offer_termination(NULL); + } + +diff -uNr openjdk/hotspot/src/share/vm/utilities/ticks.hpp afu8u/hotspot/src/share/vm/utilities/ticks.hpp +--- openjdk/hotspot/src/share/vm/utilities/ticks.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/utilities/ticks.hpp 2025-05-06 10:53:45.175633675 +0800 +@@ -57,6 +57,43 @@ + static uint64_t nanoseconds(Type value); + }; + ++#ifdef TARGET_ARCH_sw64 ++template ++class PairRep { ++ public: ++ XT1 val1; ++ XT2 val2; ++ ++ PairRep() : val1((XT1)0), val2((XT2)0) {} ++ void operator+=(const PairRep& rhs) { ++ val1 += rhs.val1; ++ val2 += rhs.val2; ++ } ++ void operator-=(const PairRep& rhs) { ++ val1 -= rhs.val1; ++ val2 -= rhs.val2; ++ } ++ bool operator==(const PairRep& rhs) const { ++ return val1 == rhs.val1; ++ } ++ bool operator!=(const PairRep& rhs) const { ++ return !operator==(rhs); ++ } ++ bool operator<(const PairRep& rhs) const { ++ return val1 < rhs.val1; ++ } ++ bool operator>(const PairRep& rhs) const { ++ return val1 > rhs.val1; ++ } ++}; ++ ++template ++PairRep operator-(const PairRep& lhs, const PairRep& rhs) { ++ PairRep temp(lhs); ++ temp -= rhs; ++ return temp; ++} ++#else + template + class PairRep { + public: +@@ -92,6 +129,7 @@ + temp -= rhs; + return temp; + } ++#endif + + typedef PairRep CompositeTime; + +diff -uNr openjdk/hotspot/src/share/vm/utilities/top.hpp afu8u/hotspot/src/share/vm/utilities/top.hpp +--- openjdk/hotspot/src/share/vm/utilities/top.hpp 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/src/share/vm/utilities/top.hpp 2025-05-06 10:53:45.175633675 +0800 +@@ -35,7 +35,6 @@ + #include "utilities/sizes.hpp" + #if INCLUDE_ALL_GCS + #include "gc_implementation/g1/g1_globals.hpp" +-#include "gc_implementation/shenandoah/shenandoah_globals.hpp" + #endif // INCLUDE_ALL_GCS + #ifdef COMPILER1 + #include "c1/c1_globals.hpp" +diff -uNr 
openjdk/hotspot/test/compiler/gcbarriers/EqvUncastStepOverBarrier.java afu8u/hotspot/test/compiler/gcbarriers/EqvUncastStepOverBarrier.java +--- openjdk/hotspot/test/compiler/gcbarriers/EqvUncastStepOverBarrier.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/compiler/gcbarriers/EqvUncastStepOverBarrier.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,81 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/* +- * @test +- * @bug 8212673 +- * @summary Node::eqv_uncast() shouldn't step over load barriers unconditionally +- * @library /test/lib / +- * @modules java.base/jdk.internal.misc +- * +- * @build sun.hotspot.WhiteBox +- * @run driver ClassFileInstaller sun.hotspot.WhiteBox +- * sun.hotspot.WhiteBox$WhiteBoxPermission +- * @run main/othervm -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-UseOnStackReplacement -XX:-TieredCompilation EqvUncastStepOverBarrier +- */ +- +-import sun.hotspot.WhiteBox; +-import java.lang.reflect.Method; +- +-public class EqvUncastStepOverBarrier { +- static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); +- +- private static Object field = new A(); +- +- public static void main(String[] args) throws Exception { +- for (int i = 0; i < 20_000; i++) { +- test(); +- test(); +- test_helper(null, 0); +- } +- Method m = EqvUncastStepOverBarrier.class.getDeclaredMethod("test"); +- WHITE_BOX.enqueueMethodForCompilation(m, 4); +- if (!WHITE_BOX.isMethodCompiled(m, false)) { +- throw new RuntimeException("Method compilation failed"); +- } +- } +- +- private static Object test() { +- Object o = field; +- if (o == null) {} +- for (int i = 1; i < 100; i *= 2) { +- int j = 0; +- for (; j < 4; j++) ; +- o = test_helper(o, j); +- } +- return o; +- } +- +- private static Object test_helper(Object o, int j) { +- if (j == 4) { +- A a = (A) o; +- o = a; +- } else { +- o = new Object(); +- } +- return o; +- } +- +- private static class A { +- } +-} +diff -uNr openjdk/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java afu8u/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java +--- openjdk/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java 2025-05-06 10:53:45.231633677 +0800 +@@ -39,8 +39,6 @@ + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages +- * @run main/othervm -Xms7M -Xmx9M -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UseLargePages TestAlignmentToUseLargePages +- * @run main/othervm -Xms7M 
-Xmx9M -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-UseLargePages TestAlignmentToUseLargePages + */ + + public class TestAlignmentToUseLargePages { +diff -uNr openjdk/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java afu8u/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java +--- openjdk/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/arguments/TestUseCompressedOopsErgo.java 2025-05-06 10:53:45.231633677 +0800 +@@ -34,7 +34,6 @@ + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC +- * @run main/othervm TestUseCompressedOopsErgo -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC + */ + + public class TestUseCompressedOopsErgo { +diff -uNr openjdk/hotspot/test/gc/logging/TestGCId.java afu8u/hotspot/test/gc/logging/TestGCId.java +--- openjdk/hotspot/test/gc/logging/TestGCId.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/logging/TestGCId.java 2025-05-06 10:53:45.235633677 +0800 +@@ -45,9 +45,6 @@ + + testGCId("UseSerialGC", "PrintGC"); + testGCId("UseSerialGC", "PrintGCDetails"); +- +- testGCId("UseShenandoahGC", "PrintGC"); +- testGCId("UseShenandoahGC", "PrintGCDetails"); + } + + private static void verifyContainsGCIDs(OutputAnalyzer output) { +@@ -64,17 +61,17 @@ + private static void testGCId(String gcFlag, String logFlag) throws Exception { + // GCID logging enabled + ProcessBuilder pb_enabled = +- ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:+PrintGCID", GCTest.class.getName()); ++ ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:+PrintGCID", GCTest.class.getName()); + verifyContainsGCIDs(new OutputAnalyzer(pb_enabled.start())); + + // GCID logging 
disabled + ProcessBuilder pb_disabled = +- ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:-PrintGCID", GCTest.class.getName()); ++ ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:-PrintGCID", GCTest.class.getName()); + verifyContainsNoGCIDs(new OutputAnalyzer(pb_disabled.start())); + + // GCID logging default + ProcessBuilder pb_default = +- ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", GCTest.class.getName()); ++ ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", GCTest.class.getName()); + verifyContainsNoGCIDs(new OutputAnalyzer(pb_default.start())); + } + +diff -uNr openjdk/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java afu8u/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java +--- openjdk/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java 2025-05-06 10:53:45.235633677 +0800 +@@ -37,12 +37,10 @@ + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters +- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestMetaspacePerfCounters + * + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops 
-XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters +- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestMetaspacePerfCounters + */ + public class TestMetaspacePerfCounters { + public static Class fooClass = null; +diff -uNr openjdk/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java afu8u/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java +--- openjdk/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java 2025-05-06 10:53:45.235633677 +0800 +@@ -33,8 +33,8 @@ + * @requires vm.gc=="Serial" | vm.gc=="null" + * @summary Tests that a MemoryPoolMXBeans and PerfCounters for metaspace + * report the same data. 
+- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools +- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools ++ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools + */ + public class TestPerfCountersAndMemoryPools { + public static void main(String[] args) throws Exception { +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/BarrierInInfiniteLoop.java afu8u/hotspot/test/gc/shenandoah/compiler/BarrierInInfiniteLoop.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/BarrierInInfiniteLoop.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/BarrierInInfiniteLoop.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,79 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/** +- * @test +- * @bug 8237837 8244721 +- * @summary Shenandoah: assert(mem == __null) failed: only one safepoint +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -XX:CompileOnly=BarrierInInfiniteLoop::test1 +- * -XX:CompileOnly=BarrierInInfiniteLoop::test2 -XX:CompileOnly=BarrierInInfiniteLoop::test3 -XX:CompileCommand=quiet BarrierInInfiniteLoop +- * +- */ +- +-public class BarrierInInfiniteLoop { +- private static Object field1 = new Object(); +- private static Object field2 = new Object(); +- private static int field3; +- +- public static void main(String[] args) { +- test1(false); +- test2(false, false); +- test3(false); +- } +- +- private static void test1(boolean flag) { +- if (flag) { +- for (;;) { +- field1 = field2; +- } +- } +- } +- +- private static void test2(boolean flag1, boolean flag2) { +- if (flag1) { +- for (;;) { +- for (;;) { +- if (flag2) { +- break; +- } +- field1 = field2; +- } +- } +- } +- } +- +- private static void test3(boolean flag) { +- if (flag) { +- for (;;) { +- for (;;) { +- field3 = 42; +- if (field1 == field2) { +- break; +- } +- } +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/CallMultipleCatchProjs.java afu8u/hotspot/test/gc/shenandoah/compiler/CallMultipleCatchProjs.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/CallMultipleCatchProjs.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/CallMultipleCatchProjs.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,66 +0,0 @@ +-/* +- * Copyright (c) 2019, Red 
Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- */ +- +-/** +- * @test +- * @bug 8231405 +- * @summary barrier expansion breaks if barrier is right after call to rethrow stub +- * @key gc +- * +- * @run main/othervm -XX:CompileOnly=CallMultipleCatchProjs::test -Xcomp -Xverify:none -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC CallMultipleCatchProjs +- * +- */ +- +-public class CallMultipleCatchProjs { +- private static A field = new A(); +- +- public static void main(String[] args) throws Exception { +- Exception3 exception3 = new Exception3(); +- test(new Exception2()); +- } +- +- static int test(Exception exception) throws Exception { +- try { +- throw exception; +- } catch (Exception1 e1) { +- return 1; +- } catch (Exception2 e2) { +- return field.i + 2; +- } catch (Exception3 e3) { +- return field.i + 3; +- } +- } +- +- private static class Exception1 extends Exception { +- } +- +- private static class Exception2 extends Exception { +- } +- +- private static class Exception3 extends Exception { +- } +- +- private static class A { +- public int i; +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/LRBRightAfterMemBar.java afu8u/hotspot/test/gc/shenandoah/compiler/LRBRightAfterMemBar.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/LRBRightAfterMemBar.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/LRBRightAfterMemBar.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,63 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/** +- * @test +- * @bug 8237007 +- * @summary Shenandoah: assert(_base == Tuple) failure during C2 compilation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-BackgroundCompilation -XX:+UseShenandoahGC LRBRightAfterMemBar +- * +- */ +- +-public class LRBRightAfterMemBar { +- private static Object field1; +- private static Object field2; +- static volatile int barrier; +- +- public static void main(String[] args) { +- for (int i = 0; i < 20_000; i++) { +- test(true, true, new Object()); +- test(false, false, new Object()); +- } +- } +- +- private static Object test(boolean flag, boolean flag2, Object o2) { +- for (int i = 0; i < 10; i++) { +- barrier = 0x42; // Membar +- if (o2 == null) { // hoisted out of loop +- } +- // The following line is converted to a CMove with an out +- // of loop control once the null check above is +- // hoisted. The CMove is pinned right after the membar and +- // assigned the membar as control. +- Object o = flag ? 
field1 : field2; +- if (flag2) { +- return o; +- } +- } +- +- return null; +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestC1VectorizedMismatch.java afu8u/hotspot/test/gc/shenandoah/compiler/TestC1VectorizedMismatch.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestC1VectorizedMismatch.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestC1VectorizedMismatch.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,64 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* @test TestC1VectorizedMismatch +- * @summary test C1 vectorized mismatch intrinsic +- * @key gc +- * +- * @run main/othervm -XX:TieredStopAtLevel=1 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive TestC1VectorizedMismatch +- */ +- +-import java.util.Arrays; +- +-public class TestC1VectorizedMismatch { +- +- private static final int NUM_RUNS = 10000; +- private static final int ARRAY_SIZE = 10000; +- private static int[] a; +- private static int[] b; +- +- public static void main(String[] args) { +- a = new int[ARRAY_SIZE]; +- b = new int[ARRAY_SIZE]; +- for (int i = 0; i < NUM_RUNS; i++) { +- test(); +- } +- } +- +- private static void test() { +- int[] a1 = new int[ARRAY_SIZE]; +- int[] b1 = new int[ARRAY_SIZE]; +- fillArray(a); +- System.arraycopy(a, 0, b, 0, ARRAY_SIZE); +- if (!Arrays.equals(a, b)) { +- throw new RuntimeException("arrays not equal"); +- } +- } +- +- private static void fillArray(int[] array) { +- for (int i = 0; i < ARRAY_SIZE; i++) { +- int val = (int) (Math.random() * Integer.MAX_VALUE); +- array[i] = val; +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestClone.java afu8u/hotspot/test/gc/shenandoah/compiler/TestClone.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestClone.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestClone.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,236 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/* +- * @test TestClone +- * @summary Test clone barriers work correctly +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -Xint +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:-TieredCompilation +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:TieredStopAtLevel=1 +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:TieredStopAtLevel=4 +- * TestClone +- */ +- +-/* +- * @test TestClone +- * @summary Test clone barriers work correctly +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -Xint +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -XX:-TieredCompilation +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -XX:TieredStopAtLevel=1 +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -XX:TieredStopAtLevel=4 +- * TestClone +- */ +- +-/* +- * @test TestClone +- * @summary Test clone barriers work correctly +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -Xint +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:-TieredCompilation +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:TieredStopAtLevel=1 +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:TieredStopAtLevel=4 +- * TestClone +- */ +- +-/* +- * @test TestClone +- * @summary Test clone barriers work correctly +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm 
-XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -Xint +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:-TieredCompilation +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:TieredStopAtLevel=1 +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:TieredStopAtLevel=4 +- * TestClone +- */ +- +-/* +- * @test TestClone +- * @summary Test clone barriers work correctly +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -Xint +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -XX:-TieredCompilation +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -XX:TieredStopAtLevel=1 +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * -XX:TieredStopAtLevel=4 +- * TestClone +- */ +- +-/* +- * @test TestClone +- * @summary Test clone barriers work correctly +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -Xint +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:-TieredCompilation +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:TieredStopAtLevel=1 +- * TestClone +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xms1g -Xmx1g +- * -XX:-UseCompressedOops +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:TieredStopAtLevel=4 +- * TestClone +- */ +- +- +-public class TestClone { +- +- public static void main(String[] args) throws Exception { +- for (int i = 0; i < 10000; i++) { +- Object[] src = new Object[i]; +- for (int c = 0; c < src.length; c++) { +- src[c] = new Object(); +- } +- testWith(src); +- } +- } +- +- static void testWith(Object[] src) { +- Object[] dst = src.clone(); +- int srcLen = src.length; +- int dstLen = dst.length; +- if (srcLen != dstLen) { +- throw new IllegalStateException("Lengths do not match: " + srcLen + " vs " + dstLen); +- } +- for (int c = 0; c < 
src.length; c++) { +- Object s = src[c]; +- Object d = dst[c]; +- if (s != d) { +- throw new IllegalStateException("Elements do not match at " + c + ": " + s + " vs " + d); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java afu8u/hotspot/test/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestExpandedWBLostNullCheckDep.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,56 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- */ +- +-/** +- * @test TestExpandedWBLostNullCheckDep +- * @summary Logic that moves a null check in the expanded barrier may cause a memory access that doesn't depend on the barrier to bypass the null check +- * @key gc +- * @requires vm.flavor == "server" +- * +- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation +- * -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC +- * -XX:+StressGCM -XX:+StressLCM TestExpandedWBLostNullCheckDep +- */ +- +-public class TestExpandedWBLostNullCheckDep { +- +- static void test(int i, int[] arr) { +- // arr.length depends on a null check for arr +- if (i < 0 || i >= arr.length) { +- } +- // The write barrier here also depends on the null check. The +- // null check is moved in the barrier to enable implicit null +- // checks. The null check must not be moved arr.length +- arr[i] = 0x42; +- } +- +- static public void main(String[] args) { +- int[] int_arr = new int[10]; +- for (int i = 0; i < 20000; i++) { +- test(0, int_arr); +- } +- try { +- test(0, null); +- } catch (NullPointerException npe) {} +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java afu8u/hotspot/test/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestMaybeNullUnsafeAccess.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,85 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/** +- * @test TestMaybeNullUnsafeAccess +- * @summary cast on before unsafe access moved in dominating null check null path causes crash +- * @key gc +- * @library /testlibrary +- * +- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation +- * TestMaybeNullUnsafeAccess +- * +- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation +- * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC +- * TestMaybeNullUnsafeAccess +- * +- */ +- +-import sun.misc.Unsafe; +- +-import java.lang.reflect.Field; +- +-import com.oracle.java.testlibrary.*; +- +-public class TestMaybeNullUnsafeAccess { +- +- static final sun.misc.Unsafe UNSAFE = Utils.getUnsafe(); +- static final long F_OFFSET; +- +- static class A { +- int f; +- } +- +- static { +- try { +- Field fField = A.class.getDeclaredField("f"); +- F_OFFSET = UNSAFE.objectFieldOffset(fField); +- } catch (Exception e) { +- throw new RuntimeException(e); +- } +- } +- +- static A test_helper(Object o) { +- return (A) o; +- } +- +- static int test(Object o) { +- int f = 0; +- for (int i = 0; i < 100; i++) { +- A a = test_helper(o); +- f = UNSAFE.getInt(a, F_OFFSET); +- } +- return f; +- } +- +- static public 
void main(String[] args) { +- A a = new A(); +- for (int i = 0; i < 20000; i++) { +- test_helper(null); +- test_helper(a); +- test(a); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestNullCheck.java afu8u/hotspot/test/gc/shenandoah/compiler/TestNullCheck.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestNullCheck.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestNullCheck.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,65 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- */ +- +-/** +- * @test TestNullCheck +- * @summary implicit null check on brooks pointer must not cause crash +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation +- * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC +- * -Xmx4G -XX:HeapBaseMinAddress=32G TestNullCheck +- */ +- +-// HeapBaseMinAddress above forces compressed oops with a base +- +-public class TestNullCheck { +- +- int f; +- +- static int test1(TestNullCheck o) { +- return o.f; +- } +- +- static TestNullCheck static_obj = new TestNullCheck(); +- +- static int test2() { +- return static_obj.f; +- } +- +- static public void main(String[] args) { +- TestNullCheck o = new TestNullCheck(); +- for (int i = 0; i < 20000; i++) { +- test1(o); +- test2(); +- } +- try { +- test1(null); +- } catch (NullPointerException npe) {} +- static_obj = null; +- try { +- test2(); +- } catch (NullPointerException npe) {} +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestReferenceCAS.java afu8u/hotspot/test/gc/shenandoah/compiler/TestReferenceCAS.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestReferenceCAS.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestReferenceCAS.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,122 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestReferenceCAS +- * @summary Shenandoah reference CAS test +- * @key gc +- * +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC TestReferenceCAS +- * @run main/othervm -Diters=100 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -Xint TestReferenceCAS +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-TieredCompilation TestReferenceCAS +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:TieredStopAtLevel=1 TestReferenceCAS +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:TieredStopAtLevel=4 TestReferenceCAS +- */ +- +-/* +- * @test TestReferenceCAS +- * @summary Shenandoah reference CAS test +- * @key gc +- * @requires (vm.bits == "64") +- * @modules java.base/jdk.internal.misc:+open +- * +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops TestReferenceCAS +- * @run main/othervm -Diters=100 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive 
-XX:+UseShenandoahGC -XX:-UseCompressedOops -Xint TestReferenceCAS +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:-TieredCompilation TestReferenceCAS +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=1 TestReferenceCAS +- * @run main/othervm -Diters=20000 -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahGCHeuristics=aggressive -XX:+UseShenandoahGC -XX:-UseCompressedOops -XX:TieredStopAtLevel=4 TestReferenceCAS +- */ +- +-import java.lang.reflect.Field; +- +-public class TestReferenceCAS { +- +- static final int ITERS = Integer.getInteger("iters", 1); +- static final int WEAK_ATTEMPTS = Integer.getInteger("weakAttempts", 10); +- +- static final sun.misc.Unsafe UNSAFE; +- static final long V_OFFSET; +- +- static { +- try { +- Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe"); +- f.setAccessible(true); +- UNSAFE = (sun.misc.Unsafe) f.get(null); +- } catch (Exception e) { +- throw new RuntimeException("Unable to get Unsafe instance.", e); +- } +- +- try { +- Field vField = TestReferenceCAS.class.getDeclaredField("v"); +- V_OFFSET = UNSAFE.objectFieldOffset(vField); +- } catch (Exception e) { +- throw new RuntimeException(e); +- } +- } +- +- Object v; +- +- private static void assertEquals(boolean a, boolean b, String msg) { +- if (a != b) { +- throw new RuntimeException("a (" + a + ") != b (" + b + "): " + msg); +- } +- } +- +- private static void assertEquals(Object a, Object b, String msg) { +- if (!a.equals(b)) { +- throw new RuntimeException("a (" + a.toString() + ") != b (" + b.toString() + "): " + msg); +- } +- } +- +- public static void main(String[] args) { +- TestReferenceCAS t = new TestReferenceCAS(); +- for (int c = 0; c < 
ITERS; c++) { +- testAccess(t, V_OFFSET); +- } +- } +- +- static void testAccess(Object base, long offset) { +- String foo = new String("foo"); +- String bar = new String("bar"); +- String baz = new String("baz"); +- UNSAFE.putObject(base, offset, "foo"); +- { +- String newval = bar; +- boolean r = UNSAFE.compareAndSwapObject(base, offset, "foo", newval); +- assertEquals(r, true, "success compareAndSwap Object"); +- assertEquals(newval, "bar", "must not destroy newval"); +- Object x = UNSAFE.getObject(base, offset); +- assertEquals(x, "bar", "success compareAndSwap Object value"); +- } +- +- { +- String newval = baz; +- boolean r = UNSAFE.compareAndSwapObject(base, offset, "foo", newval); +- assertEquals(r, false, "failing compareAndSwap Object"); +- assertEquals(newval, "baz", "must not destroy newval"); +- Object x = UNSAFE.getObject(base, offset); +- assertEquals(x, "bar", "failing compareAndSwap Object value"); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestShenandoahCmpPAfterCall.java afu8u/hotspot/test/gc/shenandoah/compiler/TestShenandoahCmpPAfterCall.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestShenandoahCmpPAfterCall.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestShenandoahCmpPAfterCall.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,78 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/** +- * @test +- * @bug 8244663 +- * @summary Shenandoah: C2 assertion fails in Matcher::collect_null_checks +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement +- * -XX:CompileCommand=dontinline,TestShenandoahCmpPAfterCall::not_inlined TestShenandoahCmpPAfterCall +- * +- */ +- +-public class TestShenandoahCmpPAfterCall { +- private static Object field1 = new Object(); +- private static Object field2 = new Object(); +- private static Object o3; +- private static volatile int barrier; +- +- public static void main(String[] args) { +- for (int i = 0; i < 20_000; i++) { +- test(); +- } +- } +- +- private static void test() { +- Object o1 = null; +- Object o2 = field2; +- try { +- not_inlined(); +- o1 = field1; +- if (o1 == o2) { +- +- } +- } catch (Exception1 ex1) { +- o1 = field1; +- if (o1 == o2) { +- +- } +- } +- barrier = 42; +- if (o1 == o2) { +- +- } +- } +- +- static int count = 0; +- private static void not_inlined() throws Exception1 { +- count++; +- if ((count % 100) == 0) { +- throw new Exception1(); +- } +- } +- +- private static class Exception1 extends Exception { +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestUnsafeOffheapSwap.java afu8u/hotspot/test/gc/shenandoah/compiler/TestUnsafeOffheapSwap.java +--- 
openjdk/hotspot/test/gc/shenandoah/compiler/TestUnsafeOffheapSwap.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestUnsafeOffheapSwap.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,119 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- */ +- +-/** +- * @test TestUnsafeOffheapSwap +- * @summary Miscompilation in Unsafe off-heap swap routines +- * @key gc +- * +- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:-TieredCompilation +- * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC +- * TestUnsafeOffheapSwap +- */ +- +-import java.util.*; +-import java.lang.reflect.*; +-import sun.misc.Unsafe; +- +-public class TestUnsafeOffheapSwap { +- +- static final int SIZE = 10000; +- static final long SEED = 1; +- +- static final sun.misc.Unsafe UNSAFE; +- static final int SCALE; +- +- static { +- try { +- Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe"); +- f.setAccessible(true); +- UNSAFE = (sun.misc.Unsafe) f.get(null); +- SCALE = UNSAFE.ARRAY_INT_INDEX_SCALE; +- } catch (Exception e) { +- throw new RuntimeException("Unable to get Unsafe instance.", e); +- } +- } +- +- static Memory mem; +- static int[] arr; +- +- public static void main(String[] args) throws Exception { +- // Bug is exposed when memory.addr is not known statically +- mem = new Memory(SIZE*SCALE); +- arr = new int[SIZE]; +- +- for (int i = 0; i < 10; i++) { +- test(); +- } +- } +- +- static void test() { +- Random rnd = new Random(SEED); +- for (int i = 0; i < SIZE; i++) { +- int value = rnd.nextInt(); +- mem.setInt(i, value); +- arr[i] = value; +- } +- +- for (int i = 0; i < SIZE; i++) { +- if (arr[i] != mem.getInt(i)) { +- throw new IllegalStateException("TESTBUG: Values mismatch before swaps"); +- } +- } +- +- for (int i = 1; i < SIZE; i++) { +- mem.swap(i - 1, i); +- int tmp = arr[i - 1]; +- arr[i - 1] = arr[i]; +- arr[i] = tmp; +- } +- +- for (int i = 0; i < SIZE; i++) { +- if (arr[i] != mem.getInt(i)) { +- throw new IllegalStateException("Values mismatch after swaps"); +- } +- } +- } +- +- static class Memory { +- private final long addr; +- +- Memory(int size) { +- addr = UNSAFE.allocateMemory(size); +- } +- +- public int getInt(int idx) { +- return UNSAFE.getInt(addr + idx*SCALE); 
+- } +- +- public void setInt(int idx, int val) { +- UNSAFE.putInt(addr + idx*SCALE, val); +- } +- +- public void swap(int a, int b) { +- int tmp = getInt(a); +- setInt(a, getInt(b)); +- setInt(b, tmp); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/compiler/TestWriteBarrierClearControl.java afu8u/hotspot/test/gc/shenandoah/compiler/TestWriteBarrierClearControl.java +--- openjdk/hotspot/test/gc/shenandoah/compiler/TestWriteBarrierClearControl.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/compiler/TestWriteBarrierClearControl.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,65 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- */ +- +-/** +- * @test TestWriteBarrierClearControl +- * @summary Clearing control during final graph reshape causes memory barrier to loose dependency on null check +- * @key gc +- * @requires vm.flavor == "server" +- * +- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:-TieredCompilation +- * -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC +- * -XX:+UnlockDiagnosticVMOptions -XX:+StressLCM -XX:+StressGCM +- * TestWriteBarrierClearControl +- * +- */ +-public class TestWriteBarrierClearControl { +- +- int f; +- +- static void test1(TestWriteBarrierClearControl o) { +- o.f = 0x42; +- } +- +- static TestWriteBarrierClearControl fo = new TestWriteBarrierClearControl(); +- +- static void test2() { +- TestWriteBarrierClearControl o = fo; +- o.f = 0x42; +- } +- +- static public void main(String[] args) { +- TestWriteBarrierClearControl o = new TestWriteBarrierClearControl(); +- for (int i = 0; i < 20000; i++) { +- test1(o); +- test2(); +- } +- try { +- test1(null); +- } catch (NullPointerException npe) {} +- fo = null; +- try { +- test2(); +- } catch (NullPointerException npe) {} +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/libTestCriticalNative.c afu8u/hotspot/test/gc/shenandoah/jni/libTestCriticalNative.c +--- openjdk/hotspot/test/gc/shenandoah/jni/libTestCriticalNative.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/libTestCriticalNative.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,121 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include "jni.h" +- +-JNIEXPORT jlong JNICALL JavaCritical_TestCriticalNativeStress_sum1 +- (jint length, jlong* a) { +- jlong sum = 0; +- jint index; +- for (index = 0; index < length; index ++) { +- sum += a[index]; +- } +- +- return sum; +-} +- +-JNIEXPORT jlong JNICALL JavaCritical_TestCriticalNativeStress_sum2 +- (jlong a1, jint a2_length, jint* a2, jint a4_length, jint* a4, jint a6_length, jlong* a6, jint a8_length, jint* a8) { +- jlong sum = a1; +- jint index; +- for (index = 0; index < a2_length; index ++) { +- sum += a2[index]; +- } +- +- for (index = 0; index < a4_length; index ++) { +- sum += a4[index]; +- } +- +- for (index = 0; index < a6_length; index ++) { +- sum += a6[index]; +- } +- +- for (index = 0; index < a8_length; index ++) { +- sum += a8[index]; +- } +- return sum; +-} +- +-JNIEXPORT jlong JNICALL Java_TestCriticalNativeStress_sum1 +- (JNIEnv *env, jclass jclazz, jlongArray a) { +- jlong sum = 0; +- jsize len = (*env)->GetArrayLength(env, a); +- jsize index; +- jlong* arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a, 0); +- for (index = 0; index < len; index ++) { +- sum += arr[index]; +- } +- +- (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0); +- return sum; +-} +- +-JNIEXPORT jlong JNICALL Java_TestCriticalNativeStress_sum2 +- (JNIEnv *env, jclass jclazz, jlong a1, jintArray a2, jintArray a3, jlongArray a4, jintArray a5) { +- jlong sum = a1; +- jsize index; +- jsize len = 
(*env)->GetArrayLength(env, a2); +- jint* a2_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a2, 0); +- for (index = 0; index < len; index ++) { +- sum += a2_arr[index]; +- } +- (*env)->ReleasePrimitiveArrayCritical(env, a2, a2_arr, 0); +- +- len = (*env)->GetArrayLength(env, a3); +- jint* a3_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a3, 0); +- for (index = 0; index < len; index ++) { +- sum += a3_arr[index]; +- } +- (*env)->ReleasePrimitiveArrayCritical(env, a3, a3_arr, 0); +- +- len = (*env)->GetArrayLength(env, a4); +- jlong* a4_arr = (jlong*)(*env)->GetPrimitiveArrayCritical(env, a4, 0); +- for (index = 0; index < len; index ++) { +- sum += a4_arr[index]; +- } +- (*env)->ReleasePrimitiveArrayCritical(env, a4, a4_arr, 0); +- +- len = (*env)->GetArrayLength(env, a5); +- jint* a5_arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a5, 0); +- for (index = 0; index < len; index ++) { +- sum += a5_arr[index]; +- } +- (*env)->ReleasePrimitiveArrayCritical(env, a5, a5_arr, 0); +- +- return sum; +-} +- +-JNIEXPORT jboolean JNICALL JavaCritical_TestCriticalNativeArgs_isNull +- (jint length, jint* a) { +- return (a == NULL) && (length == 0); +-} +- +-JNIEXPORT jboolean JNICALL Java_TestCriticalNativeArgs_isNull +- (JNIEnv *env, jclass jclazz, jintArray a) { +- jboolean is_null; +- jsize len = (*env)->GetArrayLength(env, a); +- jint* arr = (jint*)(*env)->GetPrimitiveArrayCritical(env, a, 0); +- is_null = (arr == NULL) && (len == 0); +- (*env)->ReleasePrimitiveArrayCritical(env, a, arr, 0); +- return is_null; +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/libTestJNICritical.c afu8u/hotspot/test/gc/shenandoah/jni/libTestJNICritical.c +--- openjdk/hotspot/test/gc/shenandoah/jni/libTestJNICritical.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/libTestJNICritical.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,35 +0,0 @@ +-/* +- * Copyright (c) 2016, 2017, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include +-#include +- +-JNIEXPORT void JNICALL +-Java_TestJNICritical_copyAtoB(JNIEnv *env, jclass unused, jintArray a, jintArray b) { +- jint len = (*env)->GetArrayLength(env, a); +- jint* aa = (*env)->GetPrimitiveArrayCritical(env, a, 0); +- jint* bb = (*env)->GetPrimitiveArrayCritical(env, b, 0); +- memcpy(bb, aa, len * sizeof(jint)); +- (*env)->ReleasePrimitiveArrayCritical(env, b, bb, 0); +- (*env)->ReleasePrimitiveArrayCritical(env, a, aa, 0); +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/libTestJNIGlobalRefs.c afu8u/hotspot/test/gc/shenandoah/jni/libTestJNIGlobalRefs.c +--- openjdk/hotspot/test/gc/shenandoah/jni/libTestJNIGlobalRefs.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/libTestJNIGlobalRefs.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,48 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include +-#include +- +-jobject global_ref = NULL; +-jobject weak_global_ref = NULL; +- +-JNIEXPORT void JNICALL +-Java_TestJNIGlobalRefs_makeGlobalRef(JNIEnv *env, jclass unused, jobject o) { +- global_ref = (*env)->NewGlobalRef(env, o); +-} +- +-JNIEXPORT void JNICALL +-Java_TestJNIGlobalRefs_makeWeakGlobalRef(JNIEnv *env, jclass unused, jobject o) { +- weak_global_ref = (*env)->NewWeakGlobalRef(env, o); +-} +- +-JNIEXPORT jobject JNICALL +-Java_TestJNIGlobalRefs_readGlobalRef(JNIEnv *env, jclass unused) { +- return global_ref; +-} +- +-JNIEXPORT jobject JNICALL +-Java_TestJNIGlobalRefs_readWeakGlobalRef(JNIEnv *env, jclass unused) { +- return weak_global_ref; +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/libTestPinnedGarbage.c afu8u/hotspot/test/gc/shenandoah/jni/libTestPinnedGarbage.c +--- openjdk/hotspot/test/gc/shenandoah/jni/libTestPinnedGarbage.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/libTestPinnedGarbage.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,37 +0,0 @@ +-/* +- * 
Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-#include +-#include +- +-static jint* pinned; +- +-JNIEXPORT void JNICALL +-Java_TestPinnedGarbage_pin(JNIEnv *env, jclass unused, jintArray a) { +- pinned = (*env)->GetPrimitiveArrayCritical(env, a, 0); +-} +- +-JNIEXPORT void JNICALL +-Java_TestPinnedGarbage_unpin(JNIEnv *env, jclass unused, jintArray a) { +- (*env)->ReleasePrimitiveArrayCritical(env, a, pinned, 0); +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.java afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.java +--- openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,42 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-public class TestCriticalNativeArgs { +- static { +- System.loadLibrary("TestCriticalNative"); +- } +- +- static native boolean isNull(int[] a); +- +- public static void main(String[] args) { +- int[] arr = new int[2]; +- +- if (isNull(arr)) { +- throw new RuntimeException("Should not be null"); +- } +- +- if (!isNull(null)) { +- throw new RuntimeException("Should be null"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.sh afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.sh +--- openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeArgs.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,120 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. +-# +- +-## +-## @test +-## @requires (os.arch == "x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386") +-## @summary test JNI critical arrays support in Shenandoah +-## @run shell/timeout=480 TestCriticalNativeArgs.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +-THIS_DIR=. 
+- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestCriticalNativeArgs.java +- +-# default target 64-bits +-GCC_TARGET_BITS="" +-if [ "$VM_BITS" = "32" ]; then +- GCC_TARGET_BITS="-m32" +-fi +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared ${GCC_TARGET_BITS} \ +- -o ${THIS_DIR}${FS}libTestCriticalNative.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestCriticalNative.c +- +-# run the java test in the background +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeArgs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeArgs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -Xmx256M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeArgs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeArgs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? 
-ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.java afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.java +--- openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,170 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-import java.util.Random; +- +-public class TestCriticalNativeStress { +- private static Random rand = new Random(); +- +- static { +- System.loadLibrary("TestCriticalNative"); +- } +- +- static final int CYCLES = 50; +- static final int THREAD_PER_CASE = 1; +- +- static native long sum1(long[] a); +- +- // More than 6 parameters +- static native long sum2(long a1, int[] a2, int[] a3, long[] a4, int[] a5); +- +- static long sum(long[] a) { +- long sum = 0; +- for (int index = 0; index < a.length; index++) { +- sum += a[index]; +- } +- return sum; +- } +- +- static long sum(int[] a) { +- long sum = 0; +- for (int index = 0; index < a.length; index++) { +- sum += a[index]; +- } +- return sum; +- } +- +- private static volatile String garbage_array[]; +- +- static void create_garbage(int len) { +- len = Math.max(len, 1024); +- String array[] = new String[len]; +- for (int index = 0; index < len; index++) { +- array[index] = "String " + index; +- } +- garbage_array = array; +- } +- +- static void run_test_case1() { +- int length = rand.nextInt(50) + 1; +- long[] arr = new long[length]; +- for (int index = 0; index < length; index++) { +- arr[index] = rand.nextLong() % 10002; +- } +- +- for (int index = 0; index < length; index++) { +- create_garbage(index); +- } +- +- long native_sum = sum1(arr); +- long java_sum = sum(arr); +- if (native_sum != java_sum) { +- StringBuffer sb = new StringBuffer("Sums do not match: native = ") +- .append(native_sum).append(" java = ").append(java_sum); +- +- throw new RuntimeException(sb.toString()); +- } +- } +- +- static void run_test_case2() { +- int index; +- long a1 = rand.nextLong() % 10245; +- +- int a2_length = rand.nextInt(50) + 1; +- int[] a2 = new int[a2_length]; +- for (index = 0; index < a2_length; index++) { +- a2[index] = rand.nextInt(106); +- } +- +- int a3_length = rand.nextInt(150) + 1; +- int[] a3 = new int[a3_length]; +- for (index = 0; index < a3_length; index++) { +- a3[index] = rand.nextInt(3333); 
+- } +- +- int a4_length = rand.nextInt(200) + 1; +- long[] a4 = new long[a4_length]; +- for (index = 0; index < a4_length; index++) { +- a4[index] = rand.nextLong() % 12322; +- } +- +- int a5_length = rand.nextInt(350) + 1; +- int[] a5 = new int[a5_length]; +- for (index = 0; index < a5_length; index++) { +- a5[index] = rand.nextInt(3333); +- } +- +- for (index = 0; index < a1; index++) { +- create_garbage(index); +- } +- +- long native_sum = sum2(a1, a2, a3, a4, a5); +- long java_sum = a1 + sum(a2) + sum(a3) + sum(a4) + sum(a5); +- if (native_sum != java_sum) { +- StringBuffer sb = new StringBuffer("Sums do not match: native = ") +- .append(native_sum).append(" java = ").append(java_sum); +- +- throw new RuntimeException(sb.toString()); +- } +- } +- +- static class Case1Runner extends Thread { +- public Case1Runner() { +- start(); +- } +- +- public void run() { +- for (int index = 0; index < CYCLES; index++) { +- run_test_case1(); +- } +- } +- } +- +- static class Case2Runner extends Thread { +- public Case2Runner() { +- start(); +- } +- +- public void run() { +- for (int index = 0; index < CYCLES; index++) { +- run_test_case2(); +- } +- } +- } +- +- public static void main(String[] args) { +- Thread[] thrs = new Thread[THREAD_PER_CASE * 2]; +- for (int index = 0; index < thrs.length; index = index + 2) { +- thrs[index] = new Case1Runner(); +- thrs[index + 1] = new Case2Runner(); +- } +- +- for (int index = 0; index < thrs.length; index++) { +- try { +- thrs[index].join(); +- } catch (Exception e) { +- e.printStackTrace(); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.sh afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.sh +--- openjdk/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestCriticalNativeStress.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,122 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2016, 2020, 
Oracle and/or its affiliates. All rights reserved. +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. +-# +- +-## +-## @test +-## @requires (os.arch == "x86_64" | os.arch == "amd64" | os.arch=="x86" | os.arch=="i386") +-## @summary test JNI critical arrays support in Shenandoah +-## @run shell/timeout=480 TestCriticalNativeStress.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +- +-THIS_DIR=. 
+- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestCriticalNativeStress.java +- +-# default target 64-bits +-GCC_TARGET_BITS="" +-if [ "$VM_BITS" = "32" ]; then +- GCC_TARGET_BITS="-m32" +-fi +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared ${GCC_TARGET_BITS} \ +- -o ${THIS_DIR}${FS}libTestCriticalNative.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestCriticalNative.c +- +-# run the java test in the background +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeStress" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeStress" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -Xmx256M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeStress" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -Xcomp -Xmx512M -XX:+CriticalJNINatives \ +- -Djava.library.path=${THIS_DIR}${FS} TestCriticalNativeStress" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? 
-ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestJNICritical.java afu8u/hotspot/test/gc/shenandoah/jni/TestJNICritical.java +--- openjdk/hotspot/test/gc/shenandoah/jni/TestJNICritical.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestJNICritical.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,63 +0,0 @@ +-/* +- * Copyright (c) 2016, 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-import java.util.Arrays; +- +-public class TestJNICritical { +- static { +- System.loadLibrary("TestJNICritical"); +- } +- +- private static final int NUM_RUNS = 10000; +- private static final int ARRAY_SIZE = 10000; +- private static int[] a; +- private static int[] b; +- +- private static native void copyAtoB(int[] a, int[] b); +- +- public static void main(String[] args) { +- a = new int[ARRAY_SIZE]; +- b = new int[ARRAY_SIZE]; +- for (int i = 0; i < NUM_RUNS; i++) { +- test(); +- } +- } +- +- private static void test() { +- int[] a1 = new int[ARRAY_SIZE]; +- int[] b1 = new int[ARRAY_SIZE]; +- fillArray(a); +- copyAtoB(a, b); +- copyAtoB(a1, b1); // Don't optimize out garbage arrays. +- if (!Arrays.equals(a, b)) { +- throw new RuntimeException("arrays not equal"); +- } +- } +- +- private static void fillArray(int[] array) { +- for (int i = 0; i < ARRAY_SIZE; i++) { +- int val = (int) (Math.random() * Integer.MAX_VALUE); +- array[i] = val; +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestJNICritical.sh afu8u/hotspot/test/gc/shenandoah/jni/TestJNICritical.sh +--- openjdk/hotspot/test/gc/shenandoah/jni/TestJNICritical.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestJNICritical.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,96 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. +-# +- +-## +-## @test +-## @summary test JNI critical arrays support in Shenandoah +-## @run shell/timeout=120 TestJNICritical.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +-# Unfortunately, configurations cross-compiled to 32 bits would +-# fail with bitness mismatch, when compiled with platform gcc. +-# This would be fixed with /native support in JDK-8072842. +-if [ "$VM_BITS" = "32" ]; then +- echo "Test passed; only reliable on 64-bit" +- exit 0; +-fi +- +-THIS_DIR=. 
+- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestJNICritical.java +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared \ +- -o ${THIS_DIR}${FS}libTestJNICritical.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestJNICritical.c +- +-# run the java test in the background +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahVerify -XX:ShenandoahGCHeuristics=aggressive \ +- -Djava.library.path=${THIS_DIR}${FS} TestJNICritical" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive \ +- -Djava.library.path=${THIS_DIR}${FS} TestJNICritical" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.java afu8u/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.java +--- openjdk/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,100 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-import java.util.Arrays; +-import java.util.Random; +- +-public class TestJNIGlobalRefs { +- static { +- System.loadLibrary("TestJNIGlobalRefs"); +- } +- +- private static final int TIME_MSEC = 120000; +- private static final int ARRAY_SIZE = 10000; +- +- private static native void makeGlobalRef(Object o); +- private static native void makeWeakGlobalRef(Object o); +- private static native Object readGlobalRef(); +- private static native Object readWeakGlobalRef(); +- +- public static void main(String[] args) throws Throwable { +- seedGlobalRef(); +- seedWeakGlobalRef(); +- long start = System.currentTimeMillis(); +- long current = start; +- while (current - start < TIME_MSEC) { +- testGlobal(); +- testWeakGlobal(); +- Thread.sleep(1); +- current = System.currentTimeMillis(); +- } +- } +- +- private static void seedGlobalRef() { +- int[] a = new int[ARRAY_SIZE]; +- fillArray(a, 1337); +- makeGlobalRef(a); +- } +- +- private static void seedWeakGlobalRef() { +- int[] a = new int[ARRAY_SIZE]; +- fillArray(a, 8080); +- makeWeakGlobalRef(a); +- } +- +- private static void testGlobal() { +- int[] a = (int[]) readGlobalRef(); +- checkArray(a, 1337); +- } +- +- private static void testWeakGlobal() { +- int[] a = (int[]) readWeakGlobalRef(); +- if (a != null) { +- checkArray(a, 8080); +- } else { +- // weak reference is cleaned, recreate: +- seedWeakGlobalRef(); +- } +- } +- +- private static void fillArray(int[] array, int seed) { +- Random r = new Random(seed); +- for (int i = 0; i < ARRAY_SIZE; i++) { +- array[i] = r.nextInt(); +- } +- } +- +- private static 
void checkArray(int[] array, int seed) { +- Random r = new Random(seed); +- if (array.length != ARRAY_SIZE) { +- throw new IllegalStateException("Illegal array length: " + array.length + ", but expected " + ARRAY_SIZE); +- } +- for (int i = 0; i < ARRAY_SIZE; i++) { +- int actual = array[i]; +- int expected = r.nextInt(); +- if (actual != expected) { +- throw new IllegalStateException("Incorrect array data: " + actual + ", but expected " + expected); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.sh afu8u/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.sh +--- openjdk/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestJNIGlobalRefs.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,120 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. 
+-# +- +-## +-## @test +-## @summary Test JNI Global Refs with Shenandoah +-## @run shell/timeout=720 TestJNIGlobalRefs.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +-# Unfortunately, configurations cross-compiled to 32 bits would +-# fail with bitness mismatch, when compiled with platform gcc. +-# This would be fixed with /native support in JDK-8072842. +-if [ "$VM_BITS" = "32" ]; then +- echo "Test passed; only reliable on 64-bit" +- exit 0; +-fi +- +-THIS_DIR=. +- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestJNIGlobalRefs.java +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared \ +- -o ${THIS_DIR}${FS}libTestJNIGlobalRefs.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestJNIGlobalRefs.c +- +-# run the java test in the background +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive -XX:+ShenandoahVerify \ +- -Djava.library.path=${THIS_DIR}${FS} TestJNIGlobalRefs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive \ +- -Djava.library.path=${THIS_DIR}${FS} TestJNIGlobalRefs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? 
-ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahVerify \ +- -Djava.library.path=${THIS_DIR}${FS} TestJNIGlobalRefs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive \ +- -Djava.library.path=${THIS_DIR}${FS} TestJNIGlobalRefs" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.java afu8u/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.java +--- openjdk/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,71 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-import java.util.Arrays; +-import java.util.concurrent.*; +- +-public class TestPinnedGarbage { +- static { +- System.loadLibrary("TestPinnedGarbage"); +- } +- +- private static final int NUM_RUNS = 1_000; +- private static final int OBJS_COUNT = 1 << 10; +- private static final int GARBAGE_COUNT = 1 << 18; +- +- private static native void pin(int[] a); +- private static native void unpin(int[] a); +- +- public static void main(String[] args) { +- ThreadLocalRandom rng = ThreadLocalRandom.current(); +- for (int i = 0; i < NUM_RUNS; i++) { +- test(rng); +- } +- } +- +- private static void test(ThreadLocalRandom rng) { +- Object[] objs = new Object[OBJS_COUNT]; +- for (int i = 0; i < OBJS_COUNT; i++) { +- objs[i] = new MyClass(); +- } +- +- int[] cog = new int[10]; +- int cogIdx = rng.nextInt(OBJS_COUNT); +- objs[cogIdx] = cog; +- pin(cog); +- +- for (int i = 0; i < GARBAGE_COUNT; i++) { +- int rIdx = rng.nextInt(OBJS_COUNT); +- if (rIdx != cogIdx) { +- objs[rIdx] = new MyClass(); +- } +- } +- +- unpin(cog); +- } +- +- public static class MyClass { +- public Object ref = new Object(); +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.sh afu8u/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.sh +--- openjdk/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jni/TestPinnedGarbage.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,120 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. +-# +- +-## +-## @test +-## @summary test Test that garbage in the pinned region does not crash VM +-## @run shell/timeout=480 TestPinnedGarbage.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +-# Unfortunately, configurations cross-compiled to 32 bits would +-# fail with bitness mismatch, when compiled with platform gcc. +-# This would be fixed with /native support in JDK-8072842. +-if [ "$VM_BITS" = "32" ]; then +- echo "Test passed; only reliable on 64-bit" +- exit 0; +-fi +- +-THIS_DIR=. 
+- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestPinnedGarbage.java +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared \ +- -o ${THIS_DIR}${FS}libTestPinnedGarbage.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestPinnedGarbage.c +- +-# run the java test in the background +-cmd="${TESTJAVA}${FS}bin${FS}java -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahVerify -XX:+ShenandoahDegeneratedGC -XX:ShenandoahGCMode=passive \ +- -Djava.library.path=${THIS_DIR}${FS} TestPinnedGarbage" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahVerify -XX:-ShenandoahDegeneratedGC -XX:ShenandoahGCMode=passive \ +- -Djava.library.path=${THIS_DIR}${FS} TestPinnedGarbage" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahVerify \ +- -Djava.library.path=${THIS_DIR}${FS} TestPinnedGarbage" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-cmd="${TESTJAVA}${FS}bin${FS}java -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive \ +- -Djava.library.path=${THIS_DIR}${FS} TestPinnedGarbage" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? 
-ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +diff -uNr openjdk/hotspot/test/gc/shenandoah/jvmti/libTestGetLoadedClasses.c afu8u/hotspot/test/gc/shenandoah/jvmti/libTestGetLoadedClasses.c +--- openjdk/hotspot/test/gc/shenandoah/jvmti/libTestGetLoadedClasses.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jvmti/libTestGetLoadedClasses.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,114 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include +-#include +-#include "jvmti.h" +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-#ifndef JNI_ENV_ARG +- +-#ifdef __cplusplus +-#define JNI_ENV_ARG(x, y) y +-#define JNI_ENV_PTR(x) x +-#else +-#define JNI_ENV_ARG(x,y) x, y +-#define JNI_ENV_PTR(x) (*x) +-#endif +- +-#endif +- +-static const char *EXC_CNAME = "java/lang/Exception"; +- +-static jvmtiEnv *jvmti = NULL; +- +-static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved); +- +-JNIEXPORT +-jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) { +- return Agent_Initialize(jvm, options, reserved); +-} +- +-JNIEXPORT +-jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) { +- return Agent_Initialize(jvm, options, reserved); +-} +- +-JNIEXPORT +-jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) { +- return JNI_VERSION_1_8; +-} +- +-static +-jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) { +- jvmtiCapabilities capabilities; +- jint res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti), +- JVMTI_VERSION); +- if (res != JNI_OK || jvmti == NULL) { +- printf(" Error: wrong result of a valid call to GetEnv!\n"); +- return JNI_ERR; +- } +- +- (void)memset(&capabilities, 0, sizeof(capabilities)); +- capabilities.can_tag_objects = 1; +- capabilities.can_generate_garbage_collection_events = 1; +- (*jvmti)->AddCapabilities(jvmti, &capabilities); +- +- return JNI_OK; +-} +- +-static +-void throw_exc(JNIEnv *env, char *msg) { +- jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME)); +- jint rt = JNI_OK; +- +- if (exc_class == NULL) { +- printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME); +- return; +- } +- rt = JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg); +- if (rt == JNI_ERR) { +- printf("throw_exc: Error in JNI ThrowNew(env, %s)\n", msg); +- } +-} +- +-JNIEXPORT jint JNICALL +-Java_TestGetLoadedClasses_getLoadedClasses(JNIEnv *env, jclass cls) { +- jint totalCount 
= 0; +- jclass* classes; +- if (jvmti == NULL) { +- throw_exc(env, "JVMTI client was not properly loaded!\n"); +- return 0; +- } +- +- (*jvmti)->GetLoadedClasses(jvmti, &totalCount, &classes); +- (*jvmti)->Deallocate(jvmti, (unsigned char*)classes); +- return totalCount; +-} +- +-#ifdef __cplusplus +-} +-#endif +diff -uNr openjdk/hotspot/test/gc/shenandoah/jvmti/libTestHeapDump.c afu8u/hotspot/test/gc/shenandoah/jvmti/libTestHeapDump.c +--- openjdk/hotspot/test/gc/shenandoah/jvmti/libTestHeapDump.c 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jvmti/libTestHeapDump.c 1970-01-01 08:00:00.000000000 +0800 +@@ -1,130 +0,0 @@ +-/* +- * Copyright (c) 2017, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-#include +-#include +-#include "jvmti.h" +- +-#ifdef __cplusplus +-extern "C" { +-#endif +- +-#ifndef JNI_ENV_ARG +- +-#ifdef __cplusplus +-#define JNI_ENV_ARG(x, y) y +-#define JNI_ENV_PTR(x) x +-#else +-#define JNI_ENV_ARG(x,y) x, y +-#define JNI_ENV_PTR(x) (*x) +-#endif +- +-#endif +- +-#define TranslateError(err) "JVMTI error" +- +-#define PASSED 0 +-#define FAILED 2 +- +-static const char *EXC_CNAME = "java/lang/Exception"; +- +-static jvmtiEnv *jvmti = NULL; +-static jint result = PASSED; +- +-static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved); +- +-JNIEXPORT +-jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) { +- return Agent_Initialize(jvm, options, reserved); +-} +- +-JNIEXPORT +-jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) { +- return Agent_Initialize(jvm, options, reserved); +-} +- +-JNIEXPORT +-jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) { +- return JNI_VERSION_1_8; +-} +- +-static +-jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) { +- jvmtiCapabilities capabilities; +- jint res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti), +- JVMTI_VERSION); +- if (res != JNI_OK || jvmti == NULL) { +- printf(" Error: wrong result of a valid call to GetEnv!\n"); +- return JNI_ERR; +- } +- +- (void)memset(&capabilities, 0, sizeof(capabilities)); +- capabilities.can_tag_objects = 1; +- capabilities.can_generate_garbage_collection_events = 1; +- (*jvmti)->AddCapabilities(jvmti, &capabilities); +- +- return JNI_OK; +-} +- +-static +-void throw_exc(JNIEnv *env, char *msg) { +- jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME)); +- jint rt = JNI_OK; +- +- if (exc_class == NULL) { +- printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME); +- return; +- } +- rt = JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg); +- if (rt == JNI_ERR) { +- printf("throw_exc: Error in JNI ThrowNew(env, %s)\n", msg); +- } +-} 
+- +-static jint JNICALL heap_iter_callback(jlong class_tag, +- jlong size, +- jlong* tag_ptr, +- jint length, +- void* user_data) { +- (*((jint*)(user_data)))++; +- return JVMTI_VISIT_OBJECTS; +-} +- +-JNIEXPORT jint JNICALL +-Java_TestHeapDump_heapdump(JNIEnv *env, jclass cls, jclass filter_cls) { +- jvmtiHeapCallbacks callbacks; +- jint totalCount = 0; +- if (jvmti == NULL) { +- throw_exc(env, "JVMTI client was not properly loaded!\n"); +- return 0; +- } +- +- (void)memset(&callbacks, 0, sizeof(callbacks)); +- callbacks.heap_iteration_callback = &heap_iter_callback; +- (*jvmti)->IterateThroughHeap(jvmti, 0, filter_cls, &callbacks, (const void *)&totalCount); +- return totalCount; +-} +- +-#ifdef __cplusplus +-} +-#endif +diff -uNr openjdk/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.java afu8u/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.java +--- openjdk/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,101 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-import java.io.*; +-import java.nio.file.*; +- +-public class TestGetLoadedClasses { +- +- private static final int NUM_ITER = 1000; +- private static final int NUM_CLASSES = 10000; +- +- static { +- try { +- System.loadLibrary("TestGetLoadedClasses"); +- } catch (UnsatisfiedLinkError ule) { +- System.err.println("Could not load TestGetLoadedClasses library"); +- System.err.println("java.library.path: " +- + System.getProperty("java.library.path")); +- throw ule; +- } +- } +- +- native static int getLoadedClasses(); +- +- static Class[] classes = new Class[NUM_CLASSES]; +- +- static class Dummy { +- } +- +- static class MyClassLoader extends ClassLoader { +- final String path; +- +- MyClassLoader(String path) { +- this.path = path; +- } +- +- public Class loadClass(String name) throws ClassNotFoundException { +- try { +- File f = new File(path, name + ".class"); +- if (!f.exists()) { +- return super.loadClass(name); +- } +- +- Path path = Paths.get(f.getAbsolutePath()); +- byte[] cls = Files.readAllBytes(path); +- return defineClass(name, cls, 0, cls.length, null); +- } catch (IOException e) { +- throw new ClassNotFoundException(name); +- } +- } +- } +- +- static Class load(String path) throws Exception { +- ClassLoader cl = new MyClassLoader(path); +- Class c = (Class) Class.forName("TestGetLoadedClasses$Dummy", true, cl); +- if (c.getClassLoader() != cl) { +- throw new IllegalStateException("Should have loaded by target loader"); +- } +- return c; +- } +- +- static void loadClasses() throws Exception { +- String classDir = TestGetLoadedClasses.class.getProtectionDomain().getCodeSource().getLocation().getPath(); +- for (int c = 0; c < NUM_CLASSES; c++) { +- classes[c] = load(classDir); +- } +- } +- +- public static void main(String args[]) throws Exception { +- loadClasses(); 
+- new TestGetLoadedClasses().run(); +- } +- +- volatile Object sink; +- public void run() throws Exception { +- for (int i = 0; i < NUM_ITER; i++) { +- sink = new byte[1000000]; +- int count = getLoadedClasses(); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.sh afu8u/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.sh +--- openjdk/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jvmti/TestGetLoadedClasses.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,102 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2019, Red Hat, Inc. All rights reserved. +-# +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. +-# +- +-## +-## @test +-## @summary test JVMTI GetLoadedClasses in Shenandoah +-## @run shell/timeout=480 TestGetLoadedClasses.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. 
Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +-# Unfortunately, configurations cross-compiled to 32 bits would +-# fail with bitness mismatch, when compiled with platform gcc. +-# This would be fixed with /native support in JDK-8072842. +-if [ "$VM_BITS" = "32" ]; then +- echo "Test passed; only reliable on 64-bit" +- exit 0; +-fi +- +-THIS_DIR=. +- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestGetLoadedClasses.java +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared \ +- -o ${THIS_DIR}${FS}libTestGetLoadedClasses.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestGetLoadedClasses.c +- +-# run the java test in the background +-cmd="${TESTJAVA}${FS}bin${FS}java -agentpath:./libTestGetLoadedClasses.so -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \ +- -XX:ShenandoahSATBBufferSize=1 -XX:+ClassUnloadingWithConcurrentMark -XX:+ClassUnloading -Djava.library.path=${THIS_DIR}${FS} TestGetLoadedClasses" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-if [ "$VM_BITS" = "64" ]; then +- cmd="${TESTJAVA}${FS}bin${FS}java -agentpath:./libTestGetLoadedClasses.so -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \ +- -XX:ShenandoahSATBBufferSize=1 -XX:+ClassUnloadingWithConcurrentMark -XX:+ClassUnloading -XX:-UseCompressedOops -Djava.library.path=${THIS_DIR}${FS} TestGetLoadedClasses" +- +- echo "$cmd" +- eval $cmd +- +- if [ $? 
-ne 0 ] +- then +- echo "Test Failed" +- exit 1 +- fi +-else +- echo "Test passed; only valid for 64 bits" +- exit 0; +-fi +diff -uNr openjdk/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.java afu8u/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.java +--- openjdk/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,80 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-public class TestHeapDump { +- +- private static final int NUM_ITER = 10000; +- +- private static final int ARRAY_SIZE = 1000; +- +- private static final int EXPECTED_OBJECTS = +- ARRAY_SIZE + // array reachable from instance field +- 1 + // static field root +- 1; // local field root +- +- static { +- try { +- System.loadLibrary("TestHeapDump"); +- } catch (UnsatisfiedLinkError ule) { +- System.err.println("Could not load TestHeapDump library"); +- System.err.println("java.library.path: " +- + System.getProperty("java.library.path")); +- throw ule; +- } +- } +- +- native static int heapdump(Class filterClass); +- +- public static void main(String args[]) { +- new TestHeapDump().run(); +- } +- +- // This root needs to be discovered +- static Object root = new TestObject(); +- +- // This field needs to be discovered +- TestObject[] array; +- +- public void run() { +- array = new TestObject[ARRAY_SIZE]; +- for (int i = 0; i < ARRAY_SIZE; i++) { +- array[i] = new TestObject(); +- } +- TestObject localRoot = new TestObject(); +- for (int i = 0; i < NUM_ITER; i++) { +- int numObjs = heapdump(TestObject.class); +- if (numObjs != EXPECTED_OBJECTS) { +- throw new RuntimeException("Expected " + EXPECTED_OBJECTS + " objects, but got " + numObjs); +- } +- } +- reachabilityFence(array); +- reachabilityFence(localRoot); +- } +- +- // We look for the instances of this class during the heap scan +- public static class TestObject {} +- +- // See Reference.reachabilityFence() implementation in later +- // JDKs. +- static void reachabilityFence(Object obj) {} +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.sh afu8u/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.sh +--- openjdk/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/jvmti/TestHeapDump.sh 1970-01-01 08:00:00.000000000 +0800 +@@ -1,102 +0,0 @@ +-#!/bin/sh +- +-# +-# Copyright (c) 2017, 2018, Red Hat, Inc. 
All rights reserved. +-# +-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +-# +-# This code is free software; you can redistribute it and/or modify it +-# under the terms of the GNU General Public License version 2 only, as +-# published by the Free Software Foundation. +-# +-# This code is distributed in the hope that it will be useful, but WITHOUT +-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +-# version 2 for more details (a copy is included in the LICENSE file that +-# accompanied this code). +-# +-# You should have received a copy of the GNU General Public License version +-# 2 along with this work; if not, write to the Free Software Foundation, +-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +-# +-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +-# or visit www.oracle.com if you need additional information or have any +-# questions. +-# +- +-## +-## @test +-## @summary test JNI critical arrays support in Shenandoah +-## @run shell/timeout=480 TestHeapDump.sh +-## +- +-if [ "${TESTSRC}" = "" ] +-then +- TESTSRC=${PWD} +- echo "TESTSRC not set. Using "${TESTSRC}" as default" +-fi +-echo "TESTSRC=${TESTSRC}" +-## Adding common setup Variables for running shell tests. +-. ${TESTSRC}/../../../test_env.sh +- +-# set platform-dependent variables +-if [ "$VM_OS" = "linux" ]; then +- echo "Testing on linux" +- gcc_cmd=`which gcc` +- if [ "x$gcc_cmd" = "x" ]; then +- echo "WARNING: gcc not found. Cannot execute test." 2>&1 +- exit 0; +- fi +-else +- echo "Test passed; only valid for linux: $VM_OS" +- exit 0; +-fi +- +-# Unfortunately, configurations cross-compiled to 32 bits would +-# fail with bitness mismatch, when compiled with platform gcc. +-# This would be fixed with /native support in JDK-8072842. 
+-if [ "$VM_BITS" = "32" ]; then +- echo "Test passed; only reliable on 64-bit" +- exit 0; +-fi +- +-THIS_DIR=. +- +-cp ${TESTSRC}${FS}*.java ${THIS_DIR} +-${TESTJAVA}${FS}bin${FS}javac TestHeapDump.java +- +-$gcc_cmd -O1 -DLINUX -fPIC -shared \ +- -o ${THIS_DIR}${FS}libTestHeapDump.so \ +- -I${TESTJAVA}${FS}include \ +- -I${TESTJAVA}${FS}include${FS}linux \ +- ${TESTSRC}${FS}libTestHeapDump.c +- +-# run the java test in the background +-cmd="${TESTJAVA}${FS}bin${FS}java -agentpath:./libTestHeapDump.so -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive \ +- -Djava.library.path=${THIS_DIR}${FS} TestHeapDump" +- +-echo "$cmd" +-eval $cmd +- +-if [ $? -ne 0 ] +-then +- echo "Test Failed" +- exit 1 +-fi +- +-if [ "$VM_BITS" = "64" ]; then +- cmd="${TESTJAVA}${FS}bin${FS}java -agentpath:./libTestHeapDump.so -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive \ +- -XX:-UseCompressedOops -Djava.library.path=${THIS_DIR}${FS} TestHeapDump" +- +- echo "$cmd" +- eval $cmd +- +- if [ $? -ne 0 ] +- then +- echo "Test Failed" +- exit 1 +- fi +-else +- echo "Test passed; only valid for 64 bits" +- exit 0; +-fi +diff -uNr openjdk/hotspot/test/gc/shenandoah/mxbeans/TestChurnNotifications.java afu8u/hotspot/test/gc/shenandoah/mxbeans/TestChurnNotifications.java +--- openjdk/hotspot/test/gc/shenandoah/mxbeans/TestChurnNotifications.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/mxbeans/TestChurnNotifications.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,180 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestChurnNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -Dprecise=true +- * TestChurnNotifications +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -Dprecise=true +- * TestChurnNotifications +- */ +- +-/* +- * @test TestChurnNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -Dprecise=false +- * TestChurnNotifications +- */ +- +-/* +- * @test TestChurnNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * 
-Dprecise=false +- * TestChurnNotifications +- */ +- +-/* +- * @test TestChurnNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * -Dprecise=false +- * TestChurnNotifications +- */ +- +-/* +- * @test TestChurnNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * -Dprecise=false +- * TestChurnNotifications +- */ +- +-/* +- * @test TestChurnNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -Dprecise=false +- * TestChurnNotifications +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -Dprecise=false +- * TestChurnNotifications +- */ +- +-import java.util.*; +-import java.util.concurrent.atomic.*; +-import javax.management.*; +-import java.lang.management.*; +-import javax.management.openmbean.*; +- +-import com.sun.management.GarbageCollectionNotificationInfo; +- +-public class TestChurnNotifications { +- +- static final long HEAP_MB = 128; // adjust for test configuration above +- static final long TARGET_MB = Long.getLong("target", 2_000); // 2 Gb allocation +- +- // Should we track the churn precisely? +- // Precise tracking is only reliable when GC is fully stop-the-world. Otherwise, +- // we cannot tell, looking at heap used before/after, what was the GC churn. 
+- static final boolean PRECISE = Boolean.getBoolean("precise"); +- +- static final long M = 1024 * 1024; +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final AtomicLong churnBytes = new AtomicLong(); +- +- NotificationListener listener = new NotificationListener() { +- @Override +- public void handleNotification(Notification n, Object o) { +- if (n.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { +- GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData()); +- Map mapBefore = info.getGcInfo().getMemoryUsageBeforeGc(); +- Map mapAfter = info.getGcInfo().getMemoryUsageAfterGc(); +- +- MemoryUsage before = mapBefore.get("Shenandoah"); +- MemoryUsage after = mapAfter.get("Shenandoah"); +- +- if ((before != null) && (after != null)) { +- long diff = before.getUsed() - after.getUsed(); +- if (diff > 0) { +- churnBytes.addAndGet(diff); +- } +- } +- } +- } +- }; +- +- for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) { +- ((NotificationEmitter) bean).addNotificationListener(listener, null, null); +- } +- +- final int size = 100_000; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size); +- +- long mem = count * (16 + 4 * size); +- +- for (int c = 0; c < count; c++) { +- sink = new int[size]; +- } +- +- System.gc(); +- +- // Wait until notifications start arriving, and then wait some more +- // to catch the ones arriving late. +- while (churnBytes.get() == 0) { +- Thread.sleep(1000); +- } +- Thread.sleep(5000); +- +- long actual = churnBytes.get(); +- +- long minExpected = PRECISE ? 
(mem - HEAP_MB * 1024 * 1024) : 1; +- long maxExpected = mem + HEAP_MB * 1024 * 1024; +- +- String msg = "Expected = [" + minExpected / M + "; " + maxExpected / M + "] (" + mem / M + "), actual = " + actual / M; +- if (minExpected <= actual && actual <= maxExpected) { +- System.out.println(msg); +- } else { +- throw new IllegalStateException(msg); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/mxbeans/TestMemoryMXBeans.java afu8u/hotspot/test/gc/shenandoah/mxbeans/TestMemoryMXBeans.java +--- openjdk/hotspot/test/gc/shenandoah/mxbeans/TestMemoryMXBeans.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/mxbeans/TestMemoryMXBeans.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,83 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/** +- * @test TestMemoryMXBeans +- * @key gc +- * @summary Test JMX memory beans +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g TestMemoryMXBeans -1 1024 +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms1g -Xmx1g TestMemoryMXBeans 1024 1024 +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms128m -Xmx1g TestMemoryMXBeans 128 1024 +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms1g -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 1024 1024 +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms128m -Xmx1g -XX:ShenandoahUncommitDelay=0 TestMemoryMXBeans 128 1024 +- */ +- +-import java.lang.management.*; +-import java.util.*; +- +-public class TestMemoryMXBeans { +- +- public static void main(String[] args) throws Exception { +- if (args.length < 2) { +- throw new IllegalStateException("Should provide expected heap sizes"); +- } +- +- long initSize = 1L * Integer.parseInt(args[0]) * 1024 * 1024; +- long maxSize = 1L * Integer.parseInt(args[1]) * 1024 * 1024; +- +- // wait for GC to uncommit +- Thread.sleep(1000); +- +- testMemoryBean(initSize, maxSize); +- } +- +- public static void testMemoryBean(long initSize, long maxSize) { +- MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean(); +- long heapInit = memoryMXBean.getHeapMemoryUsage().getInit(); +- long heapCommitted = memoryMXBean.getHeapMemoryUsage().getCommitted(); +- long heapMax = memoryMXBean.getHeapMemoryUsage().getMax(); +- long nonHeapInit = memoryMXBean.getNonHeapMemoryUsage().getInit(); +- long nonHeapCommitted = memoryMXBean.getNonHeapMemoryUsage().getCommitted(); +- long nonHeapMax = memoryMXBean.getNonHeapMemoryUsage().getMax(); +- +- if (initSize > 0 && heapInit != initSize) { +- throw new IllegalStateException("Init heap size is wrong: " + heapInit + " vs " + initSize); +- } +- if (maxSize > 0 && heapMax != 
maxSize) { +- throw new IllegalStateException("Max heap size is wrong: " + heapMax + " vs " + maxSize); +- } +- if (initSize > 0 && maxSize > 0 && initSize != maxSize && heapCommitted == heapMax) { +- throw new IllegalStateException("Committed heap size is max: " + heapCommitted + +- " (init: " + initSize + ", max: " + maxSize + ")"); +- } +- if (initSize > 0 && maxSize > 0 && initSize == maxSize && heapCommitted != heapMax) { +- throw new IllegalStateException("Committed heap size is not max: " + heapCommitted + +- " (init: " + initSize + ", max: " + maxSize + ")"); +- } +- if (initSize > 0 && heapCommitted < initSize) { +- throw new IllegalStateException("Committed heap size is below min: " + heapCommitted + +- " (init: " + initSize + ", max: " + maxSize + ")"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/mxbeans/TestMemoryPools.java afu8u/hotspot/test/gc/shenandoah/mxbeans/TestMemoryPools.java +--- openjdk/hotspot/test/gc/shenandoah/mxbeans/TestMemoryPools.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/mxbeans/TestMemoryPools.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,63 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/** +- * @test TestMemoryPools +- * @key gc +- * @summary Test JMX memory pools +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestMemoryPools +- */ +- +-import java.lang.management.*; +-import java.util.*; +- +-public class TestMemoryPools { +- +- public static void main(String[] args) throws Exception { +- List mms = ManagementFactory.getMemoryManagerMXBeans(); +- if (mms == null) { +- throw new RuntimeException("getMemoryManagerMXBeans is null"); +- } +- if (mms.isEmpty()) { +- throw new RuntimeException("getMemoryManagerMXBeans is empty"); +- } +- for (MemoryManagerMXBean mmBean : mms) { +- String[] names = mmBean.getMemoryPoolNames(); +- if (names == null) { +- throw new RuntimeException("getMemoryPoolNames() is null"); +- } +- if (names.length == 0) { +- throw new RuntimeException("getMemoryPoolNames() is empty"); +- } +- for (String name : names) { +- if (name == null) { +- throw new RuntimeException("pool name is null"); +- } +- if (name.length() == 0) { +- throw new RuntimeException("pool name is empty"); +- } +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/mxbeans/TestPauseNotifications.java afu8u/hotspot/test/gc/shenandoah/mxbeans/TestPauseNotifications.java +--- openjdk/hotspot/test/gc/shenandoah/mxbeans/TestPauseNotifications.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/mxbeans/TestPauseNotifications.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,185 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestPauseNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestPauseNotifications +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestPauseNotifications +- */ +- +-/* +- * @test TestPauseNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestPauseNotifications +- */ +- +-/* +- * @test TestPauseNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestPauseNotifications +- */ +- +-/* +- * @test 
TestPauseNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestPauseNotifications +- */ +- +-/* +- * @test TestPauseNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestPauseNotifications +- */ +- +-/* +- * @test TestPauseNotifications +- * @summary Check that MX notifications are reported for all cycles +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestPauseNotifications +- * +- * @run main/othervm -Xmx128m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestPauseNotifications +- */ +- +-import java.util.*; +-import java.util.concurrent.atomic.*; +-import javax.management.*; +-import java.lang.management.*; +-import javax.management.openmbean.*; +-import com.sun.management.GarbageCollectionNotificationInfo; +- +-public class TestPauseNotifications { +- +- static final long HEAP_MB = 128; // adjust for test configuration above +- static final long TARGET_MB = Long.getLong("target", 2_000); // 2 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final AtomicLong pausesDuration = new AtomicLong(); +- final AtomicLong cyclesDuration = new AtomicLong(); +- +- NotificationListener listener = new NotificationListener() { +- @Override +- public void handleNotification(Notification n, Object o) { +- if 
(n.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) { +- GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from((CompositeData) n.getUserData()); +- +- long d = info.getGcInfo().getDuration(); +- +- String name = info.getGcName(); +- if (name.contains("Shenandoah")) { +- if (name.equals("Shenandoah Pauses")) { +- pausesDuration.addAndGet(d); +- } else if (name.equals("Shenandoah Cycles")) { +- cyclesDuration.addAndGet(d); +- } else { +- throw new IllegalStateException("Unknown name: " + name); +- } +- } +- } +- } +- }; +- +- for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) { +- ((NotificationEmitter) bean).addNotificationListener(listener, null, null); +- } +- +- final int size = 100_000; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size); +- +- for (int c = 0; c < count; c++) { +- sink = new int[size]; +- } +- +- // Wait until notifications start arriving, and then wait some more +- // to catch the ones arriving late. 
+- while (pausesDuration.get() == 0) { +- Thread.sleep(1000); +- } +- Thread.sleep(5000); +- +- long pausesActual = pausesDuration.get(); +- long cyclesActual = cyclesDuration.get(); +- +- long minExpected = 1; +- long maxExpected = Long.MAX_VALUE; +- +- { +- String msg = "Pauses expected = [" + minExpected + "; " + maxExpected + "], actual = " + pausesActual; +- if (minExpected <= pausesActual && pausesActual <= maxExpected) { +- System.out.println(msg); +- } else { +- throw new IllegalStateException(msg); +- } +- } +- +- { +- String msg = "Cycles expected = [" + minExpected + "; " + maxExpected + "], actual = " + cyclesActual; +- if (minExpected <= cyclesActual && cyclesActual <= maxExpected) { +- System.out.println(msg); +- } else { +- throw new IllegalStateException(msg); +- } +- } +- +- { +- String msg = "Cycle duration (" + cyclesActual + "), pause duration (" + pausesActual + ")"; +- if (pausesActual <= cyclesActual) { +- System.out.println(msg); +- } else { +- throw new IllegalStateException(msg); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/oom/TestAllocLargeObj.java afu8u/hotspot/test/gc/shenandoah/oom/TestAllocLargeObj.java +--- openjdk/hotspot/test/gc/shenandoah/oom/TestAllocLargeObj.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/oom/TestAllocLargeObj.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,82 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/** +- * @test TestAllocLargeObj +- * @summary Test allocation of small object to result OOM, but not to crash JVM +- * @key gc +- * @library /testlibrary +- * +- * @run main TestAllocLargeObj +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestAllocLargeObj { +- +- static final int SIZE = 1 * 1024 * 1024; +- static final int COUNT = 16; +- +- static volatile Object sink; +- +- public static void work() throws Exception { +- Object[] root = new Object[COUNT]; +- sink = root; +- for (int c = 0; c < COUNT; c++) { +- root[c] = new Object[SIZE]; +- } +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- work(); +- return; +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx16m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestAllocLargeObj.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(1); +- analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx1g", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestAllocLargeObj.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(0); +- analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/oom/TestAllocLargerThanHeap.java 
afu8u/hotspot/test/gc/shenandoah/oom/TestAllocLargerThanHeap.java +--- openjdk/hotspot/test/gc/shenandoah/oom/TestAllocLargerThanHeap.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/oom/TestAllocLargerThanHeap.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,77 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/** +- * @test TestAllocLargerThanHeap +- * @summary Test that allocation of the object larger than heap fails predictably +- * @key gc +- * @library /testlibrary +- * +- * @run main TestAllocLargerThanHeap +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestAllocLargerThanHeap { +- +- static final int SIZE = 16 * 1024 * 1024; +- +- static volatile Object sink; +- +- public static void work() throws Exception { +- sink = new Object[SIZE]; +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- work(); +- return; +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx16m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestAllocLargerThanHeap.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(1); +- analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx1g", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestAllocLargerThanHeap.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(0); +- analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/oom/TestAllocSmallObj.java afu8u/hotspot/test/gc/shenandoah/oom/TestAllocSmallObj.java +--- openjdk/hotspot/test/gc/shenandoah/oom/TestAllocSmallObj.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/oom/TestAllocSmallObj.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,81 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/** +- * @test TestAllocSmallObj +- * @summary Test allocation of small object to result OOM, but not to crash JVM +- * @key gc +- * @library /testlibrary +- * +- * @run main TestAllocSmallObj +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestAllocSmallObj { +- +- static final int COUNT = 16 * 1024 * 1024; +- +- static volatile Object sink; +- +- public static void work() throws Exception { +- Object[] root = new Object[COUNT]; +- sink = root; +- for (int c = 0; c < COUNT; c++) { +- root[c] = new Object(); +- } +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- work(); +- return; +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx16m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestAllocSmallObj.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(1); +- analyzer.shouldContain("java.lang.OutOfMemoryError: Java heap space"); +- } +- 
+- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx1g", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestAllocSmallObj.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(0); +- analyzer.shouldNotContain("java.lang.OutOfMemoryError: Java heap space"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/oom/TestClassLoaderLeak.java afu8u/hotspot/test/gc/shenandoah/oom/TestClassLoaderLeak.java +--- openjdk/hotspot/test/gc/shenandoah/oom/TestClassLoaderLeak.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/oom/TestClassLoaderLeak.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,152 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/** +- * @test TestClassLoaderLeak +- * @summary Test OOME in due to classloader leak +- * @key gc +- * @library /testlibrary +- * +- * @run main/timeout=480 TestClassLoaderLeak +- */ +- +-import java.util.*; +-import java.io.*; +-import java.nio.*; +-import java.nio.file.*; +-import com.oracle.java.testlibrary.*; +- +-public class TestClassLoaderLeak { +- +- static final int SIZE = 1 * 1024 * 1024; +- static final int COUNT = 128; +- +- static volatile Object sink; +- +- static class Dummy { +- static final int[] PAYLOAD = new int[SIZE]; +- } +- +- static class MyClassLoader extends ClassLoader { +- final String path; +- +- MyClassLoader(String path) { +- this.path = path; +- } +- +- public Class loadClass(String name) throws ClassNotFoundException { +- try { +- File f = new File(path, name + ".class"); +- if (!f.exists()) { +- return super.loadClass(name); +- } +- +- Path path = Paths.get(f.getAbsolutePath()); +- byte[] cls = Files.readAllBytes(path); +- return defineClass(name, cls, 0, cls.length, null); +- } catch (IOException e) { +- throw new ClassNotFoundException(name); +- } +- } +- } +- +- static void load(String path) throws Exception { +- ClassLoader cl = new MyClassLoader(path); +- Class c = (Class) Class.forName("TestClassLoaderLeak$Dummy", true, cl); +- if (c.getClassLoader() != cl) { +- throw new IllegalStateException("Should have loaded by target loader"); +- } +- sink = c; +- } +- +- public static void passWith(String... args) throws Exception { +- testWith(true, args); +- } +- +- public static void failWith(String... args) throws Exception { +- testWith(false, args); +- } +- +- public static void testWith(boolean shouldPass, String... 
args) throws Exception { +- List pbArgs = new ArrayList<>(); +- pbArgs.add("-Xmx128m"); +- pbArgs.add("-XX:+UnlockExperimentalVMOptions"); +- pbArgs.add("-XX:+UnlockDiagnosticVMOptions"); +- pbArgs.add("-XX:+UseShenandoahGC"); +- pbArgs.addAll(Arrays.asList(args)); +- pbArgs.add(TestClassLoaderLeak.class.getName()); +- pbArgs.add("test"); +- +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(pbArgs.toArray(new String[0])); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- +- if (shouldPass) { +- analyzer.shouldHaveExitValue(0); +- analyzer.shouldNotContain("java.lang.OutOfMemoryError"); +- analyzer.shouldContain("All good"); +- } else { +- analyzer.shouldHaveExitValue(1); +- analyzer.shouldContain("java.lang.OutOfMemoryError"); +- analyzer.shouldNotContain("All good"); +- } +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- String classDir = TestClassLoaderLeak.class.getProtectionDomain().getCodeSource().getLocation().getPath(); +- for (int c = 0; c < COUNT; c++) { +- load(classDir); +- } +- System.out.println("All good"); +- return; +- } +- +- String[][][] modeHeuristics = new String[][][] { +- {{"satb"}, {"adaptive", "compact", "static", "aggressive"}}, +- {{"iu"}, {"adaptive", "aggressive"}}, +- {{"passive"}, {"passive"}} +- }; +- +- for (String[][] mh : modeHeuristics) { +- String mode = mh[0][0]; +- String[] heuristics = mh[1]; +- for (String h : heuristics) { +- // Forceful enabling should work +- passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading"); +- passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloadingWithConcurrentMark"); +- +- // Even when concurrent unloading is disabled, Full GC has to recover +- passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark"); +- passWith("-XX:ShenandoahGCMode=" + mode, 
"-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=0"); +- passWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:+ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=0"); +- +- // Should OOME when unloading forcefully disabled, even if local flags try to enable it back +- failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading"); +- failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark"); +- failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:+ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=1"); +- failWith("-XX:ShenandoahGCMode=" + mode, "-XX:ShenandoahGCHeuristics=" + h, "-XX:-ClassUnloading", "-XX:-ClassUnloadingWithConcurrentMark", "-XX:ShenandoahUnloadClassesFrequency=1"); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/oom/TestThreadFailure.java afu8u/hotspot/test/gc/shenandoah/oom/TestThreadFailure.java +--- openjdk/hotspot/test/gc/shenandoah/oom/TestThreadFailure.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/oom/TestThreadFailure.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,76 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/** +- * @test TestThreadFailure +- * @summary Test OOME in separate thread is recoverable +- * @key gc +- * @library /testlibrary +- * +- * @run main/timeout=480 TestThreadFailure +- */ +- +-import java.util.*; +-import com.oracle.java.testlibrary.*; +- +-public class TestThreadFailure { +- +- static final int SIZE = 1024; +- static final int COUNT = 16; +- +- static class NastyThread extends Thread { +- @Override +- public void run() { +- List root = new ArrayList(); +- while (true) { +- root.add(new Object[SIZE]); +- } +- } +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- for (int t = 0; t < COUNT; t++) { +- Thread thread = new NastyThread(); +- thread.start(); +- thread.join(); +- } +- System.out.println("All good"); +- return; +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx32m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- TestThreadFailure.class.getName(), +- "test"); +- +- OutputAnalyzer analyzer = new OutputAnalyzer(pb.start()); +- analyzer.shouldHaveExitValue(0); +- analyzer.shouldContain("java.lang.OutOfMemoryError"); +- analyzer.shouldContain("All good"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestAlwaysPreTouch.java afu8u/hotspot/test/gc/shenandoah/options/TestAlwaysPreTouch.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestAlwaysPreTouch.java 2023-04-19 
05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestAlwaysPreTouch.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,42 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestAlwaysPreTouch +- * @summary Check that Shenandoah's AlwaysPreTouch does not fire asserts +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -Xmx1g TestAlwaysPreTouch +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -XX:ConcGCThreads=2 -Xmx1g TestAlwaysPreTouch +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -XX:ParallelGCThreads=2 -Xmx1g TestAlwaysPreTouch +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -Xms128m -Xmx1g TestAlwaysPreTouch +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+AlwaysPreTouch -Xms1g -Xmx1g TestAlwaysPreTouch +- */ +- +-public class TestAlwaysPreTouch { +- +- public static void main(String[] args) throws Exception { +- // checking the initialization before entering main() +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestArgumentRanges.java afu8u/hotspot/test/gc/shenandoah/options/TestArgumentRanges.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestArgumentRanges.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestArgumentRanges.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,127 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestArgumentRanges +- * @summary Test that Shenandoah arguments are checked for ranges where applicable +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestArgumentRanges +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestArgumentRanges { +- public static void main(String[] args) throws Exception { +- testRange("ShenandoahGarbageThreshold", 0, 100); +- testRange("ShenandoahMinFreeThreshold", 0, 100); +- testRange("ShenandoahAllocationThreshold", 0, 100); +- testHeuristics(); +- } +- +- private static void testHeuristics() throws Exception { +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=aggressive", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=static", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=fluff", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- 
output.shouldMatch("Unknown -XX:ShenandoahGCHeuristics option"); +- output.shouldHaveExitValue(1); +- } +- } +- +- private static void testRange(String option, int min, int max) throws Exception { +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:" + option + "=" + (max + 1), +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(1); +- } +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:" + option + "=" + max, +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:" + option + "=" + (min - 1), +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(1); +- } +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:" + option + "=" + min, +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestClassUnloadingArguments.java afu8u/hotspot/test/gc/shenandoah/options/TestClassUnloadingArguments.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestClassUnloadingArguments.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestClassUnloadingArguments.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,109 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. 
All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestClassUnloadingArguments +- * @summary Test that loop mining arguments are sane +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestClassUnloadingArguments +- */ +- +-import java.util.*; +- +-import com.oracle.java.testlibrary.*; +- +-public class TestClassUnloadingArguments { +- +- public static void testWith(String msg, boolean cu, boolean cuConc, String... args) throws Exception { +- String[] cmds = Arrays.copyOf(args, args.length + 3); +- cmds[args.length] = "-Xmx128m"; +- cmds[args.length + 1] = "-XX:+PrintFlagsFinal"; +- cmds[args.length + 2] = "-version"; +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- output.shouldContain("ClassUnloading"); +- output.shouldContain("ClassUnloadingWithConcurrentMark"); +- +- Asserts.assertEQ(output.firstMatch("(.+?) ClassUnloading.+?= (.+?) 
(.+?)", 2), +- Boolean.toString(cu), +- msg + ", but got wrong ClassUnloading"); +- Asserts.assertEQ(output.firstMatch("(.+?) ClassUnloadingWithConcurrentMark.+?= (.+?) (.+?)", 2), +- Boolean.toString(cuConc), +- msg + ", but got wrong ClassUnloadingWithConcurrentMark"); +- } +- +- public static void main(String[] args) throws Exception { +- testDefaultGC(); +- testShenandoah(); +- } +- +- public static void testDefaultGC() throws Exception { +- testWith("Default GC should have class unloading enabled", +- true, true); +- +-// Not in JDK 8 and Parallel GC +-// testWith("Default GC should disable everything", +-// false, false, +-// "-XX:-ClassUnloading"); +- +- testWith("Default GC should disable conc unload", +- true, false, +- "-XX:-ClassUnloadingWithConcurrentMark"); +- +-// Not in JDK 8 and Parallel GC +-// testWith("Default GC should not let conc unload to be enabled separately", +-// false, false, +-// "-XX:-ClassUnloading", +-// "-XX:+ClassUnloadingWithConcurrentMark"); +- } +- +- public static void testShenandoah() throws Exception { +- testWith("Shenandoah GC should have class unloading enabled", +- true, true, +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC"); +- +- testWith("Shenandoah GC should disable everything", +- false, false, +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:-ClassUnloading"); +- +- testWith("Shenandoah GC should enable conc unload", +- true, true, +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:+ClassUnloadingWithConcurrentMark"); +- +- testWith("Shenandoah GC should not let conc unload to be enabled separately", +- false, false, +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:-ClassUnloading", +- "-XX:+ClassUnloadingWithConcurrentMark"); +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestCodeCacheRootStyles.java afu8u/hotspot/test/gc/shenandoah/options/TestCodeCacheRootStyles.java +--- 
openjdk/hotspot/test/gc/shenandoah/options/TestCodeCacheRootStyles.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestCodeCacheRootStyles.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,39 +0,0 @@ +-/* +- * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* @test TestCodeCacheRootStyles +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=0 TestCodeCacheRootStyles +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=1 TestCodeCacheRootStyles +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahCodeRootsStyle=2 TestCodeCacheRootStyles +- */ +- +-public class TestCodeCacheRootStyles { +- public static void main(String[] args) { +- // Bug should crash before we get here. 
+- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestEnabled.java afu8u/hotspot/test/gc/shenandoah/options/TestEnabled.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestEnabled.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestEnabled.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,54 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-import java.lang.management.GarbageCollectorMXBean; +-import java.lang.management.ManagementFactory; +- +-/* +- * @test TestEnabled +- * @key gc +- * +- * @run main/othervm -Dexpected=false -Xmx64m TestEnabled +- * @run main/othervm -Dexpected=true -Xmx64m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestEnabled +- */ +- +-public class TestEnabled { +- +- public static void main(String... 
args) { +- boolean expected = Boolean.getBoolean("expected"); +- boolean actual = isEnabled(); +- if (expected != actual) { +- throw new IllegalStateException("Error: expected = " + expected + ", actual = " + actual); +- } +- } +- +- public static boolean isEnabled() { +- for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) { +- if (bean.getName().contains("Shenandoah")) { +- return true; +- } +- } +- return false; +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestExplicitGC.java afu8u/hotspot/test/gc/shenandoah/options/TestExplicitGC.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestExplicitGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestExplicitGC.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,149 +0,0 @@ +-'/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestExplicitGC +- * @summary Test that Shenandoah reacts to explicit GC flags appropriately +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestExplicitGC +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestExplicitGC { +- +- enum Mode { +- PRODUCT, +- DIAGNOSTIC, +- EXPERIMENTAL, +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- System.out.println("Calling System.gc()"); +- System.gc(); +- return; +- } +- +- String[] full = new String[] { +- "Pause Full" +- }; +- +- String[] concNormal = new String[] { +- "Pause Init Mark", +- "Pause Final Mark", +- }; +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-verbose:gc", +- TestExplicitGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- for (String p : full) { +- output.shouldNotContain(p); +- } +- for (String p : concNormal) { +- output.shouldContain(p); +- } +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-verbose:gc", +- "-XX:+DisableExplicitGC", +- TestExplicitGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- for (String p : full) { +- output.shouldNotContain(p); +- } +- for (String p : concNormal) { +- output.shouldNotContain(p); +- } +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-verbose:gc", +- "-XX:+ExplicitGCInvokesConcurrent", +- TestExplicitGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- for (String p : full) { +- output.shouldNotContain(p); +- } +- for (String p : concNormal) { +- output.shouldContain(p); +- } +- } +- +- { +- ProcessBuilder pb = 
ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-verbose:gc", +- "-XX:-ExplicitGCInvokesConcurrent", +- TestExplicitGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- for (String p : full) { +- output.shouldContain(p); +- } +- for (String p : concNormal) { +- output.shouldNotContain(p); +- } +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-verbose:gc", +- "-XX:+ExplicitGCInvokesConcurrent", +- "-XX:ShenandoahGCMode=iu", +- TestExplicitGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- for (String p : full) { +- output.shouldNotContain(p); +- } +- for (String p : concNormal) { +- output.shouldContain(p); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestExplicitGCNoConcurrent.java afu8u/hotspot/test/gc/shenandoah/options/TestExplicitGCNoConcurrent.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestExplicitGCNoConcurrent.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestExplicitGCNoConcurrent.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,74 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestExplicitGCNoConcurrent +- * @summary Test that Shenandoah reacts to explicit GC flags appropriately +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestExplicitGCNoConcurrent +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestExplicitGCNoConcurrent { +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- System.out.println("Calling System.gc()"); +- System.gc(); +- return; +- } +- +- String[] concurrent = new String[] { +- "Pause Init Mark", +- "Pause Final Mark", +- "Pause Init Update Refs", +- "Pause Final Update Refs", +- }; +- +- String[] opts = new String[] { +- "", +- "-XX:-ExplicitGCInvokesConcurrent", +- "-XX:+ExplicitGCInvokesConcurrent" +- }; +- +- for (String opt : opts) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- opt, +- "-XX:ShenandoahGCHeuristics=passive", +- TestExplicitGCNoConcurrent.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- for (String p : concurrent) { +- output.shouldNotContain(p); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestHeuristicsUnlock.java afu8u/hotspot/test/gc/shenandoah/options/TestHeuristicsUnlock.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestHeuristicsUnlock.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestHeuristicsUnlock.java 1970-01-01 
08:00:00.000000000 +0800 +@@ -1,115 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestHeuristicsUnlock +- * @summary Test that Shenandoah heuristics are unlocked properly +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestHeuristicsUnlock +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestHeuristicsUnlock { +- +- enum Mode { +- PRODUCT, +- DIAGNOSTIC, +- EXPERIMENTAL, +- } +- +- public static void main(String[] args) throws Exception { +- testWith("-XX:ShenandoahGCHeuristics=adaptive", Mode.PRODUCT); +- testWith("-XX:ShenandoahGCHeuristics=static", Mode.PRODUCT); +- testWith("-XX:ShenandoahGCHeuristics=compact", Mode.PRODUCT); +- testWith("-XX:ShenandoahGCHeuristics=aggressive", Mode.DIAGNOSTIC); +- } +- +- private static void testWith(String h, Mode mode) throws Exception { +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:-UnlockDiagnosticVMOptions", +- "-XX:-UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- switch (mode) { +- case PRODUCT: +- output.shouldHaveExitValue(0); +- break; +- case DIAGNOSTIC: +- case EXPERIMENTAL: +- output.shouldHaveExitValue(1); +- break; +- } +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:-UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- switch (mode) { +- case PRODUCT: +- case DIAGNOSTIC: +- output.shouldHaveExitValue(0); +- break; +- case EXPERIMENTAL: +- output.shouldHaveExitValue(1); +- break; +- } +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:-UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- switch (mode) { +- case PRODUCT: +- case EXPERIMENTAL: +- 
output.shouldHaveExitValue(0); +- break; +- case DIAGNOSTIC: +- output.shouldHaveExitValue(1); +- break; +- } +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestHumongousMoves.java afu8u/hotspot/test/gc/shenandoah/options/TestHumongousMoves.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestHumongousMoves.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestHumongousMoves.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,61 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestHumongousMoves +- * @summary Check Shenandoah reacts on setting humongous moves correctly +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahHumongousMoves +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestHumongousMoves +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahHumongousMoves +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestHumongousMoves +- */ +- +-import java.util.Random; +- +-public class TestHumongousMoves { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final int min = 0; +- final int max = 384 * 1024; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); +- +- Random r = new Random(); +- for (long c = 0; c < count; c++) { +- sink = new int[min + r.nextInt(max - min)]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestHumongousThresholdArgs.java afu8u/hotspot/test/gc/shenandoah/options/TestHumongousThresholdArgs.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestHumongousThresholdArgs.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestHumongousThresholdArgs.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,72 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestHumongousThresholdArgs +- * @summary Test that Shenandoah humongous threshold args are checked +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestHumongousThresholdArgs +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestHumongousThresholdArgs { +- public static void main(String[] args) throws Exception { +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- int[] valid = new int[] {1, 10, 50, 90, 100}; +- int[] invalid = new int[] {-100, -1, 0, 101, 1000}; +- +- for (int v : valid) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahHumongousThreshold=" + v, +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- for (int v : invalid) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- 
"-XX:ShenandoahHumongousThreshold=" + v, +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(1); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestLargePages.java afu8u/hotspot/test/gc/shenandoah/options/TestLargePages.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestLargePages.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestLargePages.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,66 +0,0 @@ +-/* +- * Copyright (c) 2021, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test id=default +- * +- * @run main/othervm -XX:+UseShenandoahGC -Xms128m -Xmx128m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -Xmx128m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -Xms128m TestLargePages +- * +- * @run main/othervm -XX:+UseShenandoahGC -Xms131m -Xmx131m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -Xmx131m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -Xms131m TestLargePages +- */ +- +-/* +- * @test id=lp +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms128m -Xmx128m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xmx128m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms128m TestLargePages +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms131m -Xmx131m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xmx131m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms131m TestLargePages +- */ +- +-/* +- * @test id=thp +- * @requires os.family == "linux" +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms128m -Xmx128m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xmx128m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms128m TestLargePages +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms131m -Xmx131m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xmx131m TestLargePages +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms131m TestLargePages +- */ +- +-public class TestLargePages { +- public static void main(String[] args) { +- // Everything is checked on initialization +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestLargePagesWithSmallHeap.java 
afu8u/hotspot/test/gc/shenandoah/options/TestLargePagesWithSmallHeap.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestLargePagesWithSmallHeap.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestLargePagesWithSmallHeap.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,68 +0,0 @@ +-/* +- * Copyright (c) 2021, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test id=default +- * @bug 8268127 +- * @summary when heap is too small for regions to align to large page size, should fallback to regular page size +- * +- * @run main/othervm -XX:+UseShenandoahGC -Xms17m -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -Xms17m TestLargePagesWithSmallHeap +- * +- * @run main/othervm -XX:+UseShenandoahGC -Xms17m -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -Xms17m TestLargePagesWithSmallHeap +- */ +- +-/* +- * @test id=lp +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms17m -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms17m TestLargePagesWithSmallHeap +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms17m -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseLargePages -Xms17m TestLargePagesWithSmallHeap +- */ +- +-/* +- * @test id=thp +- * @requires os.family == "linux" +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms17m -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms17m TestLargePagesWithSmallHeap +- * +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms17m -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm -XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xmx17m TestLargePagesWithSmallHeap +- * @run main/othervm 
-XX:+UseShenandoahGC -XX:+UseTransparentHugePages -Xms17m TestLargePagesWithSmallHeap +- */ +- +-public class TestLargePagesWithSmallHeap { +- public static void main(String[] args) { +- // Everything is checked on initialization +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestModeUnlock.java afu8u/hotspot/test/gc/shenandoah/options/TestModeUnlock.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestModeUnlock.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestModeUnlock.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,114 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestModeUnlock +- * @summary Test that Shenandoah modes are unlocked properly +- * @key gc +- * @library /testlibrary +- * @run driver TestModeUnlock +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestModeUnlock { +- +- enum Mode { +- PRODUCT, +- DIAGNOSTIC, +- EXPERIMENTAL, +- } +- +- public static void main(String[] args) throws Exception { +- testWith("-XX:ShenandoahGCMode=satb", Mode.PRODUCT); +- testWith("-XX:ShenandoahGCMode=iu", Mode.EXPERIMENTAL); +- testWith("-XX:ShenandoahGCMode=passive", Mode.DIAGNOSTIC); +- } +- +- private static void testWith(String h, Mode mode) throws Exception { +- if (false) { // When ShenandoahGC is experimental flag, this makes no sense to test +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:-UnlockDiagnosticVMOptions", +- "-XX:-UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- switch (mode) { +- case PRODUCT: +- output.shouldHaveExitValue(0); +- break; +- case DIAGNOSTIC: +- case EXPERIMENTAL: +- output.shouldHaveExitValue(1); +- break; +- } +- } +- +- if (false) { // When ShenandoahGC is experimental flag, this makes no sense to test +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:-UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- switch (mode) { +- case PRODUCT: +- case DIAGNOSTIC: +- output.shouldHaveExitValue(0); +- break; +- case EXPERIMENTAL: +- output.shouldHaveExitValue(1); +- break; +- } +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:-UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- switch 
(mode) { +- case PRODUCT: +- case EXPERIMENTAL: +- output.shouldHaveExitValue(0); +- break; +- case DIAGNOSTIC: +- output.shouldHaveExitValue(1); +- break; +- } +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestObjectAlignment.java afu8u/hotspot/test/gc/shenandoah/options/TestObjectAlignment.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestObjectAlignment.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestObjectAlignment.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,59 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestObjectAlignment +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx32m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx64m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx128m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx256m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx512m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g TestObjectAlignment +- */ +- +-/* +- * @test TestObjectAlignment +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx16m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx32m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx64m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx128m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx256m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx512m TestObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions 
-XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xmx1g TestObjectAlignment +- */ +- +-public class TestObjectAlignment { +- +- public static void main(String[] args) throws Exception { +- // Testing the checking code on startup +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestPacing.java afu8u/hotspot/test/gc/shenandoah/options/TestPacing.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestPacing.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestPacing.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,43 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestPacing +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-ShenandoahPacing -Xmx128m TestPacing +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ShenandoahPacing -Xmx128m TestPacing +- */ +- +-public class TestPacing { +- static final long TARGET_MB = Long.getLong("target", 1000); // 1 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for (long c = 0; c < count; c++) { +- sink = new Object(); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestParallelRegionStride.java afu8u/hotspot/test/gc/shenandoah/options/TestParallelRegionStride.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestParallelRegionStride.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestParallelRegionStride.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,45 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestParallelRegionStride +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=1 -Xmx128m TestParallelRegionStride +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=10 -Xmx128m TestParallelRegionStride +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=100 -Xmx128m TestParallelRegionStride +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahParallelRegionStride=1024 -Xmx128m TestParallelRegionStride +- */ +- +-public class TestParallelRegionStride { +- static final long TARGET_MB = Long.getLong("target", 1000); // 1 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for (long c = 0; c < count; c++) { +- sink = new Object(); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestRegionSizeArgs.java afu8u/hotspot/test/gc/shenandoah/options/TestRegionSizeArgs.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestRegionSizeArgs.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestRegionSizeArgs.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,235 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestRegionSizeArgs +- * @summary Test that Shenandoah region size args are checked +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestRegionSizeArgs +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestRegionSizeArgs { +- public static void main(String[] args) throws Exception { +- testInvalidRegionSizes(); +- testMinRegionSize(); +- testMaxRegionSize(); +- } +- +- private static void testInvalidRegionSizes() throws Exception { +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms4m", +- "-Xmx1g", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms8m", +- "-Xmx1g", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=200m", +- "-version"); +- 
OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=9m", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=255K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=260K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms1g", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=32M", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms1g", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=64M", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms1g", +- "-Xmx1g", +- 
"-XX:ShenandoahRegionSize=256K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms1g", +- "-Xmx1g", +- "-XX:ShenandoahRegionSize=128K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- } +- +- private static void testMinRegionSize() throws Exception { +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahMinRegionSize=255K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahMinRegionSize=1M", +- "-XX:ShenandoahMaxRegionSize=260K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize"); +- output.shouldHaveExitValue(1); +- } +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahMinRegionSize=200m", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahMinRegionSize=9m", +- 
"-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- } +- +- private static void testMaxRegionSize() throws Exception { +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahMaxRegionSize=255K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahMaxRegionSize option"); +- output.shouldHaveExitValue(1); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms100m", +- "-Xmx1g", +- "-XX:ShenandoahMinRegionSize=1M", +- "-XX:ShenandoahMaxRegionSize=260K", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldMatch("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize"); +- output.shouldHaveExitValue(1); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestSelectiveBarrierFlags.java afu8u/hotspot/test/gc/shenandoah/options/TestSelectiveBarrierFlags.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestSelectiveBarrierFlags.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestSelectiveBarrierFlags.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,102 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestSelectiveBarrierFlags +- * @summary Test selective barrier enabling works, by aggressively compiling HelloWorld with combinations +- * of barrier flags +- * @library /testlibrary +- * +- * @run main/othervm TestSelectiveBarrierFlags -Xint +- * @run main/othervm TestSelectiveBarrierFlags -Xbatch -XX:CompileThreshold=100 -XX:TieredStopAtLevel=1 +- * @run main/othervm TestSelectiveBarrierFlags -Xbatch -XX:CompileThreshold=100 -XX:-TieredCompilation -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers +- */ +- +-import java.util.*; +-import java.util.concurrent.*; +-import com.oracle.java.testlibrary.*; +- +-public class TestSelectiveBarrierFlags { +- +- public static void main(String[] args) throws Exception { +- String[][] opts = { +- new String[]{ "ShenandoahLoadRefBarrier" }, +- new String[] { "ShenandoahSATBBarrier", "ShenandoahStoreValEnqueueBarrier" }, +- new String[]{ "ShenandoahCASBarrier" }, +- new String[]{ "ShenandoahCloneBarrier" }, +- }; +- +- int size = 1; +- for (String[] l : opts) { +- size *= (l.length + 1); +- } +- +- ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); +- +- for (int c = 0; c < size; c++) { +- int t = c; +- +- List conf = new ArrayList<>(); +- conf.addAll(Arrays.asList(args)); +- conf.add("-Xmx128m"); +- conf.add("-XX:+UnlockDiagnosticVMOptions"); +- conf.add("-XX:+UnlockExperimentalVMOptions"); +- 
conf.add("-XX:+UseShenandoahGC"); +- conf.add("-XX:ShenandoahGCMode=passive"); +- +- StringBuilder sb = new StringBuilder(); +- for (String[] l : opts) { +- // Make a choice which flag to select from the group. +- // Zero means no flag is selected from the group. +- int choice = t % (l.length + 1); +- for (int e = 0; e < l.length; e++) { +- conf.add("-XX:" + ((choice == (e + 1)) ? "+" : "-") + l[e]); +- } +- t = t / (l.length + 1); +- } +- +- conf.add("TestSelectiveBarrierFlags$Test"); +- +- pool.submit(() -> { +- try { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(conf.toArray(new String[0])); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } catch (Exception e) { +- e.printStackTrace(); +- System.exit(1); +- } +- }); +- } +- +- pool.shutdown(); +- pool.awaitTermination(1, TimeUnit.HOURS); +- } +- +- public static class Test { +- public static void main(String... args) { +- System.out.println("HelloWorld"); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestSingleThreaded.java afu8u/hotspot/test/gc/shenandoah/options/TestSingleThreaded.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestSingleThreaded.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestSingleThreaded.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,39 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestSingleThreaded +- * @summary test single worker threaded Shenandoah +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:ParallelGCThreads=1 -XX:ConcGCThreads=1 TestSingleThreaded +- */ +- +-public class TestSingleThreaded { +- +- public static void main(String[] args) { +- // Bug should crash before we get here. +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestSoftMaxHeapSize.java afu8u/hotspot/test/gc/shenandoah/options/TestSoftMaxHeapSize.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestSoftMaxHeapSize.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestSoftMaxHeapSize.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,72 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestSoftMaxHeapSize +- * @summary Test that Shenandoah checks SoftMaxHeapSize +- * @library /testlibrary +- * @modules java.base/jdk.internal.misc +- * java.management +- * @run driver TestSoftMaxHeapSize +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestSoftMaxHeapSize { +- public static void main(String[] args) throws Exception { +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms4m", +- "-Xmx128m", +- "-XX:ShenandoahSoftMaxHeapSize=4m", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms4m", +- "-Xmx128m", +- "-XX:ShenandoahSoftMaxHeapSize=128m", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xms4m", +- "-Xmx128m", +- "-XX:ShenandoahSoftMaxHeapSize=129m", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(1); +- output.shouldContain("SoftMaxHeapSize must be less than or equal to the maximum heap size"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestThreadCounts.java afu8u/hotspot/test/gc/shenandoah/options/TestThreadCounts.java +--- 
openjdk/hotspot/test/gc/shenandoah/options/TestThreadCounts.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestThreadCounts.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,69 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestThreadCounts +- * @summary Test that Shenandoah GC thread counts are handled well +- * @key gc +- * @library /testlibrary +- * @run driver TestThreadCounts +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestThreadCounts { +- public static void main(String[] args) throws Exception { +- for (int conc = 0; conc < 16; conc++) { +- for (int par = 0; par < 16; par++) { +- testWith(conc, par); +- } +- } +- } +- +- private static void testWith(int conc, int par) throws Exception { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ConcGCThreads=" + conc, +- "-XX:ParallelGCThreads=" + par, +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- +- if (conc == 0) { +- output.shouldContain("Shenandoah expects ConcGCThreads > 0"); +- output.shouldHaveExitValue(1); +- } else if (par == 0) { +- output.shouldContain("Shenandoah expects ParallelGCThreads > 0"); +- output.shouldHaveExitValue(1); +- } else if (conc > par) { +- output.shouldContain("Shenandoah expects ConcGCThreads <= ParallelGCThreads"); +- output.shouldHaveExitValue(1); +- } else { +- output.shouldNotContain("Shenandoah expects ConcGCThreads <= ParallelGCThreads"); +- output.shouldHaveExitValue(0); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestThreadCountsOverride.java afu8u/hotspot/test/gc/shenandoah/options/TestThreadCountsOverride.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestThreadCountsOverride.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestThreadCountsOverride.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,67 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestThreadCountsOverride +- * @summary Test that Shenandoah GC thread counts are overridable +- * @key gc +- * @library /testlibrary +- * @run driver TestThreadCountsOverride +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestThreadCountsOverride { +- public static void main(String[] args) throws Exception { +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ParallelGCThreads=1", +- "-XX:+PrintFlagsFinal", +- "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- +- output.shouldMatch("ParallelGCThreads(.*)= 1 "); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ConcGCThreads=1", +- "-XX:+PrintFlagsFinal", +- "-version"); +- OutputAnalyzer output = new 
OutputAnalyzer(pb.start()); +- +- output.shouldMatch("ConcGCThreads(.*)= 1 "); +- output.shouldHaveExitValue(0); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestVerboseGC.java afu8u/hotspot/test/gc/shenandoah/options/TestVerboseGC.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestVerboseGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestVerboseGC.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,102 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestVerboseGC +- * @summary Test that Shenandoah reacts properly on -verbose:gc +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestVerboseGC +- */ +- +-import com.oracle.java.testlibrary.*; +- +-public class TestVerboseGC { +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- for (int c = 0; c < 1_000; c++) { +- sink = new byte[1_000_000]; +- Thread.sleep(1); +- } +- return; +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xmx128m", +- TestVerboseGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldNotContain("Concurrent marking"); +- output.shouldNotContain("Collectable Garbage"); +- output.shouldNotContain("GC STATISTICS"); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xmx128m", +- "-verbose:gc", +- TestVerboseGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldContain("Concurrent marking"); +- output.shouldNotContain("Collectable Garbage"); +- output.shouldContain("GC STATISTICS"); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xmx128m", +- "-XX:+PrintGC", +- TestVerboseGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldContain("Concurrent marking"); +- output.shouldNotContain("Collectable Garbage"); +- output.shouldContain("GC STATISTICS"); +- output.shouldHaveExitValue(0); +- } +- +- { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-Xmx128m", +- 
"-XX:+PrintGCDetails", +- TestVerboseGC.class.getName(), +- "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldContain("Concurrent marking"); +- output.shouldContain("Collectable Garbage"); +- output.shouldContain("GC STATISTICS"); +- output.shouldHaveExitValue(0); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestWrongBarrierDisable.java afu8u/hotspot/test/gc/shenandoah/options/TestWrongBarrierDisable.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestWrongBarrierDisable.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestWrongBarrierDisable.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,94 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* @test TestWrongBarrierDisable +- * @summary Test that disabling wrong barriers fails early +- * @key gc +- * @library /testlibrary +- * @run main/othervm TestWrongBarrierDisable +- */ +- +-import java.util.*; +- +-import com.oracle.java.testlibrary.*; +- +-public class TestWrongBarrierDisable { +- +- public static void main(String[] args) throws Exception { +- String[] concurrent = { +- "ShenandoahLoadRefBarrier", +- "ShenandoahSATBBarrier", +- "ShenandoahCASBarrier", +- "ShenandoahCloneBarrier", +- }; +- String[] iu = { +- "ShenandoahLoadRefBarrier", +- "ShenandoahStoreValEnqueueBarrier", +- "ShenandoahCASBarrier", +- "ShenandoahCloneBarrier", +- }; +- +- shouldFailAll("-XX:ShenandoahGCHeuristics=adaptive", concurrent); +- shouldFailAll("-XX:ShenandoahGCHeuristics=static", concurrent); +- shouldFailAll("-XX:ShenandoahGCHeuristics=compact", concurrent); +- shouldFailAll("-XX:ShenandoahGCHeuristics=aggressive", concurrent); +- shouldFailAll("-XX:ShenandoahGCMode=iu", iu); +- shouldPassAll("-XX:ShenandoahGCMode=passive", concurrent); +- shouldPassAll("-XX:ShenandoahGCMode=passive", iu); +- } +- +- private static void shouldFailAll(String h, String[] barriers) throws Exception { +- for (String b : barriers) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-XX:-" + b, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(1); +- output.shouldContain("GC mode needs "); +- output.shouldContain("to work correctly"); +- } +- } +- +- private static void shouldPassAll(String h, String[] barriers) throws Exception { +- for (String b : barriers) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-XX:-" + b, +- "-version" +- ); 
+- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/options/TestWrongBarrierEnable.java afu8u/hotspot/test/gc/shenandoah/options/TestWrongBarrierEnable.java +--- openjdk/hotspot/test/gc/shenandoah/options/TestWrongBarrierEnable.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/options/TestWrongBarrierEnable.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,89 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* @test TestWrongBarrierEnable +- * @summary Test that disabling wrong barriers fails early +- * @key gc +- * @library /testlibrary +- * @run main/othervm TestWrongBarrierEnable +- */ +- +-import java.util.*; +- +-import com.oracle.java.testlibrary.*; +- +-public class TestWrongBarrierEnable { +- +- public static void main(String[] args) throws Exception { +- String[] concurrent = { +- "ShenandoahStoreValEnqueueBarrier", +- }; +- String[] iu = { +- "ShenandoahSATBBarrier", +- }; +- +- shouldFailAll("-XX:ShenandoahGCHeuristics=adaptive", concurrent); +- shouldFailAll("-XX:ShenandoahGCHeuristics=static", concurrent); +- shouldFailAll("-XX:ShenandoahGCHeuristics=compact", concurrent); +- shouldFailAll("-XX:ShenandoahGCHeuristics=aggressive", concurrent); +- shouldFailAll("-XX:ShenandoahGCMode=iu", iu); +- shouldPassAll("-XX:ShenandoahGCMode=passive", concurrent); +- shouldPassAll("-XX:ShenandoahGCMode=passive", iu); +- } +- +- private static void shouldFailAll(String h, String[] barriers) throws Exception { +- for (String b : barriers) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-XX:+" + b, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(1); +- output.shouldContain("GC mode needs "); +- output.shouldContain("to work correctly"); +- } +- } +- +- private static void shouldPassAll(String h, String[] barriers) throws Exception { +- for (String b : barriers) { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( +- "-Xmx128m", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- h, +- "-XX:+" + b, +- "-version" +- ); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- } +- } +- +-} +diff -uNr 
openjdk/hotspot/test/gc/shenandoah/TestAllocHumongousFragment.java afu8u/hotspot/test/gc/shenandoah/TestAllocHumongousFragment.java +--- openjdk/hotspot/test/gc/shenandoah/TestAllocHumongousFragment.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestAllocHumongousFragment.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,189 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation fragmentation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestAllocHumongousFragment +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation fragmentation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * 
-XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocHumongousFragment +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation fragmentation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestAllocHumongousFragment +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation fragmentation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestAllocHumongousFragment +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation fragmentation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -verbose:gc -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestAllocHumongousFragment +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation fragmentation +- * @key gc +- * +- * @run main/othervm -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- * +- * @run main/othervm -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- * +- * @run main/othervm -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocHumongousFragment +- * +- * @run main/othervm -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocHumongousFragment +- * +- * @run main/othervm -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestAllocHumongousFragment +- */ +- +-/* +- * @test TestAllocHumongousFragment +- * @summary Make sure Shenandoah can recover from humongous allocation 
fragmentation +- * @key gc +- * +- * @run main/othervm -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g -XX:ShenandoahTargetNumRegions=2048 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestAllocHumongousFragment +- */ +- +-import java.util.*; +-import java.util.concurrent.*; +- +-public class TestAllocHumongousFragment { +- +- static final long TARGET_MB = Long.getLong("target", 30_000); // 30 Gb allocations +- static final long LIVE_MB = Long.getLong("occupancy", 700); // 700 Mb alive +- +- static volatile Object sink; +- +- static List objects; +- +- public static void main(String[] args) throws Exception { +- final int min = 128 * 1024; +- final int max = 16 * 1024 * 1024; +- final long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); +- +- objects = new ArrayList<>(); +- long current = 0; +- +- Random r = new Random(); +- for (long c = 0; c < count; c++) { +- while (current > LIVE_MB * 1024 * 1024) { +- int idx = ThreadLocalRandom.current().nextInt(objects.size()); +- int[] remove = objects.remove(idx); +- current -= remove.length * 4 + 16; +- } +- +- int[] newObj = new int[min + r.nextInt(max - min)]; +- current += newObj.length * 4 + 16; +- objects.add(newObj); +- sink = new Object(); +- +- System.out.println("Allocated: " + (current / 1024 / 1024) + " Mb"); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestAllocIntArrays.java afu8u/hotspot/test/gc/shenandoah/TestAllocIntArrays.java +--- openjdk/hotspot/test/gc/shenandoah/TestAllocIntArrays.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestAllocIntArrays.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,190 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestAllocIntArrays +- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestAllocIntArrays +- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestAllocIntArrays +- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestAllocIntArrays +- */ +- +-/* +- * @test TestAllocIntArrays 
+- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestAllocIntArrays +- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestAllocIntArrays +- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestAllocIntArrays 
+- */ +- +-/* +- * @test TestAllocIntArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestAllocIntArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestAllocIntArrays +- */ +- +-import java.util.Random; +- +-public class TestAllocIntArrays { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final int min = 0; +- final int max = 384 * 1024; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); +- +- Random r = new Random(); +- for (long c = 0; c < count; c++) { +- sink = new int[min + r.nextInt(max - min)]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestAllocObjectArrays.java afu8u/hotspot/test/gc/shenandoah/TestAllocObjectArrays.java +--- openjdk/hotspot/test/gc/shenandoah/TestAllocObjectArrays.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestAllocObjectArrays.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,189 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestAllocObjectArrays +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run 
main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestAllocObjectArrays +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestAllocObjectArrays +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestAllocObjectArrays +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g 
-Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestAllocObjectArrays +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestAllocObjectArrays +- */ +- +-/* +- * @test TestAllocObjectArrays +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run 
main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestAllocObjectArrays +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -Xmx1g -Xms1g +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestAllocObjectArrays +- */ +- +-import java.util.Random; +- +-public class TestAllocObjectArrays { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final int min = 0; +- final int max = 384 * 1024; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); +- +- Random r = new Random(); +- for (long c = 0; c < count; c++) { +- sink = new Object[min + r.nextInt(max - min)]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestAllocObjects.java afu8u/hotspot/test/gc/shenandoah/TestAllocObjects.java +--- openjdk/hotspot/test/gc/shenandoah/TestAllocObjects.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestAllocObjects.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,176 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestAllocObjects +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * 
-XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestAllocObjects +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestAllocObjects +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * 
-XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot -XX:+ShenandoahVerify +- * TestAllocObjects +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestAllocObjects +- */ +- +-/* +- * @test TestAllocObjects +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestAllocObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestAllocObjects +- */ +- +-import java.util.Random; +- +-public class TestAllocObjects { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public 
static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for (long c = 0; c < count; c++) { +- sink = new Object(); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestArrayCopyCheckCast.java afu8u/hotspot/test/gc/shenandoah/TestArrayCopyCheckCast.java +--- openjdk/hotspot/test/gc/shenandoah/TestArrayCopyCheckCast.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestArrayCopyCheckCast.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,47 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestArrayCopyCheckCast +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyCheckCast +- */ +-public class TestArrayCopyCheckCast { +- +- static class Foo {} +- static class Bar {} +- +- public static void main(String[] args) throws Exception { +- try { +- Object[] array1 = new Object[1]; +- array1[0] = new Bar(); +- Foo[] array2 = new Foo[1]; +- System.arraycopy(array1, 0, array2, 0, 1); +- throw new RuntimeException(); +- } catch (ArrayStoreException ex) { +- // expected +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestArrayCopyStress.java afu8u/hotspot/test/gc/shenandoah/TestArrayCopyStress.java +--- openjdk/hotspot/test/gc/shenandoah/TestArrayCopyStress.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestArrayCopyStress.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,77 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-import java.util.concurrent.*; +- +-/* +- * @test TestArrayCopyStress +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:TieredStopAtLevel=0 -Xmx16m TestArrayCopyStress +- */ +-public class TestArrayCopyStress { +- +- private static final int ARRAY_SIZE = 1000; +- private static final int ITERATIONS = 10000; +- +- static class Foo { +- int num; +- +- Foo(int num) { +- this.num = num; +- } +- } +- +- static class Bar {} +- +- public static void main(String[] args) throws Exception { +- for (int i = 0; i < ITERATIONS; i++) { +- testConjoint(); +- } +- } +- +- private static void testConjoint() { +- Foo[] array = new Foo[ARRAY_SIZE]; +- for (int i = 0; i < ARRAY_SIZE; i++) { +- array[i] = new Foo(i); +- } +- +- int src_idx = ThreadLocalRandom.current().nextInt(0, ARRAY_SIZE); +- int dst_idx = ThreadLocalRandom.current().nextInt(0, ARRAY_SIZE); +- int len = ThreadLocalRandom.current().nextInt(0, Math.min(ARRAY_SIZE - src_idx, ARRAY_SIZE - dst_idx)); +- System.arraycopy(array, src_idx, array, dst_idx, len); +- +- for (int i = 0; i < ARRAY_SIZE; i++) { +- if (i >= dst_idx && i < dst_idx + len) { +- assertEquals(array[i].num, i - (dst_idx - src_idx)); +- } else { +- assertEquals(array[i].num, i); +- } +- } +- } +- +- private static void assertEquals(int a, int b) { +- if (a != b) throw new RuntimeException("assert failed"); +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestDynamicSoftMaxHeapSize.java afu8u/hotspot/test/gc/shenandoah/TestDynamicSoftMaxHeapSize.java +--- openjdk/hotspot/test/gc/shenandoah/TestDynamicSoftMaxHeapSize.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestDynamicSoftMaxHeapSize.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,130 +0,0 @@ +-/* +- * Copyright (c) 2020, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * -Dtarget=10000 +- * TestDynamicSoftMaxHeapSize +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * -Dtarget=10000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -Dtarget=1000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -Dtarget=10000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * -Dtarget=10000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * -Dtarget=1000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -Dtarget=1000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-/* +- * @test TestDynamicSoftMaxHeapSize +- * @library /testlibrary +- * +- * @run main/othervm -Xms16m -Xmx512m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -Dtarget=10000 +- * TestDynamicSoftMaxHeapSize +- */ +- +-import java.util.Random; +-import com.oracle.java.testlibrary.*; +- +-public class TestDynamicSoftMaxHeapSize { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- static final long STRIDE = 10_000_000; +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- Random r = new Random(); +- +- String pid = Integer.toString(ProcessTools.getProcessId()); +- ProcessBuilder pb = new ProcessBuilder(); +- +- for (long c = 0; c < count; c += STRIDE) { +- // Sizes specifically include heaps below Xms 
and above Xmx to test saturation code. +- pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.set_flag", "ShenandoahSoftMaxHeapSize", "" + r.nextInt(768*1024*1024)}); +- pb.start().waitFor(); +- for (long s = 0; s < STRIDE; s++) { +- sink = new Object(); +- } +- Thread.sleep(1); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestElasticTLAB.java afu8u/hotspot/test/gc/shenandoah/TestElasticTLAB.java +--- openjdk/hotspot/test/gc/shenandoah/TestElasticTLAB.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestElasticTLAB.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,58 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestElasticTLAB +- * @summary Test that Shenandoah is able to work with elastic TLABs +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:-ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:-ShenandoahElasticTLAB TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:+ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:-UseTLAB -XX:+ShenandoahElasticTLAB TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:-ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:-ShenandoahElasticTLAB TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:+ShenandoahElasticTLAB -XX:+ShenandoahVerify TestElasticTLAB +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -XX:+UseTLAB -XX:+ShenandoahElasticTLAB TestElasticTLAB +- */ +- +-import java.util.Random; +- +-public class TestElasticTLAB { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final int min = 0; +- final int max = 384 * 1024; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); +- +- Random r = 
new Random(); +- for (long c = 0; c < count; c++) { +- sink = new int[min + r.nextInt(max - min)]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestEvilSyncBug.java afu8u/hotspot/test/gc/shenandoah/TestEvilSyncBug.java +--- openjdk/hotspot/test/gc/shenandoah/TestEvilSyncBug.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestEvilSyncBug.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,164 +0,0 @@ +-/* +- * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestEvilSyncBug +- * @summary Tests for crash/assert when attaching init thread during shutdown +- * @key gc +- * @library /testlibrary +- * +- * @run driver/timeout=480 TestEvilSyncBug +- */ +- +-import java.util.*; +-import java.util.concurrent.*; +-import java.util.concurrent.locks.*; +- +-import com.oracle.java.testlibrary.*; +- +-public class TestEvilSyncBug { +- +- private static final int NUM_RUNS = 100; +- +- static Thread[] hooks = new MyHook[10000]; +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0) { +- test(); +- } else { +- // Use 1/4 of available processors to avoid over-saturation. +- int numJobs = Math.max(1, Runtime.getRuntime().availableProcessors() / 4); +- ExecutorService pool = Executors.newFixedThreadPool(numJobs); +- Future[] fs = new Future[NUM_RUNS]; +- +- for (int c = 0; c < NUM_RUNS; c++) { +- Callable task = () -> { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xms128m", +- "-Xmx128m", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=aggressive", +- "TestEvilSyncBug", "test"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- return null; +- }; +- fs[c] = pool.submit(task); +- } +- +- for (Future f : fs) { +- f.get(); +- } +- +- pool.shutdown(); +- pool.awaitTermination(1, TimeUnit.HOURS); +- } +- } +- +- private static void test() throws Exception { +- +- for (int t = 0; t < hooks.length; t++) { +- hooks[t] = new MyHook(); +- } +- +- ExecutorService service = Executors.newFixedThreadPool( +- 2, +- r -> { +- Thread t = new Thread(r); +- t.setDaemon(true); +- return t; +- } +- ); +- +- List> futures = new ArrayList<>(); +- for (int c = 0; c < 100; c++) { +- Runtime.getRuntime().addShutdownHook(hooks[c]); +- final Test[] tests = new Test[1000]; +- for (int t = 0; t < tests.length; t++) { +- tests[t] = new Test(); +- } +- +- 
Future f1 = service.submit(() -> { +- Runtime.getRuntime().addShutdownHook(new MyHook()); +- IntResult2 r = new IntResult2(); +- for (Test test : tests) { +- test.RL_Us(r); +- } +- }); +- Future f2 = service.submit(() -> { +- Runtime.getRuntime().addShutdownHook(new MyHook()); +- for (Test test : tests) { +- test.WLI_Us(); +- } +- }); +- +- futures.add(f1); +- futures.add(f2); +- } +- +- for (Future f : futures) { +- f.get(); +- } +- } +- +- public static class IntResult2 { +- int r1, r2; +- } +- +- public static class Test { +- final StampedLock lock = new StampedLock(); +- +- int x, y; +- +- public void RL_Us(IntResult2 r) { +- StampedLock lock = this.lock; +- long stamp = lock.readLock(); +- r.r1 = x; +- r.r2 = y; +- lock.unlock(stamp); +- } +- +- public void WLI_Us() { +- try { +- StampedLock lock = this.lock; +- long stamp = lock.writeLockInterruptibly(); +- x = 1; +- y = 2; +- lock.unlock(stamp); +- } catch (InterruptedException e) { +- throw new RuntimeException(e); +- } +- } +- } +- +- private static class MyHook extends Thread { +- @Override +- public void run() { +- try { +- Thread.sleep(10); +- } catch (Exception e) {} +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestGCThreadGroups.java afu8u/hotspot/test/gc/shenandoah/TestGCThreadGroups.java +--- openjdk/hotspot/test/gc/shenandoah/TestGCThreadGroups.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestGCThreadGroups.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,119 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/** +- * @test TestGCThreadGroups +- * @summary Test Shenandoah GC uses concurrent/parallel threads correctly +- * @key gc +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=1000 +- * TestGCThreadGroups +- */ +- +-/** +- * @test TestGCThreadGroups +- * @summary Test Shenandoah GC uses concurrent/parallel threads correctly +- * @key gc +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=1000 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * -XX:-UseDynamicNumberOfGCThreads +- * -Dtarget=1000 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * -XX:+ForceDynamicNumberOfGCThreads +- * -Dtarget=1000 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=1000 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m 
-XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=1000 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=100 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=100 +- * TestGCThreadGroups +- */ +- +-/** +- * @test TestGCThreadGroups +- * @summary Test Shenandoah GC uses concurrent/parallel threads correctly +- * @key gc +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=1000 +- * TestGCThreadGroups +- * +- * @run main/othervm -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:ConcGCThreads=2 -XX:ParallelGCThreads=4 +- * -Dtarget=1000 +- * TestGCThreadGroups +-*/ +- +-public class TestGCThreadGroups { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation, around 1K cycles to handle +- static final long STRIDE = 100_000; +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for (long c = 0; c < count; c += STRIDE) { +- for (long s = 0; s < STRIDE; s++) { +- sink = new Object(); +- } +- Thread.sleep(1); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestHeapUncommit.java afu8u/hotspot/test/gc/shenandoah/TestHeapUncommit.java 
+--- openjdk/hotspot/test/gc/shenandoah/TestHeapUncommit.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestHeapUncommit.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,140 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestHeapUncommit +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestHeapUncommit +- */ +- +-/* +- * @test TestHeapUncommit +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit 
-XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestHeapUncommit +- */ +- +-/* +- * @test TestHeapUncommit +- * @summary Acceptance tests: collector can withstand allocation +- * @key gc +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestHeapUncommit +- */ +- +-/* +- * @test TestHeapUncommit +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages +- * -XX:+UseShenandoahGC +- * 
-XX:+ShenandoahVerify +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages +- * -XX:+UseShenandoahGC +- * TestHeapUncommit +- * +- * @run main/othervm -Xmx1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahUncommit -XX:ShenandoahUncommitDelay=0 -XX:+UseLargePages +- * -XX:+UseShenandoahGC +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestHeapUncommit +- */ +- +-import java.util.Random; +- +-public class TestHeapUncommit { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final int min = 0; +- final int max = 384 * 1024; +- long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2)); +- +- Random r = new Random(); +- for (long c = 0; c < count; c++) { +- sink = new int[min + r.nextInt(max - min)]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestHumongousThreshold.java afu8u/hotspot/test/gc/shenandoah/TestHumongousThreshold.java +--- openjdk/hotspot/test/gc/shenandoah/TestHumongousThreshold.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestHumongousThreshold.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,125 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestHumongousThreshold +- * @key gc +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:+ShenandoahVerify +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 +- * TestHumongousThreshold +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 +- * 
TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 +- * TestHumongousThreshold +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:ShenandoahHumongousThreshold=90 -XX:ShenandoahGCHeuristics=aggressive +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:ShenandoahHumongousThreshold=90 -XX:ShenandoahGCHeuristics=aggressive +- * TestHumongousThreshold +- */ +- +-/* +- * @test TestHumongousThreshold +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 +- * TestHumongousThreshold +- * +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=50 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=90 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=99 +- * TestHumongousThreshold +- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g +- * -XX:-UseTLAB -XX:ObjectAlignmentInBytes=16 -XX:+ShenandoahVerify -XX:ShenandoahHumongousThreshold=100 +- * TestHumongousThreshold +- */ +- +-import java.util.Random; +- +-public class TestHumongousThreshold { +- +- static final long TARGET_MB = Long.getLong("target", 20_000); // 20 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- final int min = 0; +- final int max = 384 * 1024; +- long count = TARGET_MB * 1024 * 1024 / (16 
+ 4 * (min + (max - min) / 2)); +- +- Random r = new Random(); +- for (long c = 0; c < count; c++) { +- sink = new int[min + r.nextInt(max - min)]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestLargeObjectAlignment.java afu8u/hotspot/test/gc/shenandoah/TestLargeObjectAlignment.java +--- openjdk/hotspot/test/gc/shenandoah/TestLargeObjectAlignment.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestLargeObjectAlignment.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,65 +0,0 @@ +-/* +- * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestLargeObjectAlignment +- * @summary Shenandoah crashes with -XX:ObjectAlignmentInBytes=16 +- * @key gc +- * @requires (vm.bits == "64") +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -Xint TestLargeObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:-TieredCompilation TestLargeObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=1 TestLargeObjectAlignment +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ObjectAlignmentInBytes=16 -XX:TieredStopAtLevel=4 TestLargeObjectAlignment +- */ +- +-import java.util.ArrayList; +-import java.util.List; +-import java.util.concurrent.ThreadLocalRandom; +- +-public class TestLargeObjectAlignment { +- +- static final int SLABS_COUNT = Integer.getInteger("slabs", 10000); +- static final int NODE_COUNT = Integer.getInteger("nodes", 10000); +- static final long TIME_NS = 1000L * 1000L * Integer.getInteger("timeMs", 5000); +- +- static Object[] objects; +- +- public static void main(String[] args) throws Exception { +- objects = new Object[SLABS_COUNT]; +- +- long start = System.nanoTime(); +- while (System.nanoTime() - start < TIME_NS) { +- objects[ThreadLocalRandom.current().nextInt(SLABS_COUNT)] = createSome(); +- } +- } +- +- public static Object createSome() { +- List result = new ArrayList(); +- for (int c = 0; c < NODE_COUNT; c++) { +- result.add(new Integer(c)); +- } +- return result; +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestLotsOfCycles.java afu8u/hotspot/test/gc/shenandoah/TestLotsOfCycles.java +--- openjdk/hotspot/test/gc/shenandoah/TestLotsOfCycles.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestLotsOfCycles.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,142 +0,0 @@ +-/* +- * 
Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * -Dtarget=10000 +- * TestLotsOfCycles +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * -Dtarget=10000 +- * TestLotsOfCycles +- */ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * -Dtarget=1000 +- * TestLotsOfCycles +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC 
-XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * -Dtarget=1000 +- * TestLotsOfCycles +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -Dtarget=1000 +- * TestLotsOfCycles +- */ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -Dtarget=10000 +- * TestLotsOfCycles +- */ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * -Dtarget=10000 +- * TestLotsOfCycles +- */ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * -Dtarget=1000 +- * TestLotsOfCycles +- */ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * -Dtarget=1000 +- * TestLotsOfCycles +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * -Dtarget=1000 +- * TestLotsOfCycles +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -Dtarget=1000 +- * TestLotsOfCycles +- 
*/ +- +-/* +- * @test TestLotsOfCycles +- * @key gc +- * +- * @run main/othervm/timeout=480 -Xmx16m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -Dtarget=10000 +- * TestLotsOfCycles +- */ +- +-public class TestLotsOfCycles { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation, around 1K cycles to handle +- static final long STRIDE = 100_000; +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for (long c = 0; c < count; c += STRIDE) { +- for (long s = 0; s < STRIDE; s++) { +- sink = new Object(); +- } +- Thread.sleep(1); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestObjItrWithHeapDump.java afu8u/hotspot/test/gc/shenandoah/TestObjItrWithHeapDump.java +--- openjdk/hotspot/test/gc/shenandoah/TestObjItrWithHeapDump.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestObjItrWithHeapDump.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,80 +0,0 @@ +-/* +- * Copyright (c) 2019, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestObjIterWithHeapDump +- * @summary Test heap dump triggered heap object iteration +- * @key gc +- * @bug 8225014 +- * @library /testlibrary +- * @run main/othervm TestObjItrWithHeapDump +- */ +- +-import java.util.*; +-import com.oracle.java.testlibrary.*; +- +-public class TestObjItrWithHeapDump { +- public static void testWith(String... args) throws Exception { +- String[] cmds = Arrays.copyOf(args, args.length + 2); +- cmds[args.length] = TestObjItrWithHeapDump.class.getName(); +- cmds[args.length + 1] = "test"; +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds); +- +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- output.shouldContain("Class Histogram (before full gc)"); +- output.shouldContain("Class Histogram (after full gc)"); +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0 && args[0].equals("test")) { +- System.gc(); +- System.exit(0); +- } +- +- String[][][] modeHeuristics = new String[][][]{ +- {{"satb"}, {"adaptive", "compact", "static", "aggressive"}}, +- {{"iu"}, {"adaptive", "aggressive"}}, +- {{"passive"}, {"passive"}} +- }; +- +- for (String[][] mh : modeHeuristics) { +- String mode = mh[0][0]; +- String[] heuristics = mh[1]; +- for (String h : heuristics) { +- testWith("-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:-ShenandoahDegeneratedGC", +- "-XX:ShenandoahGCMode=" + mode, +- "-XX:ShenandoahGCHeuristics=" + h, +- "-XX:+PrintClassHistogramBeforeFullGC", +- "-XX:+PrintClassHistogramAfterFullGC", +- "-XX:+PrintGCDetails", +- "-XX:-ExplicitGCInvokesConcurrent", +- "-Xmx512M" +- ); +- } +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestParallelRefprocSanity.java 
afu8u/hotspot/test/gc/shenandoah/TestParallelRefprocSanity.java +--- openjdk/hotspot/test/gc/shenandoah/TestParallelRefprocSanity.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestParallelRefprocSanity.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,49 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestParallelRefprocSanity +- * @summary Test that reference processing works with both parallel and non-parallel variants. 
+- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g TestParallelRefprocSanity +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:-ParallelRefProcEnabled TestParallelRefprocSanity +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx1g -Xms1g -XX:+ParallelRefProcEnabled TestParallelRefprocSanity +- */ +- +-import java.lang.ref.*; +- +-public class TestParallelRefprocSanity { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 32; +- for (long c = 0; c < count; c++) { +- sink = new WeakReference(new Object()); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestPeriodicGC.java afu8u/hotspot/test/gc/shenandoah/TestPeriodicGC.java +--- openjdk/hotspot/test/gc/shenandoah/TestPeriodicGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestPeriodicGC.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,160 +0,0 @@ +-/* +- * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestPeriodicGC +- * @summary Test that periodic GC is working +- * @key gc +- * @library /testlibrary +- * +- * @run driver TestPeriodicGC +- */ +- +-import java.util.*; +- +-import com.oracle.java.testlibrary.*; +- +-public class TestPeriodicGC { +- +- public static void testWith(String msg, boolean periodic, String... args) throws Exception { +- String[] cmds = Arrays.copyOf(args, args.length + 2); +- cmds[args.length] = TestPeriodicGC.class.getName(); +- cmds[args.length + 1] = "test"; +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(cmds); +- +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldHaveExitValue(0); +- if (periodic && !output.getOutput().contains("Trigger: Time since last GC")) { +- throw new AssertionError(msg + ": Should have periodic GC in logs"); +- } +- if (!periodic && output.getOutput().contains("Trigger: Time since last GC")) { +- throw new AssertionError(msg + ": Should not have periodic GC in logs"); +- } +- } +- +- public static void main(String[] args) throws Exception { +- if (args.length > 0 && args[0].equals("test")) { +- Thread.sleep(5000); // stay idle +- return; +- } +- +- String[] enabled = new String[] { +- "adaptive", +- "compact", +- "static" +- }; +- +- for (String h : enabled) { +- testWith("Zero interval with " + h, +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=" + h, +- "-XX:ShenandoahGuaranteedGCInterval=0" +- ); +- +- testWith("Short interval with " + 
h, +- true, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=" + h, +- "-XX:ShenandoahGuaranteedGCInterval=1000" +- ); +- +- testWith("Long interval with " + h, +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=" + h, +- "-XX:ShenandoahGuaranteedGCInterval=100000" // deliberately too long +- ); +- } +- +- testWith("Zero interval with iu mode", +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCMode=iu", +- "-XX:ShenandoahGuaranteedGCInterval=0" +- ); +- +- testWith("Short interval with iu mode", +- true, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCMode=iu", +- "-XX:ShenandoahGuaranteedGCInterval=1000" +- ); +- +- testWith("Long interval with iu mode", +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCMode=iu", +- "-XX:ShenandoahGuaranteedGCInterval=100000" // deliberately too long +- ); +- +- testWith("Short interval with aggressive", +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCHeuristics=aggressive", +- "-XX:ShenandoahGuaranteedGCInterval=1000" +- ); +- +- testWith("Zero interval with passive", +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- "-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCMode=passive", +- "-XX:ShenandoahGuaranteedGCInterval=0" +- ); +- +- testWith("Short interval with passive", +- false, +- "-verbose:gc", +- "-XX:+UnlockDiagnosticVMOptions", +- "-XX:+UnlockExperimentalVMOptions", +- 
"-XX:+UseShenandoahGC", +- "-XX:ShenandoahGCMode=passive", +- "-XX:ShenandoahGuaranteedGCInterval=1000" +- ); +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestRefprocSanity.java afu8u/hotspot/test/gc/shenandoah/TestRefprocSanity.java +--- openjdk/hotspot/test/gc/shenandoah/TestRefprocSanity.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestRefprocSanity.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,125 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestRefprocSanity +- * @summary Test that null references/referents work fine +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * -XX:+ShenandoahVerify +- * TestRefprocSanity +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * TestRefprocSanity +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestRefprocSanity +- */ +- +-/* +- * @test TestRefprocSanity +- * @summary Test that null references/referents work fine +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestRefprocSanity +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestRefprocSanity +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestRefprocSanity +- */ +- +-import java.lang.ref.*; +- +-public class TestRefprocSanity { +- +- static final long TARGET_MB = Long.getLong("target", 10_000); // 10 Gb allocation +- static final int WINDOW = 10_000; +- +- static final Reference[] refs = new Reference[WINDOW]; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 32; +- int rIdx = 0; +- +- ReferenceQueue rq = new ReferenceQueue(); +- +- for (int c = 0; c < WINDOW; c++) { +- refs[c] = select(c, new MyObject(c), rq); +- } +- +- for (int c = 0; c < count; c++) { +- verifyRefAt(rIdx); +- refs[rIdx] = select(c, new 
MyObject(rIdx), rq); +- +- rIdx++; +- if (rIdx >= WINDOW) { +- rIdx = 0; +- } +- while (rq.poll() != null); // drain +- } +- } +- +- static Reference select(int v, MyObject ext, ReferenceQueue rq) { +- switch (v % 10) { +- case 0: return new SoftReference(null); +- case 1: return new SoftReference(null, rq); +- case 2: return new SoftReference(ext); +- case 3: return new SoftReference(ext, rq); +- case 4: return new WeakReference(null); +- case 5: return new WeakReference(null, rq); +- case 6: return new WeakReference(ext); +- case 7: return new WeakReference(ext, rq); +- case 8: return new PhantomReference(null, rq); +- case 9: return new PhantomReference(ext, rq); +- default: throw new IllegalStateException(); +- } +- } +- +- static void verifyRefAt(int idx) { +- Reference ref = refs[idx]; +- MyObject mo = ref.get(); +- if (mo != null && mo.x != idx) { +- throw new IllegalStateException("Referent tag is incorrect: " + mo.x + ", should be " + idx); +- } +- } +- +- static class MyObject { +- final int x; +- +- public MyObject(int x) { +- this.x = x; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestRegionSampling.java afu8u/hotspot/test/gc/shenandoah/TestRegionSampling.java +--- openjdk/hotspot/test/gc/shenandoah/TestRegionSampling.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestRegionSampling.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,107 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestRegionSampling +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestRegionSampling +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestRegionSampling +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestRegionSampling +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestRegionSampling +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * 
+- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestRegionSampling +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestRegionSampling +- */ +- +-/* +- * @test TestRegionSampling +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ShenandoahRegionSampling +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestRegionSampling +- * +- */ +- +-public class TestRegionSampling { +- +- static final long TARGET_MB = Long.getLong("target", 2_000); // 2 Gb allocation +- +- static volatile Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for (long c = 0; c < count; c++) { +- sink = new Object(); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestRetainObjects.java afu8u/hotspot/test/gc/shenandoah/TestRetainObjects.java +--- openjdk/hotspot/test/gc/shenandoah/TestRetainObjects.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestRetainObjects.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,169 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g 
-XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- 
* @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestRetainObjects +- */ +- +-/* +- * @test TestRetainObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestRetainObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestRetainObjects +- */ +- +-public class TestRetainObjects { +- +- static final int COUNT = 10_000_000; +- static final int WINDOW = 10_000; +- +- static final String[] reachable = new String[WINDOW]; +- +- public static void main(String[] args) throws Exception { +- int rIdx = 0; +- for (int c = 0; c < COUNT; c++) { +- reachable[rIdx] = ("LargeString" + c); +- rIdx++; +- if (rIdx >= WINDOW) { +- rIdx = 0; +- } +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestSieveObjects.java afu8u/hotspot/test/gc/shenandoah/TestSieveObjects.java +--- openjdk/hotspot/test/gc/shenandoah/TestSieveObjects.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestSieveObjects.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,195 +0,0 
@@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestSieveObjects +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestSieveObjects +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g 
-XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestSieveObjects +- * +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=static +- * TestSieveObjects +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestSieveObjects +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm/timeout=240 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC +- * -XX:-UseTLAB -XX:+ShenandoahVerify +- * TestSieveObjects +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahAllocFailureALot +- * TestSieveObjects +- * +- * @run main/othervm 
-Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestSieveObjects +- */ +- +-/* +- * @test TestSieveObjects +- * @summary Acceptance tests: collector can deal with retained objects +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestSieveObjects +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestSieveObjects +- */ +- +-import java.util.concurrent.ThreadLocalRandom; +- +-public class TestSieveObjects { +- +- static final int COUNT = 100_000_000; +- static final int WINDOW = 1_000_000; +- static final int PAYLOAD = 100; +- +- static final MyObject[] arr = new MyObject[WINDOW]; +- +- public static void main(String[] args) throws Exception { +- int rIdx = 0; +- for (int c = 0; c < COUNT; c++) { +- MyObject v = arr[rIdx]; +- if (v != null) { +- if (v.x != rIdx) { +- throw new IllegalStateException("Illegal value at index " + rIdx + ": " + v.x); +- } +- if (ThreadLocalRandom.current().nextInt(1000) > 100) { +- arr[rIdx] = null; +- } +- } else { +- if (ThreadLocalRandom.current().nextInt(1000) > 500) { +- arr[rIdx] = new MyObject(rIdx); +- } +- } +- rIdx++; +- if (rIdx >= WINDOW) { +- rIdx = 0; +- } +- } +- } +- +- public static class MyObject { +- public int x; +- public byte[] payload; +- +- public MyObject(int x) { +- this.x = x; +- this.payload = new byte[PAYLOAD]; +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestSmallHeap.java afu8u/hotspot/test/gc/shenandoah/TestSmallHeap.java +--- openjdk/hotspot/test/gc/shenandoah/TestSmallHeap.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestSmallHeap.java 1970-01-01 08:00:00.000000000 +0800 +@@ 
-1,42 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestSmallHeap +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestSmallHeap +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx64m TestSmallHeap +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx32m TestSmallHeap +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx16m TestSmallHeap +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx8m TestSmallHeap +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xmx4m TestSmallHeap +- */ +- +-public class TestSmallHeap { +- +- public static void main(String[] args) throws Exception { +- System.out.println("Hello World!"); +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestStringDedup.java afu8u/hotspot/test/gc/shenandoah/TestStringDedup.java +--- openjdk/hotspot/test/gc/shenandoah/TestStringDedup.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestStringDedup.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,176 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +- /* +- * @test TestStringDedup +- * @summary Test Shenandoah string deduplication implementation +- * @key gc +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestStringDedup +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestStringDedup +- */ +- +-/* +- * @test TestStringDedup +- * @summary Test Shenandoah string deduplication implementation +- * @key gc +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestStringDedup +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestStringDedup +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC +- * TestStringDedup +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestStringDedup +- */ +- +-/* +- * @test TestStringDedup +- * @summary Test Shenandoah string deduplication implementation +- * @key gc +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions 
-XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestStringDedup +- * +- * @run main/othervm -Xmx256m -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * TestStringDedup +- */ +- +-import java.lang.reflect.*; +-import java.util.*; +- +-import sun.misc.*; +- +-public class TestStringDedup { +- private static Field valueField; +- private static Unsafe unsafe; +- +- private static final int UniqueStrings = 20; +- +- static { +- try { +- Field field = Unsafe.class.getDeclaredField("theUnsafe"); +- field.setAccessible(true); +- unsafe = (Unsafe) field.get(null); +- +- valueField = String.class.getDeclaredField("value"); +- valueField.setAccessible(true); +- } catch (Exception e) { +- throw new RuntimeException(e); +- } +- } +- +- private static Object getValue(String string) { +- try { +- return valueField.get(string); +- } catch (Exception e) { +- throw new RuntimeException(e); +- } +- } +- +- static class StringAndId { +- private String str; +- private int id; +- +- public StringAndId(String str, int id) { +- this.str = str; +- this.id = id; +- } +- +- public String str() { +- return str; +- } +- +- public int id() { +- return id; +- } +- } +- +- private static void generateStrings(ArrayList strs, int unique_strs) { +- Random rn = new Random(); +- for (int u = 0; u < unique_strs; u++) { +- int n = rn.nextInt() % 10; +- n = Math.max(n, 2); +- for (int index = 0; index < n; index++) { +- strs.add(new StringAndId("Unique String " + u, u)); +- } +- } +- } +- +- private static int verifyDedepString(ArrayList strs) { +- HashMap seen = new HashMap<>(); +- int total = 0; +- int dedup = 0; +- +- for (StringAndId item : strs) { +- total++; +- StringAndId existing_item = seen.get(getValue(item.str())); +- if (existing_item == null) { +- seen.put(getValue(item.str()), 
item); +- } else { +- if (item.id() != existing_item.id() || +- !item.str().equals(existing_item.str())) { +- System.out.println("StringDedup error:"); +- System.out.println("String: " + item.str() + " != " + existing_item.str()); +- throw new RuntimeException("StringDedup Test failed"); +- } else { +- dedup++; +- } +- } +- } +- System.out.println("Dedup: " + dedup + "/" + total + " unique: " + (total - dedup)); +- return (total - dedup); +- } +- +- public static void main(String[] args) { +- ArrayList astrs = new ArrayList<>(); +- generateStrings(astrs, UniqueStrings); +- System.gc(); +- System.gc(); +- System.gc(); +- System.gc(); +- System.gc(); +- +- if (verifyDedepString(astrs) != UniqueStrings) { +- // Can not guarantee all strings are deduplicated, there can +- // still have pending items in queues. +- System.out.println("Not all strings are deduplicated"); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestStringDedupStress.java afu8u/hotspot/test/gc/shenandoah/TestStringDedupStress.java +--- openjdk/hotspot/test/gc/shenandoah/TestStringDedupStress.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestStringDedupStress.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,232 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestStringDedupStress +- * @summary Test Shenandoah string deduplication implementation +- * @key gc +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestStringDedupStress +- */ +- +-/* +- * @test TestStringDedupStress +- * @summary Test Shenandoah string deduplication implementation +- * @key gc +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC +- * -DtargetStrings=3000000 +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -DtargetStrings=2000000 +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * -DtargetStrings=2000000 +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc 
-XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestStringDedupStress +- */ +- +- /* +- * @test TestStringDedupStress +- * @summary Test Shenandoah string deduplication implementation +- * @key gc +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -DtargetStrings=2000000 +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahOOMDuringEvacALot +- * -DtargetStrings=2000000 +- * TestStringDedupStress +- * +- * @run main/othervm -Xmx1g -verbose:gc -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseStringDeduplication +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahOOMDuringEvacALot +- * -DtargetStrings=2000000 +- * TestStringDedupStress +- */ +- +-import java.lang.management.*; +-import java.lang.reflect.*; +-import java.util.*; +- +-import sun.misc.*; +- +-public class TestStringDedupStress { +- private static Field valueField; +- private static Unsafe unsafe; +- +- private static final int TARGET_STRINGS = Integer.getInteger("targetStrings", 2_500_000); +- private static final long MAX_REWRITE_GC_CYCLES = 6; +- private static final long MAX_REWRITE_TIME = 30*1000; // ms +- +- private static final int UNIQUE_STRINGS = 20; +- +- static { +- try { +- Field field = Unsafe.class.getDeclaredField("theUnsafe"); +- 
field.setAccessible(true); +- unsafe = (Unsafe) field.get(null); +- +- valueField = String.class.getDeclaredField("value"); +- valueField.setAccessible(true); +- } catch (Exception e) { +- throw new RuntimeException(e); +- } +- } +- +- private static Object getValue(String string) { +- try { +- return valueField.get(string); +- } catch (Exception e) { +- throw new RuntimeException(e); +- } +- } +- +- static class StringAndId { +- private String str; +- private int id; +- +- public StringAndId(String str, int id) { +- this.str = str; +- this.id = id; +- } +- +- public String str() { +- return str; +- } +- +- public int id() { +- return id; +- } +- } +- +- // Generate uniqueStrings number of strings +- private static void generateStrings(ArrayList strs, int uniqueStrings) { +- Random rn = new Random(); +- for (int u = 0; u < uniqueStrings; u++) { +- int n = rn.nextInt(uniqueStrings); +- strs.add(new StringAndId("Unique String " + n, n)); +- } +- } +- +- private static int verifyDedupString(ArrayList strs) { +- Map seen = new HashMap<>(TARGET_STRINGS*2); +- int total = 0; +- int dedup = 0; +- +- for (StringAndId item : strs) { +- total++; +- StringAndId existingItem = seen.get(getValue(item.str())); +- if (existingItem == null) { +- seen.put(getValue(item.str()), item); +- } else { +- if (item.id() != existingItem.id() || +- !item.str().equals(existingItem.str())) { +- System.out.println("StringDedup error:"); +- System.out.println("id: " + item.id() + " != " + existingItem.id()); +- System.out.println("or String: " + item.str() + " != " + existingItem.str()); +- throw new RuntimeException("StringDedup Test failed"); +- } else { +- dedup++; +- } +- } +- } +- System.out.println("Dedup: " + dedup + "/" + total + " unique: " + (total - dedup)); +- return (total - dedup); +- } +- +- static volatile ArrayList astrs = new ArrayList<>(); +- static GarbageCollectorMXBean gcCycleMBean; +- +- public static void main(String[] args) { +- Random rn = new Random(); +- +- for 
(GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) { +- if ("Shenandoah Cycles".equals(bean.getName())) { +- gcCycleMBean = bean; +- break; +- } +- } +- +- if (gcCycleMBean == null) { +- throw new RuntimeException("Can not find Shenandoah GC cycle mbean"); +- } +- +- // Generate roughly TARGET_STRINGS strings, only UNIQUE_STRINGS are unique +- int genIters = TARGET_STRINGS / UNIQUE_STRINGS; +- for (int index = 0; index < genIters; index++) { +- generateStrings(astrs, UNIQUE_STRINGS); +- } +- +- long cycleBeforeRewrite = gcCycleMBean.getCollectionCount(); +- long timeBeforeRewrite = System.currentTimeMillis(); +- +- long loop = 1; +- while (true) { +- int arrSize = astrs.size(); +- int index = rn.nextInt(arrSize); +- StringAndId item = astrs.get(index); +- int n = rn.nextInt(UNIQUE_STRINGS); +- item.str = "Unique String " + n; +- item.id = n; +- +- if (loop++ % 1000 == 0) { +- // enough GC cycles for rewritten strings to be deduplicated +- if (gcCycleMBean.getCollectionCount() - cycleBeforeRewrite >= MAX_REWRITE_GC_CYCLES) { +- break; +- } +- +- // enough time is spent waiting for GC to happen +- if (System.currentTimeMillis() - timeBeforeRewrite >= MAX_REWRITE_TIME) { +- break; +- } +- } +- } +- verifyDedupString(astrs); +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestStringInternCleanup.java afu8u/hotspot/test/gc/shenandoah/TestStringInternCleanup.java +--- openjdk/hotspot/test/gc/shenandoah/TestStringInternCleanup.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestStringInternCleanup.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,115 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestStringInternCleanup +- * @summary Check that Shenandoah cleans up interned strings +- * @key gc +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC +- * TestStringInternCleanup +- */ +- +-/* +- * @test TestStringInternCleanup +- * @summary 
Check that Shenandoah cleans up interned strings +- * @key gc +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=aggressive +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:-ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC +- * TestStringInternCleanup +- */ +- +-/* +- * @test TestStringInternCleanup +- * @summary Check that Shenandoah cleans up interned strings +- * @key gc +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=aggressive +- * -XX:+ShenandoahVerify +- * TestStringInternCleanup +- * +- * @run main/othervm -Xmx64m -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+ClassUnloadingWithConcurrentMark +- * 
-XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * TestStringInternCleanup +- */ +- +-public class TestStringInternCleanup { +- +- static final int COUNT = 1_000_000; +- static final int WINDOW = 1_000; +- +- static final String[] reachable = new String[WINDOW]; +- +- public static void main(String[] args) throws Exception { +- int rIdx = 0; +- for (int c = 0; c < COUNT; c++) { +- reachable[rIdx] = ("LargeInternedString" + c).intern(); +- rIdx++; +- if (rIdx >= WINDOW) { +- rIdx = 0; +- } +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestVerifyJCStress.java afu8u/hotspot/test/gc/shenandoah/TestVerifyJCStress.java +--- openjdk/hotspot/test/gc/shenandoah/TestVerifyJCStress.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestVerifyJCStress.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,136 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +-/* +- * @test TestVerifyJCStress +- * @summary Tests that we pass at least one jcstress-like test with all verification turned on +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:+ShenandoahDegeneratedGC -XX:+ShenandoahVerify -XX:+VerifyObjectEquals +- * TestVerifyJCStress +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive +- * -XX:-ShenandoahDegeneratedGC -XX:+ShenandoahVerify -XX:+VerifyObjectEquals +- * TestVerifyJCStress +- */ +- +-/* +- * @test TestVerifyJCStress +- * @summary Tests that we pass at least one jcstress-like test with all verification turned on +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive +- * -XX:+ShenandoahVerify -XX:+VerifyObjectEquals -XX:+ShenandoahVerifyOptoBarriers +- * TestVerifyJCStress +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=compact +- * -XX:+ShenandoahVerify -XX:+VerifyObjectEquals -XX:+ShenandoahVerifyOptoBarriers +- * TestVerifyJCStress +- */ +- +-/* +- * @test TestVerifyJCStress +- * @summary Tests that we pass at least one jcstress-like test with all verification turned on +- * @key gc +- * +- * @run main/othervm -Xmx1g -Xms1g -XX:+UnlockExperimentalVMOptions -XX:+UnlockDiagnosticVMOptions +- * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu +- * -XX:+ShenandoahVerify -XX:+IgnoreUnrecognizedVMOptions -XX:+ShenandoahVerifyOptoBarriers +- * TestVerifyJCStress +- */ +- +-import java.util.*; +-import 
java.util.concurrent.*; +-import java.util.concurrent.locks.*; +- +-public class TestVerifyJCStress { +- +- public static void main(String[] args) throws Exception { +- ExecutorService service = Executors.newFixedThreadPool( +- 2, +- r -> { +- Thread t = new Thread(r); +- t.setDaemon(true); +- return t; +- } +- ); +- +- for (int c = 0; c < 10000; c++) { +- final Test[] tests = new Test[10000]; +- for (int t = 0; t < tests.length; t++) { +- tests[t] = new Test(); +- } +- +- Future f1 = service.submit(() -> { +- IntResult2 r = new IntResult2(); +- for (Test test : tests) { +- test.RL_Us(r); +- } +- }); +- Future f2 = service.submit(() -> { +- for (Test test : tests) { +- test.WLI_Us(); +- } +- }); +- +- f1.get(); +- f2.get(); +- } +- } +- +- public static class IntResult2 { +- int r1, r2; +- } +- +- public static class Test { +- final StampedLock lock = new StampedLock(); +- +- int x, y; +- +- public void RL_Us(IntResult2 r) { +- StampedLock lock = this.lock; +- long stamp = lock.readLock(); +- r.r1 = x; +- r.r2 = y; +- lock.unlock(stamp); +- } +- +- public void WLI_Us() { +- try { +- StampedLock lock = this.lock; +- long stamp = lock.writeLockInterruptibly(); +- x = 1; +- y = 2; +- lock.unlock(stamp); +- } catch (InterruptedException e) { +- throw new RuntimeException(e); +- } +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestVerifyLevels.java afu8u/hotspot/test/gc/shenandoah/TestVerifyLevels.java +--- openjdk/hotspot/test/gc/shenandoah/TestVerifyLevels.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestVerifyLevels.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,48 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. 
+- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestVerifyLevels +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=0 TestVerifyLevels +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=1 TestVerifyLevels +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=2 TestVerifyLevels +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=3 TestVerifyLevels +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+UnlockDiagnosticVMOptions -Xmx128m -XX:+ShenandoahVerify -XX:ShenandoahVerifyLevel=4 TestVerifyLevels +- */ +- +-public class TestVerifyLevels { +- +- static final long TARGET_MB = Long.getLong("target", 1_000); // 1 Gb allocation +- +- static Object sink; +- +- public static void main(String[] args) throws Exception { +- long count = TARGET_MB * 1024 * 1024 / 16; +- for 
(long c = 0; c < count; c++) { +- sink = new Object(); +- } +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestWithLogLevel.java afu8u/hotspot/test/gc/shenandoah/TestWithLogLevel.java +--- openjdk/hotspot/test/gc/shenandoah/TestWithLogLevel.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestWithLogLevel.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,46 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. 
+- * +- */ +- +- /* +- * @test TestWithLogLevel +- * @summary Test Shenandoah with different log levels +- * @key gc +- * +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -XX:+ShenandoahLogWarning TestWithLogLevel +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -XX:+ShenandoahLogInfo TestWithLogLevel +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -XX:+ShenandoahLogDebug TestWithLogLevel +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xms256M -Xmx1G -XX:+ShenandoahLogTrace TestWithLogLevel +- */ +- +-import java.util.*; +- +-public class TestWithLogLevel { +- public static void main(String[] args) { +- ArrayList list = new ArrayList<>(); +- long count = 300 * 1024 * 1024 / 16; // 300MB allocation +- for (long index = 0; index < count; index++) { +- Object sink = new Object(); +- list.add(sink); +- } +- } +-} +diff -uNr openjdk/hotspot/test/gc/shenandoah/TestWrongArrayMember.java afu8u/hotspot/test/gc/shenandoah/TestWrongArrayMember.java +--- openjdk/hotspot/test/gc/shenandoah/TestWrongArrayMember.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/shenandoah/TestWrongArrayMember.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,56 +0,0 @@ +-/* +- * Copyright (c) 2018, Red Hat, Inc. All rights reserved. +- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). 
+- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- * +- */ +- +-/* +- * @test TestWrongArrayMember +- * @key gc +- * +- * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestWrongArrayMember +- * @run main/othervm -Xmx128m -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu TestWrongArrayMember +- */ +- +-public class TestWrongArrayMember { +- public static void main(String... args) throws Exception { +- Object[] src = new Object[3]; +- src[0] = new Integer(0); +- src[1] = new Object(); +- src[2] = new Object(); +- Object[] dst = new Integer[3]; +- dst[0] = new Integer(1); +- dst[1] = new Integer(2); +- dst[2] = new Integer(3); +- try { +- System.arraycopy(src, 0, dst, 0, 3); +- throw new RuntimeException("Expected ArrayStoreException"); +- } catch (ArrayStoreException e) { +- if (src[0] != dst[0]) { +- throw new RuntimeException("First element not copied"); +- } else if (src[1] == dst[1] || src[2] == dst[2]) { +- throw new RuntimeException("Second and third elements are affected"); +- } else { +- return; // Passed! +- } +- } +- } +-} +- +diff -uNr openjdk/hotspot/test/gc/startup_warnings/TestShenandoah.java afu8u/hotspot/test/gc/startup_warnings/TestShenandoah.java +--- openjdk/hotspot/test/gc/startup_warnings/TestShenandoah.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/startup_warnings/TestShenandoah.java 1970-01-01 08:00:00.000000000 +0800 +@@ -1,45 +0,0 @@ +-/* +- * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved. +- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+- * +- * This code is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License version 2 only, as +- * published by the Free Software Foundation. +- * +- * This code is distributed in the hope that it will be useful, but WITHOUT +- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +- * version 2 for more details (a copy is included in the LICENSE file that +- * accompanied this code). +- * +- * You should have received a copy of the GNU General Public License version +- * 2 along with this work; if not, write to the Free Software Foundation, +- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +- * +- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +- * or visit www.oracle.com if you need additional information or have any +- * questions. +- */ +- +-/* +-* @test TestShenandoah +-* @key gc +-* @bug 8006398 +-* @summary Test that the Shenandoah collector does not print a warning message +-* @library /testlibrary +-*/ +- +-import com.oracle.java.testlibrary.OutputAnalyzer; +-import com.oracle.java.testlibrary.ProcessTools; +- +-public class TestShenandoah { +- +- public static void main(String args[]) throws Exception { +- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockExperimentalVMOptions", "-XX:+UseShenandoahGC", "-version"); +- OutputAnalyzer output = new OutputAnalyzer(pb.start()); +- output.shouldNotContain("deprecated"); +- output.shouldNotContain("error"); +- output.shouldHaveExitValue(0); +- } +- +-} +diff -uNr openjdk/hotspot/test/gc/survivorAlignment/TestAllocationInEden.java afu8u/hotspot/test/gc/survivorAlignment/TestAllocationInEden.java +--- openjdk/hotspot/test/gc/survivorAlignment/TestAllocationInEden.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/survivorAlignment/TestAllocationInEden.java 2025-05-06 10:53:45.235633677 
+0800 +@@ -26,7 +26,6 @@ + * @bug 8031323 + * @summary Verify that object's alignment in eden space is not affected by + * SurvivorAlignmentInBytes option. +- * @requires vm.gc != "Shenandoah" + * @library /testlibrary /testlibrary/whitebox + * @build TestAllocationInEden SurvivorAlignmentTestMain AlignmentHelper + * @run main ClassFileInstaller sun.hotspot.WhiteBox +diff -uNr openjdk/hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java afu8u/hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java +--- openjdk/hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java 2025-05-06 10:53:45.235633677 +0800 +@@ -26,7 +26,6 @@ + * @bug 8031323 + * @summary Verify that objects promoted from eden space to tenured space during + * full GC are not aligned to SurvivorAlignmentInBytes value. +- * @requires vm.gc != "Shenandoah" + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionFromEdenToTenured SurvivorAlignmentTestMain + * AlignmentHelper +diff -uNr openjdk/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java afu8u/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java +--- openjdk/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java 2025-05-06 10:53:45.235633677 +0800 +@@ -26,7 +26,6 @@ + * @bug 8031323 + * @summary Verify that objects promoted from survivor space to tenured space + * during full GC are not aligned to SurvivorAlignmentInBytes value. 
+- * @requires vm.gc != "Shenandoah" + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionFromSurvivorToTenuredAfterFullGC + * SurvivorAlignmentTestMain AlignmentHelper +diff -uNr openjdk/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java afu8u/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java +--- openjdk/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 2025-05-06 10:53:45.235633677 +0800 +@@ -27,7 +27,6 @@ + * @summary Verify that objects promoted from survivor space to tenured space + * when their age exceeded tenuring threshold are not aligned to + * SurvivorAlignmentInBytes value. +- * @requires vm.gc != "Shenandoah" + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionFromSurvivorToTenuredAfterMinorGC + * SurvivorAlignmentTestMain AlignmentHelper +diff -uNr openjdk/hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java afu8u/hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java +--- openjdk/hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/survivorAlignment/TestPromotionToSurvivor.java 2025-05-06 10:53:45.235633677 +0800 +@@ -26,7 +26,6 @@ + * @bug 8031323 + * @summary Verify that objects promoted from eden space to survivor space after + * minor GC are aligned to SurvivorAlignmentInBytes. 
+- * @requires vm.gc != "Shenandoah" + * @library /testlibrary /testlibrary/whitebox + * @build TestPromotionToSurvivor + * SurvivorAlignmentTestMain AlignmentHelper +diff -uNr openjdk/hotspot/test/gc/TestSystemGC.java afu8u/hotspot/test/gc/TestSystemGC.java +--- openjdk/hotspot/test/gc/TestSystemGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/TestSystemGC.java 2025-05-06 10:53:45.231633677 +0800 +@@ -38,8 +38,6 @@ + * @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC + * @run main/othervm -XX:+UseLargePages TestSystemGC + * @run main/othervm -XX:+UseLargePages -XX:+UseLargePagesInMetaspace TestSystemGC +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC TestSystemGC +- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC + */ + + public class TestSystemGC { +diff -uNr openjdk/hotspot/test/gc/whitebox/TestWBGC.java afu8u/hotspot/test/gc/whitebox/TestWBGC.java +--- openjdk/hotspot/test/gc/whitebox/TestWBGC.java 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/gc/whitebox/TestWBGC.java 2025-05-06 10:53:45.235633677 +0800 +@@ -25,7 +25,6 @@ + * @test TestWBGC + * @bug 8055098 + * @summary Test verify that WB methods isObjectInOldGen and youngGC works correctly. 
+- * @requires vm.gc != "Shenandoah" + * @library /testlibrary /testlibrary/whitebox + * @build TestWBGC + * @run main ClassFileInstaller sun.hotspot.WhiteBox +diff -uNr openjdk/hotspot/test/runtime/6929067/Test6929067.sh afu8u/hotspot/test/runtime/6929067/Test6929067.sh +--- openjdk/hotspot/test/runtime/6929067/Test6929067.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/runtime/6929067/Test6929067.sh 2025-05-06 11:13:08.147672951 +0800 +@@ -97,6 +97,9 @@ + i686) + ARCH=i386 + ;; ++ sw_64) ++ ARCH=sw64 ++ ;; + # Assuming other ARCH values need no translation + esac + +diff -uNr openjdk/hotspot/test/test_env.sh afu8u/hotspot/test/test_env.sh +--- openjdk/hotspot/test/test_env.sh 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/test_env.sh 2025-05-06 11:13:08.151672951 +0800 +@@ -211,6 +211,11 @@ + then + VM_CPU="aarch64" + fi ++grep "sw64" vm_version.out > ${NULL} ++if [ $? = 0 ] ++then ++ VM_CPU="sw64" ++fi + export VM_TYPE VM_BITS VM_OS VM_CPU + echo "VM_TYPE=${VM_TYPE}" + echo "VM_BITS=${VM_BITS}" +diff -uNr openjdk/hotspot/test/TEST.groups afu8u/hotspot/test/TEST.groups +--- openjdk/hotspot/test/TEST.groups 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/hotspot/test/TEST.groups 2025-05-06 10:53:45.175633675 +0800 +@@ -135,35 +135,6 @@ + sanity/ExecuteInternalVMTests.java \ + -gc/g1/TestGreyReclaimedHumongousObjects.java + +-hotspot_tier1_gc_shenandoah = \ +- gc/shenandoah/options/ \ +- gc/shenandoah/compiler/ \ +- gc/shenandoah/mxbeans/ \ +- gc/shenandoah/TestSmallHeap.java \ +- gc/shenandoah/oom/ +- +-hotspot_tier2_gc_shenandoah = \ +- gc/arguments/TestUseCompressedOopsErgo.java \ +- gc/arguments/TestAlignmentToUseLargePages.java \ +- gc/logging/TestGCId.java \ +- gc/metaspace/TestMetaspacePerfCounters.java \ +- gc/startup_warnings/TestShenandoah.java \ +- gc/shenandoah/jvmti/TestGetLoadedClasses.sh \ +- gc/shenandoah/ \ +- -gc/shenandoah/TestStringDedupStress.java \ +- -gc/shenandoah/jni/TestCriticalNativeStress.sh \ +- 
-:hotspot_tier1_gc_shenandoah +- +-hotspot_tier3_gc_shenandoah = \ +- gc/shenandoah/TestStringDedupStress.java \ +- gc/shenandoah/jni/TestCriticalNativeStress.sh \ +- -:hotspot_tier2_gc_shenandoah +- +-hotspot_gc_shenandoah = \ +- :hotspot_tier1_gc_shenandoah \ +- :hotspot_tier2_gc_shenandoah \ +- :hotspot_tier3_gc_shenandoah +- + hotspot_runtime = \ + sanity/ExecuteInternalVMTests.java + +diff -uNr openjdk/hotspot/THIRD_PARTY_README afu8u/hotspot/THIRD_PARTY_README +--- openjdk/hotspot/THIRD_PARTY_README 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/hotspot/THIRD_PARTY_README 2025-05-06 10:53:44.795633662 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. 
Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. 
Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. + + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/jaxp/THIRD_PARTY_README afu8u/jaxp/THIRD_PARTY_README +--- openjdk/jaxp/THIRD_PARTY_README 2023-04-19 05:53:03.000000000 +0800 ++++ afu8u/jaxp/THIRD_PARTY_README 2025-05-06 10:53:45.255633678 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? 
are trade names of + Unicode, Inc. Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/jaxws/THIRD_PARTY_README afu8u/jaxws/THIRD_PARTY_README +--- openjdk/jaxws/THIRD_PARTY_README 2023-04-19 05:53:04.000000000 +0800 ++++ afu8u/jaxws/THIRD_PARTY_README 2025-05-06 10:53:45.419633683 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. 
Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/.jcheck/conf afu8u/.jcheck/conf +--- openjdk/.jcheck/conf 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/.jcheck/conf 2025-05-06 11:13:08.071672949 +0800 +@@ -0,0 +1,31 @@ ++[general] ++project=jdk8u ++jbs=JDK ++version=openjdk8u372 ++ ++[checks] ++error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace ++ ++[repository] ++tags=(?:jdk-(?:[1-9]([0-9]*)(?:\.(?:0|[1-9][0-9]*)){0,4})(?:\+(?:(?:[0-9]+))|(?:-ga)))|(?:jdk[4-9](?:u\d{1,3})?-(?:(?:b\d{2,3})|(?:ga)))|(?:hs\d\d(?:\.\d{1,2})?-b\d\d) ++branches= ++ ++[census] ++version=0 ++domain=openjdk.org ++ ++[checks "whitespace"] ++files=.*\.cpp|.*\.hpp|.*\.c|.*\.h|.*\.java ++ ++[checks "merge"] ++message=Merge ++ ++[checks "reviewers"] ++reviewers=1 ++ignore=duke ++ ++[checks "committer"] ++role=committer ++ ++[checks "issues"] ++pattern=^([124-8][0-9]{6}): (\S.*)$ +diff -uNr openjdk/jdk/make/lib/SecurityLibraries.gmk afu8u/jdk/make/lib/SecurityLibraries.gmk +--- openjdk/jdk/make/lib/SecurityLibraries.gmk 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/make/lib/SecurityLibraries.gmk 2025-05-06 10:53:45.707633693 +0800 +@@ -243,7 +243,7 @@ + OPTIMIZATION := LOW, \ + CFLAGS := $(filter-out $(ECC_JNI_SOLSPARC_FILTER), $(CFLAGS_JDKLIB)) \ + $(BUILD_LIBSUNEC_FLAGS) \ +- -DMP_API_COMPATIBLE, \ ++ -DMP_API_COMPATIBLE -DNSS_ECC_MORE_THAN_SUITE_B, \ + CXXFLAGS := $(filter-out $(ECC_JNI_SOLSPARC_FILTER), $(CXXFLAGS_JDKLIB)) \ + $(BUILD_LIBSUNEC_FLAGS), \ + MAPFILE := $(JDK_TOPDIR)/make/mapfiles/libsunec/mapfile-vers, \ +diff -uNr openjdk/jdk/make/lib/SoundLibraries.gmk afu8u/jdk/make/lib/SoundLibraries.gmk +--- openjdk/jdk/make/lib/SoundLibraries.gmk 2023-04-19 05:53:04.000000000 +0800 ++++ afu8u/jdk/make/lib/SoundLibraries.gmk 2025-05-06 10:53:45.707633693 +0800 +@@ -136,6 +136,10 @@ + LIBJSOUND_CFLAGS += -DX_ARCH=X_PPC + endif + ++ ifeq ($(OPENJDK_TARGET_CPU), sw64) ++ LIBJSOUND_CFLAGS += 
-DX_ARCH=X_SW64 ++ endif ++ + ifeq ($(OPENJDK_TARGET_CPU), ppc64) + LIBJSOUND_CFLAGS += -DX_ARCH=X_PPC64 + endif +diff -uNr openjdk/jdk/src/share/classes/com/sun/org/apache/xml/internal/security/algorithms/implementations/ECDSAUtils.java afu8u/jdk/src/share/classes/com/sun/org/apache/xml/internal/security/algorithms/implementations/ECDSAUtils.java +--- openjdk/jdk/src/share/classes/com/sun/org/apache/xml/internal/security/algorithms/implementations/ECDSAUtils.java 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/classes/com/sun/org/apache/xml/internal/security/algorithms/implementations/ECDSAUtils.java 2025-05-06 10:53:45.891633699 +0800 +@@ -161,6 +161,149 @@ + static { + ecCurveDefinitions.add( + new ECCurveDefinition( ++ "secp112r1", ++ "1.3.132.0.6", ++ "db7c2abf62e35e668076bead208b", ++ "db7c2abf62e35e668076bead2088", ++ "659ef8ba043916eede8911702b22", ++ "09487239995a5ee76b55f9c2f098", ++ "a89ce5af8724c0a23e0e0ff77500", ++ "db7c2abf62e35e7628dfac6561c5", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp112r2", ++ "1.3.132.0.7", ++ "db7c2abf62e35e668076bead208b", ++ "6127c24c05f38a0aaaf65c0ef02c", ++ "51def1815db5ed74fcc34c85d709", ++ "4ba30ab5e892b4e1649dd0928643", ++ "adcd46f5882e3747def36e956e97", ++ "36df0aafd8b8d7597ca10520d04b", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp128r1", ++ "1.3.132.0.28", ++ "fffffffdffffffffffffffffffffffff", ++ "fffffffdfffffffffffffffffffffffc", ++ "e87579c11079f43dd824993c2cee5ed3", ++ "161ff7528b899b2d0c28607ca52c5b86", ++ "cf5ac8395bafeb13c02da292dded7a83", ++ "fffffffe0000000075a30d1b9038a115", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp128r2", ++ "1.3.132.0.29", ++ "fffffffdffffffffffffffffffffffff", ++ "d6031998d1b3bbfebf59cc9bbff9aee1", ++ "5eeefca380d02919dc2c6558bb6d8a5d", ++ "7b6aa5d85e572983e6fb32a7cdebc140", ++ "27b6916a894d3aee7106fe805fc34b44", ++ "3fffffff7fffffffbe0024720613b5a3", ++ 4) ++ ); ++ ++ 
ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp160k1", ++ "1.3.132.0.9", ++ "fffffffffffffffffffffffffffffffeffffac73", ++ "0000000000000000000000000000000000000000", ++ "0000000000000000000000000000000000000007", ++ "3b4c382ce37aa192a4019e763036f4f5dd4d7ebb", ++ "938cf935318fdced6bc28286531733c3f03c4fee", ++ "0100000000000000000001b8fa16dfab9aca16b6b3", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp160r1", ++ "1.3.132.0.8", ++ "ffffffffffffffffffffffffffffffff7fffffff", ++ "ffffffffffffffffffffffffffffffff7ffffffc", ++ "1c97befc54bd7a8b65acf89f81d4d4adc565fa45", ++ "4a96b5688ef573284664698968c38bb913cbfc82", ++ "23a628553168947d59dcc912042351377ac5fb32", ++ "0100000000000000000001f4c8f927aed3ca752257", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp160r2", ++ "1.3.132.0.30", ++ "fffffffffffffffffffffffffffffffeffffac73", ++ "fffffffffffffffffffffffffffffffeffffac70", ++ "b4e134d3fb59eb8bab57274904664d5af50388ba", ++ "52dcb034293a117e1f4ff11b30f7199d3144ce6d", ++ "feaffef2e331f296e071fa0df9982cfea7d43f2e", ++ "0100000000000000000000351ee786a818f3a1a16b", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp192k1", ++ "1.3.132.0.31", ++ "fffffffffffffffffffffffffffffffffffffffeffffee37", ++ "000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000003", ++ "db4ff10ec057e9ae26b07d0280b7f4341da5d1b1eae06c7d", ++ "9b2f2f6d9c5628a7844163d015be86344082aa88d95e2f9d", ++ "fffffffffffffffffffffffe26f2fc170f69466a74defd8d", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp192r1 [NIST P-192, X9.62 prime192v1]", ++ "1.2.840.10045.3.1.1", ++ "fffffffffffffffffffffffffffffffeffffffffffffffff", ++ "fffffffffffffffffffffffffffffffefffffffffffffffc", ++ "64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1", ++ "188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012", ++ "07192b95ffc8da78631011ed6b24cdd573f977a11e794811", ++ 
"ffffffffffffffffffffffff99def836146bc9b1b4d22831", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp224k1", ++ "1.3.132.0.32", ++ "fffffffffffffffffffffffffffffffffffffffffffffffeffffe56d", ++ "00000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000005", ++ "a1455b334df099df30fc28a169a467e9e47075a90f7e650eb6b7a45c", ++ "7e089fed7fba344282cafbd6f7e319f7c0b0bd59e2ca4bdb556d61a5", ++ "010000000000000000000000000001dce8d2ec6184caf0a971769fb1f7", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "secp224r1 [NIST P-224]", ++ "1.3.132.0.33", ++ "ffffffffffffffffffffffffffffffff000000000000000000000001", ++ "fffffffffffffffffffffffffffffffefffffffffffffffffffffffe", ++ "b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", ++ "b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", ++ "bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", ++ "ffffffffffffffffffffffffffff16a2e0b8f03e13dd29455c5c2a3d", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( + "secp256k1", + "1.3.132.0.10", + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", +@@ -210,6 +353,409 @@ + "01fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb71e91386409", + 1) + ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 prime192v2", ++ "1.2.840.10045.3.1.2", ++ "fffffffffffffffffffffffffffffffeffffffffffffffff", ++ "fffffffffffffffffffffffffffffffefffffffffffffffc", ++ "cc22d6dfb95c6b25e49c0d6364a4e5980c393aa21668d953", ++ "eea2bae7e1497842f2de7769cfe9c989c072ad696f48034a", ++ "6574d11d69b6ec7a672bb82a083df2f2b0847de970b2de15", ++ "fffffffffffffffffffffffe5fb1a724dc80418648d8dd31", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 prime192v3", ++ "1.2.840.10045.3.1.3", ++ "fffffffffffffffffffffffffffffffeffffffffffffffff", ++ 
"fffffffffffffffffffffffffffffffefffffffffffffffc", ++ "22123dc2395a05caa7423daeccc94760a7d462256bd56916", ++ "7d29778100c65a1da1783716588dce2b8b4aee8e228f1896", ++ "38a90f22637337334b49dcb66a6dc8f9978aca7648a943b0", ++ "ffffffffffffffffffffffff7a62d031c83f4294f640ec13", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 prime239v1", ++ "1.2.840.10045.3.1.4", ++ "7fffffffffffffffffffffff7fffffffffff8000000000007fffffffffff", ++ "7fffffffffffffffffffffff7fffffffffff8000000000007ffffffffffc", ++ "6b016c3bdcf18941d0d654921475ca71a9db2fb27d1d37796185c2942c0a", ++ "0ffa963cdca8816ccc33b8642bedf905c3d358573d3f27fbbd3b3cb9aaaf", ++ "7debe8e4e90a5dae6e4054ca530ba04654b36818ce226b39fccb7b02f1ae", ++ "7fffffffffffffffffffffff7fffff9e5e9a9f5d9071fbd1522688909d0b", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 prime239v2", ++ "1.2.840.10045.3.1.5", ++ "7fffffffffffffffffffffff7fffffffffff8000000000007fffffffffff", ++ "7fffffffffffffffffffffff7fffffffffff8000000000007ffffffffffc", ++ "617fab6832576cbbfed50d99f0249c3fee58b94ba0038c7ae84c8c832f2c", ++ "38af09d98727705120c921bb5e9e26296a3cdcf2f35757a0eafd87b830e7", ++ "5b0125e4dbea0ec7206da0fc01d9b081329fb555de6ef460237dff8be4ba", ++ "7fffffffffffffffffffffff800000cfa7e8594377d414c03821bc582063", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 prime239v3", ++ "1.2.840.10045.3.1.6", ++ "7fffffffffffffffffffffff7fffffffffff8000000000007fffffffffff", ++ "7fffffffffffffffffffffff7fffffffffff8000000000007ffffffffffc", ++ "255705fa2a306654b1f4cb03d6a750a30c250102d4988717d9ba15ab6d3e", ++ "6768ae8e18bb92cfcf005c949aa2c6d94853d0e660bbf854b1c9505fe95a", ++ "1607e6898f390c06bc1d552bad226f3b6fcfe48b6e818499af18e3ed6cf3", ++ "7fffffffffffffffffffffff7fffff975deb41b3a6057c3c432146526551", ++ 1) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect113r1", ++ "1.3.132.0.4", ++ "020000000000000000000000000201", ++ 
"003088250ca6e7c7fe649ce85820f7", ++ "00e8bee4d3e2260744188be0e9c723", ++ "009d73616f35f4ab1407d73562c10f", ++ "00a52830277958ee84d1315ed31886", ++ "0100000000000000d9ccec8a39e56f", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect113r2", ++ "1.3.132.0.5", ++ "020000000000000000000000000201", ++ "00689918dbec7e5a0dd6dfc0aa55c7", ++ "0095e9a9ec9b297bd4bf36e059184f", ++ "01a57a6a7b26ca5ef52fcdb8164797", ++ "00b3adc94ed1fe674c06e695baba1d", ++ "010000000000000108789b2496af93", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect131r1", ++ "1.3.132.0.22", ++ "080000000000000000000000000000010d", ++ "07a11b09a76b562144418ff3ff8c2570b8", ++ "0217c05610884b63b9c6c7291678f9d341", ++ "0081baf91fdf9833c40f9c181343638399", ++ "078c6e7ea38c001f73c8134b1b4ef9e150", ++ "0400000000000000023123953a9464b54d", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect131r2", ++ "1.3.132.0.23", ++ "080000000000000000000000000000010d", ++ "03e5a88919d7cafcbf415f07c2176573b2", ++ "04b8266a46c55657ac734ce38f018f2192", ++ "0356dcd8f2f95031ad652d23951bb366a8", ++ "0648f06d867940a5366d9e265de9eb240f", ++ "0400000000000000016954a233049ba98f", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect163k1 [NIST K-163]", ++ "1.3.132.0.1", ++ "0800000000000000000000000000000000000000c9", ++ "000000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000001", ++ "02fe13c0537bbc11acaa07d793de4e6d5e5c94eee8", ++ "0289070fb05d38ff58321f2e800536d538ccdaa3d9", ++ "04000000000000000000020108a2e0cc0d99f8a5ef", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect163r1", ++ "1.3.132.0.2", ++ "0800000000000000000000000000000000000000c9", ++ "07b6882caaefa84f9554ff8428bd88e246d2782ae2", ++ "0713612dcddcb40aab946bda29ca91f73af958afd9", ++ "0369979697ab43897789566789567f787a7876a654", ++ "00435edb42efafb2989d51fefce3c80988f41ff883", ++ 
"03ffffffffffffffffffff48aab689c29ca710279b", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect163r2 [NIST B-163]", ++ "1.3.132.0.15", ++ "0800000000000000000000000000000000000000c9", ++ "000000000000000000000000000000000000000001", ++ "020a601907b8c953ca1481eb10512f78744a3205fd", ++ "03f0eba16286a2d57ea0991168d4994637e8343e36", ++ "00d51fbc6c71a0094fa2cdd545b11c5c0c797324f1", ++ "040000000000000000000292fe77e70c12a4234c33", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect193r1", ++ "1.3.132.0.24", ++ "02000000000000000000000000000000000000000000008001", ++ "0017858feb7a98975169e171f77b4087de098ac8a911df7b01", ++ "00fdfb49bfe6c3a89facadaa7a1e5bbc7cc1c2e5d831478814", ++ "01f481bc5f0ff84a74ad6cdf6fdef4bf6179625372d8c0c5e1", ++ "0025e399f2903712ccf3ea9e3a1ad17fb0b3201b6af7ce1b05", ++ "01000000000000000000000000c7f34a778f443acc920eba49", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect193r2", ++ "1.3.132.0.25", ++ "02000000000000000000000000000000000000000000008001", ++ "0163f35a5137c2ce3ea6ed8667190b0bc43ecd69977702709b", ++ "00c9bb9e8927d4d64c377e2ab2856a5b16e3efb7f61d4316ae", ++ "00d9b67d192e0367c803f39e1a7e82ca14a651350aae617e8f", ++ "01ce94335607c304ac29e7defbd9ca01f596f927224cdecf6c", ++ "010000000000000000000000015aab561b005413ccd4ee99d5", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect233k1 [NIST K-233]", ++ "1.3.132.0.26", ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "017232ba853a7e731af129f22ff4149563a419c26bf50a4c9d6eefad6126", ++ "01db537dece819b7f70f555a67c427a8cd9bf18aeb9b56e0c11056fae6a3", ++ "008000000000000000000000000000069d5bb915bcd46efb1ad5f173abdf", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect233r1 [NIST B-233]", ++ "1.3.132.0.27", ++ 
"020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "0066647ede6c332c7f8c0923bb58213b333b20e9ce4281fe115f7d8f90ad", ++ "00fac9dfcbac8313bb2139f1bb755fef65bc391f8b36f8f8eb7371fd558b", ++ "01006a08a41903350678e58528bebf8a0beff867a7ca36716f7e01f81052", ++ "01000000000000000000000000000013e974e72f8a6922031d2603cfe0d7", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect239k1", ++ "1.3.132.0.3", ++ "800000000000000000004000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "29a0b6a887a983e9730988a68727a8b2d126c44cc2cc7b2a6555193035dc", ++ "76310804f12e549bdb011c103089e73510acb275fc312a5dc6b76553f0ca", ++ "2000000000000000000000000000005a79fec67cb6e91f1c1da800e478a5", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect283k1 [NIST K-283]", ++ "1.3.132.0.16", ++ "0800000000000000000000000000000000000000000000000000000000000000000010a1", ++ "000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0503213f78ca44883f1a3b8162f188e553cd265f23c1567a16876913b0c2ac2458492836", ++ "01ccda380f1c9e318d90f95d07e5426fe87e45c0e8184698e45962364e34116177dd2259", ++ "01ffffffffffffffffffffffffffffffffffe9ae2ed07577265dff7f94451e061e163c61", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect283r1 [NIST B-283]", ++ "1.3.132.0.17", ++ "0800000000000000000000000000000000000000000000000000000000000000000010a1", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "027b680ac8b8596da5a4af8a19a0303fca97fd7645309fa2a581485af6263e313b79a2f5", ++ "05f939258db7dd90e1934f8c70b0dfec2eed25b8557eac9c80e2e198f8cdbecd86b12053", ++ 
"03676854fe24141cb98fe6d4b20d02b4516ff702350eddb0826779c813f0df45be8112f4", ++ "03ffffffffffffffffffffffffffffffffffef90399660fc938a90165b042a7cefadb307", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect409k1 [NIST K-409]", ++ "1.3.132.0.36", ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0060f05f658f49c1ad3ab1890f7184210efd0987e307c84c27accfb8f9f67cc2c460189eb5aaaa62ee222eb1b35540cfe9023746", ++ "01e369050b7c4e42acba1dacbf04299c3460782f918ea427e6325165e9ea10e3da5f6c42e9c55215aa9ca27a5863ec48d8e0286b", ++ "007ffffffffffffffffffffffffffffffffffffffffffffffffffe5f83b2d4ea20400ec4557d5ed3e3e7ca5b4b5c83b8e01e5fcf", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect409r1 [NIST B-409]", ++ "1.3.132.0.37", ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0021a5c2c8ee9feb5c4b9a753b7b476b7fd6422ef1f3dd674761fa99d6ac27c8a9a197b272822f6cd57a55aa4f50ae317b13545f", ++ "015d4860d088ddb3496b0c6064756260441cde4af1771d4db01ffe5b34e59703dc255a868a1180515603aeab60794e54bb7996a7", ++ "0061b1cfab6be5f32bbfa78324ed106a7636b9c5a7bd198d0158aa4f5488d08f38514f1fdf4b4f40d2181b3681c364ba0273c706", ++ "010000000000000000000000000000000000000000000000000001e2aad6a612f33307be5fa47c3c9e052f838164cd37d9a21173", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect571k1 [NIST K-571]", ++ "1.3.132.0.38", ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ 
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "026eb7a859923fbc82189631f8103fe4ac9ca2970012d5d46024804801841ca44370958493b205e647da304db4ceb08cbbd1ba39494776fb988b47174dca88c7e2945283a01c8972", ++ "0349dc807f4fbf374f4aeade3bca95314dd58cec9f307a54ffc61efc006d8a2c9d4979c0ac44aea74fbebbb9f772aedcb620b01a7ba7af1b320430c8591984f601cd4c143ef1c7a3", ++ "020000000000000000000000000000000000000000000000000000000000000000000000131850e1f19a63e4b391a8db917f4138b630d84be5d639381e91deb45cfe778f637c1001", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "sect571r1 [NIST B-571]", ++ "1.3.132.0.39", ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "02f40e7e2221f295de297117b7f3d62f5c6a97ffcb8ceff1cd6ba8ce4a9a18ad84ffabbd8efa59332be7ad6756a66e294afd185a78ff12aa520e4de739baca0c7ffeff7f2955727a", ++ "0303001d34b856296c16c0d40d3cd7750a93d1d2955fa80aa5f40fc8db7b2abdbde53950f4c0d293cdd711a35b67fb1499ae60038614f1394abfa3b4c850d927e1e7769c8eec2d19", ++ "037bf27342da639b6dccfffeb73d69d78c6c27a6009cbbca1980f8533921e8a684423e43bab08a576291af8f461bb2a8b3531d2f0485c19b16e2f1516e23dd3c1a4827af1b8ac15b", ++ "03ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe661ce18ff55987308059b186823851ec7dd9ca1161de93d5174d66e8382e9bb2fe84e47", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb191v1", ++ "1.2.840.10045.3.0.5", ++ "800000000000000000000000000000000000000000000201", ++ 
"2866537b676752636a68f56554e12640276b649ef7526267", ++ "2e45ef571f00786f67b0081b9495a3d95462f5de0aa185ec", ++ "36b3daf8a23206f9c4f299d7b21a9c369137f2c84ae1aa0d", ++ "765be73433b3f95e332932e70ea245ca2418ea0ef98018fb", ++ "40000000000000000000000004a20e90c39067c893bbb9a5", ++ 2) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb191v2", ++ "1.2.840.10045.3.0.6", ++ "800000000000000000000000000000000000000000000201", ++ "401028774d7777c7b7666d1366ea432071274f89ff01e718", ++ "0620048d28bcbd03b6249c99182b7c8cd19700c362c46a01", ++ "3809b2b7cc1b28cc5a87926aad83fd28789e81e2c9e3bf10", ++ "17434386626d14f3dbf01760d9213a3e1cf37aec437d668a", ++ "20000000000000000000000050508cb89f652824e06b8173", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb191v3", ++ "1.2.840.10045.3.0.7", ++ "800000000000000000000000000000000000000000000201", ++ "6c01074756099122221056911c77d77e77a777e7e7e77fcb", ++ "71fe1af926cf847989efef8db459f66394d90f32ad3f15e8", ++ "375d4ce24fde434489de8746e71786015009e66e38a926dd", ++ "545a39176196575d985999366e6ad34ce0a77cd7127b06be", ++ "155555555555555555555555610c0b196812bfb6288a3ea3", ++ 6) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb239v1", ++ "1.2.840.10045.3.0.11", ++ "800000000000000000000000000000000000000000000000001000000001", ++ "32010857077c5431123a46b808906756f543423e8d27877578125778ac76", ++ "790408f2eedaf392b012edefb3392f30f4327c0ca3f31fc383c422aa8c16", ++ "57927098fa932e7c0a96d3fd5b706ef7e5f5c156e16b7e7c86038552e91d", ++ "61d8ee5077c33fecf6f1a16b268de469c3c7744ea9a971649fc7a9616305", ++ "2000000000000000000000000000000f4d42ffe1492a4993f1cad666e447", ++ 4) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb239v2", ++ "1.2.840.10045.3.0.12", ++ "800000000000000000000000000000000000000000000000001000000001", ++ "4230017757a767fae42398569b746325d45313af0766266479b75654e65f", ++ 
"5037ea654196cff0cd82b2c14a2fcf2e3ff8775285b545722f03eacdb74b", ++ "28f9d04e900069c8dc47a08534fe76d2b900b7d7ef31f5709f200c4ca205", ++ "5667334c45aff3b5a03bad9dd75e2c71a99362567d5453f7fa6e227ec833", ++ "1555555555555555555555555555553c6f2885259c31e3fcdf154624522d", ++ 6) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb239v3", ++ "1.2.840.10045.3.0.13", ++ "800000000000000000000000000000000000000000000000001000000001", ++ "01238774666a67766d6676f778e676b66999176666e687666d8766c66a9f", ++ "6a941977ba9f6a435199acfc51067ed587f519c5ecb541b8e44111de1d40", ++ "70f6e9d04d289c4e89913ce3530bfde903977d42b146d539bf1bde4e9c92", ++ "2e5a0eaf6e5e1305b9004dce5c0ed7fe59a35608f33837c816d80b79f461", ++ "0cccccccccccccccccccccccccccccac4912d2d9df903ef9888b8a0e4cff", ++ 0xA) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb359v1", ++ "1.2.840.10045.3.0.18", ++ "800000000000000000000000000000000000000000000000000000000000000000000000100000000000000001", ++ "5667676a654b20754f356ea92017d946567c46675556f19556a04616b567d223a5e05656fb549016a96656a557", ++ "2472e2d0197c49363f1fe7f5b6db075d52b6947d135d8ca445805d39bc345626089687742b6329e70680231988", ++ "3c258ef3047767e7ede0f1fdaa79daee3841366a132e163aced4ed2401df9c6bdcde98e8e707c07a2239b1b097", ++ "53d7e08529547048121e9c95f3791dd804963948f34fae7bf44ea82365dc7868fe57e4ae2de211305a407104bd", ++ "01af286bca1af286bca1af286bca1af286bca1af286bc9fb8f6b85c556892c20a7eb964fe7719e74f490758d3b", ++ 0x4C) ++ ); ++ ++ ecCurveDefinitions.add( ++ new ECCurveDefinition( ++ "X9.62 c2tnb431r1", ++ "1.2.840.10045.3.0.20", ++ "800000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000001", ++ "1a827ef00dd6fc0e234caf046c6a5d8a85395b236cc4ad2cf32a0cadbdc9ddf620b0eb9906d0957f6c6feacd615468df104de296cd8f", ++ "10d9b4a3d9047d8b154359abfb1b7f5485b04ceb868237ddc9deda982a679a5a919b626d4e50a8dd731b107a9962381fb5d807bf2618", ++ 
"120fc05d3c67a99de161d2f4092622feca701be4f50f4758714e8a87bbf2a658ef8c21e7c5efe965361f6c2999c0c247b0dbd70ce6b7", ++ "20d0af8903a96f8d5fa2c255745d3c451b302c9346d9b7e485e7bce41f6b591f3e8f6addcbb0bc4c2f947a7de1a89b625d6a598b3760", ++ "0340340340340340340340340340340340340340340340340340340323c313fab50589703b5ec68d3587fec60d161cc149c1ad4a91", ++ 0x2760) ++ ); + } + + public static String getOIDFromPublicKey(ECPublicKey ecPublicKey) { +diff -uNr openjdk/jdk/src/share/classes/java/io/ObjectInputStream.java afu8u/jdk/src/share/classes/java/io/ObjectInputStream.java +--- openjdk/jdk/src/share/classes/java/io/ObjectInputStream.java 2023-04-19 05:53:04.000000000 +0800 ++++ afu8u/jdk/src/share/classes/java/io/ObjectInputStream.java 2025-05-06 11:13:08.343672958 +0800 +@@ -44,6 +44,7 @@ + + import static java.io.ObjectStreamClass.processQueue; + ++import sun.misc.SharedSecrets; + import sun.misc.ObjectInputFilter; + import sun.misc.ObjectStreamClassValidator; + import sun.misc.SharedSecrets; +diff -uNr openjdk/jdk/src/share/classes/java/lang/invoke/LambdaFormEditor.java afu8u/jdk/src/share/classes/java/lang/invoke/LambdaFormEditor.java +--- openjdk/jdk/src/share/classes/java/lang/invoke/LambdaFormEditor.java 2023-04-19 05:53:04.000000000 +0800 ++++ afu8u/jdk/src/share/classes/java/lang/invoke/LambdaFormEditor.java 2025-05-06 10:53:46.011633703 +0800 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +diff -uNr openjdk/jdk/src/share/classes/sun/misc/ObjectInputFilter.java afu8u/jdk/src/share/classes/sun/misc/ObjectInputFilter.java +--- openjdk/jdk/src/share/classes/sun/misc/ObjectInputFilter.java 2023-04-19 05:53:05.000000000 +0800 ++++ afu8u/jdk/src/share/classes/sun/misc/ObjectInputFilter.java 2025-05-06 10:53:46.355633715 +0800 +@@ -270,7 +270,7 @@ + /** + * Current configured filter. + */ +- private static ObjectInputFilter serialFilter = configuredFilter; ++ private static volatile ObjectInputFilter serialFilter = configuredFilter; + + /** + * Get the filter for classes being deserialized on the ObjectInputStream. +@@ -304,9 +304,7 @@ + * @return the process-wide serialization filter or {@code null} if not configured + */ + public static ObjectInputFilter getSerialFilter() { +- synchronized (serialFilterLock) { + return serialFilter; +- } + } + + /** +diff -uNr openjdk/jdk/src/share/classes/sun/security/rsa/RSACore.java afu8u/jdk/src/share/classes/sun/security/rsa/RSACore.java +--- openjdk/jdk/src/share/classes/sun/security/rsa/RSACore.java 2023-04-19 05:53:05.000000000 +0800 ++++ afu8u/jdk/src/share/classes/sun/security/rsa/RSACore.java 2025-05-06 10:53:46.451633718 +0800 +@@ -1,5 +1,5 @@ + /* +- * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. ++ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it +@@ -25,20 +25,25 @@ + + package sun.security.rsa; + +-import java.math.BigInteger; +-import java.util.*; +- +-import java.security.SecureRandom; +-import java.security.interfaces.*; ++import sun.security.jca.JCAUtil; + + import javax.crypto.BadPaddingException; +- +-import sun.security.jca.JCAUtil; ++import java.math.BigInteger; ++import java.security.SecureRandom; ++import java.security.interfaces.RSAKey; ++import java.security.interfaces.RSAPrivateCrtKey; ++import java.security.interfaces.RSAPrivateKey; ++import java.security.interfaces.RSAPublicKey; ++import java.util.Arrays; ++import java.util.Map; ++import java.util.WeakHashMap; ++import java.util.concurrent.ConcurrentLinkedQueue; ++import java.util.concurrent.locks.ReentrantLock; + + /** + * Core of the RSA implementation. Has code to perform public and private key + * RSA operations (with and without CRT for private key ops). Private CRT ops +- * also support blinding to twart timing attacks. ++ * also support blinding to thwart timing attacks. + * + * The code in this class only does the core RSA operation. Padding and + * unpadding must be done externally. +@@ -51,13 +56,16 @@ + public final class RSACore { + + // globally enable/disable use of blinding +- private final static boolean ENABLE_BLINDING = true; ++ private static final boolean ENABLE_BLINDING = true; + +- // cache for blinding parameters. Map +- // use a weak hashmap so that cached values are automatically cleared +- // when the modulus is GC'ed +- private final static Map ++ // cache for blinding parameters. Map> use a weak hashmap so that, ++ // cached values are automatically cleared when the modulus is GC'ed. ++ // Multiple BlindingParameters can be queued during times of heavy load, ++ // like performance testing. 
++ private static final Map> + blindingCache = new WeakHashMap<>(); ++ private static final ReentrantLock lock = new ReentrantLock(); + + private RSACore() { + // empty +@@ -313,7 +321,7 @@ + * + * The total performance cost is small. + */ +- private final static class BlindingRandomPair { ++ private static final class BlindingRandomPair { + final BigInteger u; + final BigInteger v; + +@@ -334,8 +342,8 @@ + * since sharing moduli is fundamentally broken and insecure, this + * does not matter. + */ +- private final static class BlindingParameters { +- private final static BigInteger BIG_TWO = BigInteger.valueOf(2L); ++ private static final class BlindingParameters { ++ private static final BigInteger BIG_TWO = BigInteger.valueOf(2L); + + // RSA public exponent + private final BigInteger e; +@@ -402,56 +410,68 @@ + if ((this.e != null && this.e.equals(e)) || + (this.d != null && this.d.equals(d))) { + +- BlindingRandomPair brp = null; +- synchronized (this) { +- if (!u.equals(BigInteger.ZERO) && +- !v.equals(BigInteger.ZERO)) { +- +- brp = new BlindingRandomPair(u, v); +- if (u.compareTo(BigInteger.ONE) <= 0 || +- v.compareTo(BigInteger.ONE) <= 0) { +- +- // need to reset the random pair next time +- u = BigInteger.ZERO; +- v = BigInteger.ZERO; +- } else { +- u = u.modPow(BIG_TWO, n); +- v = v.modPow(BIG_TWO, n); +- } +- } // Otherwise, need to reset the random pair. ++ BlindingRandomPair brp = new BlindingRandomPair(u, v); ++ if (u.compareTo(BigInteger.ONE) <= 0 || ++ v.compareTo(BigInteger.ONE) <= 0) { ++ // Reset so the parameters will be not queued later ++ u = BigInteger.ZERO; ++ v = BigInteger.ZERO; ++ } else { ++ u = u.modPow(BIG_TWO, n); ++ v = v.modPow(BIG_TWO, n); + } ++ + return brp; + } + + return null; + } ++ ++ // Check if reusable, return true if both u & v are not zero. 
++ boolean isReusable() { ++ return !u.equals(BigInteger.ZERO) && !v.equals(BigInteger.ZERO); ++ } + } + + private static BlindingRandomPair getBlindingRandomPair( + BigInteger e, BigInteger d, BigInteger n) { + +- BlindingParameters bps = null; +- synchronized (blindingCache) { +- bps = blindingCache.get(n); ++ ConcurrentLinkedQueue queue; ++ ++ // Get queue from map, if there is none then create one ++ lock.lock(); ++ try { ++ queue = blindingCache.computeIfAbsent(n, ++ ignored -> new ConcurrentLinkedQueue<>()); ++ } finally { ++ lock.unlock(); + } + ++ BlindingParameters bps = queue.poll(); + if (bps == null) { + bps = new BlindingParameters(e, d, n); +- synchronized (blindingCache) { +- blindingCache.putIfAbsent(n, bps); +- } + } + +- BlindingRandomPair brp = bps.getBlindingRandomPair(e, d, n); +- if (brp == null) { +- // need to reset the blinding parameters +- bps = new BlindingParameters(e, d, n); +- synchronized (blindingCache) { +- blindingCache.replace(n, bps); +- } ++ BlindingRandomPair brp = null; ++ ++ // Loops to get a valid pair, going through the queue or create a new ++ // parameters if needed. ++ while (brp == null) { + brp = bps.getBlindingRandomPair(e, d, n); ++ if (brp == null) { ++ // need to reset the blinding parameters, first check for ++ // another in the queue. ++ bps = queue.poll(); ++ if (bps == null) { ++ bps = new BlindingParameters(e, d, n); ++ } ++ } + } + ++ // If this parameters are still usable, put them back into the queue. 
++ if (bps.isReusable()) { ++ queue.add(bps); ++ } + return brp; + } + +diff -uNr openjdk/jdk/src/share/classes/sun/security/ssl/SupportedGroupsExtension.java afu8u/jdk/src/share/classes/sun/security/ssl/SupportedGroupsExtension.java +--- openjdk/jdk/src/share/classes/sun/security/ssl/SupportedGroupsExtension.java 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/classes/sun/security/ssl/SupportedGroupsExtension.java 2025-05-06 10:53:46.463633719 +0800 +@@ -179,6 +179,70 @@ + // Elliptic Curves (RFC 4492) + // + // See sun.security.util.CurveDB for the OIDs ++ // NIST K-163 ++ SECT163_K1 (0x0001, "sect163k1", "1.3.132.0.1", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECT163_R1 (0x0002, "sect163r1", "1.3.132.0.2", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST B-163 ++ SECT163_R2 (0x0003, "sect163r2", "1.3.132.0.15", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECT193_R1 (0x0004, "sect193r1", "1.3.132.0.24", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECT193_R2 (0x0005, "sect193r2", "1.3.132.0.25", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST K-233 ++ SECT233_K1 (0x0006, "sect233k1", "1.3.132.0.26", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST B-233 ++ SECT233_R1 (0x0007, "sect233r1", "1.3.132.0.27", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECT239_K1 (0x0008, "sect239k1", "1.3.132.0.3", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST K-283 ++ SECT283_K1 (0x0009, "sect283k1", "1.3.132.0.16", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST B-283 ++ SECT283_R1 (0x000A, "sect283r1", "1.3.132.0.17", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST K-409 ++ SECT409_K1 (0x000B, "sect409k1", "1.3.132.0.36", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST B-409 ++ SECT409_R1 (0x000C, "sect409r1", "1.3.132.0.37", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST K-571 ++ SECT571_K1 (0x000D, "sect571k1", "1.3.132.0.38", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // 
NIST B-571 ++ SECT571_R1 (0x000E, "sect571r1", "1.3.132.0.39", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECP160_K1 (0x000F, "secp160k1", "1.3.132.0.9", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECP160_R1 (0x0010, "secp160r1", "1.3.132.0.8", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECP160_R2 (0x0011, "secp160r2", "1.3.132.0.30", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECP192_K1 (0x0012, "secp192k1", "1.3.132.0.31", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ ++ // NIST P-192 ++ SECP192_R1 (0x0013, "secp192r1", "1.2.840.10045.3.1.1", true, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ SECP224_K1 (0x0014, "secp224k1", "1.3.132.0.32", false, ++ ProtocolVersion.PROTOCOLS_TO_12), ++ // NIST P-224 ++ SECP224_R1 (0x0015, "secp224r1", "1.3.132.0.33", true, ++ ProtocolVersion.PROTOCOLS_TO_12), + SECP256_K1 (0x0016, "secp256k1", "1.3.132.0.10", false, + ProtocolVersion.PROTOCOLS_TO_12), + +diff -uNr openjdk/jdk/src/share/classes/sun/security/util/CurveDB.java afu8u/jdk/src/share/classes/sun/security/util/CurveDB.java +--- openjdk/jdk/src/share/classes/sun/security/util/CurveDB.java 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/classes/sun/security/util/CurveDB.java 2025-05-06 10:53:46.471633719 +0800 +@@ -178,6 +178,105 @@ + Pattern nameSplitPattern = Holder.nameSplitPattern; + + /* SEC2 prime curves */ ++ add("secp112r1", "1.3.132.0.6", P, ++ "DB7C2ABF62E35E668076BEAD208B", ++ "DB7C2ABF62E35E668076BEAD2088", ++ "659EF8BA043916EEDE8911702B22", ++ "09487239995A5EE76B55F9C2F098", ++ "A89CE5AF8724C0A23E0E0FF77500", ++ "DB7C2ABF62E35E7628DFAC6561C5", ++ 1, nameSplitPattern); ++ ++ add("secp112r2", "1.3.132.0.7", P, ++ "DB7C2ABF62E35E668076BEAD208B", ++ "6127C24C05F38A0AAAF65C0EF02C", ++ "51DEF1815DB5ED74FCC34C85D709", ++ "4BA30AB5E892B4E1649DD0928643", ++ "adcd46f5882e3747def36e956e97", ++ "36DF0AAFD8B8D7597CA10520D04B", ++ 4, nameSplitPattern); ++ ++ add("secp128r1", "1.3.132.0.28", P, ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF", ++ 
"FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC", ++ "E87579C11079F43DD824993C2CEE5ED3", ++ "161FF7528B899B2D0C28607CA52C5B86", ++ "CF5AC8395BAFEB13C02DA292DDED7A83", ++ "FFFFFFFE0000000075A30D1B9038A115", ++ 1, nameSplitPattern); ++ ++ add("secp128r2", "1.3.132.0.29", P, ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF", ++ "D6031998D1B3BBFEBF59CC9BBFF9AEE1", ++ "5EEEFCA380D02919DC2C6558BB6D8A5D", ++ "7B6AA5D85E572983E6FB32A7CDEBC140", ++ "27B6916A894D3AEE7106FE805FC34B44", ++ "3FFFFFFF7FFFFFFFBE0024720613B5A3", ++ 4, nameSplitPattern); ++ ++ add("secp160k1", "1.3.132.0.9", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", ++ "0000000000000000000000000000000000000000", ++ "0000000000000000000000000000000000000007", ++ "3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", ++ "938CF935318FDCED6BC28286531733C3F03C4FEE", ++ "0100000000000000000001B8FA16DFAB9ACA16B6B3", ++ 1, nameSplitPattern); ++ ++ add("secp160r1", "1.3.132.0.8", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC", ++ "1C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45", ++ "4A96B5688EF573284664698968C38BB913CBFC82", ++ "23A628553168947D59DCC912042351377AC5FB32", ++ "0100000000000000000001F4C8F927AED3CA752257", ++ 1, nameSplitPattern); ++ ++ add("secp160r2", "1.3.132.0.30", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70", ++ "B4E134D3FB59EB8BAB57274904664D5AF50388BA", ++ "52DCB034293A117E1F4FF11B30F7199D3144CE6D", ++ "FEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E", ++ "0100000000000000000000351EE786A818F3A1A16B", ++ 1, nameSplitPattern); ++ ++ add("secp192k1", "1.3.132.0.31", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", ++ "000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000003", ++ "DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", ++ "9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", ++ "FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", ++ 1, nameSplitPattern); ++ ++ add("secp192r1 [NIST P-192, 
X9.62 prime192v1]", "1.2.840.10045.3.1.1", PD, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", ++ "188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", ++ "07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", ++ "FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", ++ 1, nameSplitPattern); ++ ++ add("secp224k1", "1.3.132.0.32", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", ++ "00000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000005", ++ "A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", ++ "7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", ++ "010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", ++ 1, nameSplitPattern); ++ ++ add("secp224r1 [NIST P-224]", "1.3.132.0.33", PD, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE", ++ "B4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4", ++ "B70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21", ++ "BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D", ++ 1, nameSplitPattern); ++ + add("secp256k1", "1.3.132.0.10", P, + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", + "0000000000000000000000000000000000000000000000000000000000000000", +@@ -214,6 +313,435 @@ + "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409", + 1, nameSplitPattern); + ++ /* ANSI X9.62 prime curves */ ++ add("X9.62 prime192v2", "1.2.840.10045.3.1.2", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "CC22D6DFB95C6B25E49C0D6364A4E5980C393AA21668D953", ++ "EEA2BAE7E1497842F2DE7769CFE9C989C072AD696F48034A", ++ 
"6574D11D69B6EC7A672BB82A083DF2F2B0847DE970B2DE15", ++ "FFFFFFFFFFFFFFFFFFFFFFFE5FB1A724DC80418648D8DD31", ++ 1, nameSplitPattern); ++ ++ add("X9.62 prime192v3", "1.2.840.10045.3.1.3", P, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "22123DC2395A05CAA7423DAECCC94760A7D462256BD56916", ++ "7D29778100C65A1DA1783716588DCE2B8B4AEE8E228F1896", ++ "38A90F22637337334B49DCB66A6DC8F9978ACA7648A943B0", ++ "FFFFFFFFFFFFFFFFFFFFFFFF7A62D031C83F4294F640EC13", ++ 1, nameSplitPattern); ++ ++ add("X9.62 prime239v1", "1.2.840.10045.3.1.4", P, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "6B016C3BDCF18941D0D654921475CA71A9DB2FB27D1D37796185C2942C0A", ++ "0FFA963CDCA8816CCC33B8642BEDF905C3D358573D3F27FBBD3B3CB9AAAF", ++ "7DEBE8E4E90A5DAE6E4054CA530BA04654B36818CE226B39FCCB7B02F1AE", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF9E5E9A9F5D9071FBD1522688909D0B", ++ 1, nameSplitPattern); ++ ++ add("X9.62 prime239v2", "1.2.840.10045.3.1.5", P, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "617FAB6832576CBBFED50D99F0249C3FEE58B94BA0038C7AE84C8C832F2C", ++ "38AF09D98727705120C921BB5E9E26296A3CDCF2F35757A0EAFD87B830E7", ++ "5B0125E4DBEA0EC7206DA0FC01D9B081329FB555DE6EF460237DFF8BE4BA", ++ "7FFFFFFFFFFFFFFFFFFFFFFF800000CFA7E8594377D414C03821BC582063", ++ 1, nameSplitPattern); ++ ++ add("X9.62 prime239v3", "1.2.840.10045.3.1.6", P, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "255705FA2A306654B1F4CB03D6A750A30C250102D4988717D9BA15AB6D3E", ++ "6768AE8E18BB92CFCF005C949AA2C6D94853D0E660BBF854B1C9505FE95A", ++ "1607E6898F390C06BC1D552BAD226F3B6FCFE48B6E818499AF18E3ED6CF3", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF975DEB41B3A6057C3C432146526551", ++ 1, nameSplitPattern); ++ ++ 
/* SEC2 binary curves */ ++ add("sect113r1", "1.3.132.0.4", B, ++ "020000000000000000000000000201", ++ "003088250CA6E7C7FE649CE85820F7", ++ "00E8BEE4D3E2260744188BE0E9C723", ++ "009D73616F35F4AB1407D73562C10F", ++ "00A52830277958EE84D1315ED31886", ++ "0100000000000000D9CCEC8A39E56F", ++ 2, nameSplitPattern); ++ ++ add("sect113r2", "1.3.132.0.5", B, ++ "020000000000000000000000000201", ++ "00689918DBEC7E5A0DD6DFC0AA55C7", ++ "0095E9A9EC9B297BD4BF36E059184F", ++ "01A57A6A7B26CA5EF52FCDB8164797", ++ "00B3ADC94ED1FE674C06E695BABA1D", ++ "010000000000000108789B2496AF93", ++ 2, nameSplitPattern); ++ ++ add("sect131r1", "1.3.132.0.22", B, ++ "080000000000000000000000000000010D", ++ "07A11B09A76B562144418FF3FF8C2570B8", ++ "0217C05610884B63B9C6C7291678F9D341", ++ "0081BAF91FDF9833C40F9C181343638399", ++ "078C6E7EA38C001F73C8134B1B4EF9E150", ++ "0400000000000000023123953A9464B54D", ++ 2, nameSplitPattern); ++ ++ add("sect131r2", "1.3.132.0.23", B, ++ "080000000000000000000000000000010D", ++ "03E5A88919D7CAFCBF415F07C2176573B2", ++ "04B8266A46C55657AC734CE38F018F2192", ++ "0356DCD8F2F95031AD652D23951BB366A8", ++ "0648F06D867940A5366D9E265DE9EB240F", ++ "0400000000000000016954A233049BA98F", ++ 2, nameSplitPattern); ++ ++ add("sect163k1 [NIST K-163]", "1.3.132.0.1", BD, ++ "0800000000000000000000000000000000000000C9", ++ "000000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000001", ++ "02FE13C0537BBC11ACAA07D793DE4E6D5E5C94EEE8", ++ "0289070FB05D38FF58321F2E800536D538CCDAA3D9", ++ "04000000000000000000020108A2E0CC0D99F8A5EF", ++ 2, nameSplitPattern); ++ ++ add("sect163r1", "1.3.132.0.2", B, ++ "0800000000000000000000000000000000000000C9", ++ "07B6882CAAEFA84F9554FF8428BD88E246D2782AE2", ++ "0713612DCDDCB40AAB946BDA29CA91F73AF958AFD9", ++ "0369979697AB43897789566789567F787A7876A654", ++ "00435EDB42EFAFB2989D51FEFCE3C80988F41FF883", ++ "03FFFFFFFFFFFFFFFFFFFF48AAB689C29CA710279B", ++ 2, nameSplitPattern); ++ ++ add("sect163r2 [NIST B-163]", 
"1.3.132.0.15", BD, ++ "0800000000000000000000000000000000000000C9", ++ "000000000000000000000000000000000000000001", ++ "020A601907B8C953CA1481EB10512F78744A3205FD", ++ "03F0EBA16286A2D57EA0991168D4994637E8343E36", ++ "00D51FBC6C71A0094FA2CDD545B11C5C0C797324F1", ++ "040000000000000000000292FE77E70C12A4234C33", ++ 2, nameSplitPattern); ++ ++ add("sect193r1", "1.3.132.0.24", B, ++ "02000000000000000000000000000000000000000000008001", ++ "0017858FEB7A98975169E171F77B4087DE098AC8A911DF7B01", ++ "00FDFB49BFE6C3A89FACADAA7A1E5BBC7CC1C2E5D831478814", ++ "01F481BC5F0FF84A74AD6CDF6FDEF4BF6179625372D8C0C5E1", ++ "0025E399F2903712CCF3EA9E3A1AD17FB0B3201B6AF7CE1B05", ++ "01000000000000000000000000C7F34A778F443ACC920EBA49", ++ 2, nameSplitPattern); ++ ++ add("sect193r2", "1.3.132.0.25", B, ++ "02000000000000000000000000000000000000000000008001", ++ "0163F35A5137C2CE3EA6ED8667190B0BC43ECD69977702709B", ++ "00C9BB9E8927D4D64C377E2AB2856A5B16E3EFB7F61D4316AE", ++ "00D9B67D192E0367C803F39E1A7E82CA14A651350AAE617E8F", ++ "01CE94335607C304AC29E7DEFBD9CA01F596F927224CDECF6C", ++ "010000000000000000000000015AAB561B005413CCD4EE99D5", ++ 2, nameSplitPattern); ++ ++ add("sect233k1 [NIST K-233]", "1.3.132.0.26", BD, ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "017232BA853A7E731AF129F22FF4149563A419C26BF50A4C9D6EEFAD6126", ++ "01DB537DECE819B7F70F555A67C427A8CD9BF18AEB9B56E0C11056FAE6A3", ++ "008000000000000000000000000000069D5BB915BCD46EFB1AD5F173ABDF", ++ 4, nameSplitPattern); ++ ++ add("sect233r1 [NIST B-233]", "1.3.132.0.27", B, ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "0066647EDE6C332C7F8C0923BB58213B333B20E9CE4281FE115F7D8F90AD", ++ "00FAC9DFCBAC8313BB2139F1BB755FEF65BC391F8B36F8F8EB7371FD558B", ++ 
"01006A08A41903350678E58528BEBF8A0BEFF867A7CA36716F7E01F81052", ++ "01000000000000000000000000000013E974E72F8A6922031D2603CFE0D7", ++ 2, nameSplitPattern); ++ ++ add("sect239k1", "1.3.132.0.3", B, ++ "800000000000000000004000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "29A0B6A887A983E9730988A68727A8B2D126C44CC2CC7B2A6555193035DC", ++ "76310804F12E549BDB011C103089E73510ACB275FC312A5DC6B76553F0CA", ++ "2000000000000000000000000000005A79FEC67CB6E91F1C1DA800E478A5", ++ 4, nameSplitPattern); ++ ++ add("sect283k1 [NIST K-283]", "1.3.132.0.16", BD, ++ "0800000000000000000000000000000000000000000000000000000000000000000010A1", ++ "000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0503213F78CA44883F1A3B8162F188E553CD265F23C1567A16876913B0C2AC2458492836", ++ "01CCDA380F1C9E318D90F95D07E5426FE87E45C0E8184698E45962364E34116177DD2259", ++ "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE9AE2ED07577265DFF7F94451E061E163C61", ++ 4, nameSplitPattern); ++ ++ add("sect283r1 [NIST B-283]", "1.3.132.0.17", B, ++ "0800000000000000000000000000000000000000000000000000000000000000000010A1", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "027B680AC8B8596DA5A4AF8A19A0303FCA97FD7645309FA2A581485AF6263E313B79A2F5", ++ "05F939258DB7DD90E1934F8C70B0DFEC2EED25B8557EAC9C80E2E198F8CDBECD86B12053", ++ "03676854FE24141CB98FE6D4B20D02B4516FF702350EDDB0826779C813F0DF45BE8112F4", ++ "03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEF90399660FC938A90165B042A7CEFADB307", ++ 2, nameSplitPattern); ++ ++ add("sect409k1 [NIST K-409]", "1.3.132.0.36", BD, ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ 
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0060F05F658F49C1AD3AB1890F7184210EFD0987E307C84C27ACCFB8F9F67CC2C460189EB5AAAA62EE222EB1B35540CFE9023746", ++ "01E369050B7C4E42ACBA1DACBF04299C3460782F918EA427E6325165E9EA10E3DA5F6C42E9C55215AA9CA27A5863EC48D8E0286B", ++ "007FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE5F83B2D4EA20400EC4557D5ED3E3E7CA5B4B5C83B8E01E5FCF", ++ 4, nameSplitPattern); ++ ++ add("sect409r1 [NIST B-409]", "1.3.132.0.37", B, ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0021A5C2C8EE9FEB5C4B9A753B7B476B7FD6422EF1F3DD674761FA99D6AC27C8A9A197B272822F6CD57A55AA4F50AE317B13545F", ++ "015D4860D088DDB3496B0C6064756260441CDE4AF1771D4DB01FFE5B34E59703DC255A868A1180515603AEAB60794E54BB7996A7", ++ "0061B1CFAB6BE5F32BBFA78324ED106A7636B9C5A7BD198D0158AA4F5488D08F38514F1FDF4B4F40D2181B3681C364BA0273C706", ++ "010000000000000000000000000000000000000000000000000001E2AAD6A612F33307BE5FA47C3C9E052F838164CD37D9A21173", ++ 2, nameSplitPattern); ++ ++ add("sect571k1 [NIST K-571]", "1.3.132.0.38", BD, ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "026EB7A859923FBC82189631F8103FE4AC9CA2970012D5D46024804801841CA44370958493B205E647DA304DB4CEB08CBBD1BA39494776FB988B47174DCA88C7E2945283A01C8972", ++ 
"0349DC807F4FBF374F4AEADE3BCA95314DD58CEC9F307A54FFC61EFC006D8A2C9D4979C0AC44AEA74FBEBBB9F772AEDCB620B01A7BA7AF1B320430C8591984F601CD4C143EF1C7A3", ++ "020000000000000000000000000000000000000000000000000000000000000000000000131850E1F19A63E4B391A8DB917F4138B630D84BE5D639381E91DEB45CFE778F637C1001", ++ 4, nameSplitPattern); ++ ++ add("sect571r1 [NIST B-571]", "1.3.132.0.39", B, ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "02F40E7E2221F295DE297117B7F3D62F5C6A97FFCB8CEFF1CD6BA8CE4A9A18AD84FFABBD8EFA59332BE7AD6756A66E294AFD185A78FF12AA520E4DE739BACA0C7FFEFF7F2955727A", ++ "0303001D34B856296C16C0D40D3CD7750A93D1D2955FA80AA5F40FC8DB7B2ABDBDE53950F4C0D293CDD711A35B67FB1499AE60038614F1394ABFA3B4C850D927E1E7769C8EEC2D19", ++ "037BF27342DA639B6DCCFFFEB73D69D78C6C27A6009CBBCA1980F8533921E8A684423E43BAB08A576291AF8F461BB2A8B3531D2F0485C19B16E2F1516E23DD3C1A4827AF1B8AC15B", ++ "03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE661CE18FF55987308059B186823851EC7DD9CA1161DE93D5174D66E8382E9BB2FE84E47", ++ 2, nameSplitPattern); ++ ++ /* ANSI X9.62 binary curves */ ++ add("X9.62 c2tnb191v1", "1.2.840.10045.3.0.5", B, ++ "800000000000000000000000000000000000000000000201", ++ "2866537B676752636A68F56554E12640276B649EF7526267", ++ "2E45EF571F00786F67B0081B9495A3D95462F5DE0AA185EC", ++ "36B3DAF8A23206F9C4F299D7B21A9C369137F2C84AE1AA0D", ++ "765BE73433B3F95E332932E70EA245CA2418EA0EF98018FB", ++ "40000000000000000000000004A20E90C39067C893BBB9A5", ++ 2, nameSplitPattern); ++ ++ add("X9.62 c2tnb191v2", "1.2.840.10045.3.0.6", B, ++ "800000000000000000000000000000000000000000000201", ++ "401028774D7777C7B7666D1366EA432071274F89FF01E718", ++ "0620048D28BCBD03B6249C99182B7C8CD19700C362C46A01", ++ 
"3809B2B7CC1B28CC5A87926AAD83FD28789E81E2C9E3BF10", ++ "17434386626D14F3DBF01760D9213A3E1CF37AEC437D668A", ++ "20000000000000000000000050508CB89F652824E06B8173", ++ 4, nameSplitPattern); ++ ++ add("X9.62 c2tnb191v3", "1.2.840.10045.3.0.7", B, ++ "800000000000000000000000000000000000000000000201", ++ "6C01074756099122221056911C77D77E77A777E7E7E77FCB", ++ "71FE1AF926CF847989EFEF8DB459F66394D90F32AD3F15E8", ++ "375D4CE24FDE434489DE8746E71786015009E66E38A926DD", ++ "545A39176196575D985999366E6AD34CE0A77CD7127B06BE", ++ "155555555555555555555555610C0B196812BFB6288A3EA3", ++ 6, nameSplitPattern); ++ ++ add("X9.62 c2tnb239v1", "1.2.840.10045.3.0.11", B, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "32010857077C5431123A46B808906756F543423E8D27877578125778AC76", ++ "790408F2EEDAF392B012EDEFB3392F30F4327C0CA3F31FC383C422AA8C16", ++ "57927098FA932E7C0A96D3FD5B706EF7E5F5C156E16B7E7C86038552E91D", ++ "61D8EE5077C33FECF6F1A16B268DE469C3C7744EA9A971649FC7A9616305", ++ "2000000000000000000000000000000F4D42FFE1492A4993F1CAD666E447", ++ 4, nameSplitPattern); ++ ++ add("X9.62 c2tnb239v2", "1.2.840.10045.3.0.12", B, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "4230017757A767FAE42398569B746325D45313AF0766266479B75654E65F", ++ "5037EA654196CFF0CD82B2C14A2FCF2E3FF8775285B545722F03EACDB74B", ++ "28F9D04E900069C8DC47A08534FE76D2B900B7D7EF31F5709F200C4CA205", ++ "5667334C45AFF3B5A03BAD9DD75E2C71A99362567D5453F7FA6E227EC833", ++ "1555555555555555555555555555553C6F2885259C31E3FCDF154624522D", ++ 6, nameSplitPattern); ++ ++ add("X9.62 c2tnb239v3", "1.2.840.10045.3.0.13", B, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "01238774666A67766D6676F778E676B66999176666E687666D8766C66A9F", ++ "6A941977BA9F6A435199ACFC51067ED587F519C5ECB541B8E44111DE1D40", ++ "70F6E9D04D289C4E89913CE3530BFDE903977D42B146D539BF1BDE4E9C92", ++ "2E5A0EAF6E5E1305B9004DCE5C0ED7FE59A35608F33837C816D80B79F461", ++ 
"0CCCCCCCCCCCCCCCCCCCCCCCCCCCCCAC4912D2D9DF903EF9888B8A0E4CFF", ++ 0xA, nameSplitPattern); ++ ++ add("X9.62 c2tnb359v1", "1.2.840.10045.3.0.18", B, ++ "800000000000000000000000000000000000000000000000000000000000000000000000100000000000000001", ++ "5667676A654B20754F356EA92017D946567C46675556F19556A04616B567D223A5E05656FB549016A96656A557", ++ "2472E2D0197C49363F1FE7F5B6DB075D52B6947D135D8CA445805D39BC345626089687742B6329E70680231988", ++ "3C258EF3047767E7EDE0F1FDAA79DAEE3841366A132E163ACED4ED2401DF9C6BDCDE98E8E707C07A2239B1B097", ++ "53D7E08529547048121E9C95F3791DD804963948F34FAE7BF44EA82365DC7868FE57E4AE2DE211305A407104BD", ++ "01AF286BCA1AF286BCA1AF286BCA1AF286BCA1AF286BC9FB8F6B85C556892C20A7EB964FE7719E74F490758D3B", ++ 0x4C, nameSplitPattern); ++ ++ add("X9.62 c2tnb431r1", "1.2.840.10045.3.0.20", B, ++ "800000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000001", ++ "1A827EF00DD6FC0E234CAF046C6A5D8A85395B236CC4AD2CF32A0CADBDC9DDF620B0EB9906D0957F6C6FEACD615468DF104DE296CD8F", ++ "10D9B4A3D9047D8B154359ABFB1B7F5485B04CEB868237DDC9DEDA982A679A5A919B626D4E50A8DD731B107A9962381FB5D807BF2618", ++ "120FC05D3C67A99DE161D2F4092622FECA701BE4F50F4758714E8A87BBF2A658EF8C21E7C5EFE965361F6C2999C0C247B0DBD70CE6B7", ++ "20D0AF8903A96F8D5FA2C255745D3C451B302C9346D9B7E485E7BCE41F6B591F3E8F6ADDCBB0BC4C2F947A7DE1A89B625D6A598B3760", ++ "0340340340340340340340340340340340340340340340340340340323C313FAB50589703B5EC68D3587FEC60D161CC149C1AD4A91", ++ 0x2760, nameSplitPattern); ++ ++ /* ANSI X9.62 binary curves from the 1998 standard but forbidden ++ * in the 2005 version of the standard. ++ * We don't register them but leave them here for the time being in ++ * case we need to support them after all. 
++ */ ++/* ++ add("X9.62 c2pnb163v1", "1.2.840.10045.3.0.1", B, ++ "080000000000000000000000000000000000000107", ++ "072546B5435234A422E0789675F432C89435DE5242", ++ "00C9517D06D5240D3CFF38C74B20B6CD4D6F9DD4D9", ++ "07AF69989546103D79329FCC3D74880F33BBE803CB", ++ "01EC23211B5966ADEA1D3F87F7EA5848AEF0B7CA9F", ++ "0400000000000000000001E60FC8821CC74DAEAFC1", ++ 2, nameSplitPattern); ++ ++ add("X9.62 c2pnb163v2", "1.2.840.10045.3.0.2", B, ++ "080000000000000000000000000000000000000107", ++ "0108B39E77C4B108BED981ED0E890E117C511CF072", ++ "0667ACEB38AF4E488C407433FFAE4F1C811638DF20", ++ "0024266E4EB5106D0A964D92C4860E2671DB9B6CC5", ++ "079F684DDF6684C5CD258B3890021B2386DFD19FC5", ++ "03FFFFFFFFFFFFFFFFFFFDF64DE1151ADBB78F10A7", ++ 2, nameSplitPattern); ++ ++ add("X9.62 c2pnb163v3", "1.2.840.10045.3.0.3", B, ++ "080000000000000000000000000000000000000107", ++ "07A526C63D3E25A256A007699F5447E32AE456B50E", ++ "03F7061798EB99E238FD6F1BF95B48FEEB4854252B", ++ "02F9F87B7C574D0BDECF8A22E6524775F98CDEBDCB", ++ "05B935590C155E17EA48EB3FF3718B893DF59A05D0", ++ "03FFFFFFFFFFFFFFFFFFFE1AEE140F110AFF961309", ++ 2, nameSplitPattern); ++ ++ add("X9.62 c2pnb176w1", "1.2.840.10045.3.0.4", B, ++ "0100000000000000000000000000000000080000000007", ++ "E4E6DB2995065C407D9D39B8D0967B96704BA8E9C90B", ++ "5DDA470ABE6414DE8EC133AE28E9BBD7FCEC0AE0FFF2", ++ "8D16C2866798B600F9F08BB4A8E860F3298CE04A5798", ++ "6FA4539C2DADDDD6BAB5167D61B436E1D92BB16A562C", ++ "00010092537397ECA4F6145799D62B0A19CE06FE26AD", ++ 0xFF6E, nameSplitPattern); ++ ++ add("X9.62 c2pnb208w1", "1.2.840.10045.3.0.10", B, ++ "010000000000000000000000000000000800000000000000000007", ++ "0000000000000000000000000000000000000000000000000000", ++ "C8619ED45A62E6212E1160349E2BFA844439FAFC2A3FD1638F9E", ++ "89FDFBE4ABE193DF9559ECF07AC0CE78554E2784EB8C1ED1A57A", ++ "0F55B51A06E78E9AC38A035FF520D8B01781BEB1A6BB08617DE3", ++ "000101BAF95C9723C57B6C21DA2EFF2D5ED588BDD5717E212F9D", ++ 0xFE48, nameSplitPattern); ++ ++ add("X9.62 c2pnb272w1", 
"1.2.840.10045.3.0.16", B, ++ "010000000000000000000000000000000000000000000000000000010000000000000B", ++ "91A091F03B5FBA4AB2CCF49C4EDD220FB028712D42BE752B2C40094DBACDB586FB20", ++ "7167EFC92BB2E3CE7C8AAAFF34E12A9C557003D7C73A6FAF003F99F6CC8482E540F7", ++ "6108BABB2CEEBCF787058A056CBE0CFE622D7723A289E08A07AE13EF0D10D171DD8D", ++ "10C7695716851EEF6BA7F6872E6142FBD241B830FF5EFCACECCAB05E02005DDE9D23", ++ "000100FAF51354E0E39E4892DF6E319C72C8161603FA45AA7B998A167B8F1E629521", ++ 0xFF06, nameSplitPattern); ++ ++ add("X9.62 c2pnb304w1", "1.2.840.10045.3.0.17", B, ++ "010000000000000000000000000000000000000000000000000000000000000000000000000807", ++ "FD0D693149A118F651E6DCE6802085377E5F882D1B510B44160074C1288078365A0396C8E681", ++ "BDDB97E555A50A908E43B01C798EA5DAA6788F1EA2794EFCF57166B8C14039601E55827340BE", ++ "197B07845E9BE2D96ADB0F5F3C7F2CFFBD7A3EB8B6FEC35C7FD67F26DDF6285A644F740A2614", ++ "E19FBEB76E0DA171517ECF401B50289BF014103288527A9B416A105E80260B549FDC1B92C03B", ++ "000101D556572AABAC800101D556572AABAC8001022D5C91DD173F8FB561DA6899164443051D", ++ 0xFE2E, nameSplitPattern); ++ ++ add("X9.62 c2pnb368w1", "1.2.840.10045.3.0.19", B, ++ "0100000000000000000000000000000000000000000000000000000000000000000000002000000000000000000007", ++ "E0D2EE25095206F5E2A4F9ED229F1F256E79A0E2B455970D8D0D865BD94778C576D62F0AB7519CCD2A1A906AE30D", ++ "FC1217D4320A90452C760A58EDCD30C8DD069B3C34453837A34ED50CB54917E1C2112D84D164F444F8F74786046A", ++ "1085E2755381DCCCE3C1557AFA10C2F0C0C2825646C5B34A394CBCFA8BC16B22E7E789E927BE216F02E1FB136A5F", ++ "7B3EB1BDDCBA62D5D8B2059B525797FC73822C59059C623A45FF3843CEE8F87CD1855ADAA81E2A0750B80FDA2310", ++ "00010090512DA9AF72B08349D98A5DD4C7B0532ECA51CE03E2D10F3B7AC579BD87E909AE40A6F131E9CFCE5BD967", ++ 0xFF70, nameSplitPattern); ++*/ ++ ++ /* ++ * Brainpool curves (RFC 5639) ++ * (Twisted curves are not included) ++ */ ++ ++ add("brainpoolP160r1", "1.3.36.3.3.2.8.1.1.1", P, ++ "E95E4A5F737059DC60DFC7AD95B3D8139515620F", ++ 
"340E7BE2A280EB74E2BE61BADA745D97E8F7C300", ++ "1E589A8595423412134FAA2DBDEC95C8D8675E58", ++ "BED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3", ++ "1667CB477A1A8EC338F94741669C976316DA6321", ++ "E95E4A5F737059DC60DF5991D45029409E60FC09", ++ 1, nameSplitPattern); ++ ++ add("brainpoolP192r1", "1.3.36.3.3.2.8.1.1.3", P, ++ "C302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297", ++ "6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF", ++ "469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9", ++ "C0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6", ++ "14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F", ++ "C302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1", ++ 1, nameSplitPattern); ++ ++ add("brainpoolP224r1", "1.3.36.3.3.2.8.1.1.5", P, ++ "D7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF", ++ "68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43", ++ "2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B", ++ "0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D", ++ "58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD", ++ "D7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F", ++ 1, nameSplitPattern); ++ ++ add("brainpoolP256r1", "1.3.36.3.3.2.8.1.1.7", P, ++ "A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", ++ "7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9", ++ "26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6", ++ "8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", ++ "547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", ++ "A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", ++ 1, nameSplitPattern); ++ ++ add("brainpoolP320r1", "1.3.36.3.3.2.8.1.1.9", P, ++ "D35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27", ++ "3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F492F375A97D860EB4", ++ "520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539816F5EB4AC8FB1F1A6", ++ 
"43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C710AF8D0D39E20611", ++ "14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7D35245D1692E8EE1", ++ "D35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311", ++ 1, nameSplitPattern); ++ ++ add("brainpoolP384r1", "1.3.36.3.3.2.8.1.1.11", P, ++ "8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", ++ "7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826", ++ "04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D57CB4390295DBC9943AB78696FA504C11", ++ "1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", ++ "8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", ++ "8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", ++ 1, nameSplitPattern); ++ ++ add("brainpoolP512r1", "1.3.36.3.3.2.8.1.1.13", P, ++ "AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", ++ "7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA", ++ "3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723", ++ "81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", ++ "7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", ++ "AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", ++ 1, nameSplitPattern); ++ + specCollection = Collections.unmodifiableCollection(oidMap.values()); + } + } +diff -uNr 
openjdk/jdk/src/share/demo/scripting/jconsole-plugin/build.xml afu8u/jdk/src/share/demo/scripting/jconsole-plugin/build.xml +--- openjdk/jdk/src/share/demo/scripting/jconsole-plugin/build.xml 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/demo/scripting/jconsole-plugin/build.xml 2025-05-06 10:53:46.647633725 +0800 +@@ -73,6 +73,9 @@ + + + ++ ++ ++ + + + +diff -uNr openjdk/jdk/src/share/demo/scripting/jconsole-plugin/README.txt afu8u/jdk/src/share/demo/scripting/jconsole-plugin/README.txt +--- openjdk/jdk/src/share/demo/scripting/jconsole-plugin/README.txt 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/demo/scripting/jconsole-plugin/README.txt 2025-05-06 10:53:46.647633725 +0800 +@@ -18,9 +18,11 @@ + engine javax.script.ScriptEngine + plugin com.sun.tools.jconsole.JConsolePlugin + +-You can add global functions and global variables by defining those in +-~/jconsole.js (or jconsole. where is the file extension for +-your scripting language of choice under your home directory). ++If you use JavaScript, there are many useful global functions defined in ++./src/resources/jconsole.js. This is built into the script plugin jar file. ++In addition, you can add other global functions and global variables by ++defining those in ~/jconsole.js (or jconsole. where is the file ++extension for your scripting language of choice under your home directory). + + How do I compile script console plugin? + +diff -uNr openjdk/jdk/src/share/demo/scripting/jconsole-plugin/src/resources/jconsole.js afu8u/jdk/src/share/demo/scripting/jconsole-plugin/src/resources/jconsole.js +--- openjdk/jdk/src/share/demo/scripting/jconsole-plugin/src/resources/jconsole.js 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/demo/scripting/jconsole-plugin/src/resources/jconsole.js 2025-05-06 10:53:46.647633725 +0800 +@@ -0,0 +1,891 @@ ++/* ++ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * - Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * ++ * - Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * - Neither the name of Oracle nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++/* ++ * This source code is provided to illustrate the usage of a given feature ++ * or technique and has been deliberately simplified. Additional steps ++ * required for a production-quality application, such as security checks, ++ * input validation and proper error handling, might not be present in ++ * this sample code. 
++ */ ++ ++ ++/* ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * -Redistribution of source code must retain the above copyright notice, this ++ * list of conditions and the following disclaimer. ++ * ++ * -Redistribution in binary form must reproduce the above copyright notice, ++ * this list of conditions and the following disclaimer in the documentation ++ * and/or other materials provided with the distribution. ++ * ++ * Neither the name of Oracle nor the names of contributors may ++ * be used to endorse or promote products derived from this software without ++ * specific prior written permission. ++ * ++ * This software is provided "AS IS," without a warranty of any kind. ALL ++ * EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ++ * ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE ++ * OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MIDROSYSTEMS, INC. ("SUN") ++ * AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE ++ * AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS ++ * DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST ++ * REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, ++ * INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY ++ * OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, ++ * EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. ++ * ++ * You acknowledge that this software is not designed, licensed or intended ++ * for use in the design, construction, operation or maintenance of any ++ * nuclear facility. 
++ */ ++ ++// This function depends on the pre-defined variable ++// "plugin" of type com.sun.tools.jconsole.JConsolePlugin ++ ++function jcontext() { ++ return plugin.getContext(); ++} ++jcontext.docString = "returns JConsoleContext for the current jconsole plugin"; ++ ++function mbeanConnection() { ++ return jcontext().getMBeanServerConnection(); ++} ++mbeanConnection.docString = "returns current MBeanServer connection"; ++ ++// check if there is a build in sync function, define one if missing ++if (typeof sync === "undefined") { ++ var sync = function(func, obj) { ++ if (arguments.length < 1 || arguments.length > 2 ) { ++ throw "sync(function [,object]) parameter count mismatch"; ++ } ++ ++ var syncobj = (arguments.length == 2 ? obj : this); ++ ++ if (!syncobj._syncLock) { ++ syncobj._syncLock = new Lock(); ++ } ++ ++ return function() { ++ syncobj._syncLock.lock(); ++ try { ++ func.apply(null, arguments); ++ } finally { ++ syncobj._syncLock.unlock(); ++ } ++ }; ++ }; ++ sync.docString = "synchronize a function, optionally on an object"; ++} ++ ++/** ++ * Prints one liner help message for each function exposed here ++ * Note that this function depends on docString meta-data for ++ * each function ++ */ ++function help() { ++ var i; ++ for (i in this) { ++ var func = this[i]; ++ if (typeof(func) == "function" && ++ ("docString" in func)) { ++ echo(i + " - " + func["docString"]); ++ } ++ } ++} ++help.docString = "prints help message for global functions"; ++ ++function connectionState() { ++ return jcontext().connectionState; ++} ++connectionState.docString = "return connection state of the current jcontext"; ++ ++/** ++ * Returns a platform MXBean proxy for given MXBean name and interface class ++ */ ++function newPlatformMXBeanProxy(name, intf) { ++ var factory = java.lang.management.ManagementFactory; ++ return factory.newPlatformMXBeanProxy(mbeanConnection(), name, intf); ++} ++newPlatformMXBeanProxy.docString = "returns a proxy for a platform MXBean"; ++ 
++/** ++ * Wraps a string to ObjectName if needed. ++ */ ++function objectName(objName) { ++ var ObjectName = Packages.javax.management.ObjectName; ++ if (objName instanceof ObjectName) { ++ return objName; ++ } else { ++ return new ObjectName(objName); ++ } ++} ++objectName.docString = "creates JMX ObjectName for a given String"; ++ ++ ++/** ++ * Creates a new (M&M) Attribute object ++ * ++ * @param name name of the attribute ++ * @param value value of the attribute ++ */ ++function attribute(name, value) { ++ var Attribute = Packages.javax.management.Attribute; ++ return new Attribute(name, value); ++} ++attribute.docString = "returns a new JMX Attribute using name and value given"; ++ ++/** ++ * Returns MBeanInfo for given ObjectName. Strings are accepted. ++ */ ++function mbeanInfo(objName) { ++ objName = objectName(objName); ++ return mbeanConnection().getMBeanInfo(objName); ++} ++mbeanInfo.docString = "returns MBeanInfo of a given ObjectName"; ++ ++/** ++ * Returns ObjectInstance for a given ObjectName. ++ */ ++function objectInstance(objName) { ++ objName = objectName(objName); ++ return mbeanConnection().objectInstance(objectName); ++} ++objectInstance.docString = "returns ObjectInstance for a given ObjectName"; ++ ++/** ++ * Queries with given ObjectName and QueryExp. ++ * QueryExp may be null. ++ * ++ * @return set of ObjectNames. ++ */ ++function queryNames(objName, query) { ++ objName = objectName(objName); ++ if (query == undefined) query = null; ++ return mbeanConnection().queryNames(objName, query); ++} ++queryNames.docString = "returns QueryNames using given ObjectName and optional query"; ++ ++ ++/** ++ * Queries with given ObjectName and QueryExp. ++ * QueryExp may be null. ++ * ++ * @return set of ObjectInstances. 
++ */ ++function queryMBeans(objName, query) { ++ objName = objectName(objName); ++ if (query == undefined) query = null; ++ return mbeanConnection().queryMBeans(objName, query); ++} ++queryMBeans.docString = "return MBeans using given ObjectName and optional query"; ++ ++// wraps a script array as java.lang.Object[] ++function objectArray(array) { ++ return Java.to(array, "java.lang.Object[]"); ++} ++ ++// wraps a script (string) array as java.lang.String[] ++function stringArray(array) { ++ return Java.to(array, "java.lang.String[]"); ++} ++ ++// script array to Java List ++function toAttrList(array) { ++ var AttributeList = Packages.javax.management.AttributeList; ++ if (array instanceof AttributeList) { ++ return array; ++ } ++ var list = new AttributeList(array.length); ++ for (var index = 0; index < array.length; index++) { ++ list.add(array[index]); ++ } ++ return list; ++} ++ ++// Java Collection (Iterable) to script array ++function toArray(collection) { ++ if (collection instanceof Array) { ++ return collection; ++ } ++ var itr = collection.iterator(); ++ var array = new Array(); ++ while (itr.hasNext()) { ++ array[array.length] = itr.next(); ++ } ++ return array; ++} ++ ++// gets MBean attributes ++function getMBeanAttributes(objName, attributeNames) { ++ objName = objectName(objName); ++ return mbeanConnection().getAttributes(objName,stringArray(attributeNames)); ++} ++getMBeanAttributes.docString = "returns specified Attributes of given ObjectName"; ++ ++// gets MBean attribute ++function getMBeanAttribute(objName, attrName) { ++ objName = objectName(objName); ++ return mbeanConnection().getAttribute(objName, attrName); ++} ++getMBeanAttribute.docString = "returns a single Attribute of given ObjectName"; ++ ++ ++// sets MBean attributes ++function setMBeanAttributes(objName, attrList) { ++ objName = objectName(objName); ++ attrList = toAttrList(attrList); ++ return mbeanConnection().setAttributes(objName, attrList); ++} ++setMBeanAttributes.docString = 
"sets specified Attributes of given ObjectName"; ++ ++// sets MBean attribute ++function setMBeanAttribute(objName, attrName, attrValue) { ++ var Attribute = Packages.javax.management.Attribute; ++ objName = objectName(objName); ++ mbeanConnection().setAttribute(objName, new Attribute(attrName, attrValue)); ++} ++setMBeanAttribute.docString = "sets a single Attribute of given ObjectName"; ++ ++ ++// invokes an operation on given MBean ++function invokeMBean(objName, operation, params, signature) { ++ objName = objectName(objName); ++ params = objectArray(params); ++ signature = stringArray(signature); ++ return mbeanConnection().invoke(objName, operation, params, signature); ++} ++invokeMBean.docString = "invokes MBean operation on given ObjectName"; ++ ++/** ++ * Wraps a MBean specified by ObjectName as a convenient ++ * script object -- so that setting/getting MBean attributes ++ * and invoking MBean method can be done with natural syntax. ++ * ++ * @param objName ObjectName of the MBean ++ * @param async asynchornous mode [optional, default is false] ++ * @return script wrapper for MBean ++ * ++ * With async mode, all field, operation access is async. Results ++ * will be of type FutureTask. When you need value, call 'get' on it. 
++ */ ++function mbean(objName, async) { ++ var index; ++ ++ objName = objectName(objName); ++ var info = mbeanInfo(objName); ++ var attrs = info.attributes; ++ var attrMap = new Object; ++ for (index in attrs) { ++ attrMap[attrs[index].name] = attrs[index]; ++ } ++ var opers = info.operations; ++ var operMap = new Object; ++ for (index in opers) { ++ operMap[opers[index].name] = opers[index]; ++ } ++ ++ function isAttribute(name) { ++ return name in attrMap; ++ } ++ ++ function isOperation(name) { ++ return name in operMap; ++ } ++ ++ return new JSAdapter() { ++ __has__: function (name) { ++ return isAttribute(name) || isOperation(name); ++ }, ++ __get__: function (name) { ++ if (isAttribute(name)) { ++ if (async) { ++ return getMBeanAttribute.future(objName, name); ++ } else { ++ return getMBeanAttribute(objName, name); ++ } ++ } else { ++ return undefined; ++ } ++ }, ++ __call__: function(name) { ++ if (isOperation(name)) { ++ var oper = operMap[name]; ++ ++ var params = []; ++ for (var j = 1; j < arguments.length; j++) { ++ params[j-1]= arguments[j]; ++ } ++ ++ var sigs = oper.signature; ++ ++ var sigNames = new Array(sigs.length); ++ for (var index in sigs) { ++ sigNames[index] = sigs[index].getType(); ++ } ++ ++ if (async) { ++ return invokeMBean.future(objName, name, params, sigNames); ++ } else { ++ return invokeMBean(objName, name, params, sigNames); ++ } ++ } else { ++ return undefined; ++ } ++ }, ++ __put__: function (name, value) { ++ if (isAttribute(name)) { ++ if (async) { ++ setMBeanAttribute.future(objName, name, value); ++ } else { ++ setMBeanAttribute(objName, name, value); ++ } ++ } else { ++ return undefined; ++ } ++ } ++ }; ++} ++mbean.docString = "returns a conveninent script wrapper for a MBean of given ObjectName"; ++ ++/** ++ * load and evaluate script file. If no script file is ++ * specified, file dialog is shown to choose the script. 
++ * ++ * @param file script file name [optional] ++ * @return value returned from evaluating script ++ */ ++function load(file) { ++ if (file == undefined || file == null) { ++ // file not specified, show file dialog to choose ++ file = fileDialog(); ++ } ++ if (file == null) return; ++ ++ var reader = new java.io.FileReader(file); ++ var oldFilename = engine.get(engine.FILENAME); ++ engine.put(engine.FILENAME, file); ++ try { ++ engine.eval(reader); ++ } finally { ++ engine.put(engine.FILENAME, oldFilename); ++ } ++ reader.close(); ++} ++load.docString = "loads a script file and evaluates it"; ++ ++/** ++ * Concurrency utilities for JavaScript. These are based on ++ * java.lang and java.util.concurrent API. The following functions ++ * provide a simpler API for scripts. Instead of directly using java.lang ++ * and java.util.concurrent classes, scripts can use functions and ++ * objects exported from here. ++ */ ++ ++/** ++ * Wrapper for java.lang.Object.wait ++ * ++ * can be called only within a sync method ++ */ ++function wait(object) { ++ var objClazz = java.lang.Class.forName('java.lang.Object'); ++ var waitMethod = objClazz.getMethod('wait', null); ++ waitMethod.invoke(object, null); ++} ++wait.docString = "convenient wrapper for java.lang.Object.wait method"; ++ ++ ++/** ++ * Wrapper for java.lang.Object.notify ++ * ++ * can be called only within a sync method ++ */ ++function notify(object) { ++ var objClazz = java.lang.Class.forName('java.lang.Object'); ++ var notifyMethod = objClazz.getMethod('notify', null); ++ notifyMethod.invoke(object, null); ++} ++notify.docString = "convenient wrapper for java.lang.Object.notify method"; ++ ++ ++/** ++ * Wrapper for java.lang.Object.notifyAll ++ * ++ * can be called only within a sync method ++ */ ++function notifyAll(object) { ++ var objClazz = java.lang.Class.forName('java.lang.Object'); ++ var notifyAllMethod = objClazz.getMethod('notifyAll', null); ++ notifyAllMethod.invoke(object, null); ++} 
++notifyAll.docString = "convenient wrapper for java.lang.Object.notifyAll method"; ++ ++ ++/** ++ * Creates a java.lang.Runnable from a given script ++ * function. ++ */ ++Function.prototype.runnable = function() { ++ var args = arguments; ++ var func = this; ++ return new java.lang.Runnable() { ++ run: function() { ++ func.apply(null, args); ++ } ++ } ++} ++ ++/** ++ * Executes the function on a new Java Thread. ++ */ ++Function.prototype.thread = function() { ++ var t = new java.lang.Thread(this.runnable.apply(this, arguments)); ++ t.start(); ++ return t; ++} ++ ++/** ++ * Executes the function on a new Java daemon Thread. ++ */ ++Function.prototype.daemon = function() { ++ var t = new java.lang.Thread(this.runnable.apply(this, arguments)); ++ t.setDaemon(true); ++ t.start(); ++ return t; ++} ++ ++/** ++ * Creates a java.util.concurrent.Callable from a given script ++ * function. ++ */ ++Function.prototype.callable = function() { ++ var args = arguments; ++ var func = this; ++ return new java.util.concurrent.Callable() { ++ call: function() { return func.apply(null, args); } ++ } ++} ++ ++/** ++ * Registers the script function so that it will be called exit. ++ */ ++Function.prototype.atexit = function () { ++ var args = arguments; ++ java.lang.Runtime.getRuntime().addShutdownHook( ++ new java.lang.Thread(this.runnable.apply(this, args))); ++} ++ ++/** ++ * Executes the function asynchronously. 
++ * ++ * @return a java.util.concurrent.FutureTask ++ */ ++Function.prototype.future = (function() { ++ // default executor for future ++ var juc = java.util.concurrent; ++ var theExecutor = juc.Executors.newSingleThreadExecutor(); ++ // clean-up the default executor at exit ++ (function() { theExecutor.shutdown(); }).atexit(); ++ return function() { ++ return theExecutor.submit(this.callable.apply(this, arguments)); ++ } ++})(); ++ ++// shortcut for j.u.c lock classes ++var Lock = java.util.concurrent.locks.ReentrantLock; ++var RWLock = java.util.concurrent.locks.ReentrantReadWriteLock; ++ ++/** ++ * Executes a function after acquiring given lock. On return, ++ * (normal or exceptional), lock is released. ++ * ++ * @param lock lock that is locked and unlocked ++ */ ++Function.prototype.sync = function (lock) { ++ if (arguments.length == 0) { ++ throw "lock is missing"; ++ } ++ var res = new Array(arguments.length - 1); ++ for (var i = 0; i < res.length; i++) { ++ res[i] = arguments[i + 1]; ++ } ++ lock.lock(); ++ try { ++ this.apply(null, res); ++ } finally { ++ lock.unlock(); ++ } ++}; ++ ++/** ++ * Causes current thread to sleep for specified ++ * number of milliseconds ++ * ++ * @param interval in milliseconds ++ */ ++function sleep(interval) { ++ java.lang.Thread.sleep(interval); ++} ++sleep.docString = "wrapper for java.lang.Thread.sleep method"; ++ ++/** ++ * Schedules a task to be executed once in N milliseconds specified. ++ * ++ * @param callback function or expression to evaluate ++ * @param interval in milliseconds to sleep ++ * @return timeout ID (which is nothing but Thread instance) ++ */ ++function setTimeout(callback, interval) { ++ if (! 
(callback instanceof Function)) { ++ callback = new Function(callback); ++ } ++ ++ // start a new thread that sleeps given time ++ // and calls callback in an infinite loop ++ return (function() { ++ try { ++ sleep(interval); ++ } catch (x) { } ++ callback(); ++ }).daemon(); ++} ++setTimeout.docString = "calls given callback once after specified interval"; ++ ++/** ++ * Cancels a timeout set earlier. ++ * @param tid timeout ID returned from setTimeout ++ */ ++function clearTimeout(tid) { ++ // we just interrupt the timer thread ++ tid.interrupt(); ++} ++clearTimeout.docString = "interrupt a setTimeout timer"; ++ ++/** ++ * Schedules a task to be executed once in ++ * every N milliseconds specified. ++ * ++ * @param callback function or expression to evaluate ++ * @param interval in milliseconds to sleep ++ * @return timeout ID (which is nothing but Thread instance) ++ */ ++function setInterval(callback, interval) { ++ if (! (callback instanceof Function)) { ++ callback = new Function(callback); ++ } ++ ++ // start a new thread that sleeps given time ++ // and calls callback in an infinite loop ++ return (function() { ++ while (true) { ++ try { ++ sleep(interval); ++ } catch (x) { ++ break; ++ } ++ callback(); ++ } ++ }).daemon(); ++} ++setInterval.docString = "calls given callback every specified interval"; ++ ++/** ++ * Cancels a timeout set earlier. ++ * @param tid timeout ID returned from setTimeout ++ */ ++function clearInterval(tid) { ++ // we just interrupt the timer thread ++ tid.interrupt(); ++} ++clearInterval.docString = "interrupt a setInterval timer"; ++ ++/** ++ * Simple access to thread local storage. 
++ * ++ * Script sample: ++ * ++ * __thread.x = 44; ++ * function f() { ++ * __thread.x = 'hello'; ++ * print(__thread.x); ++ * } ++ * f.thread(); // prints 'hello' ++ * print(__thread.x); // prints 44 in main thread ++ */ ++var __thread = (function () { ++ var map = new Object(); ++ return new JSAdapter() { ++ __has__: function(name) { ++ return map[name] != undefined; ++ }, ++ __get__: function(name) { ++ if (map[name] != undefined) { ++ return map[name].get(); ++ } else { ++ return undefined; ++ } ++ }, ++ __put__: sync(function(name, value) { ++ if (map[name] == undefined) { ++ var tmp = new java.lang.ThreadLocal(); ++ tmp.set(value); ++ map[name] = tmp; ++ } else { ++ map[name].set(value); ++ } ++ }), ++ __delete__: function(name) { ++ if (map[name] != undefined) { ++ map[name].set(null); ++ } ++ } ++ } ++})(); ++ ++// user interface utilities ++ ++/** ++ * Swing invokeLater - invokes given function in AWT event thread ++ */ ++Function.prototype.invokeLater = function() { ++ var SwingUtilities = Packages.javax.swing.SwingUtilities; ++ SwingUtilities.invokeLater(this.runnable.apply(this, arguments)); ++} ++ ++/** ++ * Swing invokeAndWait - invokes given function in AWT event thread ++ * and waits for it's completion ++ */ ++Function.prototype.invokeAndWait = function() { ++ var SwingUtilities = Packages.javax.swing.SwingUtilities; ++ SwingUtilities.invokeAndWait(this.runnable.apply(this, arguments)); ++} ++ ++/** ++ * Am I running in AWT event dispatcher thread? 
++ */ ++function isEventThread() { ++ var SwingUtilities = Packages.javax.swing.SwingUtilities; ++ return SwingUtilities.isEventDispatchThread(); ++} ++isEventThread.docString = "returns whether the current thread is GUI thread"; ++ ++/** ++ * Opens a file dialog box ++ * ++ * @param curDir current directory [optional] ++ * @return absolute path if file selected or else null ++ */ ++function fileDialog(curDir) { ++ var result; ++ function _fileDialog() { ++ if (curDir == undefined) curDir = undefined; ++ var JFileChooser = Packages.javax.swing.JFileChooser; ++ var dialog = new JFileChooser(curDir); ++ var res = dialog.showOpenDialog(null); ++ if (res == JFileChooser.APPROVE_OPTION) { ++ result = dialog.getSelectedFile().getAbsolutePath(); ++ } else { ++ result = null; ++ } ++ } ++ ++ if (isEventThread()) { ++ _fileDialog(); ++ } else { ++ _fileDialog.invokeAndWait(); ++ } ++ return result; ++} ++fileDialog.docString = "show a FileOpen dialog box"; ++ ++/** ++ * Shows a message box ++ * ++ * @param msg message to be shown ++ * @param title title of message box [optional] ++ * @param msgType type of message box [constants in JOptionPane] ++ */ ++function msgBox(msg, title, msgType) { ++ ++ function _msgBox() { ++ var JOptionPane = Packages.javax.swing.JOptionPane; ++ if (msg === undefined) msg = "undefined"; ++ if (msg === null) msg = "null"; ++ if (title == undefined) title = msg; ++ if (msgType == undefined) msgType = JOptionPane.INFORMATION_MESSAGE; ++ JOptionPane.showMessageDialog(window, msg, title, msgType); ++ } ++ if (isEventThread()) { ++ _msgBox(); ++ } else { ++ _msgBox.invokeAndWait(); ++ } ++} ++msgBox.docString = "shows MessageBox to the user"; ++ ++/** ++ * Shows an information alert box ++ * ++ * @param msg message to be shown ++ * @param title title of message box [optional] ++ */ ++function alert(msg, title) { ++ var JOptionPane = Packages.javax.swing.JOptionPane; ++ msgBox(msg, title, JOptionPane.INFORMATION_MESSAGE); ++} ++alert.docString = "shows 
an alert message box to the user"; ++ ++/** ++ * Shows an error alert box ++ * ++ * @param msg message to be shown ++ * @param title title of message box [optional] ++ */ ++function error(msg, title) { ++ var JOptionPane = Packages.javax.swing.JOptionPane; ++ msgBox(msg, title, JOptionPane.ERROR_MESSAGE); ++} ++error.docString = "shows an error message box to the user"; ++ ++ ++/** ++ * Shows a warning alert box ++ * ++ * @param msg message to be shown ++ * @param title title of message box [optional] ++ */ ++function warn(msg, title) { ++ var JOptionPane = Packages.javax.swing.JOptionPane; ++ msgBox(msg, title, JOptionPane.WARNING_MESSAGE); ++} ++warn.docString = "shows a warning message box to the user"; ++ ++ ++/** ++ * Shows a prompt dialog box ++ * ++ * @param question question to be asked ++ * @param answer default answer suggested [optional] ++ * @return answer given by user ++ */ ++function prompt(question, answer) { ++ var result; ++ function _prompt() { ++ var JOptionPane = Packages.javax.swing.JOptionPane; ++ if (answer == undefined) answer = ""; ++ result = JOptionPane.showInputDialog(window, question, answer); ++ } ++ if (isEventThread()) { ++ _prompt(); ++ } else { ++ _prompt.invokeAndWait(); ++ } ++ return result; ++} ++prompt.docString = "shows a prompt box to the user and returns the answer"; ++ ++/** ++ * Shows a confirmation dialog box ++ * ++ * @param msg message to be shown ++ * @param title title of message box [optional] ++ * @return boolean (yes->true, no->false) ++ */ ++function confirm(msg, title) { ++ var result; ++ var JOptionPane = Packages.javax.swing.JOptionPane; ++ function _confirm() { ++ if (title == undefined) title = msg; ++ var optionType = JOptionPane.YES_NO_OPTION; ++ result = JOptionPane.showConfirmDialog(null, msg, title, optionType); ++ } ++ if (isEventThread()) { ++ _confirm(); ++ } else { ++ _confirm.invokeAndWait(); ++ } ++ return result == JOptionPane.YES_OPTION; ++} ++confirm.docString = "shows a confirmation message 
box to the user"; ++ ++/** ++ * Echoes zero or more arguments supplied to screen. ++ * This is print equivalent for GUI. ++ * ++ * @param zero or more items to echo. ++ */ ++function echo() { ++ var args = arguments; ++ (function() { ++ var len = args.length; ++ for (var i = 0; i < len; i++) { ++ window.print(args[i]); ++ window.print(" "); ++ } ++ window.print("\n"); ++ }).invokeLater(); ++} ++echo.docString = "echoes arguments to interactive console screen"; ++ ++ ++/** ++ * Clear the screen ++ */ ++function clear() { ++ (function() { window.clear(false); }).invokeLater(); ++} ++clear.docString = "clears interactive console screen"; ++ ++ ++// synonym for clear ++var cls = clear; ++ ++ ++/** ++ * Exit the process after confirmation from user ++ * ++ * @param exitCode return code to OS [optional] ++ */ ++function exit(exitCode) { ++ if (exitCode == undefined) exitCode = 0; ++ if (confirm("Do you really want to exit?")) { ++ java.lang.System.exit(exitCode); ++ } ++} ++exit.docString = "exits jconsole"; ++ ++// synonym to exit ++var quit = exit; ++ +diff -uNr openjdk/jdk/src/share/lib/security/java.security-aix afu8u/jdk/src/share/lib/security/java.security-aix +--- openjdk/jdk/src/share/lib/security/java.security-aix 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/lib/security/java.security-aix 2025-05-06 11:13:08.387672959 +0800 +@@ -456,7 +456,16 @@ + # in the jdk.[tls|certpath|jar].disabledAlgorithms properties. To include this + # list in any of the disabledAlgorithms properties, add the property name as + # an entry. 
+-jdk.disabled.namedCurves = secp256k1 ++jdk.disabled.namedCurves = secp112r1, secp112r2, secp128r1, secp128r2, \ ++ secp160k1, secp160r1, secp160r2, secp192k1, secp192r1, secp224k1, \ ++ secp224r1, secp256k1, sect113r1, sect113r2, sect131r1, sect131r2, \ ++ sect163k1, sect163r1, sect163r2, sect193r1, sect193r2, sect233k1, \ ++ sect233r1, sect239k1, sect283k1, sect283r1, sect409k1, sect409r1, \ ++ sect571k1, sect571r1, X9.62 c2tnb191v1, X9.62 c2tnb191v2, \ ++ X9.62 c2tnb191v3, X9.62 c2tnb239v1, X9.62 c2tnb239v2, X9.62 c2tnb239v3, \ ++ X9.62 c2tnb359v1, X9.62 c2tnb431r1, X9.62 prime192v2, X9.62 prime192v3, \ ++ X9.62 prime239v1, X9.62 prime239v2, X9.62 prime239v3, brainpoolP256r1, \ ++ brainpoolP320r1, brainpoolP384r1, brainpoolP512r1 + + # + # Algorithm restrictions for certification path (CertPath) processing +diff -uNr openjdk/jdk/src/share/lib/security/java.security-linux afu8u/jdk/src/share/lib/security/java.security-linux +--- openjdk/jdk/src/share/lib/security/java.security-linux 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/lib/security/java.security-linux 2025-05-06 11:13:08.387672959 +0800 +@@ -456,7 +456,16 @@ + # in the jdk.[tls|certpath|jar].disabledAlgorithms properties. To include this + # list in any of the disabledAlgorithms properties, add the property name as + # an entry. 
+-jdk.disabled.namedCurves = secp256k1 ++jdk.disabled.namedCurves = secp112r1, secp112r2, secp128r1, secp128r2, \ ++ secp160k1, secp160r1, secp160r2, secp192k1, secp192r1, secp224k1, \ ++ secp224r1, secp256k1, sect113r1, sect113r2, sect131r1, sect131r2, \ ++ sect163k1, sect163r1, sect163r2, sect193r1, sect193r2, sect233k1, \ ++ sect233r1, sect239k1, sect283k1, sect283r1, sect409k1, sect409r1, \ ++ sect571k1, sect571r1, X9.62 c2tnb191v1, X9.62 c2tnb191v2, \ ++ X9.62 c2tnb191v3, X9.62 c2tnb239v1, X9.62 c2tnb239v2, X9.62 c2tnb239v3, \ ++ X9.62 c2tnb359v1, X9.62 c2tnb431r1, X9.62 prime192v2, X9.62 prime192v3, \ ++ X9.62 prime239v1, X9.62 prime239v2, X9.62 prime239v3, brainpoolP256r1, \ ++ brainpoolP320r1, brainpoolP384r1, brainpoolP512r1 + + # + # Algorithm restrictions for certification path (CertPath) processing +diff -uNr openjdk/jdk/src/share/lib/security/java.security-macosx afu8u/jdk/src/share/lib/security/java.security-macosx +--- openjdk/jdk/src/share/lib/security/java.security-macosx 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/lib/security/java.security-macosx 2025-05-06 11:13:08.387672959 +0800 +@@ -459,7 +459,16 @@ + # in the jdk.[tls|certpath|jar].disabledAlgorithms properties. To include this + # list in any of the disabledAlgorithms properties, add the property name as + # an entry. 
+-jdk.disabled.namedCurves = secp256k1 ++jdk.disabled.namedCurves = secp112r1, secp112r2, secp128r1, secp128r2, \ ++ secp160k1, secp160r1, secp160r2, secp192k1, secp192r1, secp224k1, \ ++ secp224r1, secp256k1, sect113r1, sect113r2, sect131r1, sect131r2, \ ++ sect163k1, sect163r1, sect163r2, sect193r1, sect193r2, sect233k1, \ ++ sect233r1, sect239k1, sect283k1, sect283r1, sect409k1, sect409r1, \ ++ sect571k1, sect571r1, X9.62 c2tnb191v1, X9.62 c2tnb191v2, \ ++ X9.62 c2tnb191v3, X9.62 c2tnb239v1, X9.62 c2tnb239v2, X9.62 c2tnb239v3, \ ++ X9.62 c2tnb359v1, X9.62 c2tnb431r1, X9.62 prime192v2, X9.62 prime192v3, \ ++ X9.62 prime239v1, X9.62 prime239v2, X9.62 prime239v3, brainpoolP256r1, \ ++ brainpoolP320r1, brainpoolP384r1, brainpoolP512r1 + + # + # Algorithm restrictions for certification path (CertPath) processing +diff -uNr openjdk/jdk/src/share/lib/security/java.security-solaris afu8u/jdk/src/share/lib/security/java.security-solaris +--- openjdk/jdk/src/share/lib/security/java.security-solaris 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/lib/security/java.security-solaris 2025-05-06 11:13:08.387672959 +0800 +@@ -457,7 +457,16 @@ + # in the jdk.[tls|certpath|jar].disabledAlgorithms properties. To include this + # list in any of the disabledAlgorithms properties, add the property name as + # an entry. 
+-jdk.disabled.namedCurves = secp256k1 ++jdk.disabled.namedCurves = secp112r1, secp112r2, secp128r1, secp128r2, \ ++ secp160k1, secp160r1, secp160r2, secp192k1, secp192r1, secp224k1, \ ++ secp224r1, secp256k1, sect113r1, sect113r2, sect131r1, sect131r2, \ ++ sect163k1, sect163r1, sect163r2, sect193r1, sect193r2, sect233k1, \ ++ sect233r1, sect239k1, sect283k1, sect283r1, sect409k1, sect409r1, \ ++ sect571k1, sect571r1, X9.62 c2tnb191v1, X9.62 c2tnb191v2, \ ++ X9.62 c2tnb191v3, X9.62 c2tnb239v1, X9.62 c2tnb239v2, X9.62 c2tnb239v3, \ ++ X9.62 c2tnb359v1, X9.62 c2tnb431r1, X9.62 prime192v2, X9.62 prime192v3, \ ++ X9.62 prime239v1, X9.62 prime239v2, X9.62 prime239v3, brainpoolP256r1, \ ++ brainpoolP320r1, brainpoolP384r1, brainpoolP512r1 + + # + # Algorithm restrictions for certification path (CertPath) processing +diff -uNr openjdk/jdk/src/share/lib/security/java.security-windows afu8u/jdk/src/share/lib/security/java.security-windows +--- openjdk/jdk/src/share/lib/security/java.security-windows 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/lib/security/java.security-windows 2025-05-06 11:13:08.387672959 +0800 +@@ -459,7 +459,16 @@ + # in the jdk.[tls|certpath|jar].disabledAlgorithms properties. To include this + # list in any of the disabledAlgorithms properties, add the property name as + # an entry. 
+-jdk.disabled.namedCurves = secp256k1
++jdk.disabled.namedCurves = secp112r1, secp112r2, secp128r1, secp128r2, \
++    secp160k1, secp160r1, secp160r2, secp192k1, secp192r1, secp224k1, \
++    secp224r1, secp256k1, sect113r1, sect113r2, sect131r1, sect131r2, \
++    sect163k1, sect163r1, sect163r2, sect193r1, sect193r2, sect233k1, \
++    sect233r1, sect239k1, sect283k1, sect283r1, sect409k1, sect409r1, \
++    sect571k1, sect571r1, X9.62 c2tnb191v1, X9.62 c2tnb191v2, \
++    X9.62 c2tnb191v3, X9.62 c2tnb239v1, X9.62 c2tnb239v2, X9.62 c2tnb239v3, \
++    X9.62 c2tnb359v1, X9.62 c2tnb431r1, X9.62 prime192v2, X9.62 prime192v3, \
++    X9.62 prime239v1, X9.62 prime239v2, X9.62 prime239v3, brainpoolP256r1, \
++    brainpoolP320r1, brainpoolP384r1, brainpoolP512r1
+ 
+ #
+ # Algorithm restrictions for certification path (CertPath) processing
+diff -uNr openjdk/jdk/src/share/native/com/sun/media/sound/SoundDefs.h afu8u/jdk/src/share/native/com/sun/media/sound/SoundDefs.h
+--- openjdk/jdk/src/share/native/com/sun/media/sound/SoundDefs.h	2023-04-19 05:53:05.000000000 +0800
++++ afu8u/jdk/src/share/native/com/sun/media/sound/SoundDefs.h	2025-05-06 10:53:46.659633725 +0800
+@@ -45,6 +45,7 @@
+ #define X_PPC 8
+ #define X_PPC64 9
+ #define X_PPC64LE 10
++#define X_SW64 12
+ #define X_AARCH64 11
+ 
+ // **********************************
+diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_163.c afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_163.c
+--- openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_163.c	1970-01-01 08:00:00.000000000 +0800
++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_163.c	2025-05-06 10:53:46.731633728 +0800
+@@ -0,0 +1,260 @@
++/*
++ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
++ * Use is subject to license terms.
++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for binary polynomial field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Sheueling Chang-Shantz , ++ * Stephen Fung , and ++ * Douglas Stebila , Sun Microsystems Laboratories. ++ * ++ *********************************************************************** */ ++ ++#include "ec2.h" ++#include "mp_gf2m.h" ++#include "mp_gf2m-priv.h" ++#include "mpi.h" ++#include "mpi-priv.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++/* Fast reduction for polynomials over a 163-bit curve. Assumes reduction ++ * polynomial with terms {163, 7, 6, 3, 0}. 
*/ ++mp_err ++ec_GF2m_163_mod(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit *u, z; ++ ++ if (a != r) { ++ MP_CHECKOK(mp_copy(a, r)); ++ } ++#ifdef ECL_SIXTY_FOUR_BIT ++ if (MP_USED(r) < 6) { ++ MP_CHECKOK(s_mp_pad(r, 6)); ++ } ++ u = MP_DIGITS(r); ++ MP_USED(r) = 6; ++ ++ /* u[5] only has 6 significant bits */ ++ z = u[5]; ++ u[2] ^= (z << 36) ^ (z << 35) ^ (z << 32) ^ (z << 29); ++ z = u[4]; ++ u[2] ^= (z >> 28) ^ (z >> 29) ^ (z >> 32) ^ (z >> 35); ++ u[1] ^= (z << 36) ^ (z << 35) ^ (z << 32) ^ (z << 29); ++ z = u[3]; ++ u[1] ^= (z >> 28) ^ (z >> 29) ^ (z >> 32) ^ (z >> 35); ++ u[0] ^= (z << 36) ^ (z << 35) ^ (z << 32) ^ (z << 29); ++ z = u[2] >> 35; /* z only has 29 significant bits */ ++ u[0] ^= (z << 7) ^ (z << 6) ^ (z << 3) ^ z; ++ /* clear bits above 163 */ ++ u[5] = u[4] = u[3] = 0; ++ u[2] ^= z << 35; ++#else ++ if (MP_USED(r) < 11) { ++ MP_CHECKOK(s_mp_pad(r, 11)); ++ } ++ u = MP_DIGITS(r); ++ MP_USED(r) = 11; ++ ++ /* u[11] only has 6 significant bits */ ++ z = u[10]; ++ u[5] ^= (z << 4) ^ (z << 3) ^ z ^ (z >> 3); ++ u[4] ^= (z << 29); ++ z = u[9]; ++ u[5] ^= (z >> 28) ^ (z >> 29); ++ u[4] ^= (z << 4) ^ (z << 3) ^ z ^ (z >> 3); ++ u[3] ^= (z << 29); ++ z = u[8]; ++ u[4] ^= (z >> 28) ^ (z >> 29); ++ u[3] ^= (z << 4) ^ (z << 3) ^ z ^ (z >> 3); ++ u[2] ^= (z << 29); ++ z = u[7]; ++ u[3] ^= (z >> 28) ^ (z >> 29); ++ u[2] ^= (z << 4) ^ (z << 3) ^ z ^ (z >> 3); ++ u[1] ^= (z << 29); ++ z = u[6]; ++ u[2] ^= (z >> 28) ^ (z >> 29); ++ u[1] ^= (z << 4) ^ (z << 3) ^ z ^ (z >> 3); ++ u[0] ^= (z << 29); ++ z = u[5] >> 3; /* z only has 29 significant bits */ ++ u[1] ^= (z >> 25) ^ (z >> 26); ++ u[0] ^= (z << 7) ^ (z << 6) ^ (z << 3) ^ z; ++ /* clear bits above 163 */ ++ u[11] = u[10] = u[9] = u[8] = u[7] = u[6] = 0; ++ u[5] ^= z << 3; ++#endif ++ s_mp_clamp(r); ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Fast squaring for polynomials over a 163-bit curve. Assumes reduction ++ * polynomial with terms {163, 7, 6, 3, 0}. 
*/ ++mp_err ++ec_GF2m_163_sqr(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit *u, *v; ++ ++ v = MP_DIGITS(a); ++ ++#ifdef ECL_SIXTY_FOUR_BIT ++ if (MP_USED(a) < 3) { ++ return mp_bsqrmod(a, meth->irr_arr, r); ++ } ++ if (MP_USED(r) < 6) { ++ MP_CHECKOK(s_mp_pad(r, 6)); ++ } ++ MP_USED(r) = 6; ++#else ++ if (MP_USED(a) < 6) { ++ return mp_bsqrmod(a, meth->irr_arr, r); ++ } ++ if (MP_USED(r) < 12) { ++ MP_CHECKOK(s_mp_pad(r, 12)); ++ } ++ MP_USED(r) = 12; ++#endif ++ u = MP_DIGITS(r); ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ u[11] = gf2m_SQR1(v[5]); ++ u[10] = gf2m_SQR0(v[5]); ++ u[9] = gf2m_SQR1(v[4]); ++ u[8] = gf2m_SQR0(v[4]); ++ u[7] = gf2m_SQR1(v[3]); ++ u[6] = gf2m_SQR0(v[3]); ++#endif ++ u[5] = gf2m_SQR1(v[2]); ++ u[4] = gf2m_SQR0(v[2]); ++ u[3] = gf2m_SQR1(v[1]); ++ u[2] = gf2m_SQR0(v[1]); ++ u[1] = gf2m_SQR1(v[0]); ++ u[0] = gf2m_SQR0(v[0]); ++ return ec_GF2m_163_mod(r, r, meth); ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Fast multiplication for polynomials over a 163-bit curve. Assumes ++ * reduction polynomial with terms {163, 7, 6, 3, 0}. 
*/ ++mp_err ++ec_GF2m_163_mul(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit a2 = 0, a1 = 0, a0, b2 = 0, b1 = 0, b0; ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ mp_digit a5 = 0, a4 = 0, a3 = 0, b5 = 0, b4 = 0, b3 = 0; ++ mp_digit rm[6]; ++#endif ++ ++ if (a == b) { ++ return ec_GF2m_163_sqr(a, r, meth); ++ } else { ++ switch (MP_USED(a)) { ++#ifdef ECL_THIRTY_TWO_BIT ++ case 6: ++ a5 = MP_DIGIT(a, 5); ++ case 5: ++ a4 = MP_DIGIT(a, 4); ++ case 4: ++ a3 = MP_DIGIT(a, 3); ++#endif ++ case 3: ++ a2 = MP_DIGIT(a, 2); ++ case 2: ++ a1 = MP_DIGIT(a, 1); ++ default: ++ a0 = MP_DIGIT(a, 0); ++ } ++ switch (MP_USED(b)) { ++#ifdef ECL_THIRTY_TWO_BIT ++ case 6: ++ b5 = MP_DIGIT(b, 5); ++ case 5: ++ b4 = MP_DIGIT(b, 4); ++ case 4: ++ b3 = MP_DIGIT(b, 3); ++#endif ++ case 3: ++ b2 = MP_DIGIT(b, 2); ++ case 2: ++ b1 = MP_DIGIT(b, 1); ++ default: ++ b0 = MP_DIGIT(b, 0); ++ } ++#ifdef ECL_SIXTY_FOUR_BIT ++ MP_CHECKOK(s_mp_pad(r, 6)); ++ s_bmul_3x3(MP_DIGITS(r), a2, a1, a0, b2, b1, b0); ++ MP_USED(r) = 6; ++ s_mp_clamp(r); ++#else ++ MP_CHECKOK(s_mp_pad(r, 12)); ++ s_bmul_3x3(MP_DIGITS(r) + 6, a5, a4, a3, b5, b4, b3); ++ s_bmul_3x3(MP_DIGITS(r), a2, a1, a0, b2, b1, b0); ++ s_bmul_3x3(rm, a5 ^ a2, a4 ^ a1, a3 ^ a0, b5 ^ b2, b4 ^ b1, ++ b3 ^ b0); ++ rm[5] ^= MP_DIGIT(r, 5) ^ MP_DIGIT(r, 11); ++ rm[4] ^= MP_DIGIT(r, 4) ^ MP_DIGIT(r, 10); ++ rm[3] ^= MP_DIGIT(r, 3) ^ MP_DIGIT(r, 9); ++ rm[2] ^= MP_DIGIT(r, 2) ^ MP_DIGIT(r, 8); ++ rm[1] ^= MP_DIGIT(r, 1) ^ MP_DIGIT(r, 7); ++ rm[0] ^= MP_DIGIT(r, 0) ^ MP_DIGIT(r, 6); ++ MP_DIGIT(r, 8) ^= rm[5]; ++ MP_DIGIT(r, 7) ^= rm[4]; ++ MP_DIGIT(r, 6) ^= rm[3]; ++ MP_DIGIT(r, 5) ^= rm[2]; ++ MP_DIGIT(r, 4) ^= rm[1]; ++ MP_DIGIT(r, 3) ^= rm[0]; ++ MP_USED(r) = 12; ++ s_mp_clamp(r); ++#endif ++ return ec_GF2m_163_mod(r, r, meth); ++ } ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Wire in fast field arithmetic for 163-bit curves. 
*/ ++mp_err ++ec_group_set_gf2m163(ECGroup *group, ECCurveName name) ++{ ++ group->meth->field_mod = &ec_GF2m_163_mod; ++ group->meth->field_mul = &ec_GF2m_163_mul; ++ group->meth->field_sqr = &ec_GF2m_163_sqr; ++ return MP_OKAY; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_193.c afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_193.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_193.c 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_193.c 2025-05-06 10:53:46.731633728 +0800 +@@ -0,0 +1,277 @@ ++/* ++ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for binary polynomial field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. 
++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Sheueling Chang-Shantz , ++ * Stephen Fung , and ++ * Douglas Stebila , Sun Microsystems Laboratories. ++ * ++ *********************************************************************** */ ++ ++#include "ec2.h" ++#include "mp_gf2m.h" ++#include "mp_gf2m-priv.h" ++#include "mpi.h" ++#include "mpi-priv.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++/* Fast reduction for polynomials over a 193-bit curve. Assumes reduction ++ * polynomial with terms {193, 15, 0}. */ ++mp_err ++ec_GF2m_193_mod(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit *u, z; ++ ++ if (a != r) { ++ MP_CHECKOK(mp_copy(a, r)); ++ } ++#ifdef ECL_SIXTY_FOUR_BIT ++ if (MP_USED(r) < 7) { ++ MP_CHECKOK(s_mp_pad(r, 7)); ++ } ++ u = MP_DIGITS(r); ++ MP_USED(r) = 7; ++ ++ /* u[6] only has 2 significant bits */ ++ z = u[6]; ++ u[3] ^= (z << 14) ^ (z >> 1); ++ u[2] ^= (z << 63); ++ z = u[5]; ++ u[3] ^= (z >> 50); ++ u[2] ^= (z << 14) ^ (z >> 1); ++ u[1] ^= (z << 63); ++ z = u[4]; ++ u[2] ^= (z >> 50); ++ u[1] ^= (z << 14) ^ (z >> 1); ++ u[0] ^= (z << 63); ++ z = u[3] >> 1; /* z only has 63 significant bits */ ++ u[1] ^= (z >> 49); ++ u[0] ^= (z << 15) ^ z; ++ /* clear bits above 193 */ ++ u[6] = u[5] = u[4] = 0; ++ u[3] ^= z << 1; ++#else ++ if (MP_USED(r) < 13) { ++ MP_CHECKOK(s_mp_pad(r, 13)); ++ } ++ u = MP_DIGITS(r); ++ MP_USED(r) = 13; ++ ++ /* u[12] only has 2 significant bits */ ++ z = u[12]; ++ u[6] ^= (z << 14) ^ (z >> 1); ++ u[5] ^= (z << 31); ++ z = u[11]; ++ u[6] ^= (z >> 18); ++ u[5] ^= (z << 14) ^ (z >> 1); ++ u[4] ^= (z << 31); ++ z = u[10]; ++ u[5] ^= (z >> 18); ++ u[4] ^= (z << 14) ^ (z >> 1); ++ u[3] ^= (z << 31); ++ z = u[9]; ++ u[4] ^= (z >> 18); ++ u[3] ^= (z << 14) ^ (z >> 1); ++ u[2] ^= (z << 31); ++ z = u[8]; ++ u[3] ^= (z >> 18); ++ u[2] ^= (z << 14) ^ (z >> 1); ++ u[1] ^= (z << 31); ++ z = u[7]; ++ 
u[2] ^= (z >> 18); ++ u[1] ^= (z << 14) ^ (z >> 1); ++ u[0] ^= (z << 31); ++ z = u[6] >> 1; /* z only has 31 significant bits */ ++ u[1] ^= (z >> 17); ++ u[0] ^= (z << 15) ^ z; ++ /* clear bits above 193 */ ++ u[12] = u[11] = u[10] = u[9] = u[8] = u[7] = 0; ++ u[6] ^= z << 1; ++#endif ++ s_mp_clamp(r); ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Fast squaring for polynomials over a 193-bit curve. Assumes reduction ++ * polynomial with terms {193, 15, 0}. */ ++mp_err ++ec_GF2m_193_sqr(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit *u, *v; ++ ++ v = MP_DIGITS(a); ++ ++#ifdef ECL_SIXTY_FOUR_BIT ++ if (MP_USED(a) < 4) { ++ return mp_bsqrmod(a, meth->irr_arr, r); ++ } ++ if (MP_USED(r) < 7) { ++ MP_CHECKOK(s_mp_pad(r, 7)); ++ } ++ MP_USED(r) = 7; ++#else ++ if (MP_USED(a) < 7) { ++ return mp_bsqrmod(a, meth->irr_arr, r); ++ } ++ if (MP_USED(r) < 13) { ++ MP_CHECKOK(s_mp_pad(r, 13)); ++ } ++ MP_USED(r) = 13; ++#endif ++ u = MP_DIGITS(r); ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ u[12] = gf2m_SQR0(v[6]); ++ u[11] = gf2m_SQR1(v[5]); ++ u[10] = gf2m_SQR0(v[5]); ++ u[9] = gf2m_SQR1(v[4]); ++ u[8] = gf2m_SQR0(v[4]); ++ u[7] = gf2m_SQR1(v[3]); ++#endif ++ u[6] = gf2m_SQR0(v[3]); ++ u[5] = gf2m_SQR1(v[2]); ++ u[4] = gf2m_SQR0(v[2]); ++ u[3] = gf2m_SQR1(v[1]); ++ u[2] = gf2m_SQR0(v[1]); ++ u[1] = gf2m_SQR1(v[0]); ++ u[0] = gf2m_SQR0(v[0]); ++ return ec_GF2m_193_mod(r, r, meth); ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Fast multiplication for polynomials over a 193-bit curve. Assumes ++ * reduction polynomial with terms {193, 15, 0}. 
*/ ++mp_err ++ec_GF2m_193_mul(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit a3 = 0, a2 = 0, a1 = 0, a0, b3 = 0, b2 = 0, b1 = 0, b0; ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ mp_digit a6 = 0, a5 = 0, a4 = 0, b6 = 0, b5 = 0, b4 = 0; ++ mp_digit rm[8]; ++#endif ++ ++ if (a == b) { ++ return ec_GF2m_193_sqr(a, r, meth); ++ } else { ++ switch (MP_USED(a)) { ++#ifdef ECL_THIRTY_TWO_BIT ++ case 7: ++ a6 = MP_DIGIT(a, 6); ++ case 6: ++ a5 = MP_DIGIT(a, 5); ++ case 5: ++ a4 = MP_DIGIT(a, 4); ++#endif ++ case 4: ++ a3 = MP_DIGIT(a, 3); ++ case 3: ++ a2 = MP_DIGIT(a, 2); ++ case 2: ++ a1 = MP_DIGIT(a, 1); ++ default: ++ a0 = MP_DIGIT(a, 0); ++ } ++ switch (MP_USED(b)) { ++#ifdef ECL_THIRTY_TWO_BIT ++ case 7: ++ b6 = MP_DIGIT(b, 6); ++ case 6: ++ b5 = MP_DIGIT(b, 5); ++ case 5: ++ b4 = MP_DIGIT(b, 4); ++#endif ++ case 4: ++ b3 = MP_DIGIT(b, 3); ++ case 3: ++ b2 = MP_DIGIT(b, 2); ++ case 2: ++ b1 = MP_DIGIT(b, 1); ++ default: ++ b0 = MP_DIGIT(b, 0); ++ } ++#ifdef ECL_SIXTY_FOUR_BIT ++ MP_CHECKOK(s_mp_pad(r, 8)); ++ s_bmul_4x4(MP_DIGITS(r), a3, a2, a1, a0, b3, b2, b1, b0); ++ MP_USED(r) = 8; ++ s_mp_clamp(r); ++#else ++ MP_CHECKOK(s_mp_pad(r, 14)); ++ s_bmul_3x3(MP_DIGITS(r) + 8, a6, a5, a4, b6, b5, b4); ++ s_bmul_4x4(MP_DIGITS(r), a3, a2, a1, a0, b3, b2, b1, b0); ++ s_bmul_4x4(rm, a3, a6 ^ a2, a5 ^ a1, a4 ^ a0, b3, b6 ^ b2, b5 ^ b1, ++ b4 ^ b0); ++ rm[7] ^= MP_DIGIT(r, 7); ++ rm[6] ^= MP_DIGIT(r, 6); ++ rm[5] ^= MP_DIGIT(r, 5) ^ MP_DIGIT(r, 13); ++ rm[4] ^= MP_DIGIT(r, 4) ^ MP_DIGIT(r, 12); ++ rm[3] ^= MP_DIGIT(r, 3) ^ MP_DIGIT(r, 11); ++ rm[2] ^= MP_DIGIT(r, 2) ^ MP_DIGIT(r, 10); ++ rm[1] ^= MP_DIGIT(r, 1) ^ MP_DIGIT(r, 9); ++ rm[0] ^= MP_DIGIT(r, 0) ^ MP_DIGIT(r, 8); ++ MP_DIGIT(r, 11) ^= rm[7]; ++ MP_DIGIT(r, 10) ^= rm[6]; ++ MP_DIGIT(r, 9) ^= rm[5]; ++ MP_DIGIT(r, 8) ^= rm[4]; ++ MP_DIGIT(r, 7) ^= rm[3]; ++ MP_DIGIT(r, 6) ^= rm[2]; ++ MP_DIGIT(r, 5) ^= rm[1]; ++ MP_DIGIT(r, 4) ^= rm[0]; ++ MP_USED(r) = 14; ++ 
s_mp_clamp(r); ++#endif ++ return ec_GF2m_193_mod(r, r, meth); ++ } ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Wire in fast field arithmetic for 193-bit curves. */ ++mp_err ++ec_group_set_gf2m193(ECGroup *group, ECCurveName name) ++{ ++ group->meth->field_mod = &ec_GF2m_193_mod; ++ group->meth->field_mul = &ec_GF2m_193_mul; ++ group->meth->field_sqr = &ec_GF2m_193_sqr; ++ return MP_OKAY; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_233.c afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_233.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_233.c 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_233.c 2025-05-06 10:53:46.731633728 +0800 +@@ -0,0 +1,300 @@ ++/* ++ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for binary polynomial field curves. 
++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Sheueling Chang-Shantz , ++ * Stephen Fung , and ++ * Douglas Stebila , Sun Microsystems Laboratories. ++ * ++ *********************************************************************** */ ++ ++#include "ec2.h" ++#include "mp_gf2m.h" ++#include "mp_gf2m-priv.h" ++#include "mpi.h" ++#include "mpi-priv.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++/* Fast reduction for polynomials over a 233-bit curve. Assumes reduction ++ * polynomial with terms {233, 74, 0}. */ ++mp_err ++ec_GF2m_233_mod(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit *u, z; ++ ++ if (a != r) { ++ MP_CHECKOK(mp_copy(a, r)); ++ } ++#ifdef ECL_SIXTY_FOUR_BIT ++ if (MP_USED(r) < 8) { ++ MP_CHECKOK(s_mp_pad(r, 8)); ++ } ++ u = MP_DIGITS(r); ++ MP_USED(r) = 8; ++ ++ /* u[7] only has 18 significant bits */ ++ z = u[7]; ++ u[4] ^= (z << 33) ^ (z >> 41); ++ u[3] ^= (z << 23); ++ z = u[6]; ++ u[4] ^= (z >> 31); ++ u[3] ^= (z << 33) ^ (z >> 41); ++ u[2] ^= (z << 23); ++ z = u[5]; ++ u[3] ^= (z >> 31); ++ u[2] ^= (z << 33) ^ (z >> 41); ++ u[1] ^= (z << 23); ++ z = u[4]; ++ u[2] ^= (z >> 31); ++ u[1] ^= (z << 33) ^ (z >> 41); ++ u[0] ^= (z << 23); ++ z = u[3] >> 41; /* z only has 23 significant bits */ ++ u[1] ^= (z << 10); ++ u[0] ^= z; ++ /* clear bits above 233 */ ++ u[7] = u[6] = u[5] = u[4] = 0; ++ u[3] ^= z << 41; ++#else ++ if (MP_USED(r) < 15) { ++ MP_CHECKOK(s_mp_pad(r, 15)); ++ } ++ u = MP_DIGITS(r); ++ MP_USED(r) = 15; ++ ++ /* u[14] only has 18 significant bits */ ++ z = u[14]; ++ u[9] ^= (z << 1); ++ u[7] ^= (z >> 9); ++ u[6] ^= (z << 23); ++ z = u[13]; ++ u[9] ^= (z >> 31); ++ u[8] ^= (z << 1); ++ u[6] ^= (z >> 9); ++ u[5] ^= (z << 23); ++ z = u[12]; ++ u[8] ^= (z >> 31); ++ u[7] ^= (z << 1); ++ u[5] ^= (z >> 9); ++ u[4] ^= (z 
<< 23); ++ z = u[11]; ++ u[7] ^= (z >> 31); ++ u[6] ^= (z << 1); ++ u[4] ^= (z >> 9); ++ u[3] ^= (z << 23); ++ z = u[10]; ++ u[6] ^= (z >> 31); ++ u[5] ^= (z << 1); ++ u[3] ^= (z >> 9); ++ u[2] ^= (z << 23); ++ z = u[9]; ++ u[5] ^= (z >> 31); ++ u[4] ^= (z << 1); ++ u[2] ^= (z >> 9); ++ u[1] ^= (z << 23); ++ z = u[8]; ++ u[4] ^= (z >> 31); ++ u[3] ^= (z << 1); ++ u[1] ^= (z >> 9); ++ u[0] ^= (z << 23); ++ z = u[7] >> 9; /* z only has 23 significant bits */ ++ u[3] ^= (z >> 22); ++ u[2] ^= (z << 10); ++ u[0] ^= z; ++ /* clear bits above 233 */ ++ u[14] = u[13] = u[12] = u[11] = u[10] = u[9] = u[8] = 0; ++ u[7] ^= z << 9; ++#endif ++ s_mp_clamp(r); ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Fast squaring for polynomials over a 233-bit curve. Assumes reduction ++ * polynomial with terms {233, 74, 0}. */ ++mp_err ++ec_GF2m_233_sqr(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit *u, *v; ++ ++ v = MP_DIGITS(a); ++ ++#ifdef ECL_SIXTY_FOUR_BIT ++ if (MP_USED(a) < 4) { ++ return mp_bsqrmod(a, meth->irr_arr, r); ++ } ++ if (MP_USED(r) < 8) { ++ MP_CHECKOK(s_mp_pad(r, 8)); ++ } ++ MP_USED(r) = 8; ++#else ++ if (MP_USED(a) < 8) { ++ return mp_bsqrmod(a, meth->irr_arr, r); ++ } ++ if (MP_USED(r) < 15) { ++ MP_CHECKOK(s_mp_pad(r, 15)); ++ } ++ MP_USED(r) = 15; ++#endif ++ u = MP_DIGITS(r); ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ u[14] = gf2m_SQR0(v[7]); ++ u[13] = gf2m_SQR1(v[6]); ++ u[12] = gf2m_SQR0(v[6]); ++ u[11] = gf2m_SQR1(v[5]); ++ u[10] = gf2m_SQR0(v[5]); ++ u[9] = gf2m_SQR1(v[4]); ++ u[8] = gf2m_SQR0(v[4]); ++#endif ++ u[7] = gf2m_SQR1(v[3]); ++ u[6] = gf2m_SQR0(v[3]); ++ u[5] = gf2m_SQR1(v[2]); ++ u[4] = gf2m_SQR0(v[2]); ++ u[3] = gf2m_SQR1(v[1]); ++ u[2] = gf2m_SQR0(v[1]); ++ u[1] = gf2m_SQR1(v[0]); ++ u[0] = gf2m_SQR0(v[0]); ++ return ec_GF2m_233_mod(r, r, meth); ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Fast multiplication for polynomials over a 233-bit curve. Assumes ++ * reduction polynomial with terms {233, 74, 0}. 
*/ ++mp_err ++ec_GF2m_233_mul(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit a3 = 0, a2 = 0, a1 = 0, a0, b3 = 0, b2 = 0, b1 = 0, b0; ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ mp_digit a7 = 0, a6 = 0, a5 = 0, a4 = 0, b7 = 0, b6 = 0, b5 = 0, b4 = ++ 0; ++ mp_digit rm[8]; ++#endif ++ ++ if (a == b) { ++ return ec_GF2m_233_sqr(a, r, meth); ++ } else { ++ switch (MP_USED(a)) { ++#ifdef ECL_THIRTY_TWO_BIT ++ case 8: ++ a7 = MP_DIGIT(a, 7); ++ case 7: ++ a6 = MP_DIGIT(a, 6); ++ case 6: ++ a5 = MP_DIGIT(a, 5); ++ case 5: ++ a4 = MP_DIGIT(a, 4); ++#endif ++ case 4: ++ a3 = MP_DIGIT(a, 3); ++ case 3: ++ a2 = MP_DIGIT(a, 2); ++ case 2: ++ a1 = MP_DIGIT(a, 1); ++ default: ++ a0 = MP_DIGIT(a, 0); ++ } ++ switch (MP_USED(b)) { ++#ifdef ECL_THIRTY_TWO_BIT ++ case 8: ++ b7 = MP_DIGIT(b, 7); ++ case 7: ++ b6 = MP_DIGIT(b, 6); ++ case 6: ++ b5 = MP_DIGIT(b, 5); ++ case 5: ++ b4 = MP_DIGIT(b, 4); ++#endif ++ case 4: ++ b3 = MP_DIGIT(b, 3); ++ case 3: ++ b2 = MP_DIGIT(b, 2); ++ case 2: ++ b1 = MP_DIGIT(b, 1); ++ default: ++ b0 = MP_DIGIT(b, 0); ++ } ++#ifdef ECL_SIXTY_FOUR_BIT ++ MP_CHECKOK(s_mp_pad(r, 8)); ++ s_bmul_4x4(MP_DIGITS(r), a3, a2, a1, a0, b3, b2, b1, b0); ++ MP_USED(r) = 8; ++ s_mp_clamp(r); ++#else ++ MP_CHECKOK(s_mp_pad(r, 16)); ++ s_bmul_4x4(MP_DIGITS(r) + 8, a7, a6, a5, a4, b7, b6, b5, b4); ++ s_bmul_4x4(MP_DIGITS(r), a3, a2, a1, a0, b3, b2, b1, b0); ++ s_bmul_4x4(rm, a7 ^ a3, a6 ^ a2, a5 ^ a1, a4 ^ a0, b7 ^ b3, ++ b6 ^ b2, b5 ^ b1, b4 ^ b0); ++ rm[7] ^= MP_DIGIT(r, 7) ^ MP_DIGIT(r, 15); ++ rm[6] ^= MP_DIGIT(r, 6) ^ MP_DIGIT(r, 14); ++ rm[5] ^= MP_DIGIT(r, 5) ^ MP_DIGIT(r, 13); ++ rm[4] ^= MP_DIGIT(r, 4) ^ MP_DIGIT(r, 12); ++ rm[3] ^= MP_DIGIT(r, 3) ^ MP_DIGIT(r, 11); ++ rm[2] ^= MP_DIGIT(r, 2) ^ MP_DIGIT(r, 10); ++ rm[1] ^= MP_DIGIT(r, 1) ^ MP_DIGIT(r, 9); ++ rm[0] ^= MP_DIGIT(r, 0) ^ MP_DIGIT(r, 8); ++ MP_DIGIT(r, 11) ^= rm[7]; ++ MP_DIGIT(r, 10) ^= rm[6]; ++ MP_DIGIT(r, 9) ^= rm[5]; ++ MP_DIGIT(r, 8) ^= rm[4]; 
++ MP_DIGIT(r, 7) ^= rm[3]; ++ MP_DIGIT(r, 6) ^= rm[2]; ++ MP_DIGIT(r, 5) ^= rm[1]; ++ MP_DIGIT(r, 4) ^= rm[0]; ++ MP_USED(r) = 16; ++ s_mp_clamp(r); ++#endif ++ return ec_GF2m_233_mod(r, r, meth); ++ } ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Wire in fast field arithmetic for 233-bit curves. */ ++mp_err ++ec_group_set_gf2m233(ECGroup *group, ECCurveName name) ++{ ++ group->meth->field_mod = &ec_GF2m_233_mod; ++ group->meth->field_mul = &ec_GF2m_233_mul; ++ group->meth->field_sqr = &ec_GF2m_233_sqr; ++ return MP_OKAY; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_aff.c afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_aff.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_aff.c 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_aff.c 2025-05-06 10:53:46.735633728 +0800 +@@ -0,0 +1,349 @@ ++/* ++ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for binary polynomial field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Douglas Stebila , Sun Microsystems Laboratories ++ * ++ * Last Modified Date from the Original Code: May 2017 ++ *********************************************************************** */ ++ ++#include "ec2.h" ++#include "mplogic.h" ++#include "mp_gf2m.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++/* Checks if point P(px, py) is at infinity. Uses affine coordinates. */ ++mp_err ++ec_GF2m_pt_is_inf_aff(const mp_int *px, const mp_int *py) ++{ ++ ++ if ((mp_cmp_z(px) == 0) && (mp_cmp_z(py) == 0)) { ++ return MP_YES; ++ } else { ++ return MP_NO; ++ } ++ ++} ++ ++/* Sets P(px, py) to be the point at infinity. Uses affine coordinates. */ ++mp_err ++ec_GF2m_pt_set_inf_aff(mp_int *px, mp_int *py) ++{ ++ mp_zero(px); ++ mp_zero(py); ++ return MP_OKAY; ++} ++ ++/* Computes R = P + Q based on IEEE P1363 A.10.2. Elliptic curve points P, ++ * Q, and R can all be identical. Uses affine coordinates. 
*/ ++mp_err ++ec_GF2m_pt_add_aff(const mp_int *px, const mp_int *py, const mp_int *qx, ++ const mp_int *qy, mp_int *rx, mp_int *ry, ++ const ECGroup *group) ++{ ++ mp_err res = MP_OKAY; ++ mp_int lambda, tempx, tempy; ++ ++ MP_DIGITS(&lambda) = 0; ++ MP_DIGITS(&tempx) = 0; ++ MP_DIGITS(&tempy) = 0; ++ MP_CHECKOK(mp_init(&lambda, FLAG(px))); ++ MP_CHECKOK(mp_init(&tempx, FLAG(px))); ++ MP_CHECKOK(mp_init(&tempy, FLAG(px))); ++ /* if P = inf, then R = Q */ ++ if (ec_GF2m_pt_is_inf_aff(px, py) == 0) { ++ MP_CHECKOK(mp_copy(qx, rx)); ++ MP_CHECKOK(mp_copy(qy, ry)); ++ res = MP_OKAY; ++ goto CLEANUP; ++ } ++ /* if Q = inf, then R = P */ ++ if (ec_GF2m_pt_is_inf_aff(qx, qy) == 0) { ++ MP_CHECKOK(mp_copy(px, rx)); ++ MP_CHECKOK(mp_copy(py, ry)); ++ res = MP_OKAY; ++ goto CLEANUP; ++ } ++ /* if px != qx, then lambda = (py+qy) / (px+qx), tempx = a + lambda^2 ++ * + lambda + px + qx */ ++ if (mp_cmp(px, qx) != 0) { ++ MP_CHECKOK(group->meth->field_add(py, qy, &tempy, group->meth)); ++ MP_CHECKOK(group->meth->field_add(px, qx, &tempx, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_div(&tempy, &tempx, &lambda, group->meth)); ++ MP_CHECKOK(group->meth->field_sqr(&lambda, &tempx, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&tempx, &lambda, &tempx, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&tempx, &group->curvea, &tempx, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&tempx, px, &tempx, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&tempx, qx, &tempx, group->meth)); ++ } else { ++ /* if py != qy or qx = 0, then R = inf */ ++ if (((mp_cmp(py, qy) != 0)) || (mp_cmp_z(qx) == 0)) { ++ mp_zero(rx); ++ mp_zero(ry); ++ res = MP_OKAY; ++ goto CLEANUP; ++ } ++ /* lambda = qx + qy / qx */ ++ MP_CHECKOK(group->meth->field_div(qy, qx, &lambda, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&lambda, qx, &lambda, group->meth)); ++ /* tempx = a + lambda^2 + lambda */ ++ MP_CHECKOK(group->meth->field_sqr(&lambda, &tempx, group->meth)); 
++ MP_CHECKOK(group->meth-> ++ field_add(&tempx, &lambda, &tempx, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&tempx, &group->curvea, &tempx, group->meth)); ++ } ++ /* ry = (qx + tempx) * lambda + tempx + qy */ ++ MP_CHECKOK(group->meth->field_add(qx, &tempx, &tempy, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_mul(&tempy, &lambda, &tempy, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_add(&tempy, &tempx, &tempy, group->meth)); ++ MP_CHECKOK(group->meth->field_add(&tempy, qy, ry, group->meth)); ++ /* rx = tempx */ ++ MP_CHECKOK(mp_copy(&tempx, rx)); ++ ++ CLEANUP: ++ mp_clear(&lambda); ++ mp_clear(&tempx); ++ mp_clear(&tempy); ++ return res; ++} ++ ++/* Computes R = P - Q. Elliptic curve points P, Q, and R can all be ++ * identical. Uses affine coordinates. */ ++mp_err ++ec_GF2m_pt_sub_aff(const mp_int *px, const mp_int *py, const mp_int *qx, ++ const mp_int *qy, mp_int *rx, mp_int *ry, ++ const ECGroup *group) ++{ ++ mp_err res = MP_OKAY; ++ mp_int nqy; ++ ++ MP_DIGITS(&nqy) = 0; ++ MP_CHECKOK(mp_init(&nqy, FLAG(px))); ++ /* nqy = qx+qy */ ++ MP_CHECKOK(group->meth->field_add(qx, qy, &nqy, group->meth)); ++ MP_CHECKOK(group->point_add(px, py, qx, &nqy, rx, ry, group)); ++ CLEANUP: ++ mp_clear(&nqy); ++ return res; ++} ++ ++/* Computes R = 2P. Elliptic curve points P and R can be identical. Uses ++ * affine coordinates. */ ++mp_err ++ec_GF2m_pt_dbl_aff(const mp_int *px, const mp_int *py, mp_int *rx, ++ mp_int *ry, const ECGroup *group) ++{ ++ return group->point_add(px, py, px, py, rx, ry, group); ++} ++ ++/* by default, this routine is unused and thus doesn't need to be compiled */ ++#ifdef ECL_ENABLE_GF2M_PT_MUL_AFF ++/* Computes R = nP based on IEEE P1363 A.10.3. Elliptic curve points P and ++ * R can be identical. Uses affine coordinates. 
*/ ++mp_err ++ec_GF2m_pt_mul_aff(const mp_int *n, const mp_int *px, const mp_int *py, ++ mp_int *rx, mp_int *ry, const ECGroup *group) ++{ ++ mp_err res = MP_OKAY; ++ mp_int k, k3, qx, qy, sx, sy; ++ int b1, b3, i, l; ++ ++ MP_DIGITS(&k) = 0; ++ MP_DIGITS(&k3) = 0; ++ MP_DIGITS(&qx) = 0; ++ MP_DIGITS(&qy) = 0; ++ MP_DIGITS(&sx) = 0; ++ MP_DIGITS(&sy) = 0; ++ MP_CHECKOK(mp_init(&k)); ++ MP_CHECKOK(mp_init(&k3)); ++ MP_CHECKOK(mp_init(&qx)); ++ MP_CHECKOK(mp_init(&qy)); ++ MP_CHECKOK(mp_init(&sx)); ++ MP_CHECKOK(mp_init(&sy)); ++ ++ /* if n = 0 then r = inf */ ++ if (mp_cmp_z(n) == 0) { ++ mp_zero(rx); ++ mp_zero(ry); ++ res = MP_OKAY; ++ goto CLEANUP; ++ } ++ /* Q = P, k = n */ ++ MP_CHECKOK(mp_copy(px, &qx)); ++ MP_CHECKOK(mp_copy(py, &qy)); ++ MP_CHECKOK(mp_copy(n, &k)); ++ /* if n < 0 then Q = -Q, k = -k */ ++ if (mp_cmp_z(n) < 0) { ++ MP_CHECKOK(group->meth->field_add(&qx, &qy, &qy, group->meth)); ++ MP_CHECKOK(mp_neg(&k, &k)); ++ } ++#ifdef ECL_DEBUG /* basic double and add method */ ++ l = mpl_significant_bits(&k) - 1; ++ MP_CHECKOK(mp_copy(&qx, &sx)); ++ MP_CHECKOK(mp_copy(&qy, &sy)); ++ for (i = l - 1; i >= 0; i--) { ++ /* S = 2S */ ++ MP_CHECKOK(group->point_dbl(&sx, &sy, &sx, &sy, group)); ++ /* if k_i = 1, then S = S + Q */ ++ if (mpl_get_bit(&k, i) != 0) { ++ MP_CHECKOK(group-> ++ point_add(&sx, &sy, &qx, &qy, &sx, &sy, group)); ++ } ++ } ++#else /* double and add/subtract method from ++ * standard */ ++ /* k3 = 3 * k */ ++ MP_CHECKOK(mp_set_int(&k3, 3)); ++ MP_CHECKOK(mp_mul(&k, &k3, &k3)); ++ /* S = Q */ ++ MP_CHECKOK(mp_copy(&qx, &sx)); ++ MP_CHECKOK(mp_copy(&qy, &sy)); ++ /* l = index of high order bit in binary representation of 3*k */ ++ l = mpl_significant_bits(&k3) - 1; ++ /* for i = l-1 downto 1 */ ++ for (i = l - 1; i >= 1; i--) { ++ /* S = 2S */ ++ MP_CHECKOK(group->point_dbl(&sx, &sy, &sx, &sy, group)); ++ b3 = MP_GET_BIT(&k3, i); ++ b1 = MP_GET_BIT(&k, i); ++ /* if k3_i = 1 and k_i = 0, then S = S + Q */ ++ if ((b3 == 1) && (b1 == 0)) { ++ 
MP_CHECKOK(group-> ++ point_add(&sx, &sy, &qx, &qy, &sx, &sy, group)); ++ /* if k3_i = 0 and k_i = 1, then S = S - Q */ ++ } else if ((b3 == 0) && (b1 == 1)) { ++ MP_CHECKOK(group-> ++ point_sub(&sx, &sy, &qx, &qy, &sx, &sy, group)); ++ } ++ } ++#endif ++ /* output S */ ++ MP_CHECKOK(mp_copy(&sx, rx)); ++ MP_CHECKOK(mp_copy(&sy, ry)); ++ ++ CLEANUP: ++ mp_clear(&k); ++ mp_clear(&k3); ++ mp_clear(&qx); ++ mp_clear(&qy); ++ mp_clear(&sx); ++ mp_clear(&sy); ++ return res; ++} ++#endif ++ ++/* Validates a point on a GF2m curve. */ ++mp_err ++ec_GF2m_validate_point(const mp_int *px, const mp_int *py, const ECGroup *group) ++{ ++ mp_err res = MP_NO; ++ mp_int accl, accr, tmp, pxt, pyt; ++ ++ MP_DIGITS(&accl) = 0; ++ MP_DIGITS(&accr) = 0; ++ MP_DIGITS(&tmp) = 0; ++ MP_DIGITS(&pxt) = 0; ++ MP_DIGITS(&pyt) = 0; ++ MP_CHECKOK(mp_init(&accl, FLAG(px))); ++ MP_CHECKOK(mp_init(&accr, FLAG(px))); ++ MP_CHECKOK(mp_init(&tmp, FLAG(px))); ++ MP_CHECKOK(mp_init(&pxt, FLAG(px))); ++ MP_CHECKOK(mp_init(&pyt, FLAG(px))); ++ ++ /* 1: Verify that publicValue is not the point at infinity */ ++ if (ec_GF2m_pt_is_inf_aff(px, py) == MP_YES) { ++ res = MP_NO; ++ goto CLEANUP; ++ } ++ /* 2: Verify that the coordinates of publicValue are elements ++ * of the field. ++ */ ++ if ((MP_SIGN(px) == MP_NEG) || (mp_cmp(px, &group->meth->irr) >= 0) || ++ (MP_SIGN(py) == MP_NEG) || (mp_cmp(py, &group->meth->irr) >= 0)) { ++ res = MP_NO; ++ goto CLEANUP; ++ } ++ /* 3: Verify that publicValue is on the curve. 
*/ ++ if (group->meth->field_enc) { ++ group->meth->field_enc(px, &pxt, group->meth); ++ group->meth->field_enc(py, &pyt, group->meth); ++ } else { ++ mp_copy(px, &pxt); ++ mp_copy(py, &pyt); ++ } ++ /* left-hand side: y^2 + x*y */ ++ MP_CHECKOK( group->meth->field_sqr(&pyt, &accl, group->meth) ); ++ MP_CHECKOK( group->meth->field_mul(&pxt, &pyt, &tmp, group->meth) ); ++ MP_CHECKOK( group->meth->field_add(&accl, &tmp, &accl, group->meth) ); ++ /* right-hand side: x^3 + a*x^2 + b */ ++ MP_CHECKOK( group->meth->field_sqr(&pxt, &tmp, group->meth) ); ++ MP_CHECKOK( group->meth->field_mul(&pxt, &tmp, &accr, group->meth) ); ++ MP_CHECKOK( group->meth->field_mul(&group->curvea, &tmp, &tmp, group->meth) ); ++ MP_CHECKOK( group->meth->field_add(&tmp, &accr, &accr, group->meth) ); ++ MP_CHECKOK( group->meth->field_add(&accr, &group->curveb, &accr, group->meth) ); ++ /* check LHS - RHS == 0 */ ++ MP_CHECKOK( group->meth->field_add(&accl, &accr, &accr, group->meth) ); ++ if (mp_cmp_z(&accr) != 0) { ++ res = MP_NO; ++ goto CLEANUP; ++ } ++ /* 4: Verify that the order of the curve times the publicValue ++ * is the point at infinity. ++ */ ++ /* timing mitigation is not supported */ ++ MP_CHECKOK( ECPoint_mul(group, &group->order, px, py, &pxt, &pyt, /*timing*/ 0) ); ++ if (ec_GF2m_pt_is_inf_aff(&pxt, &pyt) != MP_YES) { ++ res = MP_NO; ++ goto CLEANUP; ++ } ++ ++ res = MP_YES; ++ ++CLEANUP: ++ mp_clear(&accl); ++ mp_clear(&accr); ++ mp_clear(&tmp); ++ mp_clear(&pxt); ++ mp_clear(&pyt); ++ return res; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ec2.h afu8u/jdk/src/share/native/sun/security/ec/impl/ec2.h +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ec2.h 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ec2.h 2025-05-06 10:53:46.731633728 +0800 +@@ -0,0 +1,126 @@ ++/* ++ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. 
++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for binary polynomial field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Douglas Stebila , Sun Microsystems Laboratories ++ * ++ * Last Modified Date from the Original Code: May 2017 ++ *********************************************************************** */ ++ ++#ifndef _EC2_H ++#define _EC2_H ++ ++#include "ecl-priv.h" ++ ++/* Checks if point P(px, py) is at infinity. Uses affine coordinates. */ ++mp_err ec_GF2m_pt_is_inf_aff(const mp_int *px, const mp_int *py); ++ ++/* Sets P(px, py) to be the point at infinity. Uses affine coordinates. */ ++mp_err ec_GF2m_pt_set_inf_aff(mp_int *px, mp_int *py); ++ ++/* Computes R = P + Q where R is (rx, ry), P is (px, py) and Q is (qx, ++ * qy). 
Uses affine coordinates. */ ++mp_err ec_GF2m_pt_add_aff(const mp_int *px, const mp_int *py, ++ const mp_int *qx, const mp_int *qy, mp_int *rx, ++ mp_int *ry, const ECGroup *group); ++ ++/* Computes R = P - Q. Uses affine coordinates. */ ++mp_err ec_GF2m_pt_sub_aff(const mp_int *px, const mp_int *py, ++ const mp_int *qx, const mp_int *qy, mp_int *rx, ++ mp_int *ry, const ECGroup *group); ++ ++/* Computes R = 2P. Uses affine coordinates. */ ++mp_err ec_GF2m_pt_dbl_aff(const mp_int *px, const mp_int *py, mp_int *rx, ++ mp_int *ry, const ECGroup *group); ++ ++/* Validates a point on a GF2m curve. */ ++mp_err ec_GF2m_validate_point(const mp_int *px, const mp_int *py, const ECGroup *group); ++ ++/* by default, this routine is unused and thus doesn't need to be compiled */ ++#ifdef ECL_ENABLE_GF2M_PT_MUL_AFF ++/* Computes R = nP where R is (rx, ry) and P is (px, py). The parameters ++ * a, b and p are the elliptic curve coefficients and the irreducible that ++ * determines the field GF2m. Uses affine coordinates. */ ++mp_err ec_GF2m_pt_mul_aff(const mp_int *n, const mp_int *px, ++ const mp_int *py, mp_int *rx, mp_int *ry, ++ const ECGroup *group); ++#endif ++ ++/* Computes R = nP where R is (rx, ry) and P is (px, py). The parameters ++ * a, b and p are the elliptic curve coefficients and the irreducible that ++ * determines the field GF2m. Uses Montgomery projective coordinates. */ ++mp_err ec_GF2m_pt_mul_mont(const mp_int *n, const mp_int *px, ++ const mp_int *py, mp_int *rx, mp_int *ry, ++ const ECGroup *group, int timing); ++ ++#ifdef ECL_ENABLE_GF2M_PROJ ++/* Converts a point P(px, py) from affine coordinates to projective ++ * coordinates R(rx, ry, rz). */ ++mp_err ec_GF2m_pt_aff2proj(const mp_int *px, const mp_int *py, mp_int *rx, ++ mp_int *ry, mp_int *rz, const ECGroup *group); ++ ++/* Converts a point P(px, py, pz) from projective coordinates to affine ++ * coordinates R(rx, ry). 
*/ ++mp_err ec_GF2m_pt_proj2aff(const mp_int *px, const mp_int *py, ++ const mp_int *pz, mp_int *rx, mp_int *ry, ++ const ECGroup *group); ++ ++/* Checks if point P(px, py, pz) is at infinity. Uses projective ++ * coordinates. */ ++mp_err ec_GF2m_pt_is_inf_proj(const mp_int *px, const mp_int *py, ++ const mp_int *pz); ++ ++/* Sets P(px, py, pz) to be the point at infinity. Uses projective ++ * coordinates. */ ++mp_err ec_GF2m_pt_set_inf_proj(mp_int *px, mp_int *py, mp_int *pz); ++ ++/* Computes R = P + Q where R is (rx, ry, rz), P is (px, py, pz) and Q is ++ * (qx, qy, qz). Uses projective coordinates. */ ++mp_err ec_GF2m_pt_add_proj(const mp_int *px, const mp_int *py, ++ const mp_int *pz, const mp_int *qx, ++ const mp_int *qy, mp_int *rx, mp_int *ry, ++ mp_int *rz, const ECGroup *group); ++ ++/* Computes R = 2P. Uses projective coordinates. */ ++mp_err ec_GF2m_pt_dbl_proj(const mp_int *px, const mp_int *py, ++ const mp_int *pz, mp_int *rx, mp_int *ry, ++ mp_int *rz, const ECGroup *group); ++ ++/* Computes R = nP where R is (rx, ry) and P is (px, py). The parameters ++ * a, b and p are the elliptic curve coefficients and the prime that ++ * determines the field GF2m. Uses projective coordinates. */ ++mp_err ec_GF2m_pt_mul_proj(const mp_int *n, const mp_int *px, ++ const mp_int *py, mp_int *rx, mp_int *ry, ++ const ECGroup *group); ++#endif ++ ++#endif /* _EC2_H */ +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_mont.c afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_mont.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ec2_mont.c 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ec2_mont.c 2025-05-06 10:53:46.735633728 +0800 +@@ -0,0 +1,278 @@ ++/* ++ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. 
++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for binary polynomial field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Sheueling Chang-Shantz , ++ * Stephen Fung , and ++ * Douglas Stebila , Sun Microsystems Laboratories. ++ * ++ * Last Modified Date from the Original Code: May 2017 ++ *********************************************************************** */ ++ ++#include "ec2.h" ++#include "mplogic.h" ++#include "mp_gf2m.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++/* Compute the x-coordinate x/z for the point 2*(x/z) in Montgomery ++ * projective coordinates. Uses algorithm Mdouble in appendix of Lopez, J. ++ * and Dahab, R. "Fast multiplication on elliptic curves over GF(2^m) ++ * without precomputation". 
modified to not require precomputation of ++ * c=b^{2^{m-1}}. */ ++static mp_err ++gf2m_Mdouble(mp_int *x, mp_int *z, const ECGroup *group, int kmflag) ++{ ++ mp_err res = MP_OKAY; ++ mp_int t1; ++ ++ MP_DIGITS(&t1) = 0; ++ MP_CHECKOK(mp_init(&t1, kmflag)); ++ ++ MP_CHECKOK(group->meth->field_sqr(x, x, group->meth)); ++ MP_CHECKOK(group->meth->field_sqr(z, &t1, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(x, &t1, z, group->meth)); ++ MP_CHECKOK(group->meth->field_sqr(x, x, group->meth)); ++ MP_CHECKOK(group->meth->field_sqr(&t1, &t1, group->meth)); ++ MP_CHECKOK(group->meth-> ++ field_mul(&group->curveb, &t1, &t1, group->meth)); ++ MP_CHECKOK(group->meth->field_add(x, &t1, x, group->meth)); ++ ++ CLEANUP: ++ mp_clear(&t1); ++ return res; ++} ++ ++/* Compute the x-coordinate x1/z1 for the point (x1/z1)+(x2/x2) in ++ * Montgomery projective coordinates. Uses algorithm Madd in appendix of ++ * Lopex, J. and Dahab, R. "Fast multiplication on elliptic curves over ++ * GF(2^m) without precomputation". 
*/ ++static mp_err ++gf2m_Madd(const mp_int *x, mp_int *x1, mp_int *z1, mp_int *x2, mp_int *z2, ++ const ECGroup *group, int kmflag) ++{ ++ mp_err res = MP_OKAY; ++ mp_int t1, t2; ++ ++ MP_DIGITS(&t1) = 0; ++ MP_DIGITS(&t2) = 0; ++ MP_CHECKOK(mp_init(&t1, kmflag)); ++ MP_CHECKOK(mp_init(&t2, kmflag)); ++ ++ MP_CHECKOK(mp_copy(x, &t1)); ++ MP_CHECKOK(group->meth->field_mul(x1, z2, x1, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(z1, x2, z1, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(x1, z1, &t2, group->meth)); ++ MP_CHECKOK(group->meth->field_add(z1, x1, z1, group->meth)); ++ MP_CHECKOK(group->meth->field_sqr(z1, z1, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(z1, &t1, x1, group->meth)); ++ MP_CHECKOK(group->meth->field_add(x1, &t2, x1, group->meth)); ++ ++ CLEANUP: ++ mp_clear(&t1); ++ mp_clear(&t2); ++ return res; ++} ++ ++/* Compute the x, y affine coordinates from the point (x1, z1) (x2, z2) ++ * using Montgomery point multiplication algorithm Mxy() in appendix of ++ * Lopex, J. and Dahab, R. "Fast multiplication on elliptic curves over ++ * GF(2^m) without precomputation". 
Returns: 0 on error 1 if return value ++ * should be the point at infinity 2 otherwise */ ++static int ++gf2m_Mxy(const mp_int *x, const mp_int *y, mp_int *x1, mp_int *z1, ++ mp_int *x2, mp_int *z2, const ECGroup *group) ++{ ++ mp_err res = MP_OKAY; ++ int ret = 0; ++ mp_int t3, t4, t5; ++ ++ MP_DIGITS(&t3) = 0; ++ MP_DIGITS(&t4) = 0; ++ MP_DIGITS(&t5) = 0; ++ MP_CHECKOK(mp_init(&t3, FLAG(x2))); ++ MP_CHECKOK(mp_init(&t4, FLAG(x2))); ++ MP_CHECKOK(mp_init(&t5, FLAG(x2))); ++ ++ if (mp_cmp_z(z1) == 0) { ++ mp_zero(x2); ++ mp_zero(z2); ++ ret = 1; ++ goto CLEANUP; ++ } ++ ++ if (mp_cmp_z(z2) == 0) { ++ MP_CHECKOK(mp_copy(x, x2)); ++ MP_CHECKOK(group->meth->field_add(x, y, z2, group->meth)); ++ ret = 2; ++ goto CLEANUP; ++ } ++ ++ MP_CHECKOK(mp_set_int(&t5, 1)); ++ if (group->meth->field_enc) { ++ MP_CHECKOK(group->meth->field_enc(&t5, &t5, group->meth)); ++ } ++ ++ MP_CHECKOK(group->meth->field_mul(z1, z2, &t3, group->meth)); ++ ++ MP_CHECKOK(group->meth->field_mul(z1, x, z1, group->meth)); ++ MP_CHECKOK(group->meth->field_add(z1, x1, z1, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(z2, x, z2, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(z2, x1, x1, group->meth)); ++ MP_CHECKOK(group->meth->field_add(z2, x2, z2, group->meth)); ++ ++ MP_CHECKOK(group->meth->field_mul(z2, z1, z2, group->meth)); ++ MP_CHECKOK(group->meth->field_sqr(x, &t4, group->meth)); ++ MP_CHECKOK(group->meth->field_add(&t4, y, &t4, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(&t4, &t3, &t4, group->meth)); ++ MP_CHECKOK(group->meth->field_add(&t4, z2, &t4, group->meth)); ++ ++ MP_CHECKOK(group->meth->field_mul(&t3, x, &t3, group->meth)); ++ MP_CHECKOK(group->meth->field_div(&t5, &t3, &t3, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(&t3, &t4, &t4, group->meth)); ++ MP_CHECKOK(group->meth->field_mul(x1, &t3, x2, group->meth)); ++ MP_CHECKOK(group->meth->field_add(x2, x, z2, group->meth)); ++ ++ MP_CHECKOK(group->meth->field_mul(z2, &t4, z2, group->meth)); ++ 
MP_CHECKOK(group->meth->field_add(z2, y, z2, group->meth)); ++ ++ ret = 2; ++ ++ CLEANUP: ++ mp_clear(&t3); ++ mp_clear(&t4); ++ mp_clear(&t5); ++ if (res == MP_OKAY) { ++ return ret; ++ } else { ++ return 0; ++ } ++} ++ ++/* Computes R = nP based on algorithm 2P of Lopex, J. and Dahab, R. "Fast ++ * multiplication on elliptic curves over GF(2^m) without ++ * precomputation". Elliptic curve points P and R can be identical. Uses ++ * Montgomery projective coordinates. The timing parameter is ignored ++ * because this algorithm resists timing attacks by default. */ ++mp_err ++ec_GF2m_pt_mul_mont(const mp_int *n, const mp_int *px, const mp_int *py, ++ mp_int *rx, mp_int *ry, const ECGroup *group, ++ int timing) ++{ ++ mp_err res = MP_OKAY; ++ mp_int x1, x2, z1, z2; ++ int i, j; ++ mp_digit top_bit, mask; ++ ++ MP_DIGITS(&x1) = 0; ++ MP_DIGITS(&x2) = 0; ++ MP_DIGITS(&z1) = 0; ++ MP_DIGITS(&z2) = 0; ++ MP_CHECKOK(mp_init(&x1, FLAG(n))); ++ MP_CHECKOK(mp_init(&x2, FLAG(n))); ++ MP_CHECKOK(mp_init(&z1, FLAG(n))); ++ MP_CHECKOK(mp_init(&z2, FLAG(n))); ++ ++ /* if result should be point at infinity */ ++ if ((mp_cmp_z(n) == 0) || (ec_GF2m_pt_is_inf_aff(px, py) == MP_YES)) { ++ MP_CHECKOK(ec_GF2m_pt_set_inf_aff(rx, ry)); ++ goto CLEANUP; ++ } ++ ++ MP_CHECKOK(mp_copy(px, &x1)); /* x1 = px */ ++ MP_CHECKOK(mp_set_int(&z1, 1)); /* z1 = 1 */ ++ MP_CHECKOK(group->meth->field_sqr(&x1, &z2, group->meth)); /* z2 = ++ * x1^2 = ++ * px^2 */ ++ MP_CHECKOK(group->meth->field_sqr(&z2, &x2, group->meth)); ++ MP_CHECKOK(group->meth->field_add(&x2, &group->curveb, &x2, group->meth)); /* x2 ++ * = ++ * px^4 ++ * + ++ * b ++ */ ++ ++ /* find top-most bit and go one past it */ ++ i = MP_USED(n) - 1; ++ j = MP_DIGIT_BIT - 1; ++ top_bit = 1; ++ top_bit <<= MP_DIGIT_BIT - 1; ++ mask = top_bit; ++ while (!(MP_DIGITS(n)[i] & mask)) { ++ mask >>= 1; ++ j--; ++ } ++ mask >>= 1; ++ j--; ++ ++ /* if top most bit was at word break, go to next word */ ++ if (!mask) { ++ i--; ++ j = MP_DIGIT_BIT - 1; ++ 
mask = top_bit; ++ } ++ ++ for (; i >= 0; i--) { ++ for (; j >= 0; j--) { ++ if (MP_DIGITS(n)[i] & mask) { ++ MP_CHECKOK(gf2m_Madd(px, &x1, &z1, &x2, &z2, group, FLAG(n))); ++ MP_CHECKOK(gf2m_Mdouble(&x2, &z2, group, FLAG(n))); ++ } else { ++ MP_CHECKOK(gf2m_Madd(px, &x2, &z2, &x1, &z1, group, FLAG(n))); ++ MP_CHECKOK(gf2m_Mdouble(&x1, &z1, group, FLAG(n))); ++ } ++ mask >>= 1; ++ } ++ j = MP_DIGIT_BIT - 1; ++ mask = top_bit; ++ } ++ ++ /* convert out of "projective" coordinates */ ++ i = gf2m_Mxy(px, py, &x1, &z1, &x2, &z2, group); ++ if (i == 0) { ++ res = MP_BADARG; ++ goto CLEANUP; ++ } else if (i == 1) { ++ MP_CHECKOK(ec_GF2m_pt_set_inf_aff(rx, ry)); ++ } else { ++ MP_CHECKOK(mp_copy(&x2, rx)); ++ MP_CHECKOK(mp_copy(&z2, ry)); ++ } ++ ++ CLEANUP: ++ mp_clear(&x1); ++ mp_clear(&x2); ++ mp_clear(&z1); ++ mp_clear(&z2); ++ return res; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ecl.c afu8u/jdk/src/share/native/sun/security/ec/impl/ecl.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ecl.c 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ecl.c 2025-05-06 10:53:46.735633728 +0800 +@@ -39,6 +39,7 @@ + #include "mplogic.h" + #include "ecl.h" + #include "ecl-priv.h" ++#include "ec2.h" + #include "ecp.h" + #ifndef _KERNEL + #include +@@ -169,6 +170,50 @@ + return group; + } + ++#ifdef NSS_ECC_MORE_THAN_SUITE_B ++/* Construct a generic ECGroup for elliptic curves over binary polynomial ++ * fields. 
*/ ++ECGroup * ++ECGroup_consGF2m(const mp_int *irr, const unsigned int irr_arr[5], ++ const mp_int *curvea, const mp_int *curveb, ++ const mp_int *genx, const mp_int *geny, ++ const mp_int *order, int cofactor) ++{ ++ mp_err res = MP_OKAY; ++ ECGroup *group = NULL; ++ ++ group = ECGroup_new(FLAG(irr)); ++ if (group == NULL) ++ return NULL; ++ ++ group->meth = GFMethod_consGF2m(irr, irr_arr); ++ if (group->meth == NULL) { ++ res = MP_MEM; ++ goto CLEANUP; ++ } ++ MP_CHECKOK(mp_copy(curvea, &group->curvea)); ++ MP_CHECKOK(mp_copy(curveb, &group->curveb)); ++ MP_CHECKOK(mp_copy(genx, &group->genx)); ++ MP_CHECKOK(mp_copy(geny, &group->geny)); ++ MP_CHECKOK(mp_copy(order, &group->order)); ++ group->cofactor = cofactor; ++ group->point_add = &ec_GF2m_pt_add_aff; ++ group->point_sub = &ec_GF2m_pt_sub_aff; ++ group->point_dbl = &ec_GF2m_pt_dbl_aff; ++ group->point_mul = &ec_GF2m_pt_mul_mont; ++ group->base_point_mul = NULL; ++ group->points_mul = &ec_pts_mul_basic; ++ group->validate_point = &ec_GF2m_validate_point; ++ ++ CLEANUP: ++ if (res != MP_OKAY) { ++ ECGroup_free(group); ++ return NULL; ++ } ++ return group; ++} ++#endif ++ + /* Construct ECGroup from hex parameters and name, if any. Called by + * ECGroup_fromHex and ECGroup_fromName. 
*/ + ECGroup * +@@ -209,10 +254,85 @@ + + /* determine which optimizations (if any) to use */ + if (params->field == ECField_GFp) { ++#ifdef NSS_ECC_MORE_THAN_SUITE_B ++ switch (name) { ++#ifdef ECL_USE_FP ++ case ECCurve_SECG_PRIME_160R1: ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_secp160r1_fp(group)); ++ break; ++#endif ++ case ECCurve_SECG_PRIME_192R1: ++#ifdef ECL_USE_FP ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_nistp192_fp(group)); ++#else ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_gfp192(group, name)); ++#endif ++ break; ++ case ECCurve_SECG_PRIME_224R1: ++#ifdef ECL_USE_FP ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_nistp224_fp(group)); ++#else ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_gfp224(group, name)); ++#endif ++ break; ++ case ECCurve_SECG_PRIME_256R1: ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_gfp256(group, name)); ++ break; ++ case ECCurve_SECG_PRIME_521R1: ++ group = ++ ECGroup_consGFp(&irr, &curvea, &curveb, &genx, &geny, ++ &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ MP_CHECKOK(ec_group_set_gfp521(group, name)); ++ break; ++ default: ++ /* use generic arithmetic */ ++#endif + group = + 
ECGroup_consGFp_mont(&irr, &curvea, &curveb, &genx, &geny, + &order, params->cofactor); + if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++#ifdef NSS_ECC_MORE_THAN_SUITE_B ++ } ++ } else if (params->field == ECField_GF2m) { ++ group = ECGroup_consGF2m(&irr, NULL, &curvea, &curveb, &genx, &geny, &order, params->cofactor); ++ if (group == NULL) { res = MP_UNDEF; goto CLEANUP; } ++ if ((name == ECCurve_NIST_K163) || ++ (name == ECCurve_NIST_B163) || ++ (name == ECCurve_SECG_CHAR2_163R1)) { ++ MP_CHECKOK(ec_group_set_gf2m163(group, name)); ++ } else if ((name == ECCurve_SECG_CHAR2_193R1) || ++ (name == ECCurve_SECG_CHAR2_193R2)) { ++ MP_CHECKOK(ec_group_set_gf2m193(group, name)); ++ } else if ((name == ECCurve_NIST_K233) || ++ (name == ECCurve_NIST_B233)) { ++ MP_CHECKOK(ec_group_set_gf2m233(group, name)); ++ } ++#endif + } else { + res = MP_UNDEF; + goto CLEANUP; +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ecl-curve.h afu8u/jdk/src/share/native/sun/security/ec/impl/ecl-curve.h +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ecl-curve.h 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ecl-curve.h 2025-05-06 10:53:46.735633728 +0800 +@@ -44,6 +44,25 @@ + #endif + + /* NIST prime curves */ ++static const ECCurveParams ecCurve_NIST_P192 = { ++ "NIST-P192", ECField_GFp, 192, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", ++ "188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", ++ "07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", ++ "FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 1 ++}; ++ ++static const ECCurveParams ecCurve_NIST_P224 = { ++ "NIST-P224", ECField_GFp, 224, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE", ++ "B4050A850C04B3ABF54132565044B0B7D7BFD8BA270B39432355FFB4", ++ 
"B70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21", ++ "BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D", 1 ++}; + + static const ECCurveParams ecCurve_NIST_P256 = { + "NIST-P256", ECField_GFp, 256, +@@ -77,7 +96,411 @@ + 1 + }; + ++/* NIST binary curves */ ++static const ECCurveParams ecCurve_NIST_K163 = { ++ "NIST-K163", ECField_GF2m, 163, ++ "0800000000000000000000000000000000000000C9", ++ "000000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000001", ++ "02FE13C0537BBC11ACAA07D793DE4E6D5E5C94EEE8", ++ "0289070FB05D38FF58321F2E800536D538CCDAA3D9", ++ "04000000000000000000020108A2E0CC0D99F8A5EF", 2 ++}; ++ ++static const ECCurveParams ecCurve_NIST_B163 = { ++ "NIST-B163", ECField_GF2m, 163, ++ "0800000000000000000000000000000000000000C9", ++ "000000000000000000000000000000000000000001", ++ "020A601907B8C953CA1481EB10512F78744A3205FD", ++ "03F0EBA16286A2D57EA0991168D4994637E8343E36", ++ "00D51FBC6C71A0094FA2CDD545B11C5C0C797324F1", ++ "040000000000000000000292FE77E70C12A4234C33", 2 ++}; ++ ++static const ECCurveParams ecCurve_NIST_K233 = { ++ "NIST-K233", ECField_GF2m, 233, ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "017232BA853A7E731AF129F22FF4149563A419C26BF50A4C9D6EEFAD6126", ++ "01DB537DECE819B7F70F555A67C427A8CD9BF18AEB9B56E0C11056FAE6A3", ++ "008000000000000000000000000000069D5BB915BCD46EFB1AD5F173ABDF", 4 ++}; ++ ++static const ECCurveParams ecCurve_NIST_B233 = { ++ "NIST-B233", ECField_GF2m, 233, ++ "020000000000000000000000000000000000000004000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "0066647EDE6C332C7F8C0923BB58213B333B20E9CE4281FE115F7D8F90AD", ++ "00FAC9DFCBAC8313BB2139F1BB755FEF65BC391F8B36F8F8EB7371FD558B", ++ 
"01006A08A41903350678E58528BEBF8A0BEFF867A7CA36716F7E01F81052", ++ "01000000000000000000000000000013E974E72F8A6922031D2603CFE0D7", 2 ++}; ++ ++static const ECCurveParams ecCurve_NIST_K283 = { ++ "NIST-K283", ECField_GF2m, 283, ++ "0800000000000000000000000000000000000000000000000000000000000000000010A1", ++ "000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0503213F78CA44883F1A3B8162F188E553CD265F23C1567A16876913B0C2AC2458492836", ++ "01CCDA380F1C9E318D90F95D07E5426FE87E45C0E8184698E45962364E34116177DD2259", ++ "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE9AE2ED07577265DFF7F94451E061E163C61", 4 ++}; ++ ++static const ECCurveParams ecCurve_NIST_B283 = { ++ "NIST-B283", ECField_GF2m, 283, ++ "0800000000000000000000000000000000000000000000000000000000000000000010A1", ++ "000000000000000000000000000000000000000000000000000000000000000000000001", ++ "027B680AC8B8596DA5A4AF8A19A0303FCA97FD7645309FA2A581485AF6263E313B79A2F5", ++ "05F939258DB7DD90E1934F8C70B0DFEC2EED25B8557EAC9C80E2E198F8CDBECD86B12053", ++ "03676854FE24141CB98FE6D4B20D02B4516FF702350EDDB0826779C813F0DF45BE8112F4", ++ "03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEF90399660FC938A90165B042A7CEFADB307", 2 ++}; ++ ++static const ECCurveParams ecCurve_NIST_K409 = { ++ "NIST-K409", ECField_GF2m, 409, ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0060F05F658F49C1AD3AB1890F7184210EFD0987E307C84C27ACCFB8F9F67CC2C460189EB5AAAA62EE222EB1B35540CFE9023746", ++ "01E369050B7C4E42ACBA1DACBF04299C3460782F918EA427E6325165E9EA10E3DA5F6C42E9C55215AA9CA27A5863EC48D8E0286B", ++ 
"007FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE5F83B2D4EA20400EC4557D5ED3E3E7CA5B4B5C83B8E01E5FCF", 4 ++}; ++ ++static const ECCurveParams ecCurve_NIST_B409 = { ++ "NIST-B409", ECField_GF2m, 409, ++ "02000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000001", ++ "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "0021A5C2C8EE9FEB5C4B9A753B7B476B7FD6422EF1F3DD674761FA99D6AC27C8A9A197B272822F6CD57A55AA4F50AE317B13545F", ++ "015D4860D088DDB3496B0C6064756260441CDE4AF1771D4DB01FFE5B34E59703DC255A868A1180515603AEAB60794E54BB7996A7", ++ "0061B1CFAB6BE5F32BBFA78324ED106A7636B9C5A7BD198D0158AA4F5488D08F38514F1FDF4B4F40D2181B3681C364BA0273C706", ++ "010000000000000000000000000000000000000000000000000001E2AAD6A612F33307BE5FA47C3C9E052F838164CD37D9A21173", 2 ++}; ++ ++static const ECCurveParams ecCurve_NIST_K571 = { ++ "NIST-K571", ECField_GF2m, 571, ++ "080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "026EB7A859923FBC82189631F8103FE4AC9CA2970012D5D46024804801841CA44370958493B205E647DA304DB4CEB08CBBD1BA39494776FB988B47174DCA88C7E2945283A01C8972", ++ "0349DC807F4FBF374F4AEADE3BCA95314DD58CEC9F307A54FFC61EFC006D8A2C9D4979C0AC44AEA74FBEBBB9F772AEDCB620B01A7BA7AF1B320430C8591984F601CD4C143EF1C7A3", ++ "020000000000000000000000000000000000000000000000000000000000000000000000131850E1F19A63E4B391A8DB917F4138B630D84BE5D639381E91DEB45CFE778F637C1001", 4 ++}; ++ ++static const ECCurveParams ecCurve_NIST_B571 = { ++ "NIST-B571", ECField_GF2m, 571, ++ 
"080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000425", ++ "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", ++ "02F40E7E2221F295DE297117B7F3D62F5C6A97FFCB8CEFF1CD6BA8CE4A9A18AD84FFABBD8EFA59332BE7AD6756A66E294AFD185A78FF12AA520E4DE739BACA0C7FFEFF7F2955727A", ++ "0303001D34B856296C16C0D40D3CD7750A93D1D2955FA80AA5F40FC8DB7B2ABDBDE53950F4C0D293CDD711A35B67FB1499AE60038614F1394ABFA3B4C850D927E1E7769C8EEC2D19", ++ "037BF27342DA639B6DCCFFFEB73D69D78C6C27A6009CBBCA1980F8533921E8A684423E43BAB08A576291AF8F461BB2A8B3531D2F0485C19B16E2F1516E23DD3C1A4827AF1B8AC15B", ++ "03FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE661CE18FF55987308059B186823851EC7DD9CA1161DE93D5174D66E8382E9BB2FE84E47", 2 ++}; ++ ++/* ANSI X9.62 prime curves */ ++static const ECCurveParams ecCurve_X9_62_PRIME_192V2 = { ++ "X9.62 P-192V2", ECField_GFp, 192, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "CC22D6DFB95C6B25E49C0D6364A4E5980C393AA21668D953", ++ "EEA2BAE7E1497842F2DE7769CFE9C989C072AD696F48034A", ++ "6574D11D69B6EC7A672BB82A083DF2F2B0847DE970B2DE15", ++ "FFFFFFFFFFFFFFFFFFFFFFFE5FB1A724DC80418648D8DD31", 1 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_PRIME_192V3 = { ++ "X9.62 P-192V3", ECField_GFp, 192, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC", ++ "22123DC2395A05CAA7423DAECCC94760A7D462256BD56916", ++ "7D29778100C65A1DA1783716588DCE2B8B4AEE8E228F1896", ++ "38A90F22637337334B49DCB66A6DC8F9978ACA7648A943B0", ++ "FFFFFFFFFFFFFFFFFFFFFFFF7A62D031C83F4294F640EC13", 1 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_PRIME_239V1 = { ++ "X9.62 P-239V1", ECField_GFp, 239, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ 
"7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "6B016C3BDCF18941D0D654921475CA71A9DB2FB27D1D37796185C2942C0A", ++ "0FFA963CDCA8816CCC33B8642BEDF905C3D358573D3F27FBBD3B3CB9AAAF", ++ "7DEBE8E4E90A5DAE6E4054CA530BA04654B36818CE226B39FCCB7B02F1AE", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF9E5E9A9F5D9071FBD1522688909D0B", 1 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_PRIME_239V2 = { ++ "X9.62 P-239V2", ECField_GFp, 239, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "617FAB6832576CBBFED50D99F0249C3FEE58B94BA0038C7AE84C8C832F2C", ++ "38AF09D98727705120C921BB5E9E26296A3CDCF2F35757A0EAFD87B830E7", ++ "5B0125E4DBEA0EC7206DA0FC01D9B081329FB555DE6EF460237DFF8BE4BA", ++ "7FFFFFFFFFFFFFFFFFFFFFFF800000CFA7E8594377D414C03821BC582063", 1 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_PRIME_239V3 = { ++ "X9.62 P-239V3", ECField_GFp, 239, ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFF", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFFFFFFFF8000000000007FFFFFFFFFFC", ++ "255705FA2A306654B1F4CB03D6A750A30C250102D4988717D9BA15AB6D3E", ++ "6768AE8E18BB92CFCF005C949AA2C6D94853D0E660BBF854B1C9505FE95A", ++ "1607E6898F390C06BC1D552BAD226F3B6FCFE48B6E818499AF18E3ED6CF3", ++ "7FFFFFFFFFFFFFFFFFFFFFFF7FFFFF975DEB41B3A6057C3C432146526551", 1 ++}; ++ ++/* ANSI X9.62 binary curves */ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB163V1 = { ++ "X9.62 C2-PNB163V1", ECField_GF2m, 163, ++ "080000000000000000000000000000000000000107", ++ "072546B5435234A422E0789675F432C89435DE5242", ++ "00C9517D06D5240D3CFF38C74B20B6CD4D6F9DD4D9", ++ "07AF69989546103D79329FCC3D74880F33BBE803CB", ++ "01EC23211B5966ADEA1D3F87F7EA5848AEF0B7CA9F", ++ "0400000000000000000001E60FC8821CC74DAEAFC1", 2 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB163V2 = { ++ "X9.62 C2-PNB163V2", ECField_GF2m, 163, ++ "080000000000000000000000000000000000000107", ++ 
"0108B39E77C4B108BED981ED0E890E117C511CF072", ++ "0667ACEB38AF4E488C407433FFAE4F1C811638DF20", ++ "0024266E4EB5106D0A964D92C4860E2671DB9B6CC5", ++ "079F684DDF6684C5CD258B3890021B2386DFD19FC5", ++ "03FFFFFFFFFFFFFFFFFFFDF64DE1151ADBB78F10A7", 2 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB163V3 = { ++ "X9.62 C2-PNB163V3", ECField_GF2m, 163, ++ "080000000000000000000000000000000000000107", ++ "07A526C63D3E25A256A007699F5447E32AE456B50E", ++ "03F7061798EB99E238FD6F1BF95B48FEEB4854252B", ++ "02F9F87B7C574D0BDECF8A22E6524775F98CDEBDCB", ++ "05B935590C155E17EA48EB3FF3718B893DF59A05D0", ++ "03FFFFFFFFFFFFFFFFFFFE1AEE140F110AFF961309", 2 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB176V1 = { ++ "X9.62 C2-PNB176V1", ECField_GF2m, 176, ++ "0100000000000000000000000000000000080000000007", ++ "E4E6DB2995065C407D9D39B8D0967B96704BA8E9C90B", ++ "5DDA470ABE6414DE8EC133AE28E9BBD7FCEC0AE0FFF2", ++ "8D16C2866798B600F9F08BB4A8E860F3298CE04A5798", ++ "6FA4539C2DADDDD6BAB5167D61B436E1D92BB16A562C", ++ "00010092537397ECA4F6145799D62B0A19CE06FE26AD", 0xFF6E ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB191V1 = { ++ "X9.62 C2-TNB191V1", ECField_GF2m, 191, ++ "800000000000000000000000000000000000000000000201", ++ "2866537B676752636A68F56554E12640276B649EF7526267", ++ "2E45EF571F00786F67B0081B9495A3D95462F5DE0AA185EC", ++ "36B3DAF8A23206F9C4F299D7B21A9C369137F2C84AE1AA0D", ++ "765BE73433B3F95E332932E70EA245CA2418EA0EF98018FB", ++ "40000000000000000000000004A20E90C39067C893BBB9A5", 2 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB191V2 = { ++ "X9.62 C2-TNB191V2", ECField_GF2m, 191, ++ "800000000000000000000000000000000000000000000201", ++ "401028774D7777C7B7666D1366EA432071274F89FF01E718", ++ "0620048D28BCBD03B6249C99182B7C8CD19700C362C46A01", ++ "3809B2B7CC1B28CC5A87926AAD83FD28789E81E2C9E3BF10", ++ "17434386626D14F3DBF01760D9213A3E1CF37AEC437D668A", ++ "20000000000000000000000050508CB89F652824E06B8173", 4 ++}; ++ ++static const 
ECCurveParams ecCurve_X9_62_CHAR2_TNB191V3 = { ++ "X9.62 C2-TNB191V3", ECField_GF2m, 191, ++ "800000000000000000000000000000000000000000000201", ++ "6C01074756099122221056911C77D77E77A777E7E7E77FCB", ++ "71FE1AF926CF847989EFEF8DB459F66394D90F32AD3F15E8", ++ "375D4CE24FDE434489DE8746E71786015009E66E38A926DD", ++ "545A39176196575D985999366E6AD34CE0A77CD7127B06BE", ++ "155555555555555555555555610C0B196812BFB6288A3EA3", 6 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB208W1 = { ++ "X9.62 C2-PNB208W1", ECField_GF2m, 208, ++ "010000000000000000000000000000000800000000000000000007", ++ "0000000000000000000000000000000000000000000000000000", ++ "C8619ED45A62E6212E1160349E2BFA844439FAFC2A3FD1638F9E", ++ "89FDFBE4ABE193DF9559ECF07AC0CE78554E2784EB8C1ED1A57A", ++ "0F55B51A06E78E9AC38A035FF520D8B01781BEB1A6BB08617DE3", ++ "000101BAF95C9723C57B6C21DA2EFF2D5ED588BDD5717E212F9D", 0xFE48 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB239V1 = { ++ "X9.62 C2-TNB239V1", ECField_GF2m, 239, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "32010857077C5431123A46B808906756F543423E8D27877578125778AC76", ++ "790408F2EEDAF392B012EDEFB3392F30F4327C0CA3F31FC383C422AA8C16", ++ "57927098FA932E7C0A96D3FD5B706EF7E5F5C156E16B7E7C86038552E91D", ++ "61D8EE5077C33FECF6F1A16B268DE469C3C7744EA9A971649FC7A9616305", ++ "2000000000000000000000000000000F4D42FFE1492A4993F1CAD666E447", 4 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB239V2 = { ++ "X9.62 C2-TNB239V2", ECField_GF2m, 239, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "4230017757A767FAE42398569B746325D45313AF0766266479B75654E65F", ++ "5037EA654196CFF0CD82B2C14A2FCF2E3FF8775285B545722F03EACDB74B", ++ "28F9D04E900069C8DC47A08534FE76D2B900B7D7EF31F5709F200C4CA205", ++ "5667334C45AFF3B5A03BAD9DD75E2C71A99362567D5453F7FA6E227EC833", ++ "1555555555555555555555555555553C6F2885259C31E3FCDF154624522D", 6 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB239V3 = 
{ ++ "X9.62 C2-TNB239V3", ECField_GF2m, 239, ++ "800000000000000000000000000000000000000000000000001000000001", ++ "01238774666A67766D6676F778E676B66999176666E687666D8766C66A9F", ++ "6A941977BA9F6A435199ACFC51067ED587F519C5ECB541B8E44111DE1D40", ++ "70F6E9D04D289C4E89913CE3530BFDE903977D42B146D539BF1BDE4E9C92", ++ "2E5A0EAF6E5E1305B9004DCE5C0ED7FE59A35608F33837C816D80B79F461", ++ "0CCCCCCCCCCCCCCCCCCCCCCCCCCCCCAC4912D2D9DF903EF9888B8A0E4CFF", 0xA ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB272W1 = { ++ "X9.62 C2-PNB272W1", ECField_GF2m, 272, ++ "010000000000000000000000000000000000000000000000000000010000000000000B", ++ "91A091F03B5FBA4AB2CCF49C4EDD220FB028712D42BE752B2C40094DBACDB586FB20", ++ "7167EFC92BB2E3CE7C8AAAFF34E12A9C557003D7C73A6FAF003F99F6CC8482E540F7", ++ "6108BABB2CEEBCF787058A056CBE0CFE622D7723A289E08A07AE13EF0D10D171DD8D", ++ "10C7695716851EEF6BA7F6872E6142FBD241B830FF5EFCACECCAB05E02005DDE9D23", ++ "000100FAF51354E0E39E4892DF6E319C72C8161603FA45AA7B998A167B8F1E629521", ++ 0xFF06 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB304W1 = { ++ "X9.62 C2-PNB304W1", ECField_GF2m, 304, ++ "010000000000000000000000000000000000000000000000000000000000000000000000000807", ++ "FD0D693149A118F651E6DCE6802085377E5F882D1B510B44160074C1288078365A0396C8E681", ++ "BDDB97E555A50A908E43B01C798EA5DAA6788F1EA2794EFCF57166B8C14039601E55827340BE", ++ "197B07845E9BE2D96ADB0F5F3C7F2CFFBD7A3EB8B6FEC35C7FD67F26DDF6285A644F740A2614", ++ "E19FBEB76E0DA171517ECF401B50289BF014103288527A9B416A105E80260B549FDC1B92C03B", ++ "000101D556572AABAC800101D556572AABAC8001022D5C91DD173F8FB561DA6899164443051D", 0xFE2E ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB359V1 = { ++ "X9.62 C2-TNB359V1", ECField_GF2m, 359, ++ "800000000000000000000000000000000000000000000000000000000000000000000000100000000000000001", ++ "5667676A654B20754F356EA92017D946567C46675556F19556A04616B567D223A5E05656FB549016A96656A557", ++ 
"2472E2D0197C49363F1FE7F5B6DB075D52B6947D135D8CA445805D39BC345626089687742B6329E70680231988", ++ "3C258EF3047767E7EDE0F1FDAA79DAEE3841366A132E163ACED4ED2401DF9C6BDCDE98E8E707C07A2239B1B097", ++ "53D7E08529547048121E9C95F3791DD804963948F34FAE7BF44EA82365DC7868FE57E4AE2DE211305A407104BD", ++ "01AF286BCA1AF286BCA1AF286BCA1AF286BCA1AF286BC9FB8F6B85C556892C20A7EB964FE7719E74F490758D3B", 0x4C ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_PNB368W1 = { ++ "X9.62 C2-PNB368W1", ECField_GF2m, 368, ++ "0100000000000000000000000000000000000000000000000000000000000000000000002000000000000000000007", ++ "E0D2EE25095206F5E2A4F9ED229F1F256E79A0E2B455970D8D0D865BD94778C576D62F0AB7519CCD2A1A906AE30D", ++ "FC1217D4320A90452C760A58EDCD30C8DD069B3C34453837A34ED50CB54917E1C2112D84D164F444F8F74786046A", ++ "1085E2755381DCCCE3C1557AFA10C2F0C0C2825646C5B34A394CBCFA8BC16B22E7E789E927BE216F02E1FB136A5F", ++ "7B3EB1BDDCBA62D5D8B2059B525797FC73822C59059C623A45FF3843CEE8F87CD1855ADAA81E2A0750B80FDA2310", ++ "00010090512DA9AF72B08349D98A5DD4C7B0532ECA51CE03E2D10F3B7AC579BD87E909AE40A6F131E9CFCE5BD967", 0xFF70 ++}; ++ ++static const ECCurveParams ecCurve_X9_62_CHAR2_TNB431R1 = { ++ "X9.62 C2-TNB431R1", ECField_GF2m, 431, ++ "800000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000001", ++ "1A827EF00DD6FC0E234CAF046C6A5D8A85395B236CC4AD2CF32A0CADBDC9DDF620B0EB9906D0957F6C6FEACD615468DF104DE296CD8F", ++ "10D9B4A3D9047D8B154359ABFB1B7F5485B04CEB868237DDC9DEDA982A679A5A919B626D4E50A8DD731B107A9962381FB5D807BF2618", ++ "120FC05D3C67A99DE161D2F4092622FECA701BE4F50F4758714E8A87BBF2A658EF8C21E7C5EFE965361F6C2999C0C247B0DBD70CE6B7", ++ "20D0AF8903A96F8D5FA2C255745D3C451B302C9346D9B7E485E7BCE41F6B591F3E8F6ADDCBB0BC4C2F947A7DE1A89B625D6A598B3760", ++ "0340340340340340340340340340340340340340340340340340340323C313FAB50589703B5EC68D3587FEC60D161CC149C1AD4A91", 0x2760 ++}; ++ + /* SEC2 prime curves */ ++static const ECCurveParams 
ecCurve_SECG_PRIME_112R1 = { ++ "SECP-112R1", ECField_GFp, 112, ++ "DB7C2ABF62E35E668076BEAD208B", ++ "DB7C2ABF62E35E668076BEAD2088", ++ "659EF8BA043916EEDE8911702B22", ++ "09487239995A5EE76B55F9C2F098", ++ "A89CE5AF8724C0A23E0E0FF77500", ++ "DB7C2ABF62E35E7628DFAC6561C5", 1 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_112R2 = { ++ "SECP-112R2", ECField_GFp, 112, ++ "DB7C2ABF62E35E668076BEAD208B", ++ "6127C24C05F38A0AAAF65C0EF02C", ++ "51DEF1815DB5ED74FCC34C85D709", ++ "4BA30AB5E892B4E1649DD0928643", ++ "adcd46f5882e3747def36e956e97", ++ "36DF0AAFD8B8D7597CA10520D04B", 4 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_128R1 = { ++ "SECP-128R1", ECField_GFp, 128, ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF", ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFC", ++ "E87579C11079F43DD824993C2CEE5ED3", ++ "161FF7528B899B2D0C28607CA52C5B86", ++ "CF5AC8395BAFEB13C02DA292DDED7A83", ++ "FFFFFFFE0000000075A30D1B9038A115", 1 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_128R2 = { ++ "SECP-128R2", ECField_GFp, 128, ++ "FFFFFFFDFFFFFFFFFFFFFFFFFFFFFFFF", ++ "D6031998D1B3BBFEBF59CC9BBFF9AEE1", ++ "5EEEFCA380D02919DC2C6558BB6D8A5D", ++ "7B6AA5D85E572983E6FB32A7CDEBC140", ++ "27B6916A894D3AEE7106FE805FC34B44", ++ "3FFFFFFF7FFFFFFFBE0024720613B5A3", 4 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_160K1 = { ++ "SECP-160K1", ECField_GFp, 160, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", ++ "0000000000000000000000000000000000000000", ++ "0000000000000000000000000000000000000007", ++ "3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", ++ "938CF935318FDCED6BC28286531733C3F03C4FEE", ++ "0100000000000000000001B8FA16DFAB9ACA16B6B3", 1 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_160R1 = { ++ "SECP-160R1", ECField_GFp, 160, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFF", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF7FFFFFFC", ++ "1C97BEFC54BD7A8B65ACF89F81D4D4ADC565FA45", ++ "4A96B5688EF573284664698968C38BB913CBFC82", ++ "23A628553168947D59DCC912042351377AC5FB32", ++ 
"0100000000000000000001F4C8F927AED3CA752257", 1 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_160R2 = { ++ "SECP-160R2", ECField_GFp, 160, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC70", ++ "B4E134D3FB59EB8BAB57274904664D5AF50388BA", ++ "52DCB034293A117E1F4FF11B30F7199D3144CE6D", ++ "FEAFFEF2E331F296E071FA0DF9982CFEA7D43F2E", ++ "0100000000000000000000351EE786A818F3A1A16B", 1 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_192K1 = { ++ "SECP-192K1", ECField_GFp, 192, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", ++ "000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000003", ++ "DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", ++ "9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", ++ "FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 1 ++}; ++ ++static const ECCurveParams ecCurve_SECG_PRIME_224K1 = { ++ "SECP-224K1", ECField_GFp, 224, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", ++ "00000000000000000000000000000000000000000000000000000000", ++ "00000000000000000000000000000000000000000000000000000005", ++ "A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", ++ "7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", ++ "010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 1 ++}; ++ + static const ECCurveParams ecCurve_SECG_PRIME_256K1 = { + "SECP-256K1", ECField_GFp, 256, + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", +@@ -88,70 +511,178 @@ + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 1 + }; + ++/* SEC2 binary curves */ ++static const ECCurveParams ecCurve_SECG_CHAR2_113R1 = { ++ "SECT-113R1", ECField_GF2m, 113, ++ "020000000000000000000000000201", ++ "003088250CA6E7C7FE649CE85820F7", ++ "00E8BEE4D3E2260744188BE0E9C723", ++ "009D73616F35F4AB1407D73562C10F", ++ "00A52830277958EE84D1315ED31886", ++ "0100000000000000D9CCEC8A39E56F", 2 ++}; ++ ++static const 
ECCurveParams ecCurve_SECG_CHAR2_113R2 = { ++ "SECT-113R2", ECField_GF2m, 113, ++ "020000000000000000000000000201", ++ "00689918DBEC7E5A0DD6DFC0AA55C7", ++ "0095E9A9EC9B297BD4BF36E059184F", ++ "01A57A6A7B26CA5EF52FCDB8164797", ++ "00B3ADC94ED1FE674C06E695BABA1D", ++ "010000000000000108789B2496AF93", 2 ++}; ++ ++static const ECCurveParams ecCurve_SECG_CHAR2_131R1 = { ++ "SECT-131R1", ECField_GF2m, 131, ++ "080000000000000000000000000000010D", ++ "07A11B09A76B562144418FF3FF8C2570B8", ++ "0217C05610884B63B9C6C7291678F9D341", ++ "0081BAF91FDF9833C40F9C181343638399", ++ "078C6E7EA38C001F73C8134B1B4EF9E150", ++ "0400000000000000023123953A9464B54D", 2 ++}; ++ ++static const ECCurveParams ecCurve_SECG_CHAR2_131R2 = { ++ "SECT-131R2", ECField_GF2m, 131, ++ "080000000000000000000000000000010D", ++ "03E5A88919D7CAFCBF415F07C2176573B2", ++ "04B8266A46C55657AC734CE38F018F2192", ++ "0356DCD8F2F95031AD652D23951BB366A8", ++ "0648F06D867940A5366D9E265DE9EB240F", ++ "0400000000000000016954A233049BA98F", 2 ++}; ++ ++static const ECCurveParams ecCurve_SECG_CHAR2_163R1 = { ++ "SECT-163R1", ECField_GF2m, 163, ++ "0800000000000000000000000000000000000000C9", ++ "07B6882CAAEFA84F9554FF8428BD88E246D2782AE2", ++ "0713612DCDDCB40AAB946BDA29CA91F73AF958AFD9", ++ "0369979697AB43897789566789567F787A7876A654", ++ "00435EDB42EFAFB2989D51FEFCE3C80988F41FF883", ++ "03FFFFFFFFFFFFFFFFFFFF48AAB689C29CA710279B", 2 ++}; ++ ++static const ECCurveParams ecCurve_SECG_CHAR2_193R1 = { ++ "SECT-193R1", ECField_GF2m, 193, ++ "02000000000000000000000000000000000000000000008001", ++ "0017858FEB7A98975169E171F77B4087DE098AC8A911DF7B01", ++ "00FDFB49BFE6C3A89FACADAA7A1E5BBC7CC1C2E5D831478814", ++ "01F481BC5F0FF84A74AD6CDF6FDEF4BF6179625372D8C0C5E1", ++ "0025E399F2903712CCF3EA9E3A1AD17FB0B3201B6AF7CE1B05", ++ "01000000000000000000000000C7F34A778F443ACC920EBA49", 2 ++}; ++ ++static const ECCurveParams ecCurve_SECG_CHAR2_193R2 = { ++ "SECT-193R2", ECField_GF2m, 193, ++ 
"02000000000000000000000000000000000000000000008001", ++ "0163F35A5137C2CE3EA6ED8667190B0BC43ECD69977702709B", ++ "00C9BB9E8927D4D64C377E2AB2856A5B16E3EFB7F61D4316AE", ++ "00D9B67D192E0367C803F39E1A7E82CA14A651350AAE617E8F", ++ "01CE94335607C304AC29E7DEFBD9CA01F596F927224CDECF6C", ++ "010000000000000000000000015AAB561B005413CCD4EE99D5", 2 ++}; ++ ++static const ECCurveParams ecCurve_SECG_CHAR2_239K1 = { ++ "SECT-239K1", ECField_GF2m, 239, ++ "800000000000000000004000000000000000000000000000000000000001", ++ "000000000000000000000000000000000000000000000000000000000000", ++ "000000000000000000000000000000000000000000000000000000000001", ++ "29A0B6A887A983E9730988A68727A8B2D126C44CC2CC7B2A6555193035DC", ++ "76310804F12E549BDB011C103089E73510ACB275FC312A5DC6B76553F0CA", ++ "2000000000000000000000000000005A79FEC67CB6E91F1C1DA800E478A5", 4 ++}; ++ ++/* WTLS curves */ ++static const ECCurveParams ecCurve_WTLS_1 = { ++ "WTLS-1", ECField_GF2m, 113, ++ "020000000000000000000000000201", ++ "000000000000000000000000000001", ++ "000000000000000000000000000001", ++ "01667979A40BA497E5D5C270780617", ++ "00F44B4AF1ECC2630E08785CEBCC15", ++ "00FFFFFFFFFFFFFFFDBF91AF6DEA73", 2 ++}; ++ ++static const ECCurveParams ecCurve_WTLS_8 = { ++ "WTLS-8", ECField_GFp, 112, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFDE7", ++ "0000000000000000000000000000", ++ "0000000000000000000000000003", ++ "0000000000000000000000000001", ++ "0000000000000000000000000002", ++ "0100000000000001ECEA551AD837E9", 1 ++}; ++ ++static const ECCurveParams ecCurve_WTLS_9 = { ++ "WTLS-9", ECField_GFp, 160, ++ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC808F", ++ "0000000000000000000000000000000000000000", ++ "0000000000000000000000000000000000000003", ++ "0000000000000000000000000000000000000001", ++ "0000000000000000000000000000000000000002", ++ "0100000000000000000001CDC98AE0E2DE574ABF33", 1 ++}; ++ + /* mapping between ECCurveName enum and pointers to ECCurveParams */ + static const ECCurveParams *ecCurve_map[] = { + NULL, /* 
ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ ++ &ecCurve_NIST_P192, /* ECCurve_NIST_P192 */ ++ &ecCurve_NIST_P224, /* ECCurve_NIST_P224 */ + &ecCurve_NIST_P256, /* ECCurve_NIST_P256 */ + &ecCurve_NIST_P384, /* ECCurve_NIST_P384 */ + &ecCurve_NIST_P521, /* ECCurve_NIST_P521 */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ ++ &ecCurve_NIST_K163, /* ECCurve_NIST_K163 */ ++ &ecCurve_NIST_B163, /* ECCurve_NIST_B163 */ ++ &ecCurve_NIST_K233, /* ECCurve_NIST_K233 */ ++ &ecCurve_NIST_B233, /* ECCurve_NIST_B233 */ ++ &ecCurve_NIST_K283, /* ECCurve_NIST_K283 */ ++ &ecCurve_NIST_B283, /* ECCurve_NIST_B283 */ ++ &ecCurve_NIST_K409, /* ECCurve_NIST_K409 */ ++ &ecCurve_NIST_B409, /* ECCurve_NIST_B409 */ ++ &ecCurve_NIST_K571, /* ECCurve_NIST_K571 */ ++ &ecCurve_NIST_B571, /* ECCurve_NIST_B571 */ ++ 
&ecCurve_X9_62_PRIME_192V2, /* ECCurve_X9_62_PRIME_192V2 */ ++ &ecCurve_X9_62_PRIME_192V3, /* ECCurve_X9_62_PRIME_192V3 */ ++ &ecCurve_X9_62_PRIME_239V1, /* ECCurve_X9_62_PRIME_239V1 */ ++ &ecCurve_X9_62_PRIME_239V2, /* ECCurve_X9_62_PRIME_239V2 */ ++ &ecCurve_X9_62_PRIME_239V3, /* ECCurve_X9_62_PRIME_239V3 */ ++ &ecCurve_X9_62_CHAR2_PNB163V1, /* ECCurve_X9_62_CHAR2_PNB163V1 */ ++ &ecCurve_X9_62_CHAR2_PNB163V2, /* ECCurve_X9_62_CHAR2_PNB163V2 */ ++ &ecCurve_X9_62_CHAR2_PNB163V3, /* ECCurve_X9_62_CHAR2_PNB163V3 */ ++ &ecCurve_X9_62_CHAR2_PNB176V1, /* ECCurve_X9_62_CHAR2_PNB176V1 */ ++ &ecCurve_X9_62_CHAR2_TNB191V1, /* ECCurve_X9_62_CHAR2_TNB191V1 */ ++ &ecCurve_X9_62_CHAR2_TNB191V2, /* ECCurve_X9_62_CHAR2_TNB191V2 */ ++ &ecCurve_X9_62_CHAR2_TNB191V3, /* ECCurve_X9_62_CHAR2_TNB191V3 */ ++ &ecCurve_X9_62_CHAR2_PNB208W1, /* ECCurve_X9_62_CHAR2_PNB208W1 */ ++ &ecCurve_X9_62_CHAR2_TNB239V1, /* ECCurve_X9_62_CHAR2_TNB239V1 */ ++ &ecCurve_X9_62_CHAR2_TNB239V2, /* ECCurve_X9_62_CHAR2_TNB239V2 */ ++ &ecCurve_X9_62_CHAR2_TNB239V3, /* ECCurve_X9_62_CHAR2_TNB239V3 */ ++ &ecCurve_X9_62_CHAR2_PNB272W1, /* ECCurve_X9_62_CHAR2_PNB272W1 */ ++ &ecCurve_X9_62_CHAR2_PNB304W1, /* ECCurve_X9_62_CHAR2_PNB304W1 */ ++ &ecCurve_X9_62_CHAR2_TNB359V1, /* ECCurve_X9_62_CHAR2_TNB359V1 */ ++ &ecCurve_X9_62_CHAR2_PNB368W1, /* ECCurve_X9_62_CHAR2_PNB368W1 */ ++ &ecCurve_X9_62_CHAR2_TNB431R1, /* ECCurve_X9_62_CHAR2_TNB431R1 */ ++ &ecCurve_SECG_PRIME_112R1, /* ECCurve_SECG_PRIME_112R1 */ ++ &ecCurve_SECG_PRIME_112R2, /* ECCurve_SECG_PRIME_112R2 */ ++ &ecCurve_SECG_PRIME_128R1, /* ECCurve_SECG_PRIME_128R1 */ ++ &ecCurve_SECG_PRIME_128R2, /* ECCurve_SECG_PRIME_128R2 */ ++ &ecCurve_SECG_PRIME_160K1, /* ECCurve_SECG_PRIME_160K1 */ ++ &ecCurve_SECG_PRIME_160R1, /* ECCurve_SECG_PRIME_160R1 */ ++ &ecCurve_SECG_PRIME_160R2, /* ECCurve_SECG_PRIME_160R2 */ ++ &ecCurve_SECG_PRIME_192K1, /* ECCurve_SECG_PRIME_192K1 */ ++ &ecCurve_SECG_PRIME_224K1, /* ECCurve_SECG_PRIME_224K1 */ + &ecCurve_SECG_PRIME_256K1, /* 
ECCurve_SECG_PRIME_256K1 */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ +- NULL, /* ECCurve_noName */ ++ &ecCurve_SECG_CHAR2_113R1, /* ECCurve_SECG_CHAR2_113R1 */ ++ &ecCurve_SECG_CHAR2_113R2, /* ECCurve_SECG_CHAR2_113R2 */ ++ &ecCurve_SECG_CHAR2_131R1, /* ECCurve_SECG_CHAR2_131R1 */ ++ &ecCurve_SECG_CHAR2_131R2, /* ECCurve_SECG_CHAR2_131R2 */ ++ &ecCurve_SECG_CHAR2_163R1, /* ECCurve_SECG_CHAR2_163R1 */ ++ &ecCurve_SECG_CHAR2_193R1, /* ECCurve_SECG_CHAR2_193R1 */ ++ &ecCurve_SECG_CHAR2_193R2, /* ECCurve_SECG_CHAR2_193R2 */ ++ &ecCurve_SECG_CHAR2_239K1, /* ECCurve_SECG_CHAR2_239K1 */ ++ &ecCurve_WTLS_1, /* ECCurve_WTLS_1 */ ++ &ecCurve_WTLS_8, /* ECCurve_WTLS_8 */ ++ &ecCurve_WTLS_9, /* ECCurve_WTLS_9 */ + NULL /* ECCurve_pastLastCurve */ + }; + +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ecp_192.c afu8u/jdk/src/share/native/sun/security/ec/impl/ecp_192.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ecp_192.c 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ecp_192.c 2025-05-06 10:53:46.735633728 +0800 +@@ -0,0 +1,517 @@ ++/* ++ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. 
++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for prime field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Douglas Stebila , Sun Microsystems Laboratories ++ * ++ *********************************************************************** */ ++ ++#include "ecp.h" ++#include "mpi.h" ++#include "mplogic.h" ++#include "mpi-priv.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++#define ECP192_DIGITS ECL_CURVE_DIGITS(192) ++ ++/* Fast modular reduction for p192 = 2^192 - 2^64 - 1. a can be r. Uses ++ * algorithm 7 from Brown, Hankerson, Lopez, Menezes. Software ++ * Implementation of the NIST Elliptic Curves over Prime Fields. 
*/ ++mp_err ++ec_GFp_nistp192_mod(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_size a_used = MP_USED(a); ++ mp_digit r3; ++#ifndef MPI_AMD64_ADD ++ mp_digit carry; ++#endif ++#ifdef ECL_THIRTY_TWO_BIT ++ mp_digit a5a = 0, a5b = 0, a4a = 0, a4b = 0, a3a = 0, a3b = 0; ++ mp_digit r0a, r0b, r1a, r1b, r2a, r2b; ++#else ++ mp_digit a5 = 0, a4 = 0, a3 = 0; ++ mp_digit r0, r1, r2; ++#endif ++ ++ /* reduction not needed if a is not larger than field size */ ++ if (a_used < ECP192_DIGITS) { ++ if (a == r) { ++ return MP_OKAY; ++ } ++ return mp_copy(a, r); ++ } ++ ++ /* for polynomials larger than twice the field size, use regular ++ * reduction */ ++ if (a_used > ECP192_DIGITS*2) { ++ MP_CHECKOK(mp_mod(a, &meth->irr, r)); ++ } else { ++ /* copy out upper words of a */ ++ ++#ifdef ECL_THIRTY_TWO_BIT ++ ++ /* in all the math below, ++ * nXb is most signifiant, nXa is least significant */ ++ switch (a_used) { ++ case 12: ++ a5b = MP_DIGIT(a, 11); ++ case 11: ++ a5a = MP_DIGIT(a, 10); ++ case 10: ++ a4b = MP_DIGIT(a, 9); ++ case 9: ++ a4a = MP_DIGIT(a, 8); ++ case 8: ++ a3b = MP_DIGIT(a, 7); ++ case 7: ++ a3a = MP_DIGIT(a, 6); ++ } ++ ++ ++ r2b= MP_DIGIT(a, 5); ++ r2a= MP_DIGIT(a, 4); ++ r1b = MP_DIGIT(a, 3); ++ r1a = MP_DIGIT(a, 2); ++ r0b = MP_DIGIT(a, 1); ++ r0a = MP_DIGIT(a, 0); ++ ++ /* implement r = (a2,a1,a0)+(a5,a5,a5)+(a4,a4,0)+(0,a3,a3) */ ++ MP_ADD_CARRY(r0a, a3a, r0a, 0, carry); ++ MP_ADD_CARRY(r0b, a3b, r0b, carry, carry); ++ MP_ADD_CARRY(r1a, a3a, r1a, carry, carry); ++ MP_ADD_CARRY(r1b, a3b, r1b, carry, carry); ++ MP_ADD_CARRY(r2a, a4a, r2a, carry, carry); ++ MP_ADD_CARRY(r2b, a4b, r2b, carry, carry); ++ r3 = carry; carry = 0; ++ MP_ADD_CARRY(r0a, a5a, r0a, 0, carry); ++ MP_ADD_CARRY(r0b, a5b, r0b, carry, carry); ++ MP_ADD_CARRY(r1a, a5a, r1a, carry, carry); ++ MP_ADD_CARRY(r1b, a5b, r1b, carry, carry); ++ MP_ADD_CARRY(r2a, a5a, r2a, carry, carry); ++ MP_ADD_CARRY(r2b, a5b, r2b, carry, carry); ++ r3 += carry; ++ 
MP_ADD_CARRY(r1a, a4a, r1a, 0, carry); ++ MP_ADD_CARRY(r1b, a4b, r1b, carry, carry); ++ MP_ADD_CARRY(r2a, 0, r2a, carry, carry); ++ MP_ADD_CARRY(r2b, 0, r2b, carry, carry); ++ r3 += carry; ++ ++ /* reduce out the carry */ ++ while (r3) { ++ MP_ADD_CARRY(r0a, r3, r0a, 0, carry); ++ MP_ADD_CARRY(r0b, 0, r0b, carry, carry); ++ MP_ADD_CARRY(r1a, r3, r1a, carry, carry); ++ MP_ADD_CARRY(r1b, 0, r1b, carry, carry); ++ MP_ADD_CARRY(r2a, 0, r2a, carry, carry); ++ MP_ADD_CARRY(r2b, 0, r2b, carry, carry); ++ r3 = carry; ++ } ++ ++ /* check for final reduction */ ++ /* ++ * our field is 0xffffffffffffffff, 0xfffffffffffffffe, ++ * 0xffffffffffffffff. That means we can only be over and need ++ * one more reduction ++ * if r2 == 0xffffffffffffffffff (same as r2+1 == 0) ++ * and ++ * r1 == 0xffffffffffffffffff or ++ * r1 == 0xfffffffffffffffffe and r0 = 0xfffffffffffffffff ++ * In all cases, we subtract the field (or add the 2's ++ * complement value (1,1,0)). (r0, r1, r2) ++ */ ++ if (((r2b == 0xffffffff) && (r2a == 0xffffffff) ++ && (r1b == 0xffffffff) ) && ++ ((r1a == 0xffffffff) || ++ (r1a == 0xfffffffe) && (r0a == 0xffffffff) && ++ (r0b == 0xffffffff)) ) { ++ /* do a quick subtract */ ++ MP_ADD_CARRY(r0a, 1, r0a, 0, carry); ++ r0b += carry; ++ r1a = r1b = r2a = r2b = 0; ++ } ++ ++ /* set the lower words of r */ ++ if (a != r) { ++ MP_CHECKOK(s_mp_pad(r, 6)); ++ } ++ MP_DIGIT(r, 5) = r2b; ++ MP_DIGIT(r, 4) = r2a; ++ MP_DIGIT(r, 3) = r1b; ++ MP_DIGIT(r, 2) = r1a; ++ MP_DIGIT(r, 1) = r0b; ++ MP_DIGIT(r, 0) = r0a; ++ MP_USED(r) = 6; ++#else ++ switch (a_used) { ++ case 6: ++ a5 = MP_DIGIT(a, 5); ++ case 5: ++ a4 = MP_DIGIT(a, 4); ++ case 4: ++ a3 = MP_DIGIT(a, 3); ++ } ++ ++ r2 = MP_DIGIT(a, 2); ++ r1 = MP_DIGIT(a, 1); ++ r0 = MP_DIGIT(a, 0); ++ ++ /* implement r = (a2,a1,a0)+(a5,a5,a5)+(a4,a4,0)+(0,a3,a3) */ ++#ifndef MPI_AMD64_ADD ++ MP_ADD_CARRY_ZERO(r0, a3, r0, carry); ++ MP_ADD_CARRY(r1, a3, r1, carry, carry); ++ MP_ADD_CARRY(r2, a4, r2, carry, carry); ++ r3 = carry; ++ 
MP_ADD_CARRY_ZERO(r0, a5, r0, carry); ++ MP_ADD_CARRY(r1, a5, r1, carry, carry); ++ MP_ADD_CARRY(r2, a5, r2, carry, carry); ++ r3 += carry; ++ MP_ADD_CARRY_ZERO(r1, a4, r1, carry); ++ MP_ADD_CARRY(r2, 0, r2, carry, carry); ++ r3 += carry; ++ ++#else ++ r2 = MP_DIGIT(a, 2); ++ r1 = MP_DIGIT(a, 1); ++ r0 = MP_DIGIT(a, 0); ++ ++ /* set the lower words of r */ ++ __asm__ ( ++ "xorq %3,%3 \n\t" ++ "addq %4,%0 \n\t" ++ "adcq %4,%1 \n\t" ++ "adcq %5,%2 \n\t" ++ "adcq $0,%3 \n\t" ++ "addq %6,%0 \n\t" ++ "adcq %6,%1 \n\t" ++ "adcq %6,%2 \n\t" ++ "adcq $0,%3 \n\t" ++ "addq %5,%1 \n\t" ++ "adcq $0,%2 \n\t" ++ "adcq $0,%3 \n\t" ++ : "=r"(r0), "=r"(r1), "=r"(r2), "=r"(r3), "=r"(a3), ++ "=r"(a4), "=r"(a5) ++ : "0" (r0), "1" (r1), "2" (r2), "3" (r3), ++ "4" (a3), "5" (a4), "6"(a5) ++ : "%cc" ); ++#endif ++ ++ /* reduce out the carry */ ++ while (r3) { ++#ifndef MPI_AMD64_ADD ++ MP_ADD_CARRY_ZERO(r0, r3, r0, carry); ++ MP_ADD_CARRY(r1, r3, r1, carry, carry); ++ MP_ADD_CARRY(r2, 0, r2, carry, carry); ++ r3 = carry; ++#else ++ a3=r3; ++ __asm__ ( ++ "xorq %3,%3 \n\t" ++ "addq %4,%0 \n\t" ++ "adcq %4,%1 \n\t" ++ "adcq $0,%2 \n\t" ++ "adcq $0,%3 \n\t" ++ : "=r"(r0), "=r"(r1), "=r"(r2), "=r"(r3), "=r"(a3) ++ : "0" (r0), "1" (r1), "2" (r2), "3" (r3), "4"(a3) ++ : "%cc" ); ++#endif ++ } ++ ++ /* check for final reduction */ ++ /* ++ * our field is 0xffffffffffffffff, 0xfffffffffffffffe, ++ * 0xffffffffffffffff. That means we can only be over and need ++ * one more reduction ++ * if r2 == 0xffffffffffffffffff (same as r2+1 == 0) ++ * and ++ * r1 == 0xffffffffffffffffff or ++ * r1 == 0xfffffffffffffffffe and r0 = 0xfffffffffffffffff ++ * In all cases, we subtract the field (or add the 2's ++ * complement value (1,1,0)). 
(r0, r1, r2) ++ */ ++ if (r3 || ((r2 == MP_DIGIT_MAX) && ++ ((r1 == MP_DIGIT_MAX) || ++ ((r1 == (MP_DIGIT_MAX-1)) && (r0 == MP_DIGIT_MAX))))) { ++ /* do a quick subtract */ ++ r0++; ++ r1 = r2 = 0; ++ } ++ /* set the lower words of r */ ++ if (a != r) { ++ MP_CHECKOK(s_mp_pad(r, 3)); ++ } ++ MP_DIGIT(r, 2) = r2; ++ MP_DIGIT(r, 1) = r1; ++ MP_DIGIT(r, 0) = r0; ++ MP_USED(r) = 3; ++#endif ++ } ++ ++ CLEANUP: ++ return res; ++} ++ ++#ifndef ECL_THIRTY_TWO_BIT ++/* Compute the sum of 192 bit curves. Do the work in-line since the ++ * number of words are so small, we don't want to overhead of mp function ++ * calls. Uses optimized modular reduction for p192. ++ */ ++mp_err ++ec_GFp_nistp192_add(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit a0 = 0, a1 = 0, a2 = 0; ++ mp_digit r0 = 0, r1 = 0, r2 = 0; ++ mp_digit carry; ++ ++ switch(MP_USED(a)) { ++ case 3: ++ a2 = MP_DIGIT(a,2); ++ case 2: ++ a1 = MP_DIGIT(a,1); ++ case 1: ++ a0 = MP_DIGIT(a,0); ++ } ++ switch(MP_USED(b)) { ++ case 3: ++ r2 = MP_DIGIT(b,2); ++ case 2: ++ r1 = MP_DIGIT(b,1); ++ case 1: ++ r0 = MP_DIGIT(b,0); ++ } ++ ++#ifndef MPI_AMD64_ADD ++ MP_ADD_CARRY_ZERO(a0, r0, r0, carry); ++ MP_ADD_CARRY(a1, r1, r1, carry, carry); ++ MP_ADD_CARRY(a2, r2, r2, carry, carry); ++#else ++ __asm__ ( ++ "xorq %3,%3 \n\t" ++ "addq %4,%0 \n\t" ++ "adcq %5,%1 \n\t" ++ "adcq %6,%2 \n\t" ++ "adcq $0,%3 \n\t" ++ : "=r"(r0), "=r"(r1), "=r"(r2), "=r"(carry) ++ : "r" (a0), "r" (a1), "r" (a2), "0" (r0), ++ "1" (r1), "2" (r2) ++ : "%cc" ); ++#endif ++ ++ /* Do quick 'subract' if we've gone over ++ * (add the 2's complement of the curve field) */ ++ if (carry || ((r2 == MP_DIGIT_MAX) && ++ ((r1 == MP_DIGIT_MAX) || ++ ((r1 == (MP_DIGIT_MAX-1)) && (r0 == MP_DIGIT_MAX))))) { ++#ifndef MPI_AMD64_ADD ++ MP_ADD_CARRY_ZERO(r0, 1, r0, carry); ++ MP_ADD_CARRY(r1, 1, r1, carry, carry); ++ MP_ADD_CARRY(r2, 0, r2, carry, carry); ++#else ++ __asm__ ( ++ "addq $1,%0 \n\t" ++ "adcq 
$1,%1 \n\t" ++ "adcq $0,%2 \n\t" ++ : "=r"(r0), "=r"(r1), "=r"(r2) ++ : "0" (r0), "1" (r1), "2" (r2) ++ : "%cc" ); ++#endif ++ } ++ ++ ++ MP_CHECKOK(s_mp_pad(r, 3)); ++ MP_DIGIT(r, 2) = r2; ++ MP_DIGIT(r, 1) = r1; ++ MP_DIGIT(r, 0) = r0; ++ MP_SIGN(r) = MP_ZPOS; ++ MP_USED(r) = 3; ++ s_mp_clamp(r); ++ ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Compute the diff of 192 bit curves. Do the work in-line since the ++ * number of words are so small, we don't want to overhead of mp function ++ * calls. Uses optimized modular reduction for p192. ++ */ ++mp_err ++ec_GFp_nistp192_sub(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_digit b0 = 0, b1 = 0, b2 = 0; ++ mp_digit r0 = 0, r1 = 0, r2 = 0; ++ mp_digit borrow; ++ ++ switch(MP_USED(a)) { ++ case 3: ++ r2 = MP_DIGIT(a,2); ++ case 2: ++ r1 = MP_DIGIT(a,1); ++ case 1: ++ r0 = MP_DIGIT(a,0); ++ } ++ ++ switch(MP_USED(b)) { ++ case 3: ++ b2 = MP_DIGIT(b,2); ++ case 2: ++ b1 = MP_DIGIT(b,1); ++ case 1: ++ b0 = MP_DIGIT(b,0); ++ } ++ ++#ifndef MPI_AMD64_ADD ++ MP_SUB_BORROW(r0, b0, r0, 0, borrow); ++ MP_SUB_BORROW(r1, b1, r1, borrow, borrow); ++ MP_SUB_BORROW(r2, b2, r2, borrow, borrow); ++#else ++ __asm__ ( ++ "xorq %3,%3 \n\t" ++ "subq %4,%0 \n\t" ++ "sbbq %5,%1 \n\t" ++ "sbbq %6,%2 \n\t" ++ "adcq $0,%3 \n\t" ++ : "=r"(r0), "=r"(r1), "=r"(r2), "=r"(borrow) ++ : "r" (b0), "r" (b1), "r" (b2), "0" (r0), ++ "1" (r1), "2" (r2) ++ : "%cc" ); ++#endif ++ ++ /* Do quick 'add' if we've gone under 0 ++ * (subtract the 2's complement of the curve field) */ ++ if (borrow) { ++#ifndef MPI_AMD64_ADD ++ MP_SUB_BORROW(r0, 1, r0, 0, borrow); ++ MP_SUB_BORROW(r1, 1, r1, borrow, borrow); ++ MP_SUB_BORROW(r2, 0, r2, borrow, borrow); ++#else ++ __asm__ ( ++ "subq $1,%0 \n\t" ++ "sbbq $1,%1 \n\t" ++ "sbbq $0,%2 \n\t" ++ : "=r"(r0), "=r"(r1), "=r"(r2) ++ : "0" (r0), "1" (r1), "2" (r2) ++ : "%cc" ); ++#endif ++ } ++ ++ MP_CHECKOK(s_mp_pad(r, 3)); ++ MP_DIGIT(r, 2) = r2; ++ MP_DIGIT(r, 1) = r1; 
++ MP_DIGIT(r, 0) = r0; ++ MP_SIGN(r) = MP_ZPOS; ++ MP_USED(r) = 3; ++ s_mp_clamp(r); ++ ++ CLEANUP: ++ return res; ++} ++ ++#endif ++ ++/* Compute the square of polynomial a, reduce modulo p192. Store the ++ * result in r. r could be a. Uses optimized modular reduction for p192. ++ */ ++mp_err ++ec_GFp_nistp192_sqr(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ ++ MP_CHECKOK(mp_sqr(a, r)); ++ MP_CHECKOK(ec_GFp_nistp192_mod(r, r, meth)); ++ CLEANUP: ++ return res; ++} ++ ++/* Compute the product of two polynomials a and b, reduce modulo p192. ++ * Store the result in r. r could be a or b; a could be b. Uses ++ * optimized modular reduction for p192. */ ++mp_err ++ec_GFp_nistp192_mul(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ ++ MP_CHECKOK(mp_mul(a, b, r)); ++ MP_CHECKOK(ec_GFp_nistp192_mod(r, r, meth)); ++ CLEANUP: ++ return res; ++} ++ ++/* Divides two field elements. If a is NULL, then returns the inverse of ++ * b. */ ++mp_err ++ec_GFp_nistp192_div(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_int t; ++ ++ /* If a is NULL, then return the inverse of b, otherwise return a/b. */ ++ if (a == NULL) { ++ return mp_invmod(b, &meth->irr, r); ++ } else { ++ /* MPI doesn't support divmod, so we implement it using invmod and ++ * mulmod. */ ++ MP_CHECKOK(mp_init(&t, FLAG(b))); ++ MP_CHECKOK(mp_invmod(b, &meth->irr, &t)); ++ MP_CHECKOK(mp_mul(a, &t, r)); ++ MP_CHECKOK(ec_GFp_nistp192_mod(r, r, meth)); ++ CLEANUP: ++ mp_clear(&t); ++ return res; ++ } ++} ++ ++/* Wire in fast field arithmetic and precomputation of base point for ++ * named curves. 
*/ ++mp_err ++ec_group_set_gfp192(ECGroup *group, ECCurveName name) ++{ ++ if (name == ECCurve_NIST_P192) { ++ group->meth->field_mod = &ec_GFp_nistp192_mod; ++ group->meth->field_mul = &ec_GFp_nistp192_mul; ++ group->meth->field_sqr = &ec_GFp_nistp192_sqr; ++ group->meth->field_div = &ec_GFp_nistp192_div; ++#ifndef ECL_THIRTY_TWO_BIT ++ group->meth->field_add = &ec_GFp_nistp192_add; ++ group->meth->field_sub = &ec_GFp_nistp192_sub; ++#endif ++ } ++ return MP_OKAY; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/ecp_224.c afu8u/jdk/src/share/native/sun/security/ec/impl/ecp_224.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/ecp_224.c 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/ecp_224.c 2025-05-06 10:53:46.735633728 +0800 +@@ -0,0 +1,373 @@ ++/* ++ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved. ++ * Use is subject to license terms. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public License ++ * along with this library; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. 
++ */ ++ ++/* ********************************************************************* ++ * ++ * The Original Code is the elliptic curve math library for prime field curves. ++ * ++ * The Initial Developer of the Original Code is ++ * Sun Microsystems, Inc. ++ * Portions created by the Initial Developer are Copyright (C) 2003 ++ * the Initial Developer. All Rights Reserved. ++ * ++ * Contributor(s): ++ * Douglas Stebila , Sun Microsystems Laboratories ++ * ++ *********************************************************************** */ ++ ++#include "ecp.h" ++#include "mpi.h" ++#include "mplogic.h" ++#include "mpi-priv.h" ++#ifndef _KERNEL ++#include ++#endif ++ ++#define ECP224_DIGITS ECL_CURVE_DIGITS(224) ++ ++/* Fast modular reduction for p224 = 2^224 - 2^96 + 1. a can be r. Uses ++ * algorithm 7 from Brown, Hankerson, Lopez, Menezes. Software ++ * Implementation of the NIST Elliptic Curves over Prime Fields. */ ++mp_err ++ec_GFp_nistp224_mod(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_size a_used = MP_USED(a); ++ ++ int r3b; ++ mp_digit carry; ++#ifdef ECL_THIRTY_TWO_BIT ++ mp_digit a6a = 0, a6b = 0, ++ a5a = 0, a5b = 0, a4a = 0, a4b = 0, a3a = 0, a3b = 0; ++ mp_digit r0a, r0b, r1a, r1b, r2a, r2b, r3a; ++#else ++ mp_digit a6 = 0, a5 = 0, a4 = 0, a3b = 0, a5a = 0; ++ mp_digit a6b = 0, a6a_a5b = 0, a5b = 0, a5a_a4b = 0, a4a_a3b = 0; ++ mp_digit r0, r1, r2, r3; ++#endif ++ ++ /* reduction not needed if a is not larger than field size */ ++ if (a_used < ECP224_DIGITS) { ++ if (a == r) return MP_OKAY; ++ return mp_copy(a, r); ++ } ++ /* for polynomials larger than twice the field size, use regular ++ * reduction */ ++ if (a_used > ECL_CURVE_DIGITS(224*2)) { ++ MP_CHECKOK(mp_mod(a, &meth->irr, r)); ++ } else { ++#ifdef ECL_THIRTY_TWO_BIT ++ /* copy out upper words of a */ ++ switch (a_used) { ++ case 14: ++ a6b = MP_DIGIT(a, 13); ++ case 13: ++ a6a = MP_DIGIT(a, 12); ++ case 12: ++ a5b = MP_DIGIT(a, 11); ++ case 11: ++ a5a = 
MP_DIGIT(a, 10); ++ case 10: ++ a4b = MP_DIGIT(a, 9); ++ case 9: ++ a4a = MP_DIGIT(a, 8); ++ case 8: ++ a3b = MP_DIGIT(a, 7); ++ } ++ r3a = MP_DIGIT(a, 6); ++ r2b= MP_DIGIT(a, 5); ++ r2a= MP_DIGIT(a, 4); ++ r1b = MP_DIGIT(a, 3); ++ r1a = MP_DIGIT(a, 2); ++ r0b = MP_DIGIT(a, 1); ++ r0a = MP_DIGIT(a, 0); ++ ++ ++ /* implement r = (a3a,a2,a1,a0) ++ +(a5a, a4,a3b, 0) ++ +( 0, a6,a5b, 0) ++ -( 0 0, 0|a6b, a6a|a5b ) ++ -( a6b, a6a|a5b, a5a|a4b, a4a|a3b ) */ ++ MP_ADD_CARRY (r1b, a3b, r1b, 0, carry); ++ MP_ADD_CARRY (r2a, a4a, r2a, carry, carry); ++ MP_ADD_CARRY (r2b, a4b, r2b, carry, carry); ++ MP_ADD_CARRY (r3a, a5a, r3a, carry, carry); ++ r3b = carry; ++ MP_ADD_CARRY (r1b, a5b, r1b, 0, carry); ++ MP_ADD_CARRY (r2a, a6a, r2a, carry, carry); ++ MP_ADD_CARRY (r2b, a6b, r2b, carry, carry); ++ MP_ADD_CARRY (r3a, 0, r3a, carry, carry); ++ r3b += carry; ++ MP_SUB_BORROW(r0a, a3b, r0a, 0, carry); ++ MP_SUB_BORROW(r0b, a4a, r0b, carry, carry); ++ MP_SUB_BORROW(r1a, a4b, r1a, carry, carry); ++ MP_SUB_BORROW(r1b, a5a, r1b, carry, carry); ++ MP_SUB_BORROW(r2a, a5b, r2a, carry, carry); ++ MP_SUB_BORROW(r2b, a6a, r2b, carry, carry); ++ MP_SUB_BORROW(r3a, a6b, r3a, carry, carry); ++ r3b -= carry; ++ MP_SUB_BORROW(r0a, a5b, r0a, 0, carry); ++ MP_SUB_BORROW(r0b, a6a, r0b, carry, carry); ++ MP_SUB_BORROW(r1a, a6b, r1a, carry, carry); ++ if (carry) { ++ MP_SUB_BORROW(r1b, 0, r1b, carry, carry); ++ MP_SUB_BORROW(r2a, 0, r2a, carry, carry); ++ MP_SUB_BORROW(r2b, 0, r2b, carry, carry); ++ MP_SUB_BORROW(r3a, 0, r3a, carry, carry); ++ r3b -= carry; ++ } ++ ++ while (r3b > 0) { ++ int tmp; ++ MP_ADD_CARRY(r1b, r3b, r1b, 0, carry); ++ if (carry) { ++ MP_ADD_CARRY(r2a, 0, r2a, carry, carry); ++ MP_ADD_CARRY(r2b, 0, r2b, carry, carry); ++ MP_ADD_CARRY(r3a, 0, r3a, carry, carry); ++ } ++ tmp = carry; ++ MP_SUB_BORROW(r0a, r3b, r0a, 0, carry); ++ if (carry) { ++ MP_SUB_BORROW(r0b, 0, r0b, carry, carry); ++ MP_SUB_BORROW(r1a, 0, r1a, carry, carry); ++ MP_SUB_BORROW(r1b, 0, r1b, carry, carry); ++ 
MP_SUB_BORROW(r2a, 0, r2a, carry, carry); ++ MP_SUB_BORROW(r2b, 0, r2b, carry, carry); ++ MP_SUB_BORROW(r3a, 0, r3a, carry, carry); ++ tmp -= carry; ++ } ++ r3b = tmp; ++ } ++ ++ while (r3b < 0) { ++ mp_digit maxInt = MP_DIGIT_MAX; ++ MP_ADD_CARRY (r0a, 1, r0a, 0, carry); ++ MP_ADD_CARRY (r0b, 0, r0b, carry, carry); ++ MP_ADD_CARRY (r1a, 0, r1a, carry, carry); ++ MP_ADD_CARRY (r1b, maxInt, r1b, carry, carry); ++ MP_ADD_CARRY (r2a, maxInt, r2a, carry, carry); ++ MP_ADD_CARRY (r2b, maxInt, r2b, carry, carry); ++ MP_ADD_CARRY (r3a, maxInt, r3a, carry, carry); ++ r3b += carry; ++ } ++ /* check for final reduction */ ++ /* now the only way we are over is if the top 4 words are all ones */ ++ if ((r3a == MP_DIGIT_MAX) && (r2b == MP_DIGIT_MAX) ++ && (r2a == MP_DIGIT_MAX) && (r1b == MP_DIGIT_MAX) && ++ ((r1a != 0) || (r0b != 0) || (r0a != 0)) ) { ++ /* one last subraction */ ++ MP_SUB_BORROW(r0a, 1, r0a, 0, carry); ++ MP_SUB_BORROW(r0b, 0, r0b, carry, carry); ++ MP_SUB_BORROW(r1a, 0, r1a, carry, carry); ++ r1b = r2a = r2b = r3a = 0; ++ } ++ ++ ++ if (a != r) { ++ MP_CHECKOK(s_mp_pad(r, 7)); ++ } ++ /* set the lower words of r */ ++ MP_SIGN(r) = MP_ZPOS; ++ MP_USED(r) = 7; ++ MP_DIGIT(r, 6) = r3a; ++ MP_DIGIT(r, 5) = r2b; ++ MP_DIGIT(r, 4) = r2a; ++ MP_DIGIT(r, 3) = r1b; ++ MP_DIGIT(r, 2) = r1a; ++ MP_DIGIT(r, 1) = r0b; ++ MP_DIGIT(r, 0) = r0a; ++#else ++ /* copy out upper words of a */ ++ switch (a_used) { ++ case 7: ++ a6 = MP_DIGIT(a, 6); ++ a6b = a6 >> 32; ++ a6a_a5b = a6 << 32; ++ case 6: ++ a5 = MP_DIGIT(a, 5); ++ a5b = a5 >> 32; ++ a6a_a5b |= a5b; ++ a5b = a5b << 32; ++ a5a_a4b = a5 << 32; ++ a5a = a5 & 0xffffffff; ++ case 5: ++ a4 = MP_DIGIT(a, 4); ++ a5a_a4b |= a4 >> 32; ++ a4a_a3b = a4 << 32; ++ case 4: ++ a3b = MP_DIGIT(a, 3) >> 32; ++ a4a_a3b |= a3b; ++ a3b = a3b << 32; ++ } ++ ++ r3 = MP_DIGIT(a, 3) & 0xffffffff; ++ r2 = MP_DIGIT(a, 2); ++ r1 = MP_DIGIT(a, 1); ++ r0 = MP_DIGIT(a, 0); ++ ++ /* implement r = (a3a,a2,a1,a0) ++ +(a5a, a4,a3b, 0) ++ +( 0, a6,a5b, 0) 
++ -( 0 0, 0|a6b, a6a|a5b ) ++ -( a6b, a6a|a5b, a5a|a4b, a4a|a3b ) */ ++ MP_ADD_CARRY_ZERO (r1, a3b, r1, carry); ++ MP_ADD_CARRY (r2, a4 , r2, carry, carry); ++ MP_ADD_CARRY (r3, a5a, r3, carry, carry); ++ MP_ADD_CARRY_ZERO (r1, a5b, r1, carry); ++ MP_ADD_CARRY (r2, a6 , r2, carry, carry); ++ MP_ADD_CARRY (r3, 0, r3, carry, carry); ++ ++ MP_SUB_BORROW(r0, a4a_a3b, r0, 0, carry); ++ MP_SUB_BORROW(r1, a5a_a4b, r1, carry, carry); ++ MP_SUB_BORROW(r2, a6a_a5b, r2, carry, carry); ++ MP_SUB_BORROW(r3, a6b , r3, carry, carry); ++ MP_SUB_BORROW(r0, a6a_a5b, r0, 0, carry); ++ MP_SUB_BORROW(r1, a6b , r1, carry, carry); ++ if (carry) { ++ MP_SUB_BORROW(r2, 0, r2, carry, carry); ++ MP_SUB_BORROW(r3, 0, r3, carry, carry); ++ } ++ ++ ++ /* if the value is negative, r3 has a 2's complement ++ * high value */ ++ r3b = (int)(r3 >>32); ++ while (r3b > 0) { ++ r3 &= 0xffffffff; ++ MP_ADD_CARRY_ZERO(r1,((mp_digit)r3b) << 32, r1, carry); ++ if (carry) { ++ MP_ADD_CARRY(r2, 0, r2, carry, carry); ++ MP_ADD_CARRY(r3, 0, r3, carry, carry); ++ } ++ MP_SUB_BORROW(r0, r3b, r0, 0, carry); ++ if (carry) { ++ MP_SUB_BORROW(r1, 0, r1, carry, carry); ++ MP_SUB_BORROW(r2, 0, r2, carry, carry); ++ MP_SUB_BORROW(r3, 0, r3, carry, carry); ++ } ++ r3b = (int)(r3 >>32); ++ } ++ ++ while (r3b < 0) { ++ MP_ADD_CARRY_ZERO (r0, 1, r0, carry); ++ MP_ADD_CARRY (r1, MP_DIGIT_MAX <<32, r1, carry, carry); ++ MP_ADD_CARRY (r2, MP_DIGIT_MAX, r2, carry, carry); ++ MP_ADD_CARRY (r3, MP_DIGIT_MAX >> 32, r3, carry, carry); ++ r3b = (int)(r3 >>32); ++ } ++ /* check for final reduction */ ++ /* now the only way we are over is if the top 4 words are all ones */ ++ if ((r3 == (MP_DIGIT_MAX >> 32)) && (r2 == MP_DIGIT_MAX) ++ && ((r1 & MP_DIGIT_MAX << 32)== MP_DIGIT_MAX << 32) && ++ ((r1 != MP_DIGIT_MAX << 32 ) || (r0 != 0)) ) { ++ /* one last subraction */ ++ MP_SUB_BORROW(r0, 1, r0, 0, carry); ++ MP_SUB_BORROW(r1, 0, r1, carry, carry); ++ r2 = r3 = 0; ++ } ++ ++ ++ if (a != r) { ++ MP_CHECKOK(s_mp_pad(r, 4)); ++ } ++ /* 
set the lower words of r */ ++ MP_SIGN(r) = MP_ZPOS; ++ MP_USED(r) = 4; ++ MP_DIGIT(r, 3) = r3; ++ MP_DIGIT(r, 2) = r2; ++ MP_DIGIT(r, 1) = r1; ++ MP_DIGIT(r, 0) = r0; ++#endif ++ } ++ ++ CLEANUP: ++ return res; ++} ++ ++/* Compute the square of polynomial a, reduce modulo p224. Store the ++ * result in r. r could be a. Uses optimized modular reduction for p224. ++ */ ++mp_err ++ec_GFp_nistp224_sqr(const mp_int *a, mp_int *r, const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ ++ MP_CHECKOK(mp_sqr(a, r)); ++ MP_CHECKOK(ec_GFp_nistp224_mod(r, r, meth)); ++ CLEANUP: ++ return res; ++} ++ ++/* Compute the product of two polynomials a and b, reduce modulo p224. ++ * Store the result in r. r could be a or b; a could be b. Uses ++ * optimized modular reduction for p224. */ ++mp_err ++ec_GFp_nistp224_mul(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ ++ MP_CHECKOK(mp_mul(a, b, r)); ++ MP_CHECKOK(ec_GFp_nistp224_mod(r, r, meth)); ++ CLEANUP: ++ return res; ++} ++ ++/* Divides two field elements. If a is NULL, then returns the inverse of ++ * b. */ ++mp_err ++ec_GFp_nistp224_div(const mp_int *a, const mp_int *b, mp_int *r, ++ const GFMethod *meth) ++{ ++ mp_err res = MP_OKAY; ++ mp_int t; ++ ++ /* If a is NULL, then return the inverse of b, otherwise return a/b. */ ++ if (a == NULL) { ++ return mp_invmod(b, &meth->irr, r); ++ } else { ++ /* MPI doesn't support divmod, so we implement it using invmod and ++ * mulmod. */ ++ MP_CHECKOK(mp_init(&t, FLAG(b))); ++ MP_CHECKOK(mp_invmod(b, &meth->irr, &t)); ++ MP_CHECKOK(mp_mul(a, &t, r)); ++ MP_CHECKOK(ec_GFp_nistp224_mod(r, r, meth)); ++ CLEANUP: ++ mp_clear(&t); ++ return res; ++ } ++} ++ ++/* Wire in fast field arithmetic and precomputation of base point for ++ * named curves. 
*/ ++mp_err ++ec_group_set_gfp224(ECGroup *group, ECCurveName name) ++{ ++ if (name == ECCurve_NIST_P224) { ++ group->meth->field_mod = &ec_GFp_nistp224_mod; ++ group->meth->field_mul = &ec_GFp_nistp224_mul; ++ group->meth->field_sqr = &ec_GFp_nistp224_sqr; ++ group->meth->field_div = &ec_GFp_nistp224_div; ++ } ++ return MP_OKAY; ++} +diff -uNr openjdk/jdk/src/share/native/sun/security/ec/impl/oid.c afu8u/jdk/src/share/native/sun/security/ec/impl/oid.c +--- openjdk/jdk/src/share/native/sun/security/ec/impl/oid.c 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/src/share/native/sun/security/ec/impl/oid.c 2025-05-06 10:53:46.735633728 +0800 +@@ -69,13 +69,71 @@ + /* NOTE: prime192v1 is the same as secp192r1, prime256v1 is the + * same as secp256r1 + */ ++CONST_OID ansiX962prime192v1[] = { ANSI_X962_GFp_OID, 0x01 }; ++CONST_OID ansiX962prime192v2[] = { ANSI_X962_GFp_OID, 0x02 }; ++CONST_OID ansiX962prime192v3[] = { ANSI_X962_GFp_OID, 0x03 }; ++CONST_OID ansiX962prime239v1[] = { ANSI_X962_GFp_OID, 0x04 }; ++CONST_OID ansiX962prime239v2[] = { ANSI_X962_GFp_OID, 0x05 }; ++CONST_OID ansiX962prime239v3[] = { ANSI_X962_GFp_OID, 0x06 }; + CONST_OID ansiX962prime256v1[] = { ANSI_X962_GFp_OID, 0x07 }; + + /* SECG prime curve OIDs */ ++CONST_OID secgECsecp112r1[] = { SECG_OID, 0x06 }; ++CONST_OID secgECsecp112r2[] = { SECG_OID, 0x07 }; ++CONST_OID secgECsecp128r1[] = { SECG_OID, 0x1c }; ++CONST_OID secgECsecp128r2[] = { SECG_OID, 0x1d }; ++CONST_OID secgECsecp160k1[] = { SECG_OID, 0x09 }; ++CONST_OID secgECsecp160r1[] = { SECG_OID, 0x08 }; ++CONST_OID secgECsecp160r2[] = { SECG_OID, 0x1e }; ++CONST_OID secgECsecp192k1[] = { SECG_OID, 0x1f }; ++CONST_OID secgECsecp224k1[] = { SECG_OID, 0x20 }; ++CONST_OID secgECsecp224r1[] = { SECG_OID, 0x21 }; + CONST_OID secgECsecp256k1[] = { SECG_OID, 0x0a }; + CONST_OID secgECsecp384r1[] = { SECG_OID, 0x22 }; + CONST_OID secgECsecp521r1[] = { SECG_OID, 0x23 }; + ++/* SECG characterisitic two curve OIDs */ ++CONST_OID secgECsect113r1[] = 
{SECG_OID, 0x04 }; ++CONST_OID secgECsect113r2[] = {SECG_OID, 0x05 }; ++CONST_OID secgECsect131r1[] = {SECG_OID, 0x16 }; ++CONST_OID secgECsect131r2[] = {SECG_OID, 0x17 }; ++CONST_OID secgECsect163k1[] = {SECG_OID, 0x01 }; ++CONST_OID secgECsect163r1[] = {SECG_OID, 0x02 }; ++CONST_OID secgECsect163r2[] = {SECG_OID, 0x0f }; ++CONST_OID secgECsect193r1[] = {SECG_OID, 0x18 }; ++CONST_OID secgECsect193r2[] = {SECG_OID, 0x19 }; ++CONST_OID secgECsect233k1[] = {SECG_OID, 0x1a }; ++CONST_OID secgECsect233r1[] = {SECG_OID, 0x1b }; ++CONST_OID secgECsect239k1[] = {SECG_OID, 0x03 }; ++CONST_OID secgECsect283k1[] = {SECG_OID, 0x10 }; ++CONST_OID secgECsect283r1[] = {SECG_OID, 0x11 }; ++CONST_OID secgECsect409k1[] = {SECG_OID, 0x24 }; ++CONST_OID secgECsect409r1[] = {SECG_OID, 0x25 }; ++CONST_OID secgECsect571k1[] = {SECG_OID, 0x26 }; ++CONST_OID secgECsect571r1[] = {SECG_OID, 0x27 }; ++ ++/* ANSI X9.62 characteristic two curve OIDs */ ++CONST_OID ansiX962c2pnb163v1[] = { ANSI_X962_GF2m_OID, 0x01 }; ++CONST_OID ansiX962c2pnb163v2[] = { ANSI_X962_GF2m_OID, 0x02 }; ++CONST_OID ansiX962c2pnb163v3[] = { ANSI_X962_GF2m_OID, 0x03 }; ++CONST_OID ansiX962c2pnb176v1[] = { ANSI_X962_GF2m_OID, 0x04 }; ++CONST_OID ansiX962c2tnb191v1[] = { ANSI_X962_GF2m_OID, 0x05 }; ++CONST_OID ansiX962c2tnb191v2[] = { ANSI_X962_GF2m_OID, 0x06 }; ++CONST_OID ansiX962c2tnb191v3[] = { ANSI_X962_GF2m_OID, 0x07 }; ++CONST_OID ansiX962c2onb191v4[] = { ANSI_X962_GF2m_OID, 0x08 }; ++CONST_OID ansiX962c2onb191v5[] = { ANSI_X962_GF2m_OID, 0x09 }; ++CONST_OID ansiX962c2pnb208w1[] = { ANSI_X962_GF2m_OID, 0x0a }; ++CONST_OID ansiX962c2tnb239v1[] = { ANSI_X962_GF2m_OID, 0x0b }; ++CONST_OID ansiX962c2tnb239v2[] = { ANSI_X962_GF2m_OID, 0x0c }; ++CONST_OID ansiX962c2tnb239v3[] = { ANSI_X962_GF2m_OID, 0x0d }; ++CONST_OID ansiX962c2onb239v4[] = { ANSI_X962_GF2m_OID, 0x0e }; ++CONST_OID ansiX962c2onb239v5[] = { ANSI_X962_GF2m_OID, 0x0f }; ++CONST_OID ansiX962c2pnb272w1[] = { ANSI_X962_GF2m_OID, 0x10 }; ++CONST_OID 
ansiX962c2pnb304w1[] = { ANSI_X962_GF2m_OID, 0x11 }; ++CONST_OID ansiX962c2tnb359v1[] = { ANSI_X962_GF2m_OID, 0x12 }; ++CONST_OID ansiX962c2pnb368w1[] = { ANSI_X962_GF2m_OID, 0x13 }; ++CONST_OID ansiX962c2tnb431r1[] = { ANSI_X962_GF2m_OID, 0x14 }; ++ + #define OI(x) { siDEROID, (unsigned char *)x, sizeof x } + #ifndef SECOID_NO_STRINGS + #define OD(oid,tag,desc,mech,ext) { OI(oid), tag, desc, mech, ext } +@@ -96,18 +154,30 @@ + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, ++ OD( ansiX962prime192v1, ECCurve_NIST_P192, ++ "ANSI X9.62 elliptic curve prime192v1 (aka secp192r1, NIST P-192)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962prime192v2, ECCurve_X9_62_PRIME_192V2, ++ "ANSI X9.62 elliptic curve prime192v2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962prime192v3, ECCurve_X9_62_PRIME_192V3, ++ "ANSI X9.62 elliptic curve prime192v3", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962prime239v1, ECCurve_X9_62_PRIME_239V1, ++ "ANSI X9.62 elliptic curve prime239v1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962prime239v2, ECCurve_X9_62_PRIME_239V2, ++ "ANSI X9.62 elliptic curve prime239v2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( 
ansiX962prime239v3, ECCurve_X9_62_PRIME_239V3, ++ "ANSI X9.62 elliptic curve prime239v3", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), + OD( ansiX962prime256v1, ECCurve_NIST_P256, + "ANSI X9.62 elliptic curve prime256v1 (aka secp256r1, NIST P-256)", + CKM_INVALID_MECHANISM, +@@ -118,24 +188,42 @@ + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, ++ OD( secgECsect163k1, ECCurve_NIST_K163, ++ "SECG elliptic curve sect163k1 (aka NIST K-163)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect163r1, ECCurve_SECG_CHAR2_163R1, ++ "SECG elliptic curve sect163r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect239k1, ECCurve_SECG_CHAR2_239K1, ++ "SECG elliptic curve sect239k1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect113r1, ECCurve_SECG_CHAR2_113R1, ++ "SECG elliptic curve sect113r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( 
secgECsect113r2, ECCurve_SECG_CHAR2_113R2, ++ "SECG elliptic curve sect113r2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp112r1, ECCurve_SECG_PRIME_112R1, ++ "SECG elliptic curve secp112r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp112r2, ECCurve_SECG_PRIME_112R2, ++ "SECG elliptic curve secp112r2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp160r1, ECCurve_SECG_PRIME_160R1, ++ "SECG elliptic curve secp160r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp160k1, ECCurve_SECG_PRIME_160K1, ++ "SECG elliptic curve secp160k1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), + OD( secgECsecp256k1, ECCurve_SECG_PRIME_256K1, + "SECG elliptic curve secp256k1", + CKM_INVALID_MECHANISM, +@@ -148,6 +236,18 @@ + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, ++ OD( secgECsect163r2, ECCurve_NIST_B163, ++ "SECG elliptic curve sect163r2 (aka NIST B-163)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect283k1, ECCurve_NIST_K283, ++ "SECG elliptic curve sect283k1 (aka NIST K-283)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect283r1, ECCurve_NIST_B283, ++ "SECG elliptic curve sect283r1 (aka NIST B-283)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + { { siDEROID, NULL, 0 }, ECCurve_noName, +@@ -156,36 +256,54 @@ + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", 
CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, ++ OD( secgECsect131r1, ECCurve_SECG_CHAR2_131R1, ++ "SECG elliptic curve sect131r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect131r2, ECCurve_SECG_CHAR2_131R2, ++ "SECG elliptic curve sect131r2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect193r1, ECCurve_SECG_CHAR2_193R1, ++ "SECG elliptic curve sect193r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect193r2, ECCurve_SECG_CHAR2_193R2, ++ "SECG elliptic curve 
sect193r2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect233k1, ECCurve_NIST_K233, ++ "SECG elliptic curve sect233k1 (aka NIST K-233)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect233r1, ECCurve_NIST_B233, ++ "SECG elliptic curve sect233r1 (aka NIST B-233)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp128r1, ECCurve_SECG_PRIME_128R1, ++ "SECG elliptic curve secp128r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp128r2, ECCurve_SECG_PRIME_128R2, ++ "SECG elliptic curve secp128r2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp160r2, ECCurve_SECG_PRIME_160R2, ++ "SECG elliptic curve secp160r2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp192k1, ECCurve_SECG_PRIME_192K1, ++ "SECG elliptic curve secp192k1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp224k1, ECCurve_SECG_PRIME_224K1, ++ "SECG elliptic curve secp224k1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsecp224r1, ECCurve_NIST_P224, ++ "SECG elliptic curve secp224r1 (aka NIST P-224)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), + OD( secgECsecp384r1, ECCurve_NIST_P384, + "SECG elliptic curve secp384r1 (aka NIST P-384)", + CKM_INVALID_MECHANISM, +@@ -194,14 +312,22 @@ + "SECG elliptic curve secp521r1 (aka NIST P-521)", + CKM_INVALID_MECHANISM, + INVALID_CERT_EXTENSION ), +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION } ++ OD( secgECsect409k1, ECCurve_NIST_K409, ++ "SECG elliptic curve sect409k1 (aka 
NIST K-409)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect409r1, ECCurve_NIST_B409, ++ "SECG elliptic curve sect409r1 (aka NIST B-409)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect571k1, ECCurve_NIST_K571, ++ "SECG elliptic curve sect571k1 (aka NIST K-571)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( secgECsect571r1, ECCurve_NIST_B571, ++ "SECG elliptic curve sect571r1 (aka NIST B-571)", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ) + }; + + static SECOidData ANSI_oids[] = { +@@ -209,46 +335,78 @@ + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + + /* ANSI X9.62 named elliptic curves (characteristic two field) */ ++ OD( ansiX962c2pnb163v1, ECCurve_X9_62_CHAR2_PNB163V1, ++ "ANSI X9.62 elliptic curve c2pnb163v1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2pnb163v2, ECCurve_X9_62_CHAR2_PNB163V2, ++ "ANSI X9.62 elliptic curve c2pnb163v2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2pnb163v3, ECCurve_X9_62_CHAR2_PNB163V3, ++ "ANSI X9.62 elliptic curve c2pnb163v3", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2pnb176v1, ECCurve_X9_62_CHAR2_PNB176V1, ++ "ANSI X9.62 elliptic curve c2pnb176v1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb191v1, ECCurve_X9_62_CHAR2_TNB191V1, ++ "ANSI X9.62 elliptic curve c2tnb191v1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb191v2, ECCurve_X9_62_CHAR2_TNB191V2, ++ "ANSI X9.62 elliptic curve c2tnb191v2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb191v3, ECCurve_X9_62_CHAR2_TNB191V3, ++ "ANSI X9.62 elliptic curve c2tnb191v3", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, 
INVALID_CERT_EXTENSION }, ++ OD( ansiX962c2pnb208w1, ECCurve_X9_62_CHAR2_PNB208W1, ++ "ANSI X9.62 elliptic curve c2pnb208w1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb239v1, ECCurve_X9_62_CHAR2_TNB239V1, ++ "ANSI X9.62 elliptic curve c2tnb239v1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb239v2, ECCurve_X9_62_CHAR2_TNB239V2, ++ "ANSI X9.62 elliptic curve c2tnb239v2", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb239v3, ECCurve_X9_62_CHAR2_TNB239V3, ++ "ANSI X9.62 elliptic curve c2tnb239v3", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, + { { siDEROID, NULL, 0 }, ECCurve_noName, + "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 
}, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION }, +- { { siDEROID, NULL, 0 }, ECCurve_noName, +- "Unknown OID", CKM_INVALID_MECHANISM, INVALID_CERT_EXTENSION } ++ OD( ansiX962c2pnb272w1, ECCurve_X9_62_CHAR2_PNB272W1, ++ "ANSI X9.62 elliptic curve c2pnb272w1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2pnb304w1, ECCurve_X9_62_CHAR2_PNB304W1, ++ "ANSI X9.62 elliptic curve c2pnb304w1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb359v1, ECCurve_X9_62_CHAR2_TNB359V1, ++ "ANSI X9.62 elliptic curve c2tnb359v1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2pnb368w1, ECCurve_X9_62_CHAR2_PNB368W1, ++ "ANSI X9.62 elliptic curve c2pnb368w1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ), ++ OD( ansiX962c2tnb431r1, ECCurve_X9_62_CHAR2_TNB431R1, ++ "ANSI X9.62 elliptic curve c2tnb431r1", ++ CKM_INVALID_MECHANISM, ++ INVALID_CERT_EXTENSION ) + }; + + SECOidData * +diff -uNr openjdk/jdk/src/solaris/bin/sw64/jvm.cfg afu8u/jdk/src/solaris/bin/sw64/jvm.cfg +--- openjdk/jdk/src/solaris/bin/sw64/jvm.cfg 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/src/solaris/bin/sw64/jvm.cfg 2025-05-06 10:53:46.755633728 +0800 +@@ -0,0 +1,40 @@ ++# Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved. ++# Copyright (c) 2015, 2018, Wuxi Institute of Advanced Technology. All rights reserved. ++# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
++# ++# This code is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License version 2 only, as ++# published by the Free Software Foundation. Oracle designates this ++# particular file as subject to the "Classpath" exception as provided ++# by Oracle in the LICENSE file that accompanied this code. ++# ++# This code is distributed in the hope that it will be useful, but WITHOUT ++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++# version 2 for more details (a copy is included in the LICENSE file that ++# accompanied this code). ++# ++# You should have received a copy of the GNU General Public License version ++# 2 along with this work; if not, write to the Free Software Foundation, ++# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. ++# ++# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++# or visit www.oracle.com if you need additional information or have any ++# questions. ++# ++# ++# List of JVMs that can be used as an option to java, javac, etc. ++# Order is important -- first in this list is the default JVM. ++# NOTE that this both this file and its format are UNSUPPORTED and ++# WILL GO AWAY in a future release. ++# ++# You may also select a JVM in an arbitrary location with the ++# "-XXaltjvm=" option, but that too is unsupported ++# and may not be available in a future release. 
++# ++-server KNOWN ++-client IGNORE ++-hotspot ERROR ++-classic WARN ++-native ERROR ++-green ERROR +diff -uNr openjdk/jdk/src/solaris/classes/java/io/UnixFileSystem.java afu8u/jdk/src/solaris/classes/java/io/UnixFileSystem.java +--- openjdk/jdk/src/solaris/classes/java/io/UnixFileSystem.java 2023-04-19 05:53:05.000000000 +0800 ++++ afu8u/jdk/src/solaris/classes/java/io/UnixFileSystem.java 2025-05-06 11:13:08.391672959 +0800 +@@ -34,6 +34,7 @@ + private final char slash; + private final char colon; + private final String javaHome; ++ private final String userDir; + + public UnixFileSystem() { + slash = AccessController.doPrivileged( +@@ -42,6 +43,8 @@ + new GetPropertyAction("path.separator")).charAt(0); + javaHome = AccessController.doPrivileged( + new GetPropertyAction("java.home")); ++ userDir = AccessController.doPrivileged( ++ new GetPropertyAction("user.dir")); + } + + +@@ -135,7 +138,11 @@ + + public String resolve(File f) { + if (isAbsolute(f)) return f.getPath(); +- return resolve(System.getProperty("user.dir"), f.getPath()); ++ SecurityManager sm = System.getSecurityManager(); ++ if (sm != null) { ++ sm.checkPropertyAccess("user.dir"); ++ } ++ return resolve(userDir, f.getPath()); + } + + // Caches for canonicalization results to improve startup performance. +diff -uNr openjdk/jdk/src/windows/classes/java/io/WinNTFileSystem.java afu8u/jdk/src/windows/classes/java/io/WinNTFileSystem.java +--- openjdk/jdk/src/windows/classes/java/io/WinNTFileSystem.java 2023-04-19 05:53:05.000000000 +0800 ++++ afu8u/jdk/src/windows/classes/java/io/WinNTFileSystem.java 2025-05-06 11:13:08.395672960 +0800 +@@ -53,6 +53,7 @@ + private final char slash; + private final char altSlash; + private final char semicolon; ++ private final String userDir; + + // Whether to enable alternative data streams (ADS) by suppressing + // checking the path for invalid characters, in particular ":". 
+@@ -74,6 +75,8 @@ + semicolon = AccessController.doPrivileged( + new GetPropertyAction("path.separator")).charAt(0); + altSlash = (this.slash == '\\') ? '/' : '\\'; ++ userDir = AccessController.doPrivileged( ++ new GetPropertyAction("user.dir")); + } + + private boolean isSlash(char c) { +@@ -400,7 +403,11 @@ + private String getUserPath() { + /* For both compatibility and security, + we must look this up every time */ +- return normalize(System.getProperty("user.dir")); ++ SecurityManager sm = System.getSecurityManager(); ++ if (sm != null) { ++ sm.checkPropertyAccess("user.dir"); ++ } ++ return normalize(userDir); + } + + private String getDrive(String path) { +diff -uNr openjdk/jdk/test/java/io/File/UserDirChangedTest.java afu8u/jdk/test/java/io/File/UserDirChangedTest.java +--- openjdk/jdk/test/java/io/File/UserDirChangedTest.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/jdk/test/java/io/File/UserDirChangedTest.java 2025-05-06 11:13:08.403672960 +0800 +@@ -0,0 +1,51 @@ ++/* ++ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* @test ++ @bug 8194154 ++ @summary Test changing property user.dir on impacting getCanonicalPath ++ @run main/othervm UserDirChangedTest ++ */ ++ ++import java.io.File; ++ ++public class UserDirChangedTest { ++ public static void main(String[] args) throws Exception { ++ String keyUserDir = "user.dir"; ++ String userDirNew = "/home/a/b/c/"; ++ String fileName = "./a"; ++ ++ String userDir = System.getProperty(keyUserDir); ++ File file = new File(fileName); ++ String canFilePath = file.getCanonicalPath(); ++ ++ // now reset user.dir, this will cause crash on linux without bug 8194154 fixed. ++ System.setProperty(keyUserDir, userDirNew); ++ String newCanFilePath = file.getCanonicalPath(); ++ System.out.format("%24s %48s%n", "Canonical Path = ", canFilePath); ++ System.out.format("%24s %48s%n", "new Canonical Path = ", newCanFilePath); ++ if (!canFilePath.equals(newCanFilePath)) { ++ throw new RuntimeException("Changing property user.dir should have no effect on getCanonicalPath"); ++ } ++ } ++} +diff -uNr openjdk/jdk/test/java/io/FileOutputStream/OpenNUL.java afu8u/jdk/test/java/io/FileOutputStream/OpenNUL.java +--- openjdk/jdk/test/java/io/FileOutputStream/OpenNUL.java 2023-04-19 05:53:06.000000000 +0800 ++++ afu8u/jdk/test/java/io/FileOutputStream/OpenNUL.java 2025-05-06 11:13:08.403672960 +0800 +@@ -26,9 +26,7 @@ + * @bug 8285445 + * @requires (os.family == "windows") + * @summary Verify behavior of opening "NUL:" with ADS enabled and disabled. 
+- * @run main/othervm OpenNUL + * @run main/othervm -Djdk.io.File.enableADS OpenNUL +- * @run main/othervm -Djdk.io.File.enableADS=FalsE OpenNUL + * @run main/othervm -Djdk.io.File.enableADS=true OpenNUL + */ + +@@ -38,7 +36,7 @@ + + public class OpenNUL { + public static void main(String args[]) throws IOException { +- String enableADS = System.getProperty("jdk.io.File.enableADS", "true"); ++ String enableADS = System.getProperty("jdk.io.File.enableADS"); + boolean fails = enableADS.equalsIgnoreCase(Boolean.FALSE.toString()); + + FileOutputStream fos; +diff -uNr openjdk/jdk/test/jdk/jfr/event/os/TestCPUInformation.java afu8u/jdk/test/jdk/jfr/event/os/TestCPUInformation.java +--- openjdk/jdk/test/jdk/jfr/event/os/TestCPUInformation.java 2023-04-19 05:53:06.000000000 +0800 ++++ afu8u/jdk/test/jdk/jfr/event/os/TestCPUInformation.java 2025-05-06 10:53:47.419633751 +0800 +@@ -54,8 +54,8 @@ + Events.assertField(event, "hwThreads").atLeast(1); + Events.assertField(event, "cores").atLeast(1); + Events.assertField(event, "sockets").atLeast(1); +- Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "sparc", "ARM", "PPC", "PowerPC", "AArch64", "s390"); +- Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "SPARC", "ARM", "PPC", "PowerPC", "AArch64", "s390"); ++ Events.assertField(event, "cpu").containsAny("Intel", "AMD", "Unknown x86", "sparc", "ARM", "PPC", "PowerPC", "AArch64", "s390", "sw_64"); ++ Events.assertField(event, "description").containsAny("Intel", "AMD", "Unknown x86", "SPARC", "ARM", "PPC", "PowerPC", "AArch64", "s390", "shenwei"); + } + } + } +diff -uNr openjdk/jdk/test/sun/security/ec/TestEC.java afu8u/jdk/test/sun/security/ec/TestEC.java +--- openjdk/jdk/test/sun/security/ec/TestEC.java 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/test/sun/security/ec/TestEC.java 2025-05-06 10:53:47.523633754 +0800 +@@ -36,7 +36,7 @@ + * @library ../../../java/security/testlibrary + * @library 
../../../javax/net/ssl/TLSCommon + * @compile -XDignore.symbol.file TestEC.java +- * @run main/othervm -Djdk.tls.namedGroups="secp256r1" TestEC ++ * @run main/othervm -Djdk.tls.namedGroups="secp256r1,sect193r1" TestEC + */ + + import java.security.NoSuchProviderException; +diff -uNr openjdk/jdk/test/sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java afu8u/jdk/test/sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java +--- openjdk/jdk/test/sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java 2023-04-19 05:53:10.000000000 +0800 ++++ afu8u/jdk/test/sun/security/pkcs11/sslecc/ClientJSSEServerJSSE.java 2025-05-06 10:53:47.607633757 +0800 +@@ -33,7 +33,7 @@ + * @author Andreas Sterbenz + * @library /lib .. ../../../../javax/net/ssl/TLSCommon + * @library ../../../../java/security/testlibrary +- * @run main/othervm -Djdk.tls.namedGroups="secp256r1" ++ * @run main/othervm -Djdk.tls.namedGroups="secp256r1,sect193r1" + * ClientJSSEServerJSSE + * @run main/othervm -Djdk.tls.namedGroups="secp256r1,sect193r1" + * ClientJSSEServerJSSE sm policy +diff -uNr openjdk/jdk/THIRD_PARTY_README afu8u/jdk/THIRD_PARTY_README +--- openjdk/jdk/THIRD_PARTY_README 2023-04-19 05:53:04.000000000 +0800 ++++ afu8u/jdk/THIRD_PARTY_README 2025-05-06 10:53:45.631633690 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? 
Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. 
The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. + + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/langtools/test/tools/javadoc/api/basic/TagletPathTest.java afu8u/langtools/test/tools/javadoc/api/basic/TagletPathTest.java +--- openjdk/langtools/test/tools/javadoc/api/basic/TagletPathTest.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/langtools/test/tools/javadoc/api/basic/TagletPathTest.java 2025-05-06 10:53:47.935633768 +0800 +@@ -0,0 +1,106 @@ ++/* ++ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. ++ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. ++ * ++ * This code is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 only, as ++ * published by the Free Software Foundation. ++ * ++ * This code is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * version 2 for more details (a copy is included in the LICENSE file that ++ * accompanied this code). ++ * ++ * You should have received a copy of the GNU General Public License version ++ * 2 along with this work; if not, write to the Free Software Foundation, ++ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
++ * ++ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA ++ * or visit www.oracle.com if you need additional information or have any ++ * questions. ++ */ ++ ++/* ++ * @test ++ * @bug 6493690 ++ * @summary javadoc should have a javax.tools.Tool service provider ++ * @build APITest ++ * @run main TagletPathTest ++ */ ++ ++import java.io.File; ++import java.io.PrintWriter; ++import java.io.StringWriter; ++import java.nio.charset.Charset; ++import java.nio.file.Files; ++import java.util.Arrays; ++import java.util.List; ++import javax.tools.DocumentationTool; ++import javax.tools.DocumentationTool.DocumentationTask; ++import javax.tools.JavaCompiler; ++import javax.tools.JavaFileObject; ++import javax.tools.StandardJavaFileManager; ++import javax.tools.StandardLocation; ++import javax.tools.ToolProvider; ++ ++/** ++ * Tests for locating a doclet via the file manager's DOCLET_PATH. ++ */ ++public class TagletPathTest extends APITest { ++ public static void main(String... args) throws Exception { ++ new TagletPathTest().run(); ++ } ++ ++ /** ++ * Verify that a taglet can be specified, and located via ++ * the file manager's TAGLET_PATH. 
++ */ ++ @Test ++ public void testTagletPath() throws Exception { ++ File testSrc = new File(System.getProperty("test.src")); ++ File tagletSrcFile = new File(testSrc, "taglets/UnderlineTaglet.java"); ++ File tagletDir = getOutDir("classes"); ++ JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); ++ StandardJavaFileManager cfm = compiler.getStandardFileManager(null, null, null); ++ cfm.setLocation(StandardLocation.CLASS_OUTPUT, Arrays.asList(tagletDir)); ++ Iterable cfiles = cfm.getJavaFileObjects(tagletSrcFile); ++ if (!compiler.getTask(null, cfm, null, null, null, cfiles).call()) ++ throw new Exception("cannot compile taglet"); ++ ++ JavaFileObject srcFile = createSimpleJavaFileObject("pkg/C", testSrcText); ++ DocumentationTool tool = ToolProvider.getSystemDocumentationTool(); ++ StandardJavaFileManager fm = tool.getStandardFileManager(null, null, null); ++ File outDir = getOutDir("api"); ++ fm.setLocation(DocumentationTool.Location.DOCUMENTATION_OUTPUT, Arrays.asList(outDir)); ++ fm.setLocation(DocumentationTool.Location.TAGLET_PATH, Arrays.asList(tagletDir)); ++ Iterable files = Arrays.asList(srcFile); ++ Iterable options = Arrays.asList("-taglet", "UnderlineTaglet"); ++ StringWriter sw = new StringWriter(); ++ PrintWriter pw = new PrintWriter(sw); ++ DocumentationTask t = tool.getTask(pw, fm, null, null, options, files); ++ boolean ok = t.call(); ++ String out = sw.toString(); ++ System.err.println(">>" + out + "<<"); ++ if (ok) { ++ File f = new File(outDir, "pkg/C.html"); ++ List doc = Files.readAllLines(f.toPath(), Charset.defaultCharset()); ++ for (String line: doc) { ++ if (line.contains("" + TEST_STRING + "")) { ++ System.err.println("taglet executed as expected"); ++ return; ++ } ++ } ++ error("expected text not found in output " + f); ++ } else { ++ error("task failed"); ++ } ++ } ++ ++ static final String TEST_STRING = "xyzzy"; ++ static final String testSrcText = ++ "package pkg;\n" + ++ "/** {@underline " + TEST_STRING + "} */\n" + ++ 
"public class C { }"; ++} ++ +diff -uNr openjdk/langtools/test/tools/javadoc/api/basic/taglets/UnderlineTaglet.java afu8u/langtools/test/tools/javadoc/api/basic/taglets/UnderlineTaglet.java +--- openjdk/langtools/test/tools/javadoc/api/basic/taglets/UnderlineTaglet.java 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/langtools/test/tools/javadoc/api/basic/taglets/UnderlineTaglet.java 2025-05-06 10:53:47.935633768 +0800 +@@ -0,0 +1,152 @@ ++/* ++ * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or ++ * without modification, are permitted provided that the following ++ * conditions are met: ++ * ++ * -Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * ++ * -Redistribution in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * ++ * Neither the name of Oracle nor the names of ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * This software is provided "AS IS," without a warranty of any ++ * kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND ++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY ++ * EXCLUDED. SUN AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY ++ * DAMAGES OR LIABILITIES SUFFERED BY LICENSEE AS A RESULT OF OR ++ * RELATING TO USE, MODIFICATION OR DISTRIBUTION OF THE SOFTWARE OR ++ * ITS DERIVATIVES. 
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE ++ * FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, ++ * SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER ++ * CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF ++ * THE USE OF OR INABILITY TO USE SOFTWARE, EVEN IF SUN HAS BEEN ++ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. ++ * ++ * You acknowledge that Software is not designed, licensed or ++ * intended for use in the design, construction, operation or ++ * maintenance of any nuclear facility. ++ */ ++ ++import com.sun.tools.doclets.Taglet; ++import com.sun.javadoc.*; ++import java.util.Map; ++ ++/** ++ * A sample Inline Taglet representing {@underline ...}. This tag can ++ * be used in any kind of {@link com.sun.javadoc.Doc}. ++ * The text is underlined. For example, ++ * "@underline UNDERLINE ME" would be shown as: UNDERLINE ME. ++ * ++ * @author Jamie Ho ++ * @since 1.4 ++ */ ++ ++public class UnderlineTaglet implements Taglet { ++ ++ private static final String NAME = "underline"; ++ ++ /** ++ * Return the name of this custom tag. 
++ */ ++ public String getName() { ++ return NAME; ++ } ++ ++ /** ++ * @return true since this tag can be used in a field ++ * doc comment ++ */ ++ public boolean inField() { ++ return true; ++ } ++ ++ /** ++ * @return true since this tag can be used in a constructor ++ * doc comment ++ */ ++ public boolean inConstructor() { ++ return true; ++ } ++ ++ /** ++ * @return true since this tag can be used in a method ++ * doc comment ++ */ ++ public boolean inMethod() { ++ return true; ++ } ++ ++ /** ++ * @return true since this tag can be used in an overview ++ * doc comment ++ */ ++ public boolean inOverview() { ++ return true; ++ } ++ ++ /** ++ * @return true since this tag can be used in a package ++ * doc comment ++ */ ++ public boolean inPackage() { ++ return true; ++ } ++ ++ /** ++ * @return true since this ++ */ ++ public boolean inType() { ++ return true; ++ } ++ ++ /** ++ * Will return true since this is an inline tag. ++ * @return true since this is an inline tag. ++ */ ++ ++ public boolean isInlineTag() { ++ return true; ++ } ++ ++ /** ++ * Register this Taglet. ++ * @param tagletMap the map to register this tag to. ++ */ ++ public static void register(Map tagletMap) { ++ UnderlineTaglet tag = new UnderlineTaglet(); ++ Taglet t = (Taglet) tagletMap.get(tag.getName()); ++ if (t != null) { ++ tagletMap.remove(tag.getName()); ++ } ++ tagletMap.put(tag.getName(), tag); ++ } ++ ++ /** ++ * Given the Tag representation of this custom ++ * tag, return its string representation. ++ * @param tag he Tag representation of this custom tag. ++ */ ++ public String toString(Tag tag) { ++ return "" + tag.text() + ""; ++ } ++ ++ /** ++ * This method should not be called since arrays of inline tags do not ++ * exist. Method {@link #tostring(Tag)} should be used to convert this ++ * inline tag to a string. ++ * @param tags the array of Tags representing of this custom tag. 
++ */ ++ public String toString(Tag[] tags) { ++ return null; ++ } ++} ++ +diff -uNr openjdk/langtools/THIRD_PARTY_README afu8u/langtools/THIRD_PARTY_README +--- openjdk/langtools/THIRD_PARTY_README 2023-04-19 05:53:08.000000000 +0800 ++++ afu8u/langtools/THIRD_PARTY_README 2025-05-06 10:53:47.671633759 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. 
Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/nashorn/THIRD_PARTY_README afu8u/nashorn/THIRD_PARTY_README +--- openjdk/nashorn/THIRD_PARTY_README 2023-04-19 05:53:08.000000000 +0800 ++++ afu8u/nashorn/THIRD_PARTY_README 2025-05-06 10:53:47.951633769 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. 
Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/native_configure_sw64 afu8u/native_configure_sw64 +--- openjdk/native_configure_sw64 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/native_configure_sw64 2025-05-06 10:53:48.079633773 +0800 +@@ -0,0 +1,27 @@ ++#!/bin/bash ++#build swjdk8 ++# ++# 1. bash native_configure_sw64 release ++# if you want to build debug jdk use "bash native_configure slowdebug" ++# 2. make all ++ ++#gitnum=`git log| head -n 1 |cut -b 8-15` ++#bdate=`date +%Y-%m-%d` ++#topdir=`pwd` ++#buildtag=`echo $USER`.`basename $topdir`.$bdate.$gitnum.$patch ++#patch=SP.4 ++buildtag=sw1.3.1 ++updatever=312 ++level=${1?usage: $0 release/slowdebug} ++#set user-release-suffix and update-version by version_patch.sh ++#bash version_patch.sh ++# $1: debug level (release, fastdebug, slowdebug) ++ bash configure \ ++ --with-user-release-suffix=$buildtag \ ++ --with-update-version=$updatever \ ++ --disable-zip-debug-info \ ++ --with-debug-level=$level \ ++ --disable-ccache \ ++ --enable-hotspot-test-in-build \ ++ --with-milestone=fcs \ ++ --with-build-number=b00 +diff -uNr openjdk/README_SW64 afu8u/README_SW64 +--- openjdk/README_SW64 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/README_SW64 2025-05-06 10:53:44.691633659 +0800 +@@ -0,0 +1,18 @@ ++README: ++ ++Simple Build Instructions: ++ ++ 1. If you don't have a swjdk8 install it, ++ Add the /bin directory of this installation to your PATH environment variable. ++ ++ 2. Configure the build: ++ bash ./native_configure_sw64 release ++ If you want to build debug swjdk use "bash native_configure_sw64 slowdebug". ++ ++ 3. Build the OpenJDK: ++ make all ++ The resulting JDK image should be found in build/*/images/j2sdk-image. ++If the build environment no git, you must do "bash version_patch.sh" on the environment has git to get the git id, ++then copy the swjdk8u src to the target envirinment. ++ 1. 
bash configure --disable-zip-debug-info --with-debug-level=release(slowdebug) ++ 2. make all +diff -uNr openjdk/READYJ afu8u/READYJ +--- openjdk/READYJ 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/READYJ 2025-05-06 10:53:44.691633659 +0800 +@@ -0,0 +1,16 @@ ++Read me for cross compile, for native compile, see README_SW64 ++ ++configure: ++>bash cross_swcompile_clean ++this will gen release&slowdebug n&c configuration in build dir ++ ++gen ide: ++>bear make CONF=sw64-slowdebug-c LOG=debug hotspot ++then open compile_commands.json ++ ++make: ++mount and make sure dirs are corresponds and same ++modify ip/n ip/c in cross_mk to use your c or n version sw ++copy a j2sdk-image-sw-n and j2sdk-image-sw-c to your build root, then ++>make -f cross_mk c lvl=release ++this will make hotspot for sw release and copy result to j2sdk-image-sw-c, same for n and slowdebug +diff -uNr openjdk/.src-rev afu8u/.src-rev +--- openjdk/.src-rev 2023-04-19 05:53:17.000000000 +0800 ++++ afu8u/.src-rev 1970-01-01 08:00:00.000000000 +0800 +@@ -1 +0,0 @@ +-.:git:a94c8719ef9f+ +diff -uNr openjdk/test/ProblemList.jtx afu8u/test/ProblemList.jtx +--- openjdk/test/ProblemList.jtx 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/test/ProblemList.jtx 2025-05-06 10:53:48.079633773 +0800 +@@ -0,0 +1,2152 @@ ++##SW not support yet ++compiler/arguments/TestUseCountLeadingZerosInstructionOnUnsupportedCPU.java ++compiler/arguments/TestUseCountTrailingZerosInstructionOnUnsupportedCPU.java ++compiler/c2/cr6340864/TestByteVect.java ++compiler/c2/cr6340864/TestDoubleVect.java ++compiler/c2/cr6340864/TestFloatVect.java ++compiler/c2/cr6340864/TestIntVect.java ++compiler/c2/cr6340864/TestLongVect.java ++compiler/c2/cr6340864/TestShortVect.java ++compiler/codegen/TestCharVect2.java ++compiler/tiered/Level2RecompilationTest.java ++ ++# jdk_X11_error ++com/sun/awt/SecurityWarning/GetSizeShouldNotReturnZero.java ++com/sun/awt/Translucency/WindowOpacity.java ++com/sun/java/swing/plaf/gtk/4928019/bug4928019.java 
++com/sun/java/swing/plaf/gtk/Test6635110.java ++com/sun/java/swing/plaf/gtk/Test6963870.java ++com/sun/java/swing/plaf/windows/8016551/bug8016551.java ++com/sun/java/swing/plaf/windows/Test6824600.java ++com/sun/java/swing/plaf/windows/WindowsRadioButtonUI/7089914/bug7089914.java ++java/awt/AlphaComposite/HeadlessAlphaComposite.java ++java/awt/AlphaComposite/TestAlphaCompositeForNaN.java ++java/awt/applet/Applet/HeadlessApplet.java ++java/awt/appletviewer/IOExceptionIfEncodedURLTest/IOExceptionIfEncodedURLTest.sh ++java/awt/BasicStroke/DashOffset.java ++java/awt/BasicStroke/DashScaleMinWidth.java ++java/awt/BasicStroke/DashStrokeTest.java ++java/awt/BasicStroke/DashZeroWidth.java ++java/awt/Checkbox/SetStateExcessEvent/SetStateExcessEvent.java ++java/awt/Choice/ChoiceKeyEventReaction/ChoiceKeyEventReaction.html ++java/awt/Choice/ChoiceLocationTest/ChoiceLocationTest.java ++java/awt/Choice/ChoiceMouseWheelTest/ChoiceMouseWheelTest.java ++java/awt/Choice/DragMouseOutAndRelease/DragMouseOutAndRelease.java ++java/awt/Choice/GetSizeTest/GetSizeTest.java ++java/awt/Choice/GrabLockTest/GrabLockTest.java ++java/awt/Choice/ItemStateChangeTest/ItemStateChangeTest.java ++java/awt/Choice/PopdownGeneratesMouseEvents/PopdownGeneratesMouseEvents.html ++java/awt/Choice/PopupPosTest/PopupPosTest.html ++java/awt/Choice/RemoveAllShrinkTest/RemoveAllShrinkTest.java ++java/awt/Choice/ResizeAutoClosesChoice/ResizeAutoClosesChoice.java ++java/awt/Choice/SelectCurrentItemTest/SelectCurrentItemTest.html ++java/awt/Choice/UnfocusableCB_ERR/UnfocusableCB_ERR.java ++java/awt/Choice/UnfocusableToplevel/UnfocusableToplevel.java ++java/awt/Color/HeadlessColor.java ++java/awt/Color/OpacityChange/OpacityChange.java ++java/awt/Component/7097771/bug7097771.java ++java/awt/Component/CompEventOnHiddenComponent/CompEventOnHiddenComponent.java ++java/awt/Component/DimensionEncapsulation/DimensionEncapsulation.java ++java/awt/Component/F10TopToplevel/F10TopToplevel.html 
++java/awt/Component/Headless/HeadlessButton.java ++java/awt/Component/Headless/HeadlessCanvas.java ++java/awt/Component/Headless/HeadlessCheckbox.java ++java/awt/Component/Headless/HeadlessChoice.java ++java/awt/Component/Headless/HeadlessComponent.java ++java/awt/Component/Headless/HeadlessContainer.java ++java/awt/Component/Headless/HeadlessDialog.java ++java/awt/Component/Headless/HeadlessFileDialog.java ++java/awt/Component/Headless/HeadlessFrame.java ++java/awt/Component/Headless/HeadlessLabel.java ++java/awt/Component/Headless/HeadlessList.java ++java/awt/Component/Headless/HeadlessPanel.java ++java/awt/Component/Headless/HeadlessScrollbar.java ++java/awt/Component/Headless/HeadlessScrollPane.java ++java/awt/Component/Headless/HeadlessTextArea.java ++java/awt/Component/Headless/HeadlessTextField.java ++java/awt/Component/Headless/HeadlessWindow.java ++java/awt/Component/InsetsEncapsulation/InsetsEncapsulation.java ++java/awt/Component/isLightweightCrash/IsLightweightCrash.java ++java/awt/Component/NativeInLightShow/NativeInLightShow.java ++java/awt/Component/NoUpdateUponShow/NoUpdateUponShow.java ++java/awt/ComponentOrientation/BasicTest.java ++java/awt/ComponentOrientation/BorderTest.java ++java/awt/ComponentOrientation/FlowTest.java ++java/awt/ComponentOrientation/WindowTest.java ++java/awt/Component/PaintAll/PaintAll.java ++java/awt/Component/PrintAllXcheckJNI/PrintAllXcheckJNI.java ++java/awt/Component/Revalidate/Revalidate.java ++java/awt/Component/SetEnabledPerformance/SetEnabledPerformance.java ++java/awt/Component/TreeLockDeadlock/TreeLockDeadlock.java ++java/awt/Container/CheckZOrderChange/CheckZOrderChange.java ++java/awt/Container/ContainerAIOOBE/ContainerAIOOBE.java ++java/awt/Container/isRemoveNotifyNeeded/JInternalFrameTest.java ++java/awt/Container/MoveToOtherScreenTest/MoveToOtherScreenTest.java ++java/awt/Container/ValidateRoot/InvalidateMustRespectValidateRoots.java ++java/awt/Cursor/HeadlessCursor.java 
++java/awt/Cursor/PredefinedPrivate/PredefinedPrivate.java ++java/awt/datatransfer/Clipboard/BasicClipboardTest.java ++java/awt/datatransfer/Clipboard/GetContentsInterruptedTest.java ++java/awt/datatransfer/ClipboardInterVMTest/ClipboardInterVMTest.java ++java/awt/datatransfer/CustomClassLoaderTransferTest/CustomClassLoaderTransferTest.java ++java/awt/datatransfer/DataFlavor/DataFlavorCloneTest/DataFlavorCloneTest.java ++java/awt/datatransfer/DataFlavor/DataFlavorEqualsNullTest.java ++java/awt/datatransfer/DataFlavor/DataFlavorEqualsTest.java ++java/awt/datatransfer/DataFlavor/DataFlavorFileListTest.java ++java/awt/datatransfer/DataFlavor/DataFlavorSerializedTest.java ++java/awt/datatransfer/DataFlavor/DefaultMatchTest.java ++java/awt/datatransfer/DataFlavor/EqualHashCodeTest.java ++java/awt/datatransfer/DataFlavor/EqualsHashCodeSymmetryTest/EqualsHashCodeSymmetryTest.java ++java/awt/datatransfer/DataFlavor/ExternalizeTest.java ++java/awt/datatransfer/DataFlavor/GetReaderForTextIAEForStringSelectionTest.java ++java/awt/datatransfer/DataFlavor/GetReaderForTextNPETest.java ++java/awt/datatransfer/DataFlavor/MimeTypeSerializationTest.java ++java/awt/datatransfer/DataFlavor/NoClassParameterTest.java ++java/awt/datatransfer/DataFlavor/NormalizeMimeTypeParameter.java ++java/awt/datatransfer/DataFlavor/NullDataFlavorTest.java ++java/awt/datatransfer/DataFlavor/ReaderForUnicodeText.java ++java/awt/datatransfer/DataFlavor/SelectBestFlavorNPETest.java ++java/awt/datatransfer/DataFlavor/SelectBestTextFlavorBadArrayTest.java ++java/awt/datatransfer/DataFlavor/ToStringNullPointerTest.java ++java/awt/datatransfer/DragImage/MultiResolutionDragImageTest.java ++java/awt/datatransfer/DragUnicodeBetweenJVMTest/DragUnicodeBetweenJVMTest.html ++java/awt/datatransfer/Headless/HeadlessClipboard.java ++java/awt/datatransfer/Headless/HeadlessDataFlavor.java ++java/awt/datatransfer/Headless/HeadlessSystemFlavorMap.java ++java/awt/datatransfer/HTMLDataFlavors/HTMLDataFlavorTest.java 
++java/awt/datatransfer/ImageTransfer/ImageTransferTest.java ++java/awt/datatransfer/Independence/IndependenceAWTTest.java ++java/awt/datatransfer/Independence/IndependenceSwingTest.java ++java/awt/datatransfer/MissedHtmlAndRtfBug/MissedHtmlAndRtfBug.html ++java/awt/datatransfer/SystemFlavorMap/AddFlavorForNativeTest.java ++java/awt/datatransfer/SystemFlavorMap/AddFlavorTest.java ++java/awt/datatransfer/SystemFlavorMap/AddNativeForFlavorTest.java ++java/awt/datatransfer/SystemFlavorMap/AddNativeTest.java ++java/awt/datatransfer/SystemFlavorMap/DuplicatedNativesTest.java ++java/awt/datatransfer/SystemFlavorMap/DuplicateMappingTest.java ++java/awt/datatransfer/SystemFlavorMap/GetFlavorsForNewNativeTest.java ++java/awt/datatransfer/SystemFlavorMap/GetNativesForFlavorTest.java ++java/awt/datatransfer/SystemFlavorMap/GetNativesForNewFlavorTest.java ++java/awt/datatransfer/SystemFlavorMap/InvalidMapArgumentsTest.java ++java/awt/datatransfer/SystemFlavorMap/ManyFlavorMapTest.java ++java/awt/datatransfer/SystemFlavorMap/MappingGenerationTest.java ++java/awt/datatransfer/SystemFlavorMap/SetDataFlavorsTest.java ++java/awt/datatransfer/SystemFlavorMap/SetFlavorsForNativeTest.java ++java/awt/datatransfer/SystemFlavorMap/SetNativesForFlavor.java ++java/awt/datatransfer/SystemFlavorMap/SetNativesForFlavorTest.java ++java/awt/datatransfer/SystemFlavorMap/SetNativesTest.java ++java/awt/datatransfer/SystemSelection/SystemSelectionAWTTest.java ++java/awt/datatransfer/SystemSelection/SystemSelectionSwingTest.java ++java/awt/Desktop/8064934/bug8064934.java ++java/awt/Desktop/DesktopGtkLoadTest/DesktopGtkLoadTest.java ++java/awt/Desktop/OpenByUNCPathNameTest/OpenByUNCPathNameTest.java ++java/awt/Dialog/CloseDialog/CloseDialogTest.java ++java/awt/Dialog/CrashXCheckJni/CrashXCheckJni.java ++java/awt/Dialog/DialogAboveFrame/DialogAboveFrameTest.java ++java/awt/Dialog/DialogOverflowSizeTest/DialogSizeOverflowTest.java ++java/awt/Dialog/MakeWindowAlwaysOnTop/MakeWindowAlwaysOnTop.java 
++java/awt/Dialog/ModalDialogPermission/ModalDialogPermission.java ++java/awt/Dialog/NonResizableDialogSysMenuResize/NonResizableDialogSysMenuResize.java ++java/awt/Dialog/ValidateOnShow/ValidateOnShow.java ++java/awt/dnd/AcceptDropMultipleTimes/AcceptDropMultipleTimes.java ++java/awt/dnd/BadSerializationTest/BadSerializationTest.java ++java/awt/dnd/Button2DragTest/Button2DragTest.html ++java/awt/dnd/DisposeFrameOnDragCrash/DisposeFrameOnDragTest.java ++java/awt/dnd/DragInterceptorAppletTest/DragInterceptorAppletTest.html ++java/awt/dnd/DragSourceListenerSerializationTest/DragSourceListenerSerializationTest.java ++java/awt/dnd/DropTargetEnterExitTest/ExtraDragEnterTest.java ++java/awt/dnd/DropTargetEnterExitTest/MissedDragExitTest.java ++java/awt/dnd/FileListBetweenJVMsTest/FileListBetweenJVMsTest.html ++java/awt/dnd/ImageDecoratedDnDInOut/ImageDecoratedDnDInOut.html ++java/awt/dnd/ImageDecoratedDnDNegative/ImageDecoratedDnDNegative.html ++java/awt/dnd/ImageTransferTest/ImageTransferTest.java ++java/awt/dnd/InterJVMGetDropSuccessTest/InterJVMGetDropSuccessTest.html ++java/awt/dnd/MissingDragExitEventTest/MissingDragExitEventTest.java ++java/awt/dnd/MissingEventsOnModalDialog/MissingEventsOnModalDialogTest.java ++java/awt/dnd/NoFormatsCrashTest/NoFormatsCrashTest.html ++java/awt/dnd/URIListBetweenJVMsTest/URIListBetweenJVMsTest.html ++java/awt/dnd/URIListToFileListBetweenJVMsTest/URIListToFileListBetweenJVMsTest.html ++java/awt/EmbeddedFrame/GraphicsConfigTest/GraphicsConfigTest.java ++java/awt/event/ComponentEvent/MovedResizedTardyEventTest/MovedResizedTardyEventTest.html ++java/awt/EventDispatchThread/EDTShutdownTest/EDTShutdownTest.java ++java/awt/EventDispatchThread/HandleExceptionOnEDT/HandleExceptionOnEDT.java ++java/awt/EventDispatchThread/LoopRobustness/LoopRobustness.html ++java/awt/EventDispatchThread/PreserveDispathThread/PreserveDispatchThread.java ++java/awt/event/HierarchyEvent/AncestorResized/AncestorResized.java 
++java/awt/event/InputEvent/ButtonArraysEquality/ButtonArraysEquality.java ++java/awt/event/InputEvent/EventWhenTest/EventWhenTest.java ++java/awt/event/InvocationEvent/InvocationEventTest.java ++java/awt/event/KeyEvent/8020209/bug8020209.java ++java/awt/event/KeyEvent/AltCharAcceleratorTest/AltCharAcceleratorTest.java ++java/awt/event/KeyEvent/CorrectTime/CorrectTime.java ++java/awt/event/KeyEvent/DeadKey/DeadKeyMacOSXInputText.java ++java/awt/event/KeyEvent/DeadKey/deadKeyMacOSX.java ++java/awt/event/KeyEvent/DeadKey/DeadKeySystemAssertionDialog.java ++java/awt/event/KeyEvent/ExtendedKeyCode/ExtendedKeyCodeTest.java ++java/awt/event/KeyEvent/ExtendedModifiersTest/ExtendedModifiersTest.java ++java/awt/event/KeyEvent/KeyChar/KeyCharTest.java ++java/awt/event/KeyEvent/KeyMaskTest/KeyMaskTest.java ++java/awt/event/KeyEvent/KeyTyped/CtrlASCII.html ++java/awt/event/KeyEvent/SwallowKeyEvents/SwallowKeyEvents.java ++java/awt/event/MouseEvent/AcceptExtraButton/AcceptExtraButton.java ++java/awt/event/MouseEvent/CheckGetMaskForButton/CheckGetMaskForButton.java ++java/awt/event/MouseEvent/ClickDuringKeypress/ClickDuringKeypress.java ++java/awt/event/MouseEvent/DisabledComponents/DisabledComponentsTest.java ++java/awt/event/MouseEvent/EnterAsGrabbedEvent/EnterAsGrabbedEvent.java ++java/awt/event/MouseEvent/EventTimeInFuture/EventTimeInFuture.java ++java/awt/event/MouseEvent/FrameMouseEventAbsoluteCoordsTest/FrameMouseEventAbsoluteCoordsTest.html ++java/awt/event/MouseEvent/MenuDragMouseEventAbsoluteCoordsTest/MenuDragMouseEventAbsoluteCoordsTest.html ++java/awt/event/MouseEvent/MouseButtonsAndKeyMasksTest/MouseButtonsAndKeyMasksTest.java ++java/awt/event/MouseEvent/MouseButtonsTest/MouseButtonsTest.java ++java/awt/event/MouseEvent/MouseClickTest/MouseClickTest.html ++java/awt/event/MouseEvent/MouseWheelEventAbsoluteCoordsTest/MouseWheelEventAbsoluteCoordsTest.html ++java/awt/event/MouseEvent/MultipleMouseButtonsTest/MultipleMouseButtonsTest.java 
++java/awt/event/MouseEvent/RobotLWTest/RobotLWTest.html ++java/awt/event/MouseEvent/SpuriousExitEnter/SpuriousExitEnter_3.java ++java/awt/event/MouseWheelEvent/DisabledComponent/DisabledComponent.java ++java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion_1.java ++java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion_2.html ++java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion_3.html ++java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion_4.java ++java/awt/event/MouseWheelEvent/InfiniteRecursion/InfiniteRecursion.java ++java/awt/event/MouseWheelEvent/WheelModifier/MouseWheelOnBackgroundComponent.java ++java/awt/event/MouseWheelEvent/WheelModifier/WheelModifier.java ++java/awt/event/OtherEvents/UngrabID/UngrabID.java ++java/awt/EventQueue/6638195/bug6638195.java ++java/awt/EventQueue/6980209/bug6980209.java ++java/awt/EventQueue/HeadlessEventQueue.java ++java/awt/EventQueue/InvocationEventTest/InvocationEventTest.java ++java/awt/EventQueue/MainAppContext/MainAppContext.java ++java/awt/EventQueue/NonComponentSourcePost.java ++java/awt/EventQueue/PostEventOrderingTest/PostEventOrderingTest.java ++java/awt/EventQueue/PushPopDeadlock2/PushPopTest.java ++java/awt/EventQueue/SecondaryLoopTest/SecondaryLoopTest.java ++java/awt/event/SequencedEvent/MultipleContextsFunctionalTest.java ++java/awt/event/SequencedEvent/MultipleContextsUnitTest.java ++java/awt/event/TextEvent/TextEventSequenceTest/TextEventSequenceTest.java ++java/awt/FileDialog/FilenameFilterTest/FilenameFilterTest.html ++java/awt/FileDialog/ISCthrownByFileListTest/ISCthrownByFileListTest.java ++java/awt/Focus/6378278/InputVerifierTest.java ++java/awt/Focus/6382144/EndlessLoopTest.java ++java/awt/Focus/6401036/InputVerifierTest2.java ++java/awt/Focus/6981400/Test1.java ++java/awt/Focus/6981400/Test2.java ++java/awt/Focus/6981400/Test3.java ++java/awt/Focus/8013611/JDK8013611.java ++java/awt/Focus/8073453/AWTFocusTransitionTest.java 
++java/awt/Focus/8073453/SwingFocusTransitionTest.java ++java/awt/Focus/ActualFocusedWindowTest/ActualFocusedWindowBlockingTest.java ++java/awt/Focus/ActualFocusedWindowTest/ActualFocusedWindowRetaining.java ++java/awt/Focus/AppletInitialFocusTest/AppletInitialFocusTest1.html ++java/awt/Focus/AppletInitialFocusTest/AppletInitialFocusTest.html ++java/awt/Focus/AutoRequestFocusTest/AutoRequestFocusSetVisibleTest.java ++java/awt/Focus/AutoRequestFocusTest/AutoRequestFocusToFrontTest.java ++java/awt/Focus/ChildWindowFocusTest/ChildWindowFocusTest.html ++java/awt/Focus/ChoiceFocus/ChoiceFocus.java ++java/awt/Focus/ClearGlobalFocusOwnerTest/ClearGlobalFocusOwnerTest.java ++java/awt/Focus/ClearLwQueueBreakTest/ClearLwQueueBreakTest.java ++java/awt/Focus/CloseDialogActivateOwnerTest/CloseDialogActivateOwnerTest.java ++java/awt/Focus/ConsumeNextKeyTypedOnModalShowTest/ConsumeNextKeyTypedOnModalShowTest.java ++java/awt/Focus/ContainerFocusAutoTransferTest/ContainerFocusAutoTransferTest.java ++java/awt/Focus/DeiconifiedFrameLoosesFocus/DeiconifiedFrameLoosesFocus.html ++java/awt/Focus/DisposedWindow/DisposeDialogNotActivateOwnerTest/DisposeDialogNotActivateOwnerTest.html ++java/awt/Focus/FocusEmbeddedFrameTest/FocusEmbeddedFrameTest.java ++java/awt/Focus/FocusOwnerFrameOnClick/FocusOwnerFrameOnClick.java ++java/awt/Focus/FocusSubRequestTest/FocusSubRequestTest.html ++java/awt/Focus/FocusTransitionTest/FocusTransitionTest.java ++java/awt/Focus/FocusTraversalPolicy/DefaultFTPTest.java ++java/awt/Focus/FocusTraversalPolicy/InitialFTP.java ++java/awt/Focus/FocusTraversalPolicy/LayoutFTPTest.java ++java/awt/Focus/FrameJumpingToMouse/FrameJumpingToMouse.java ++java/awt/Focus/FrameMinimizeTest/FrameMinimizeTest.java ++java/awt/Focus/Headless/HeadlessContainerOrderFocusTraversalPolicy.java ++java/awt/Focus/Headless/HeadlessDefaultFocusTraversalPolicy.java ++java/awt/Focus/Headless/HeadlessDefaultKeyboardFocusManager.java 
++java/awt/Focus/IconifiedFrameFocusChangeTest/IconifiedFrameFocusChangeTest.java ++java/awt/Focus/InputVerifierTest3/InputVerifierTest3.java ++java/awt/Focus/KeyEventForBadFocusOwnerTest/KeyEventForBadFocusOwnerTest.java ++java/awt/Focus/ModalBlockedStealsFocusTest/ModalBlockedStealsFocusTest.html ++java/awt/Focus/ModalDialogActivationTest/ModalDialogActivationTest.java ++java/awt/Focus/ModalDialogInitialFocusTest/ModalDialogInitialFocusTest.html ++java/awt/Focus/ModalExcludedWindowClickTest/ModalExcludedWindowClickTest.html ++java/awt/Focus/MouseClickRequestFocusRaceTest/MouseClickRequestFocusRaceTest.html ++java/awt/Focus/NoAutotransferToDisabledCompTest/NoAutotransferToDisabledCompTest.java ++java/awt/Focus/NonFocusableBlockedOwnerTest/NonFocusableBlockedOwnerTest.html ++java/awt/Focus/NonFocusableResizableTooSmall/NonFocusableResizableTooSmall.java ++java/awt/Focus/NonFocusableWindowTest/NoEventsTest.java ++java/awt/Focus/NonFocusableWindowTest/NonfocusableOwnerTest.java ++java/awt/Focus/NullActiveWindowOnFocusLost/NullActiveWindowOnFocusLost.java ++java/awt/Focus/OwnedWindowFocusIMECrashTest/OwnedWindowFocusIMECrashTest.java ++java/awt/Focus/RemoveAfterRequest/RemoveAfterRequest.java ++java/awt/Focus/RequestFocusAndHideTest/RequestFocusAndHideTest.java ++java/awt/Focus/RequestFocusToDisabledCompTest/RequestFocusToDisabledCompTest.java ++java/awt/Focus/RequestOnCompWithNullParent/RequestOnCompWithNullParent1.java ++java/awt/Focus/ResetMostRecentFocusOwnerTest/ResetMostRecentFocusOwnerTest.java ++java/awt/Focus/RestoreFocusOnDisabledComponentTest/RestoreFocusOnDisabledComponentTest.java ++java/awt/Focus/RollbackFocusFromAnotherWindowTest/RollbackFocusFromAnotherWindowTest.java ++java/awt/Focus/ShowFrameCheckForegroundTest/ShowFrameCheckForegroundTest.java ++java/awt/Focus/SimpleWindowActivationTest/SimpleWindowActivationTest.java ++java/awt/Focus/SortingFPT/JDK8048887.java ++java/awt/Focus/ToFrontFocusTest/ToFrontFocus.html 
++java/awt/Focus/TranserFocusToWindow/TranserFocusToWindow.java ++java/awt/Focus/TypeAhead/TestFocusFreeze.java ++java/awt/Focus/WindowInitialFocusTest/WindowInitialFocusTest.html ++java/awt/Focus/WindowIsFocusableAccessByThreadsTest/WindowIsFocusableAccessByThreadsTest.java ++java/awt/Focus/WindowUpdateFocusabilityTest/WindowUpdateFocusabilityTest.html ++java/awt/Focus/WrongKeyTypedConsumedTest/WrongKeyTypedConsumedTest.java ++java/awt/FontClass/BigMetrics.java ++java/awt/FontClass/CreateFont/bigfont.html ++java/awt/FontClass/CreateFont/DeleteFont.sh ++java/awt/FontClass/CreateFont/fileaccess/FontFile.java ++java/awt/FontClass/DebugFonts.java ++java/awt/FontClass/FontAccess.java ++java/awt/FontClass/FontPrivilege.java ++java/awt/FontClass/FontSize1Test.java ++java/awt/FontClass/GlyphRotationTest.java ++java/awt/FontClass/HeadlessFont.java ++java/awt/FontClass/HelvLtOblTest.java ++java/awt/FontClass/MassiveMetricsTest.java ++java/awt/FontClass/SurrogateTest/SuppCharTest.java ++java/awt/FontClass/SurrogateTest/SupplementaryCanDisplayUpToTest.java ++java/awt/FontClass/X11FontPathCrashTest.java ++java/awt/font/FontNames/LocaleFamilyNames.java ++java/awt/font/GlyphVector/TestLayoutFlags.java ++java/awt/font/GlyphVector/VisualBounds.java ++java/awt/font/LineBreakMeasurer/AllFontsLBM.java ++java/awt/font/LineBreakMeasurer/FRCTest.java ++java/awt/FontMetrics/MaxAdvanceIsMax.java ++java/awt/FontMetrics/SpaceAdvance.java ++java/awt/FontMetrics/StyledSpaceAdvance.java ++java/awt/font/MonospacedGlyphWidth/MonospacedGlyphWidthTest.java ++java/awt/font/NumericShaper/EasternArabicTest.java ++java/awt/font/NumericShaper/EqualsTest.java ++java/awt/font/NumericShaper/MTTest.java ++java/awt/font/NumericShaper/ShapingTest.java ++java/awt/font/PhoneticExtensions/PhoneticExtensionsGlyphTest.java ++java/awt/font/Rotate/RotatedFontMetricsTest.java ++java/awt/font/Rotate/Shear.java ++java/awt/font/Rotate/TestTransform.java ++java/awt/font/Rotate/TranslatedOutlineTest.java 
++java/awt/font/StyledMetrics/BoldSpace.java ++java/awt/font/TextLayout/AttributeValuesCastTest.java ++java/awt/font/TextLayout/CombiningPerf.java ++java/awt/font/TextLayout/DecorationBoundsTest.java ++java/awt/font/TextLayout/DiacriticsDrawingTest.java ++java/awt/font/TextLayout/HangulShapingTest.java ++java/awt/font/TextLayout/HebrewIsRTLTest.java ++java/awt/font/TextLayout/NegativeGlyphIDException.java ++java/awt/font/TextLayout/OSXLigatureTest.java ++java/awt/font/TextLayout/TestAATMorxFont.java ++java/awt/font/TextLayout/TestHebrewMark.java ++java/awt/font/TextLayout/TestOldHangul.java ++java/awt/font/TextLayout/TestSinhalaChar.java ++java/awt/font/TextLayout/TestTibetan.java ++java/awt/font/TextLayout/TextLayoutBounds.java ++java/awt/font/TextLayout/UnderlinePositionTest.java ++java/awt/font/TextLayout/VisibleAdvance.java ++java/awt/font/Threads/FontThread.java ++java/awt/font/TransformAttribute/TransformEqualityTest.java ++java/awt/font/Underline/UnderlineTest.java ++java/awt/Frame/7024749/bug7024749.java ++java/awt/Frame/DecoratedExceptions/DecoratedExceptions.java ++java/awt/Frame/DisposeParentGC/DisposeParentGC.java ++java/awt/Frame/DisposeStressTest/DisposeStressTest.html ++java/awt/Frame/DynamicLayout/DynamicLayout.java ++java/awt/Frame/ExceptionOnSetExtendedStateTest/ExceptionOnSetExtendedStateTest.java ++java/awt/Frame/FrameLocation/FrameLocation.java ++java/awt/Frame/FrameResize/ShowChildWhileResizingTest.java ++java/awt/Frame/FrameSetSizeStressTest/FrameSetSizeStressTest.java ++java/awt/Frame/FramesGC/FramesGC.java ++java/awt/Frame/FrameSize/TestFrameSize.java ++java/awt/Frame/HideMaximized/HideMaximized.java ++java/awt/Frame/HugeFrame/HugeFrame.java ++java/awt/Frame/InvisibleOwner/InvisibleOwner.java ++java/awt/Frame/LayoutOnMaximizeTest/LayoutOnMaximizeTest.java ++java/awt/Frame/MaximizedByPlatform/MaximizedByPlatform.java ++java/awt/Frame/MaximizedNormalBoundsUndecoratedTest/MaximizedNormalBoundsUndecoratedTest.java 
++java/awt/Frame/MaximizedToIconified/MaximizedToIconified.java ++java/awt/Frame/MaximizedToMaximized/MaximizedToMaximized.java ++java/awt/Frame/MaximizedUndecorated/MaximizedUndecorated.java ++java/awt/Frame/MiscUndecorated/ActiveAWTWindowTest.java ++java/awt/Frame/MiscUndecorated/ActiveSwingWindowTest.java ++java/awt/Frame/MiscUndecorated/FrameCloseTest.java ++java/awt/Frame/MiscUndecorated/RepaintTest.java ++java/awt/Frame/MiscUndecorated/UndecoratedInitiallyIconified.java ++java/awt/Frame/NonEDT_GUI_DeadlockTest/NonEDT_GUI_Deadlock.html ++java/awt/Frame/NormalToIconified/NormalToIconifiedTest.java ++java/awt/Frame/ObscuredFrame/ObscuredFrameTest.java ++java/awt/Frame/ResizeAfterSetFont/ResizeAfterSetFont.java ++java/awt/Frame/SetMaximizedBounds/SetMaximizedBounds.java ++java/awt/Frame/ShapeNotSetSometimes/ShapeNotSetSometimes.java ++java/awt/Frame/ShownOffScreenOnWin98/ShownOffScreenOnWin98Test.java ++java/awt/Frame/SlideNotResizableTest/SlideNotResizableTest.java ++java/awt/Frame/UnfocusableMaximizedFrameResizablity/UnfocusableMaximizedFrameResizablity.java ++java/awt/Frame/WindowDragTest/WindowDragTest.java ++java/awt/FullScreen/8013581/bug8013581.java ++java/awt/FullScreen/AltTabCrashTest/AltTabCrashTest.java ++java/awt/FullScreen/BufferStrategyExceptionTest/BufferStrategyExceptionTest.java ++java/awt/FullScreen/DisplayChangeVITest/DisplayChangeVITest.java ++java/awt/FullScreen/FullScreenInsets/FullScreenInsets.java ++java/awt/FullScreen/MultimonFullscreenTest/MultimonDeadlockTest.java ++java/awt/FullScreen/NonExistentDisplayModeTest/NonExistentDisplayModeTest.java ++java/awt/FullScreen/NoResizeEventOnDMChangeTest/NoResizeEventOnDMChangeTest.java ++java/awt/FullScreen/SetFSWindow/FSFrame.java ++java/awt/FullScreen/TranslucentWindow/TranslucentWindow.java ++java/awt/FullScreen/UninitializedDisplayModeChangeTest/UninitializedDisplayModeChangeTest.java ++java/awt/geom/AffineTransform/InvalidTransformParameterTest.java 
++java/awt/geom/Arc2D/SerializationTest.java ++java/awt/geom/CubicCurve2D/ContainsTest.java ++java/awt/geom/CubicCurve2D/IntersectsTest.java ++java/awt/geom/CubicCurve2D/SolveCubicTest.java ++java/awt/geom/Path2D/EmptyCapacity.java ++java/awt/geom/Path2D/FillPPathTest.java ++java/awt/geom/Path2D/Path2DCopyConstructor.java ++java/awt/geom/Path2D/Path2DGrow.java ++java/awt/grab/EmbeddedFrameTest1/EmbeddedFrameTest1.java ++java/awt/grab/GrabOnUnfocusableToplevel/GrabOnUnfocusableToplevel.java ++java/awt/grab/MenuDragEvents/MenuDragEvents.html ++java/awt/GradientPaint/GradientTransformTest.java ++java/awt/GradientPaint/HeadlessGradientPaint.java ++java/awt/GradientPaint/LinearColorSpaceGradientTest.java ++java/awt/Graphics2D/DrawString/AlphaSurfaceText.java ++java/awt/Graphics2D/DrawString/DrawRotatedString.java ++java/awt/Graphics2D/DrawString/DrawRotatedStringUsingRotatedFont.java ++java/awt/Graphics2D/DrawString/DrawStringCrash.java ++java/awt/Graphics2D/DrawString/DrawStrSuper.java ++java/awt/Graphics2D/DrawString/EmptyAttrString.java ++java/awt/Graphics2D/DrawString/LCDTextSrcEa.java ++java/awt/Graphics2D/DrawString/RotTransText.java ++java/awt/Graphics2D/DrawString/ScaledLCDTextMetrics.java ++java/awt/Graphics2D/DrawString/TextRenderingTest.java ++java/awt/Graphics2D/DrawString/XRenderElt254TextTest.java ++java/awt/Graphics2D/FillTexturePaint/FillTexturePaint.java ++java/awt/Graphics2D/FlipDrawImage/FlipDrawImage.java ++java/awt/Graphics2D/Headless/HeadlessPoint.java ++java/awt/Graphics2D/Headless/HeadlessPolygon.java ++java/awt/Graphics2D/Headless/HeadlessRectangle.java ++java/awt/Graphics2D/IncorrectTextSize/IncorrectTextSize.java ++java/awt/Graphics2D/MTGraphicsAccessTest/MTGraphicsAccessTest.java ++java/awt/Graphics2D/RenderClipTest/RenderClipTest.java ++java/awt/Graphics2D/ScaledCopyArea/ScaledCopyArea.java ++java/awt/Graphics2D/Test8004859/Test8004859.java ++java/awt/Graphics2D/TransformSetGet/TransformSetGet.java 
++java/awt/Graphics2D/WhiteTextColorTest.java ++java/awt/GraphicsConfiguration/HeadlessGraphicsConfiguration.java ++java/awt/GraphicsConfiguration/NormalizingTransformTest/NormalizingTransformTest.java ++java/awt/GraphicsDevice/CheckDisplayModes.java ++java/awt/GraphicsDevice/CloneConfigsTest.java ++java/awt/GraphicsDevice/HeadlessGraphicsDevice.java ++java/awt/GraphicsDevice/IncorrectDisplayModeExitFullscreen.java ++java/awt/Graphics/DrawImageBG/SystemBgColorTest.java ++java/awt/Graphics/DrawLineTest.java ++java/awt/GraphicsEnvironment/HeadlessGraphicsEnvironment.java ++java/awt/GraphicsEnvironment/LoadLock/GE_init1.java ++java/awt/GraphicsEnvironment/LoadLock/GE_init2.java ++java/awt/GraphicsEnvironment/LoadLock/GE_init3.java ++java/awt/GraphicsEnvironment/LoadLock/GE_init4.java ++java/awt/GraphicsEnvironment/LoadLock/GE_init5.java ++java/awt/GraphicsEnvironment/LoadLock/GE_init6.java ++java/awt/GraphicsEnvironment/PreferLocaleFonts.java ++java/awt/GraphicsEnvironment/TestDetectHeadless/TestDetectHeadless.sh ++java/awt/GraphicsEnvironment/TestGetDefScreenDevice.java ++java/awt/Graphics/LineClipTest.java ++java/awt/GridBagLayout/GridBagLayoutIpadXYTest/GridBagLayoutIpadXYTest.html ++java/awt/GridLayout/ChangeGridSize/ChangeGridSize.java ++java/awt/GridLayout/ComponentPreferredSize/ComponentPreferredSize.java ++java/awt/GridLayout/LayoutExtraGaps/LayoutExtraGaps.java ++java/awt/Gtk/GtkVersionTest/GtkVersionTest.java ++java/awt/Headless/HeadlessAWTEventMulticaster.java ++java/awt/Headless/HeadlessAWTException.java ++java/awt/Headless/HeadlessBasicStroke.java ++java/awt/Headless/HeadlessBorderLayout.java ++java/awt/Headless/HeadlessCardLayout.java ++java/awt/Headless/HeadlessCheckboxGroup.java ++java/awt/Headless/HeadlessCheckboxMenuItem.java ++java/awt/Headless/HeadlessComponentOrientation.java ++java/awt/Headless/HeadlessDimension.java ++java/awt/Headless/HeadlessFlowLayout.java ++java/awt/Headless/HeadlessMediaTracker.java 
++java/awt/Headless/HeadlessPopupMenu.java ++java/awt/im/6396526/IMLookAndFeel.java ++java/awt/im/8041990/bug8041990.java ++java/awt/image/BufferedImage/GetPropertyNames.java ++java/awt/image/BufferedImage/ICMColorDataTest/ICMColorDataTest.java ++java/awt/image/BufferedImage/TinyScale.java ++java/awt/image/ConvolveOp/EdgeNoOpCrash.java ++java/awt/image/ConvolveOp/OpCompatibleImageTest.java ++java/awt/image/DrawImage/DrawImageCoordsTest.java ++java/awt/image/DrawImage/EABlitTest.java ++java/awt/image/DrawImage/IncorrectAlphaConversionBicubic.java ++java/awt/image/DrawImage/IncorrectAlphaSurface2SW.java ++java/awt/image/DrawImage/IncorrectBounds.java ++java/awt/image/DrawImage/IncorrectClipSurface2SW.java ++java/awt/image/DrawImage/IncorrectClipXorModeSurface2Surface.java ++java/awt/image/DrawImage/IncorrectClipXorModeSW2Surface.java ++java/awt/image/DrawImage/IncorrectDestinationOffset.java ++java/awt/image/DrawImage/IncorrectManagedImageSourceOffset.java ++java/awt/image/DrawImage/IncorrectOffset.java ++java/awt/image/DrawImage/IncorrectSourceOffset.java ++java/awt/image/DrawImage/IncorrectUnmanagedImageRotatedClip.java ++java/awt/image/DrawImage/IncorrectUnmanagedImageSourceOffset.java ++java/awt/image/DrawImage/SimpleManagedImage.java ++java/awt/image/DrawImage/SimpleUnmanagedImage.java ++java/awt/image/DrawImage/UnmanagedDrawImagePerformance.java ++java/awt/image/FilteredImageSourceTest.java ++java/awt/image/GetDataElementsTest.java ++java/awt/image/GetSamplesTest.java ++java/awt/image/Headless/HeadlessAffineTransformOp.java ++java/awt/image/Headless/HeadlessAreaAveragingScaleFilter.java ++java/awt/image/Headless/HeadlessBufferedImageFilter.java ++java/awt/image/Headless/HeadlessBufferedImage.java ++java/awt/image/Headless/HeadlessColorModel.java ++java/awt/image/Headless/HeadlessCropImageFilter.java ++java/awt/image/Headless/HeadlessImageFilter.java ++java/awt/image/Headless/HeadlessIndexColorModel.java 
++java/awt/image/Headless/HeadlessReplicateScaleFilter.java ++java/awt/image/Headless/HeadlessRGBImageFilter.java ++java/awt/image/ImageIconHang.java ++java/awt/image/ImagingOpsNoExceptionsTest/ImagingOpsNoExceptionsTest.java ++java/awt/image/IncorrectSampleMaskTest.java ++java/awt/image/LookupOp/IntImageReverseTest.java ++java/awt/image/LookupOp/SingleArrayTest.java ++java/awt/image/mlib/MlibOpsTest.java ++java/awt/image/MultiResolutionImage/MultiResolutionImageObserverTest.java ++java/awt/image/MultiResolutionImage/NSImageToMultiResolutionImageTest.java ++java/awt/image/MultiResolutionImageTest.java ++java/awt/image/multiresolution/MultiResolutionToolkitImageTest.java ++java/awt/image/Raster/TestChildRasterOp.java ++java/awt/image/VolatileImage/BitmaskVolatileImage.java ++java/awt/image/VolatileImage/VolatileImageBug.java ++java/awt/im/Headless/HeadlessInputContext.java ++java/awt/im/Headless/HeadlessInputMethodHighlight.java ++java/awt/im/InputContext/bug4625203.java ++java/awt/im/InputContext/InputContextTest.java ++java/awt/im/memoryleak/InputContextMemoryLeakTest.java ++java/awt/Insets/CombinedTestApp1.java ++java/awt/Insets/HeadlessInsets.java ++java/awt/JAWT/JAWT.sh ++java/awt/keyboard/AltPlusNumberKeyCombinationsTest/AltPlusNumberKeyCombinationsTest.java ++java/awt/keyboard/EqualKeyCode/EqualKeyCode.java ++java/awt/KeyboardFocusmanager/ConsumeNextMnemonicKeyTypedTest/ConsumeForModalDialogTest/ConsumeForModalDialogTest.html ++java/awt/KeyboardFocusmanager/ConsumeNextMnemonicKeyTypedTest/ConsumeNextMnemonicKeyTypedTest.html ++java/awt/KeyboardFocusmanager/DefaultPolicyChange/DefaultPolicyChange_AWT.java ++java/awt/KeyboardFocusmanager/DefaultPolicyChange/DefaultPolicyChange_Swing.java ++java/awt/KeyboardFocusmanager/TypeAhead/ButtonActionKeyTest/ButtonActionKeyTest.html ++java/awt/KeyboardFocusmanager/TypeAhead/EnqueueWithDialogButtonTest/EnqueueWithDialogButtonTest.java 
++java/awt/KeyboardFocusmanager/TypeAhead/EnqueueWithDialogTest/EnqueueWithDialogTest.java ++java/awt/KeyboardFocusmanager/TypeAhead/FreezeTest/FreezeTest.java ++java/awt/KeyboardFocusmanager/TypeAhead/MenuItemActivatedTest/MenuItemActivatedTest.html ++java/awt/KeyboardFocusmanager/TypeAhead/SubMenuShowTest/SubMenuShowTest.html ++java/awt/KeyboardFocusmanager/TypeAhead/TestDialogTypeAhead.html ++java/awt/LightweightDispatcher/LWDispatcherMemoryLeakTest.java ++java/awt/List/ActionAfterRemove/ActionAfterRemove.java ++java/awt/List/EmptyListEventTest/EmptyListEventTest.java ++java/awt/List/FirstItemRemoveTest/FirstItemRemoveTest.html ++java/awt/List/FocusEmptyListTest/FocusEmptyListTest.html ++java/awt/List/KeyEventsTest/KeyEventsTest.html ++java/awt/List/ListGarbageCollectionTest/AwtListGarbageCollectionTest.java ++java/awt/List/ListPeer/R2303044ListSelection.java ++java/awt/List/NofocusListDblClickTest/NofocusListDblClickTest.java ++java/awt/List/ScrollOutside/ScrollOut.java ++java/awt/List/SetBackgroundTest/SetBackgroundTest.java ++java/awt/List/SingleModeDeselect/SingleModeDeselect.java ++java/awt/MenuBar/8007006/bug8007006.java ++java/awt/MenuBar/DeadlockTest1/DeadlockTest1.java ++java/awt/MenuBar/HeadlessMenuBar.java ++java/awt/MenuBar/MenuBarSetFont/MenuBarSetFont.java ++java/awt/MenuBar/RemoveHelpMenu/RemoveHelpMenu.java ++java/awt/Menu/Headless/HeadlessMenuItem.java ++java/awt/Menu/Headless/HeadlessMenu.java ++java/awt/Menu/Headless/HeadlessMenuShortcut.java ++java/awt/Menu/NullMenuLabelTest/NullMenuLabelTest.java ++java/awt/Menu/OpensWithNoGrab/OpensWithNoGrab.java ++java/awt/Mixing/AWT_Mixing/HierarchyBoundsListenerMixingTest.java ++java/awt/Mixing/AWT_Mixing/JButtonInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JButtonOverlapping.java ++java/awt/Mixing/AWT_Mixing/JColorChooserOverlapping.java ++java/awt/Mixing/AWT_Mixing/JComboBoxOverlapping.java ++java/awt/Mixing/AWT_Mixing/JEditorPaneInGlassPaneOverlapping.java 
++java/awt/Mixing/AWT_Mixing/JEditorPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JGlassPaneInternalFrameOverlapping.java ++java/awt/Mixing/AWT_Mixing/JGlassPaneMoveOverlapping.java ++java/awt/Mixing/AWT_Mixing/JInternalFrameMoveOverlapping.java ++java/awt/Mixing/AWT_Mixing/JInternalFrameOverlapping.java ++java/awt/Mixing/AWT_Mixing/JLabelInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JLabelOverlapping.java ++java/awt/Mixing/AWT_Mixing/JListInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JListOverlapping.java ++java/awt/Mixing/AWT_Mixing/JMenuBarOverlapping.java ++java/awt/Mixing/AWT_Mixing/JPanelInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JPanelOverlapping.java ++java/awt/Mixing/AWT_Mixing/JPopupMenuOverlapping.java ++java/awt/Mixing/AWT_Mixing/JProgressBarInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JProgressBarOverlapping.java ++java/awt/Mixing/AWT_Mixing/JScrollBarInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JScrollBarOverlapping.java ++java/awt/Mixing/AWT_Mixing/JScrollPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JSliderInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JSliderOverlapping.java ++java/awt/Mixing/AWT_Mixing/JSpinnerInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JSpinnerOverlapping.java ++java/awt/Mixing/AWT_Mixing/JSplitPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JTableInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JTableOverlapping.java ++java/awt/Mixing/AWT_Mixing/JTextAreaInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JTextAreaOverlapping.java ++java/awt/Mixing/AWT_Mixing/JTextFieldInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JTextFieldOverlapping.java ++java/awt/Mixing/AWT_Mixing/JToggleButtonInGlassPaneOverlapping.java ++java/awt/Mixing/AWT_Mixing/JToggleButtonOverlapping.java ++java/awt/Mixing/AWT_Mixing/MixingFrameResizing.java ++java/awt/Mixing/AWT_Mixing/MixingPanelsResizing.java 
++java/awt/Mixing/AWT_Mixing/OpaqueOverlappingChoice.java ++java/awt/Mixing/AWT_Mixing/OpaqueOverlapping.java ++java/awt/Mixing/AWT_Mixing/ViewportOverlapping.java ++java/awt/Mixing/HWDisappear.java ++java/awt/Mixing/JButtonInGlassPane.java ++java/awt/Mixing/LWComboBox.java ++java/awt/Mixing/LWPopupMenu.java ++java/awt/Mixing/MixingInHwPanel.java ++java/awt/Mixing/MixingOnDialog.java ++java/awt/Mixing/MixingOnShrinkingHWButton.java ++java/awt/Mixing/NonOpaqueInternalFrame.java ++java/awt/Mixing/OpaqueTest.java ++java/awt/Mixing/OverlappingButtons.java ++java/awt/Mixing/setComponentZOrder.java ++java/awt/Mixing/Validating.java ++java/awt/Mixing/ValidBounds.java ++java/awt/Modal/FileDialog/FileDialogAppModal1Test.java ++java/awt/Modal/FileDialog/FileDialogAppModal2Test.java ++java/awt/Modal/FileDialog/FileDialogAppModal3Test.java ++java/awt/Modal/FileDialog/FileDialogAppModal4Test.java ++java/awt/Modal/FileDialog/FileDialogAppModal5Test.java ++java/awt/Modal/FileDialog/FileDialogAppModal6Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal1Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal2Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal3Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal4Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal5Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal6Test.java ++java/awt/Modal/FileDialog/FileDialogDocModal7Test.java ++java/awt/Modal/FileDialog/FileDialogModal1Test.java ++java/awt/Modal/FileDialog/FileDialogModal2Test.java ++java/awt/Modal/FileDialog/FileDialogModal3Test.java ++java/awt/Modal/FileDialog/FileDialogModal4Test.java ++java/awt/Modal/FileDialog/FileDialogModal5Test.java ++java/awt/Modal/FileDialog/FileDialogModal6Test.java ++java/awt/Modal/FileDialog/FileDialogNonModal1Test.java ++java/awt/Modal/FileDialog/FileDialogNonModal2Test.java ++java/awt/Modal/FileDialog/FileDialogNonModal3Test.java ++java/awt/Modal/FileDialog/FileDialogNonModal4Test.java 
++java/awt/Modal/FileDialog/FileDialogNonModal5Test.java ++java/awt/Modal/FileDialog/FileDialogNonModal6Test.java ++java/awt/Modal/FileDialog/FileDialogNonModal7Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal1Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal2Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal3Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal4Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal5Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal6Test.java ++java/awt/Modal/FileDialog/FileDialogTKModal7Test.java ++java/awt/Modal/LWModalTest/LWModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDDAppModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDDDocModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDDModelessTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDDNonModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDDSetModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDDToolkitModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDFAppModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDFSetModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDFToolkitModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingDFWModeless1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingDFWModeless2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingDFWNonModal1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingDFWNonModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingDocModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDAppModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDDocModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDModelessTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDNonModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDSetModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDToolkitModalTest.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWDocModal1Test.java 
++java/awt/Modal/ModalBlockingTests/BlockingFDWDocModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWDocModal3Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWDocModal4Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWModeless1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWModeless2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWModeless3Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWModeless4Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWNonModal1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWNonModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWNonModal3Test.java ++java/awt/Modal/ModalBlockingTests/BlockingFDWNonModal4Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsAppModal1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsAppModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsAppModal3Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsAppModal4Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsAppModal5Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsAppModal6Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsDocModal1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsDocModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsSetModal1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsSetModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsSetModal3Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsSetModal4Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsSetModal5Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsSetModal6Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsToolkitModal1Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsToolkitModal2Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsToolkitModal3Test.java 
++java/awt/Modal/ModalBlockingTests/BlockingWindowsToolkitModal4Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsToolkitModal5Test.java ++java/awt/Modal/ModalBlockingTests/BlockingWindowsToolkitModal6Test.java ++java/awt/Modal/ModalBlockingTests/UnblockedDialogAppModalTest.java ++java/awt/Modal/ModalBlockingTests/UnblockedDialogDocModalTest.java ++java/awt/Modal/ModalBlockingTests/UnblockedDialogModelessTest.java ++java/awt/Modal/ModalBlockingTests/UnblockedDialogNonModalTest.java ++java/awt/Modal/ModalBlockingTests/UnblockedDialogSetModalTest.java ++java/awt/Modal/ModalBlockingTests/UnblockedDialogToolkitModalTest.java ++java/awt/Modal/ModalDialogOrderingTest/ModalDialogOrderingTest.java ++java/awt/Modal/ModalExclusionTests/ApplicationExcludeDialogFileTest.java ++java/awt/Modal/ModalExclusionTests/ApplicationExcludeDialogPageSetupTest.java ++java/awt/Modal/ModalExclusionTests/ApplicationExcludeDialogPrintSetupTest.java ++java/awt/Modal/ModalExclusionTests/ApplicationExcludeFrameFileTest.java ++java/awt/Modal/ModalExclusionTests/ApplicationExcludeFramePageSetupTest.java ++java/awt/Modal/ModalExclusionTests/ApplicationExcludeFramePrintSetupTest.java ++java/awt/Modal/ModalExclusionTests/ToolkitExcludeDialogFileTest.java ++java/awt/Modal/ModalExclusionTests/ToolkitExcludeDialogPageSetupTest.java ++java/awt/Modal/ModalExclusionTests/ToolkitExcludeDialogPrintSetupTest.java ++java/awt/Modal/ModalExclusionTests/ToolkitExcludeFrameFileTest.java ++java/awt/Modal/ModalExclusionTests/ToolkitExcludeFramePageSetupTest.java ++java/awt/Modal/ModalExclusionTests/ToolkitExcludeFramePrintSetupTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDialogsAppModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDialogsDocModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDialogsModelessTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDialogsNonModalTest.java 
++java/awt/Modal/ModalFocusTransferTests/FocusTransferDWFAppModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDWFDocModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDWFModelessTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferDWFNonModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFDWAppModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFDWDocModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFDWModelessTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFDWNonModalTest.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDAppModal1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDAppModal2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDAppModal3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDAppModal4Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDDocModal1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDDocModal2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDDocModal3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDDocModal4Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDModeless1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDModeless2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDModeless3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDModeless4Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDNonModal1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDNonModal2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDNonModal3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferFWDNonModal4Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFAppModal1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFAppModal2Test.java 
++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFAppModal3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFDocModal1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFDocModal2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFDocModal3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFModeless1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFModeless2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFModeless3Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFNonModal1Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFNonModal2Test.java ++java/awt/Modal/ModalFocusTransferTests/FocusTransferWDFNonModal3Test.java ++java/awt/Modal/ModalInternalFrameTest/ModalInternalFrameTest.java ++java/awt/Modal/ModalitySettingsTest/ModalitySettingsTest.java ++java/awt/Modal/MultipleDialogs/MultipleDialogs1Test.java ++java/awt/Modal/MultipleDialogs/MultipleDialogs2Test.java ++java/awt/Modal/MultipleDialogs/MultipleDialogs3Test.java ++java/awt/Modal/MultipleDialogs/MultipleDialogs4Test.java ++java/awt/Modal/MultipleDialogs/MultipleDialogs5Test.java ++java/awt/Modal/NpeOnClose/NpeOnCloseTest.java ++java/awt/Modal/NullModalityDialogTest/NullModalityDialogTest.java ++java/awt/Modal/OnTop/OnTopAppModal1Test.java ++java/awt/Modal/OnTop/OnTopAppModal2Test.java ++java/awt/Modal/OnTop/OnTopAppModal3Test.java ++java/awt/Modal/OnTop/OnTopAppModal4Test.java ++java/awt/Modal/OnTop/OnTopAppModal5Test.java ++java/awt/Modal/OnTop/OnTopAppModal6Test.java ++java/awt/Modal/OnTop/OnTopDocModal1Test.java ++java/awt/Modal/OnTop/OnTopDocModal2Test.java ++java/awt/Modal/OnTop/OnTopDocModal3Test.java ++java/awt/Modal/OnTop/OnTopDocModal4Test.java ++java/awt/Modal/OnTop/OnTopDocModal5Test.java ++java/awt/Modal/OnTop/OnTopDocModal6Test.java ++java/awt/Modal/OnTop/OnTopModal1Test.java ++java/awt/Modal/OnTop/OnTopModal2Test.java ++java/awt/Modal/OnTop/OnTopModal3Test.java 
++java/awt/Modal/OnTop/OnTopModal4Test.java ++java/awt/Modal/OnTop/OnTopModal5Test.java ++java/awt/Modal/OnTop/OnTopModal6Test.java ++java/awt/Modal/OnTop/OnTopModeless1Test.java ++java/awt/Modal/OnTop/OnTopModeless2Test.java ++java/awt/Modal/OnTop/OnTopModeless3Test.java ++java/awt/Modal/OnTop/OnTopModeless4Test.java ++java/awt/Modal/OnTop/OnTopModeless5Test.java ++java/awt/Modal/OnTop/OnTopModeless6Test.java ++java/awt/Modal/OnTop/OnTopTKModal1Test.java ++java/awt/Modal/OnTop/OnTopTKModal2Test.java ++java/awt/Modal/OnTop/OnTopTKModal3Test.java ++java/awt/Modal/OnTop/OnTopTKModal4Test.java ++java/awt/Modal/OnTop/OnTopTKModal5Test.java ++java/awt/Modal/OnTop/OnTopTKModal6Test.java ++java/awt/Modal/SupportedTest/SupportedTest.java ++java/awt/Modal/ToBack/ToBackAppModal1Test.java ++java/awt/Modal/ToBack/ToBackAppModal2Test.java ++java/awt/Modal/ToBack/ToBackAppModal3Test.java ++java/awt/Modal/ToBack/ToBackAppModal4Test.java ++java/awt/Modal/ToBack/ToBackAppModal5Test.java ++java/awt/Modal/ToBack/ToBackAppModal6Test.java ++java/awt/Modal/ToBack/ToBackDocModal1Test.java ++java/awt/Modal/ToBack/ToBackDocModal2Test.java ++java/awt/Modal/ToBack/ToBackDocModal3Test.java ++java/awt/Modal/ToBack/ToBackDocModal4Test.java ++java/awt/Modal/ToBack/ToBackDocModal5Test.java ++java/awt/Modal/ToBack/ToBackDocModal6Test.java ++java/awt/Modal/ToBack/ToBackModal1Test.java ++java/awt/Modal/ToBack/ToBackModal2Test.java ++java/awt/Modal/ToBack/ToBackModal3Test.java ++java/awt/Modal/ToBack/ToBackModal4Test.java ++java/awt/Modal/ToBack/ToBackModal5Test.java ++java/awt/Modal/ToBack/ToBackModal6Test.java ++java/awt/Modal/ToBack/ToBackModeless1Test.java ++java/awt/Modal/ToBack/ToBackModeless2Test.java ++java/awt/Modal/ToBack/ToBackModeless3Test.java ++java/awt/Modal/ToBack/ToBackModeless4Test.java ++java/awt/Modal/ToBack/ToBackModeless5Test.java ++java/awt/Modal/ToBack/ToBackModeless6Test.java ++java/awt/Modal/ToBack/ToBackNonModal1Test.java ++java/awt/Modal/ToBack/ToBackNonModal2Test.java 
++java/awt/Modal/ToBack/ToBackNonModal3Test.java ++java/awt/Modal/ToBack/ToBackNonModal4Test.java ++java/awt/Modal/ToBack/ToBackNonModal5Test.java ++java/awt/Modal/ToBack/ToBackNonModal6Test.java ++java/awt/Modal/ToBack/ToBackTKModal1Test.java ++java/awt/Modal/ToBack/ToBackTKModal2Test.java ++java/awt/Modal/ToBack/ToBackTKModal3Test.java ++java/awt/Modal/ToBack/ToBackTKModal4Test.java ++java/awt/Modal/ToBack/ToBackTKModal5Test.java ++java/awt/Modal/ToBack/ToBackTKModal6Test.java ++java/awt/Modal/ToFront/DialogToFrontAppModalTest.java ++java/awt/Modal/ToFront/DialogToFrontDocModalTest.java ++java/awt/Modal/ToFront/DialogToFrontModalTest.java ++java/awt/Modal/ToFront/DialogToFrontModeless1Test.java ++java/awt/Modal/ToFront/DialogToFrontNonModalTest.java ++java/awt/Modal/ToFront/DialogToFrontTKModalTest.java ++java/awt/Modal/ToFront/FrameToFrontAppModal1Test.java ++java/awt/Modal/ToFront/FrameToFrontAppModal2Test.java ++java/awt/Modal/ToFront/FrameToFrontAppModal3Test.java ++java/awt/Modal/ToFront/FrameToFrontAppModal4Test.java ++java/awt/Modal/ToFront/FrameToFrontAppModal5Test.java ++java/awt/Modal/ToFront/FrameToFrontDocModal1Test.java ++java/awt/Modal/ToFront/FrameToFrontDocModal2Test.java ++java/awt/Modal/ToFront/FrameToFrontModal1Test.java ++java/awt/Modal/ToFront/FrameToFrontModal2Test.java ++java/awt/Modal/ToFront/FrameToFrontModal3Test.java ++java/awt/Modal/ToFront/FrameToFrontModal4Test.java ++java/awt/Modal/ToFront/FrameToFrontModal5Test.java ++java/awt/Modal/ToFront/FrameToFrontModeless1Test.java ++java/awt/Modal/ToFront/FrameToFrontNonModalTest.java ++java/awt/Modal/ToFront/FrameToFrontTKModal1Test.java ++java/awt/Modal/ToFront/FrameToFrontTKModal2Test.java ++java/awt/Modal/ToFront/FrameToFrontTKModal3Test.java ++java/awt/Modal/ToFront/FrameToFrontTKModal4Test.java ++java/awt/Modal/ToFront/FrameToFrontTKModal5Test.java ++java/awt/MouseAdapter/MouseAdapterUnitTest/MouseAdapterUnitTest.java ++java/awt/Mouse/EnterExitEvents/DragWindowOutOfFrameTest.java 
++java/awt/Mouse/EnterExitEvents/DragWindowTest.java ++java/awt/Mouse/EnterExitEvents/ModalDialogEnterExitEventsTest.java ++java/awt/Mouse/EnterExitEvents/ResizingFrameTest.java ++java/awt/Mouse/ExtraMouseClick/ExtraMouseClick.html ++java/awt/Mouse/GetMousePositionTest/GetMousePositionWithOverlay.java ++java/awt/Mouse/GetMousePositionTest/GetMousePositionWithPopup.java ++java/awt/MouseInfo/GetPointerInfoTest.java ++java/awt/MouseInfo/JContainerMousePositionTest.java ++java/awt/MouseInfo/MultiscreenPointerInfo.java ++java/awt/Mouse/MaximizedFrameTest/MaximizedFrameTest.java ++java/awt/Mouse/MouseComboBoxTest/MouseComboBoxTest.java ++java/awt/Mouse/MouseDragEvent/MouseDraggedTest.java ++java/awt/Mouse/MouseModifiersUnitTest/ExtraButtonDrag.java ++java/awt/Mouse/MouseModifiersUnitTest/MouseModifiersUnitTest_Extra.java ++java/awt/Mouse/MouseModifiersUnitTest/MouseModifiersUnitTest_Standard.java ++java/awt/Mouse/RemovedComponentMouseListener/RemovedComponentMouseListener.java ++java/awt/Mouse/TitleBarDoubleClick/TitleBarDoubleClick.html ++java/awt/Multiscreen/LocationRelativeToTest/LocationRelativeToTest.java ++java/awt/Multiscreen/MouseEventTest/MouseEventTest.java ++java/awt/Multiscreen/MultiScreenInsetsTest/MultiScreenInsetsTest.java ++java/awt/Multiscreen/MultiScreenLocationTest/MultiScreenLocationTest.java ++java/awt/Multiscreen/TranslucencyThrowsExceptionWhenFullScreen/TranslucencyThrowsExceptionWhenFullScreen.java ++java/awt/Multiscreen/UpdateGCTest/UpdateGCTest.java ++java/awt/Multiscreen/WindowGCChangeTest/WindowGCChangeTest.html ++java/awt/Multiscreen/WPanelPeerPerf/WPanelPeerPerf.java ++java/awt/Paint/bug8024864.java ++java/awt/Paint/ButtonRepaint.java ++java/awt/Paint/CheckboxRepaint.java ++java/awt/Paint/ComponentIsNotDrawnAfterRemoveAddTest/ComponentIsNotDrawnAfterRemoveAddTest.java ++java/awt/Paint/ExposeOnEDT.java ++java/awt/Paint/LabelRepaint.java ++java/awt/Paint/ListRepaint.java ++java/awt/Paint/PaintNativeOnUpdate.java 
++java/awt/Paint/PgramUserBoundsTest.java ++java/awt/Paint/RepaintOnAWTShutdown.java ++java/awt/print/Headless/HeadlessBook.java ++java/awt/print/Headless/HeadlessPageFormat.java ++java/awt/print/Headless/HeadlessPaper.java ++java/awt/print/Headless/HeadlessPrinterJob.java ++java/awt/PrintJob/MultipleEnd/MultipleEnd.java ++java/awt/PrintJob/PrintArcTest/PrintArcTest.java ++java/awt/PrintJob/QuoteAndBackslashTest/QuoteAndBackslashTest.java ++java/awt/PrintJob/RoundedRectTest/RoundedRectTest.java ++java/awt/PrintJob/Security/SecurityDialogTest.java ++java/awt/print/PageFormat/NullPaper.java ++java/awt/print/PageFormat/PageFormatFromAttributes.java ++java/awt/print/PageFormat/ReverseLandscapeTest.java ++java/awt/print/PaintSetEnabledDeadlock/PaintSetEnabledDeadlock.java ++java/awt/print/PrinterJob/CheckAccess.java ++java/awt/print/PrinterJob/CheckPrivilege.java ++java/awt/print/PrinterJob/CustomPrintService/SetPrintServiceTest.java ++java/awt/print/PrinterJob/DeviceScale.java ++java/awt/print/PrinterJob/EmptyFill.java ++java/awt/print/PrinterJob/ExceptionTest.java ++java/awt/print/PrinterJob/GetMediasTest.java ++java/awt/print/PrinterJob/GlyphPositions.java ++java/awt/print/PrinterJob/HeadlessPrintingTest.java ++java/awt/print/PrinterJob/ImagePrinting/NullClipARGB.java ++java/awt/print/PrinterJob/NullGetName.java ++java/awt/print/PrinterJob/PaintText.java ++java/awt/print/PrinterJob/PrintCrashTest.java ++java/awt/print/PrinterJob/PrintTextPane.java ++java/awt/print/PrinterJob/PrintToDir.java ++java/awt/print/PrinterJob/PrtException.java ++java/awt/print/PrinterJob/PSQuestionMark.java ++java/awt/print/PrinterJob/PSWindingRule.java ++java/awt/print/PrinterJob/RemoveListener.java ++java/awt/print/PrinterJob/SameService.java ++java/awt/print/PrintServicesSecurityManager.java ++java/awt/Robot/AcceptExtraMouseButtons/AcceptExtraMouseButtons.java ++java/awt/Robot/CtorTest/CtorTest.java ++java/awt/Robot/ModifierRobotKey/ModifierRobotKeyTest.java 
++java/awt/Robot/RobotExtraButton/RobotExtraButton.java ++java/awt/Robot/WaitForIdleSyncroizedOnString/WaitForIdleSyncroizedOnString.java ++java/awt/ScrollPane/bug8077409Test.java ++java/awt/ScrollPane/ScrollPanePreferredSize/ScrollPanePreferredSize.java ++java/awt/ScrollPane/ScrollPaneValidateTest.java ++java/awt/security/Permissions.java ++java/awt/SplashScreen/MultiResolutionSplash/MultiResolutionSplashTest.java ++java/awt/TextArea/DisposeTest/TestDispose.java ++java/awt/TextArea/Mixing/TextAreaMixing.java ++java/awt/TextArea/ScrollbarIntersectionTest/ScrollbarIntersectionTest.java ++java/awt/TextArea/TextAreaCaretVisibilityTest/bug7129742.java ++java/awt/TextArea/TextAreaEditing/TextAreaEditing.java ++java/awt/TextArea/TextAreaTwicePack/TextAreaTwicePack.java ++java/awt/TextArea/UsingWithMouse/SelectionAutoscrollTest.html ++java/awt/TextField/DisposeTest/TestDispose.java ++java/awt/TextField/SelectionInvisibleTest/SelectionInvisibleTest.java ++java/awt/Toolkit/AutoShutdown/EventQueuePush/EventQueuePushAutoshutdown.sh ++java/awt/Toolkit/AutoShutdown/ShowExitTest/ShowExitTest.sh ++java/awt/Toolkit/BadDisplayTest/BadDisplayTest.java ++java/awt/Toolkit/DesktopProperties/rfe4758438.java ++java/awt/Toolkit/DisplayChangesException/DisplayChangesException.java ++java/awt/Toolkit/DynamicLayout/bug7172833.java ++java/awt/Toolkit/GetImage/bug8078165.java ++java/awt/Toolkit/Headless/AWTEventListener/AWTListener.java ++java/awt/Toolkit/Headless/ExceptionContract/ExceptionContract.java ++java/awt/Toolkit/Headless/GetPrintJob/GetPrintJobHeadless.java ++java/awt/Toolkit/Headless/GetPrintJob/GetPrintJob.java ++java/awt/Toolkit/Headless/HeadlessToolkit.java ++java/awt/Toolkit/HeadlessTray/HeadlessTray.java ++java/awt/Toolkit/Headless/WrappedToolkitTest/WrappedToolkitTest.sh ++java/awt/Toolkit/LoadAWTCrashTest/LoadAWTCrashTest.java ++java/awt/Toolkit/LockingKeyStateTest/LockingKeyStateTest.java ++java/awt/Toolkit/RealSync/RealSyncOnEDT.java ++java/awt/Toolkit/RealSync/Test.java 
++java/awt/Toolkit/ScreenInsetsTest/ScreenInsetsTest.java ++java/awt/Toolkit/SecurityTest/SecurityTest2.java ++java/awt/Toolkit/ToolkitPropertyTest/bug7129133.java ++java/awt/Toolkit/ToolkitPropertyTest/SystemPropTest_1.java ++java/awt/Toolkit/ToolkitPropertyTest/SystemPropTest_2.java ++java/awt/Toolkit/ToolkitPropertyTest/SystemPropTest_3.java ++java/awt/Toolkit/ToolkitPropertyTest/SystemPropTest_4.java ++java/awt/Toolkit/ToolkitPropertyTest/SystemPropTest_5.java ++java/awt/Toolkit/ToolkitPropertyTest/ToolkitPropertyTest_Disable.java ++java/awt/Toolkit/ToolkitPropertyTest/ToolkitPropertyTest_Enable.java ++java/awt/TrayIcon/8072769/bug8072769.java ++java/awt/TrayIcon/ActionCommand/ActionCommand.java ++java/awt/TrayIcon/ActionEventMask/ActionEventMask.java ++java/awt/TrayIcon/CtorTest/CtorTest.java ++java/awt/TrayIcon/GetTrayIconsTest/GetTrayIcons.java ++java/awt/TrayIcon/InterJVMTest/InterJVM.java ++java/awt/TrayIcon/ModalityTest/ModalityTest.java ++java/awt/TrayIcon/MouseEventMask/MouseEventMaskTest.java ++java/awt/TrayIcon/MouseMovedTest/MouseMovedTest.java ++java/awt/TrayIcon/PopupMenuLeakTest/PopupMenuLeakTest.java ++java/awt/TrayIcon/PropertyChangeListenerTest.java ++java/awt/TrayIcon/SecurityCheck/FunctionalityCheck/FunctionalityCheck.java ++java/awt/TrayIcon/SecurityCheck/NoPermissionTest/NoPermissionTest.java ++java/awt/TrayIcon/SecurityCheck/PermissionTest/PermissionTest.java ++java/awt/TrayIcon/SystemTrayInstance/SystemTrayInstanceTest.java ++java/awt/TrayIcon/TrayIconAddTest/TrayIconAddTest.java ++java/awt/TrayIcon/TrayIconEventModifiers/TrayIconEventModifiersTest.java ++java/awt/TrayIcon/TrayIconEvents/TrayIconEventsTest.java ++java/awt/TrayIcon/TrayIconMethodsTest/TrayIconMethodsTest.java ++java/awt/TrayIcon/TrayIconMouseTest/TrayIconMouseTest.java ++java/awt/TrayIcon/TrayIconPopup/TrayIconPopupTest.java ++java/awt/TrayIcon/TrayIconRemoveTest/TrayIconRemoveTest.java ++java/awt/TrayIcon/TrayIconSizeTest/TrayIconSizeTest.java 
++java/awt/Window/8027025/Test8027025.java ++java/awt/Window/AlwaysOnTop/AlwaysOnTopEvenOfWindow.java ++java/awt/Window/AlwaysOnTop/AlwaysOnTopFieldTest.java ++java/awt/Window/AlwaysOnTop/AutoTestOnTop.java ++java/awt/Window/AlwaysOnTop/SyncAlwaysOnTopFieldTest.java ++java/awt/Window/AlwaysOnTop/TestAlwaysOnTopBeforeShow.java ++java/awt/Window/BackgroundIsNotUpdated/BackgroundIsNotUpdated.java ++java/awt/Window/GetWindowsTest/GetWindowsTest.java ++java/awt/Window/Grab/GrabTest.java ++java/awt/Window/GrabSequence/GrabSequence.java ++java/awt/Window/HandleWindowDestroyTest/HandleWindowDestroyTest.html ++java/awt/Window/LocationByPlatform/LocationByPlatformTest.java ++java/awt/Window/MaximizeOffscreen/MaximizeOffscreenTest.java ++java/awt/Window/OwnedWindowsLeak/OwnedWindowsLeak.java ++java/awt/Window/OwnedWindowsSerialization/OwnedWindowsSerialization.java ++java/awt/Window/PropertyChangeListenerLockSerialization/PropertyChangeListenerLockSerialization.java ++java/awt/Window/SetBackgroundNPE/SetBackgroundNPE.java ++java/awt/Window/setLocRelativeTo/SetLocationRelativeToTest.java ++java/awt/Window/ShapedAndTranslucentWindows/FocusAWTTest.java ++java/awt/Window/ShapedAndTranslucentWindows/SetShapeAndClick.java ++java/awt/Window/ShapedAndTranslucentWindows/SetShapeDynamicallyAndClick.java ++java/awt/Window/ShapedAndTranslucentWindows/SetShape.java ++java/awt/Window/ShapedAndTranslucentWindows/ShapedByAPI.java ++java/awt/Window/ShapedAndTranslucentWindows/Shaped.java ++java/awt/Window/ShapedAndTranslucentWindows/ShapedTranslucent.java ++java/awt/Window/ShapedAndTranslucentWindows/ShapedTranslucentWindowClick.java ++java/awt/Window/ShapedAndTranslucentWindows/StaticallyShaped.java ++java/awt/Window/ShapedAndTranslucentWindows/TranslucentChoice.java ++java/awt/Window/ShapedAndTranslucentWindows/Translucent.java ++java/awt/Window/ShapedAndTranslucentWindows/TranslucentWindowClick.java ++java/awt/Window/TopLevelLocation/TopLevelLocation.java 
++java/awt/Window/WindowClosedEvents/WindowClosedEventOnDispose.java ++java/awt/Window/WindowDeadlockTest/WindowDeadlockTest.java ++java/awt/Window/WindowGCInFullScreen/WindowGCInFullScreen.java ++java/awt/Window/WindowJumpingTest/WindowJumpingTest.java ++java/awt/Window/WindowsLeak/WindowsLeak.java ++java/awt/Window/WindowType/WindowType.java ++java/awt/WMSpecificTests/Metacity/FullscreenDialogModality.java ++java/awt/WMSpecificTests/Mutter/MutterMaximizeTest.java ++java/awt/xembed/server/RunTestXEmbed.java ++java/beans/XMLEncoder/java_awt_AWTKeyStroke.java ++java/beans/XMLEncoder/java_awt_BasicStroke.java ++java/beans/XMLEncoder/java_awt_BorderLayout.java ++java/beans/XMLEncoder/java_awt_CardLayout.java ++java/beans/XMLEncoder/java_awt_Color.java ++java/beans/XMLEncoder/java_awt_Component.java ++java/beans/XMLEncoder/java_awt_Cursor.java ++java/beans/XMLEncoder/java_awt_Dimension.java ++java/beans/XMLEncoder/java_awt_Font.java ++java/beans/XMLEncoder/java_awt_geom_AffineTransform.java ++java/beans/XMLEncoder/java_awt_GradientPaint.java ++java/beans/XMLEncoder/java_awt_GridBagConstraints.java ++java/beans/XMLEncoder/java_awt_GridBagLayout.java ++java/beans/XMLEncoder/java_awt_Insets.java ++java/beans/XMLEncoder/java_awt_LinearGradientPaint.java ++java/beans/XMLEncoder/java_awt_MenuShortcut.java ++java/beans/XMLEncoder/java_awt_Point.java ++java/beans/XMLEncoder/java_awt_RadialGradientPaint.java ++java/beans/XMLEncoder/java_awt_Rectangle.java ++java/beans/XMLEncoder/java_awt_ScrollPane.java ++java/beans/XMLEncoder/javax_swing_border_BevelBorder.java ++java/beans/XMLEncoder/javax_swing_border_CompoundBorder.java ++java/beans/XMLEncoder/javax_swing_border_EmptyBorder.java ++java/beans/XMLEncoder/javax_swing_border_EtchedBorder.java ++java/beans/XMLEncoder/javax_swing_border_LineBorder.java ++java/beans/XMLEncoder/javax_swing_border_MatteBorder.java ++java/beans/XMLEncoder/javax_swing_border_SoftBevelBorder.java 
++java/beans/XMLEncoder/javax_swing_border_StrokeBorder.java ++java/beans/XMLEncoder/javax_swing_border_TitledBorder.java ++java/beans/XMLEncoder/javax_swing_Box_Filler.java ++java/beans/XMLEncoder/javax_swing_Box.java ++java/beans/XMLEncoder/javax_swing_BoxLayout.java ++java/beans/XMLEncoder/javax_swing_DefaultCellEditor.java ++java/beans/XMLEncoder/javax_swing_JButton.java ++java/beans/XMLEncoder/javax_swing_JLayeredPane.java ++java/beans/XMLEncoder/javax_swing_JSplitPane.java ++java/beans/XMLEncoder/javax_swing_JTree.java ++java/beans/XMLEncoder/javax_swing_KeyStroke.java ++java/beans/XMLEncoder/javax_swing_OverlayLayout.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_BevelBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_CompoundBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_EmptyBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_EtchedBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_LineBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_MatteBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_BorderUIResource_TitledBorderUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_ColorUIResource.java ++java/beans/XMLEncoder/javax_swing_plaf_FontUIResource.java ++java/beans/XMLEncoder/javax_swing_tree_DefaultTreeModel.java ++java/beans/XMLEncoder/javax_swing_tree_TreePath.java ++java/beans/XMLEncoder/sun_swing_PrintColorUIResource.java ++java/lang/SecurityManager/NoAWT.java ++javax/sound/midi/Devices/ClosedReceiver.java ++javax/sound/midi/Devices/InitializationHang.java ++javax/sound/midi/Devices/MidiDeviceGetReceivers.java ++javax/sound/midi/Devices/MidiIO.java ++javax/sound/midi/Devices/MidiOutGetMicrosecondPositionBug.java ++javax/sound/midi/Devices/OpenClose.java ++javax/sound/midi/Devices/ReceiverTransmitterAvailable.java ++javax/sound/midi/Devices/Reopen.java 
++javax/sound/midi/File/SMFCp037.java ++javax/sound/midi/File/SMFParserBreak.java ++javax/sound/midi/File/SMPTESequence.java ++javax/sound/midi/File/WriteRealTimeMessageNPE.java ++javax/sound/midi/Gervill/AudioFloatConverter/GetFormat.java ++javax/sound/midi/Gervill/AudioFloatConverter/ToFloatArray.java ++javax/sound/midi/Gervill/AudioFloatFormatConverter/SkipTest.java ++javax/sound/midi/Gervill/AudioFloatInputStream/Available.java ++javax/sound/midi/Gervill/AudioFloatInputStream/Close.java ++javax/sound/midi/Gervill/AudioFloatInputStream/GetFormat.java ++javax/sound/midi/Gervill/AudioFloatInputStream/GetFrameLength.java ++javax/sound/midi/Gervill/AudioFloatInputStream/MarkSupported.java ++javax/sound/midi/Gervill/AudioFloatInputStream/ReadFloatArrayIntInt.java ++javax/sound/midi/Gervill/AudioFloatInputStream/ReadFloatArray.java ++javax/sound/midi/Gervill/AudioFloatInputStream/Read.java ++javax/sound/midi/Gervill/AudioFloatInputStream/Reset.java ++javax/sound/midi/Gervill/AudioFloatInputStream/Skip.java ++javax/sound/midi/Gervill/DLSSoundbankReader/TestGetSoundbankFile.java ++javax/sound/midi/Gervill/DLSSoundbankReader/TestGetSoundbankInputStream2.java ++javax/sound/midi/Gervill/DLSSoundbankReader/TestGetSoundbankInputStream.java ++javax/sound/midi/Gervill/DLSSoundbankReader/TestGetSoundbankUrl.java ++javax/sound/midi/Gervill/EmergencySoundbank/TestCreateSoundbank.java ++javax/sound/midi/Gervill/ModelByteBuffer/GetInputStream.java ++javax/sound/midi/Gervill/ModelByteBuffer/GetRoot.java ++javax/sound/midi/Gervill/ModelByteBuffer/LoadAll.java ++javax/sound/midi/Gervill/ModelByteBuffer/Load.java ++javax/sound/midi/Gervill/ModelByteBuffer/NewModelByteBufferByteArrayIntInt.java ++javax/sound/midi/Gervill/ModelByteBuffer/NewModelByteBufferByteArray.java ++javax/sound/midi/Gervill/ModelByteBuffer/NewModelByteBufferFile.java ++javax/sound/midi/Gervill/ModelByteBuffer/NewModelByteBufferFileLongLong.java 
++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/Available.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/Close.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/MarkReset.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/MarkSupported.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/ReadByteIntInt.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/ReadByte.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/Read.java ++javax/sound/midi/Gervill/ModelByteBuffer/RandomFileInputStream/Skip.java ++javax/sound/midi/Gervill/ModelByteBuffer/SubbufferLong.java ++javax/sound/midi/Gervill/ModelByteBuffer/SubbufferLongLongBoolean.java ++javax/sound/midi/Gervill/ModelByteBuffer/SubbufferLongLong.java ++javax/sound/midi/Gervill/ModelByteBuffer/Unload.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/GetAttenuation.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/GetChannels.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/GetLoopLength.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/GetLoopStart.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/GetPitchCorrection.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/NewModelByteBufferWavetableModelByteBufferAudioFormatFloat.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/NewModelByteBufferWavetableModelByteBufferAudioFormat.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/NewModelByteBufferWavetableModelByteBufferFloat.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/NewModelByteBufferWavetableModelByteBuffer.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/Open.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/OpenStream.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/Set8BitExtensionBuffer.java ++javax/sound/midi/Gervill/ModelByteBufferWavetable/SetLoopType.java 
++javax/sound/midi/Gervill/ModelByteBuffer/WriteTo.java ++javax/sound/midi/Gervill/ModelDestination/NewModelDestination.java ++javax/sound/midi/Gervill/ModelDestination/NewModelDestinationModelIdentifier.java ++javax/sound/midi/Gervill/ModelDestination/SetIdentifier.java ++javax/sound/midi/Gervill/ModelDestination/SetTransform.java ++javax/sound/midi/Gervill/ModelIdentifier/EqualsObject.java ++javax/sound/midi/Gervill/ModelIdentifier/NewModelIdentifierStringInt.java ++javax/sound/midi/Gervill/ModelIdentifier/NewModelIdentifierString.java ++javax/sound/midi/Gervill/ModelIdentifier/NewModelIdentifierStringStringInt.java ++javax/sound/midi/Gervill/ModelIdentifier/NewModelIdentifierStringString.java ++javax/sound/midi/Gervill/ModelIdentifier/SetInstance.java ++javax/sound/midi/Gervill/ModelIdentifier/SetObject.java ++javax/sound/midi/Gervill/ModelIdentifier/SetVariable.java ++javax/sound/midi/Gervill/ModelPerformer/GetOscillators.java ++javax/sound/midi/Gervill/ModelPerformer/SetConnectionBlocks.java ++javax/sound/midi/Gervill/ModelPerformer/SetDefaultConnectionsEnabled.java ++javax/sound/midi/Gervill/ModelPerformer/SetExclusiveClass.java ++javax/sound/midi/Gervill/ModelPerformer/SetKeyFrom.java ++javax/sound/midi/Gervill/ModelPerformer/SetKeyTo.java ++javax/sound/midi/Gervill/ModelPerformer/SetName.java ++javax/sound/midi/Gervill/ModelPerformer/SetSelfNonExclusive.java ++javax/sound/midi/Gervill/ModelPerformer/SetVelFrom.java ++javax/sound/midi/Gervill/ModelPerformer/SetVelTo.java ++javax/sound/midi/Gervill/ModelSource/NewModelSource.java ++javax/sound/midi/Gervill/ModelSource/NewModelSourceModelIdentifierBooleanBooleanInt.java ++javax/sound/midi/Gervill/ModelSource/NewModelSourceModelIdentifierBooleanBoolean.java ++javax/sound/midi/Gervill/ModelSource/NewModelSourceModelIdentifierBoolean.java ++javax/sound/midi/Gervill/ModelSource/NewModelSourceModelIdentifier.java ++javax/sound/midi/Gervill/ModelSource/NewModelSourceModelIdentifierModelTransform.java 
++javax/sound/midi/Gervill/ModelSource/SetIdentifier.java ++javax/sound/midi/Gervill/ModelSource/SetTransform.java ++javax/sound/midi/Gervill/ModelStandardIndexedDirector/ModelStandardIndexedDirectorTest.java ++javax/sound/midi/Gervill/ModelStandardTransform/NewModelStandardTransformBooleanBooleanInt.java ++javax/sound/midi/Gervill/ModelStandardTransform/NewModelStandardTransformBooleanBoolean.java ++javax/sound/midi/Gervill/ModelStandardTransform/NewModelStandardTransformBoolean.java ++javax/sound/midi/Gervill/ModelStandardTransform/NewModelStandardTransform.java ++javax/sound/midi/Gervill/ModelStandardTransform/SetDirection.java ++javax/sound/midi/Gervill/ModelStandardTransform/SetPolarity.java ++javax/sound/midi/Gervill/ModelStandardTransform/SetTransform.java ++javax/sound/midi/Gervill/ModelStandardTransform/TransformAbsolute.java ++javax/sound/midi/Gervill/ModelStandardTransform/TransformConcave.java ++javax/sound/midi/Gervill/ModelStandardTransform/TransformConvex.java ++javax/sound/midi/Gervill/ModelStandardTransform/TransformLinear.java ++javax/sound/midi/Gervill/ModelStandardTransform/TransformSwitch.java ++javax/sound/midi/Gervill/RiffReaderWriter/Available.java ++javax/sound/midi/Gervill/RiffReaderWriter/Close.java ++javax/sound/midi/Gervill/RiffReaderWriter/GetFilePointer.java ++javax/sound/midi/Gervill/RiffReaderWriter/GetSize.java ++javax/sound/midi/Gervill/RiffReaderWriter/HasNextChunk.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadByteArrayIntInt.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadByte.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadInt.java ++javax/sound/midi/Gervill/RiffReaderWriter/Read.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadLong.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadShort.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadString.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadUnsignedByte.java ++javax/sound/midi/Gervill/RiffReaderWriter/ReadUnsignedInt.java 
++javax/sound/midi/Gervill/RiffReaderWriter/ReadUnsignedShort.java ++javax/sound/midi/Gervill/RiffReaderWriter/Skip.java ++javax/sound/midi/Gervill/RiffReaderWriter/WriteOutputStream.java ++javax/sound/midi/Gervill/SF2SoundbankReader/TestGetSoundbankFile.java ++javax/sound/midi/Gervill/SF2SoundbankReader/TestGetSoundbankInputStream2.java ++javax/sound/midi/Gervill/SF2SoundbankReader/TestGetSoundbankInputStream.java ++javax/sound/midi/Gervill/SF2SoundbankReader/TestGetSoundbankUrl.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelInstrumentIntIntIntIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelInstrumentIntIntIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelInstrumentIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelInstrument.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerArrayIntIntIntIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerArrayIntIntIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerArrayIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerArray.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerIntIntIntIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerIntIntIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformerIntInt.java ++javax/sound/midi/Gervill/SimpleInstrument/AddModelPerformer.java ++javax/sound/midi/Gervill/SimpleInstrument/Clear.java ++javax/sound/midi/Gervill/SimpleInstrument/SetName.java ++javax/sound/midi/Gervill/SimpleInstrument/SetPatch.java ++javax/sound/midi/Gervill/SimpleSoundbank/AddInstrument.java ++javax/sound/midi/Gervill/SimpleSoundbank/AddResource.java ++javax/sound/midi/Gervill/SimpleSoundbank/GetInstrument.java ++javax/sound/midi/Gervill/SimpleSoundbank/RemoveInstrument.java ++javax/sound/midi/Gervill/SimpleSoundbank/SetDescription.java ++javax/sound/midi/Gervill/SimpleSoundbank/SetName.java 
++javax/sound/midi/Gervill/SimpleSoundbank/SetVendor.java ++javax/sound/midi/Gervill/SimpleSoundbank/SetVersion.java ++javax/sound/midi/Gervill/SoftAudioBuffer/Array.java ++javax/sound/midi/Gervill/SoftAudioBuffer/Clear.java ++javax/sound/midi/Gervill/SoftAudioBuffer/Get.java ++javax/sound/midi/Gervill/SoftAudioBuffer/NewSoftAudioBuffer.java ++javax/sound/midi/Gervill/SoftAudioSynthesizer/GetFormat.java ++javax/sound/midi/Gervill/SoftAudioSynthesizer/GetPropertyInfo.java ++javax/sound/midi/Gervill/SoftAudioSynthesizer/Open.java ++javax/sound/midi/Gervill/SoftAudioSynthesizer/OpenStream.java ++javax/sound/midi/Gervill/SoftChannel/AllNotesOff.java ++javax/sound/midi/Gervill/SoftChannel/AllSoundOff.java ++javax/sound/midi/Gervill/SoftChannel/ChannelPressure.java ++javax/sound/midi/Gervill/SoftChannel/Controller.java ++javax/sound/midi/Gervill/SoftChannel/LocalControl.java ++javax/sound/midi/Gervill/SoftChannel/Mono.java ++javax/sound/midi/Gervill/SoftChannel/Mute.java ++javax/sound/midi/Gervill/SoftChannel/NoteOff2.java ++javax/sound/midi/Gervill/SoftChannel/NoteOff.java ++javax/sound/midi/Gervill/SoftChannel/NoteOn.java ++javax/sound/midi/Gervill/SoftChannel/NoteOverFlowTest2.java ++javax/sound/midi/Gervill/SoftChannel/NoteOverFlowTest.java ++javax/sound/midi/Gervill/SoftChannel/Omni.java ++javax/sound/midi/Gervill/SoftChannel/PitchBend.java ++javax/sound/midi/Gervill/SoftChannel/PolyPressure.java ++javax/sound/midi/Gervill/SoftChannel/ProgramAndBankChange.java ++javax/sound/midi/Gervill/SoftChannel/ProgramChange.java ++javax/sound/midi/Gervill/SoftChannel/ResetAllControllers.java ++javax/sound/midi/Gervill/SoftChannel/Solo.java ++javax/sound/midi/Gervill/SoftCubicResampler/Interpolate.java ++javax/sound/midi/Gervill/SoftFilter/TestProcessAudio.java ++javax/sound/midi/Gervill/SoftLanczosResampler/Interpolate.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_mix.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_mix_mono.java 
++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_mix_mono_overdrive.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_mix_overdrive.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_normal.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_normal_mono.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_overdrive.java ++javax/sound/midi/Gervill/SoftLimiter/ProcessAudio_replace_overdrive_mono.java ++javax/sound/midi/Gervill/SoftLinearResampler2/Interpolate.java ++javax/sound/midi/Gervill/SoftLinearResampler/Interpolate.java ++javax/sound/midi/Gervill/SoftLowFrequencyOscillator/TestProcessControlLogic.java ++javax/sound/midi/Gervill/SoftPointResampler/Interpolate.java ++javax/sound/midi/Gervill/SoftProvider/GetDevice.java ++javax/sound/midi/Gervill/SoftReceiver/Close.java ++javax/sound/midi/Gervill/SoftReceiver/GetMidiDevice.java ++javax/sound/midi/Gervill/SoftReceiver/Send_ActiveSense.java ++javax/sound/midi/Gervill/SoftReceiver/Send_AllNotesOff.java ++javax/sound/midi/Gervill/SoftReceiver/Send_AllSoundOff.java ++javax/sound/midi/Gervill/SoftReceiver/Send_ChannelPressure.java ++javax/sound/midi/Gervill/SoftReceiver/Send_Controller.java ++javax/sound/midi/Gervill/SoftReceiver/Send_Mono.java ++javax/sound/midi/Gervill/SoftReceiver/Send_NoteOff.java ++javax/sound/midi/Gervill/SoftReceiver/Send_NoteOn_AllChannels.java ++javax/sound/midi/Gervill/SoftReceiver/Send_NoteOn_Delayed.java ++javax/sound/midi/Gervill/SoftReceiver/Send_NoteOn.java ++javax/sound/midi/Gervill/SoftReceiver/Send_NoteOn_Multiple.java ++javax/sound/midi/Gervill/SoftReceiver/Send_Omni.java ++javax/sound/midi/Gervill/SoftReceiver/Send_PitchBend.java ++javax/sound/midi/Gervill/SoftReceiver/Send_PolyPressure.java ++javax/sound/midi/Gervill/SoftReceiver/Send_ProgramChange.java ++javax/sound/midi/Gervill/SoftReceiver/Send_ResetAllControllers.java ++javax/sound/midi/Gervill/SoftSincResampler/Interpolate.java 
++javax/sound/midi/Gervill/SoftSynthesizer/Close.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetAvailableInstruments2.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetAvailableInstruments.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetChannels.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetDefaultSoundbank.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetDeviceInfo.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetLatency.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetLoadedInstruments2.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetLoadedInstruments.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetMaxPolyphony.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetMaxReceivers.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetMaxTransmitters.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetMicrosecondPosition.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetPropertyInfo.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetReceiver2.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetReceiver.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetReceivers.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetTransmitter.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetTransmitters.java ++javax/sound/midi/Gervill/SoftSynthesizer/GetVoiceStatus.java ++javax/sound/midi/Gervill/SoftSynthesizer/ImplicitOpenClose.java ++javax/sound/midi/Gervill/SoftSynthesizer/IsOpen.java ++javax/sound/midi/Gervill/SoftSynthesizer/IsSoundbankSupported.java ++javax/sound/midi/Gervill/SoftSynthesizer/LoadAllInstruments.java ++javax/sound/midi/Gervill/SoftSynthesizer/LoadInstrument.java ++javax/sound/midi/Gervill/SoftSynthesizer/LoadInstruments.java ++javax/sound/midi/Gervill/SoftSynthesizer/Open.java ++javax/sound/midi/Gervill/SoftSynthesizer/OpenStream.java ++javax/sound/midi/Gervill/SoftSynthesizer/RemapInstrument.java ++javax/sound/midi/Gervill/SoftSynthesizer/TestDisableLoadDefaultSoundbank.java 
++javax/sound/midi/Gervill/SoftSynthesizer/TestPreciseTimestampRendering.java ++javax/sound/midi/Gervill/SoftSynthesizer/TestRender1.java ++javax/sound/midi/Gervill/SoftSynthesizer/UnloadAllInstruments.java ++javax/sound/midi/Gervill/SoftSynthesizer/UnloadInstrument.java ++javax/sound/midi/Gervill/SoftSynthesizer/UnloadInstruments.java ++javax/sound/midi/Gervill/SoftTuning/GetName.java ++javax/sound/midi/Gervill/SoftTuning/GetTuningInt.java ++javax/sound/midi/Gervill/SoftTuning/GetTuning.java ++javax/sound/midi/Gervill/SoftTuning/Load1.java ++javax/sound/midi/Gervill/SoftTuning/Load2.java ++javax/sound/midi/Gervill/SoftTuning/Load4.java ++javax/sound/midi/Gervill/SoftTuning/Load5.java ++javax/sound/midi/Gervill/SoftTuning/Load6.java ++javax/sound/midi/Gervill/SoftTuning/Load7.java ++javax/sound/midi/Gervill/SoftTuning/Load8.java ++javax/sound/midi/Gervill/SoftTuning/Load9.java ++javax/sound/midi/Gervill/SoftTuning/NewSoftTuningByteArray.java ++javax/sound/midi/Gervill/SoftTuning/NewSoftTuning.java ++javax/sound/midi/Gervill/SoftTuning/NewSoftTuningPatchByteArray.java ++javax/sound/midi/Gervill/SoftTuning/NewSoftTuningPatch.java ++javax/sound/midi/Gervill/SoftTuning/RealTimeTuning.java ++javax/sound/midi/MetaMessage/MetaMessageClone.java ++javax/sound/midi/MidiDeviceConnectors/TestAllDevices.java ++javax/sound/midi/MidiSystem/DefaultDevices.java ++javax/sound/midi/MidiSystem/DefaultProperties.java ++javax/sound/midi/MidiSystem/GetSequencer.java ++javax/sound/midi/MidiSystem/MidiFileTypeUniqueness.java ++javax/sound/midi/MidiSystem/ProviderCacheing.java ++javax/sound/midi/Sequence/GetMicrosecondLength.java ++javax/sound/midi/Sequence/MidiSMPTE.java ++javax/sound/midi/Sequencer/LoopIAE.java ++javax/sound/midi/Sequencer/Looping.java ++javax/sound/midi/Sequencer/MetaCallback.java ++javax/sound/midi/Sequencer/Recording.java ++javax/sound/midi/Sequencer/SeqRecordDoesNotCopy.java ++javax/sound/midi/Sequencer/SeqRecordsRealTimeEvents.java 
++javax/sound/midi/Sequencer/SeqStartRecording.java ++javax/sound/midi/Sequencer/SequencerCacheValues.java ++javax/sound/midi/Sequencer/SequencerImplicitSynthOpen.java ++javax/sound/midi/Sequencer/SequencerSetMuteSolo.java ++javax/sound/midi/Sequencer/SequencerState.java ++javax/sound/midi/Sequencer/SetTickPosition.java ++javax/sound/midi/Sequencer/TickLength.java ++javax/sound/midi/Sequence/SMPTEDuration.java ++javax/sound/midi/ShortMessage/FastShortMessage2.java ++javax/sound/midi/ShortMessage/FastShortMessage.java ++javax/sound/midi/Soundbanks/ExtraCharInSoundbank.java ++javax/sound/midi/Soundbanks/GetSoundBankIOException.java ++javax/sound/midi/Synthesizer/AsynchronousMidiChannel.java ++javax/sound/midi/Synthesizer/bug4685396.java ++javax/sound/midi/Synthesizer/SynthesizerGetLatency.java ++javax/sound/midi/SysexMessage/SendRawSysexMessage.java ++javax/sound/midi/Track/bug6416024.java ++javax/sound/midi/Track/TrackAddSameTick.java ++javax/sound/midi/Transmitter/bug6415669.java ++javax/sound/sampled/AudioFileFormat/AudioFileFormatToString.java ++javax/sound/sampled/AudioFileFormat/Properties.java ++javax/sound/sampled/AudioFileFormat/TypeEquals.java ++javax/sound/sampled/AudioFormat/AudioFormatBitSize.java ++javax/sound/sampled/AudioFormat/EncodingEquals.java ++javax/sound/sampled/AudioFormat/Matches_NOT_SPECIFIED.java ++javax/sound/sampled/AudioFormat/PCM_FLOAT_support.java ++javax/sound/sampled/AudioFormat/Properties.java ++javax/sound/sampled/AudioInputStream/AISReadFraction.java ++javax/sound/sampled/AudioInputStream/bug6188860.java ++javax/sound/sampled/AudioSystem/AudioFileTypes/AudioFileTypeUniqueness.java ++javax/sound/sampled/AudioSystem/AudioFileTypes/ShowAudioFileTypes.java ++javax/sound/sampled/AudioSystem/DefaultMixers.java ++javax/sound/sampled/AudioSystem/DefaultProperties.java ++javax/sound/sampled/AudioSystem/ProviderCacheing.java ++javax/sound/sampled/Clip/AutoCloseTimeCheck.java ++javax/sound/sampled/Clip/bug5070081.java 
++javax/sound/sampled/Clip/ClipCloseLoss.java ++javax/sound/sampled/Clip/ClipFlushCrash.java ++javax/sound/sampled/Clip/ClipIsRunningAfterStop.java ++javax/sound/sampled/Clip/ClipSetPos.java ++javax/sound/sampled/Clip/Drain/ClipDrain.java ++javax/sound/sampled/Clip/Duration/ClipDuration.java ++javax/sound/sampled/Clip/Endpoint/ClipSetEndPoint.java ++javax/sound/sampled/Clip/IsRunningHang.java ++javax/sound/sampled/Clip/Open/ClipOpenBug.java ++javax/sound/sampled/Controls/CompoundControl/ToString.java ++javax/sound/sampled/Controls/FloatControl/FloatControlBug.java ++javax/sound/sampled/DataLine/DataLine_ArrayIndexOutOfBounds.java ++javax/sound/sampled/DataLine/DataLineInfoNegBufferSize.java ++javax/sound/sampled/DataLine/LineDefFormat.java ++javax/sound/sampled/DataLine/LongFramePosition.java ++javax/sound/sampled/DirectAudio/bug6372428.java ++javax/sound/sampled/DirectAudio/bug6400879.java ++javax/sound/sampled/FileReader/ReadersExceptions.java ++javax/sound/sampled/FileTypeExtension/FileTypeExtensionTest.java ++javax/sound/sampled/FileWriter/AlawEncoderSync.java ++javax/sound/sampled/FileWriter/WriterCloseInput.java ++javax/sound/sampled/LineEvent/LineInfoNPE.java ++javax/sound/sampled/Lines/16and32KHz/Has16and32KHz.java ++javax/sound/sampled/Lines/BufferSizeCheck.java ++javax/sound/sampled/Lines/ChangingBuffer.java ++javax/sound/sampled/Lines/ClipOpenException.java ++javax/sound/sampled/Lines/FrameSize/FrameSizeTest.java ++javax/sound/sampled/Lines/GetLine.java ++javax/sound/sampled/Lines/SDLwrite.java ++javax/sound/sampled/Lines/SourceDataLineDefaultBufferSizeCrash.java ++javax/sound/sampled/Lines/StopStart.java ++javax/sound/sampled/LinuxCrash/ClipLinuxCrash2.java ++javax/sound/sampled/LinuxCrash/ClipLinuxCrash.java ++javax/sound/sampled/LinuxCrash/SDLLinuxCrash.java ++javax/sound/sampled/Mixers/BogusMixers.java ++javax/sound/sampled/Mixers/BothEndiansAndSigns.java ++javax/sound/sampled/Mixers/DisabledAssertionCrash.java 
++javax/sound/sampled/Mixers/NoSimpleInputDevice.java ++javax/sound/sampled/Mixers/PlugHwMonoAnd8bitAvailable.java ++javax/sound/sampled/Mixers/UnexpectedIAE.java ++javax/sound/sampled/spi/AudioFileReader/Aiff12bit.java ++javax/sound/sampled/spi/AudioFileReader/AIFFCp037.java ++javax/sound/sampled/spi/AudioFileReader/AIFFLargeHeader.java ++javax/sound/sampled/spi/AudioFileReader/AuNotSpecified.java ++javax/sound/sampled/spi/AudioFileReader/AuZeroLength.java ++javax/sound/sampled/spi/AudioFileReader/OpenWaveFile.java ++javax/sound/sampled/spi/AudioFileWriter/AiffSampleRate.java ++javax/sound/sampled/spi/AudioFileWriter/AUwithULAW.java ++javax/sound/sampled/spi/AudioFileWriter/RIFFHeader.java ++javax/sound/sampled/spi/AudioFileWriter/WaveBigEndian.java ++javax/sound/sampled/spi/AudioFileWriter/WriteAuUnspecifiedLength.java ++javax/sound/sampled/spi/FormatConversionProvider/AlawUlaw.java ++javax/swing/AbstractButton/6711682/bug6711682.java ++javax/swing/AbstractButton/AnimatedIcon/AnimatedIcon.java ++javax/swing/AncestorNotifier/7193219/bug7193219.java ++javax/swing/border/Test4120351.java ++javax/swing/border/Test4124729.java ++javax/swing/border/Test4856008.java ++javax/swing/border/Test6461042.java ++javax/swing/border/Test6625450.java ++javax/swing/border/Test6978482.java ++javax/swing/border/Test6981576.java ++javax/swing/border/Test7022041.java ++javax/swing/border/Test7034614.java ++javax/swing/border/Test7149090.java ++javax/swing/DataTransfer/6456844/bug6456844.java ++javax/swing/DataTransfer/8059739/bug8059739.java ++javax/swing/dnd/7171812/bug7171812.java ++javax/swing/GroupLayout/6613904/bug6613904.java ++javax/swing/GroupLayout/7071166/bug7071166.java ++javax/swing/Headless/HeadlessAbstractSpinnerModel.java ++javax/swing/Headless/HeadlessBox_Filler.java ++javax/swing/Headless/HeadlessBox.java ++javax/swing/Headless/HeadlessCellRendererPane.java ++javax/swing/Headless/HeadlessDefaultListCellRenderer.java 
++javax/swing/Headless/HeadlessDefaultListCellRenderer_UIResource.java ++javax/swing/Headless/HeadlessGrayFilter.java ++javax/swing/Headless/HeadlessJApplet.java ++javax/swing/Headless/HeadlessJButton.java ++javax/swing/Headless/HeadlessJCheckBox.java ++javax/swing/Headless/HeadlessJCheckBoxMenuItem.java ++javax/swing/Headless/HeadlessJColorChooser.java ++javax/swing/Headless/HeadlessJComboBox.java ++javax/swing/Headless/HeadlessJComponent.java ++javax/swing/Headless/HeadlessJDesktopPane.java ++javax/swing/Headless/HeadlessJDialog.java ++javax/swing/Headless/HeadlessJEditorPane.java ++javax/swing/Headless/HeadlessJFileChooser.java ++javax/swing/Headless/HeadlessJFormattedTextField.java ++javax/swing/Headless/HeadlessJFrame.java ++javax/swing/Headless/HeadlessJInternalFrame.java ++javax/swing/Headless/HeadlessJInternalFrame_JDesktopIcon.java ++javax/swing/Headless/HeadlessJLabel.java ++javax/swing/Headless/HeadlessJLayeredPane.java ++javax/swing/Headless/HeadlessJList.java ++javax/swing/Headless/HeadlessJMenuBar.java ++javax/swing/Headless/HeadlessJMenuItem.java ++javax/swing/Headless/HeadlessJMenu.java ++javax/swing/Headless/HeadlessJOptionPane.java ++javax/swing/Headless/HeadlessJPanel.java ++javax/swing/Headless/HeadlessJPasswordField.java ++javax/swing/Headless/HeadlessJPopupMenu.java ++javax/swing/Headless/HeadlessJPopupMenu_Separator.java ++javax/swing/Headless/HeadlessJProgressBar.java ++javax/swing/Headless/HeadlessJRadioButton.java ++javax/swing/Headless/HeadlessJRadioButtonMenuItem.java ++javax/swing/Headless/HeadlessJRootPane.java ++javax/swing/Headless/HeadlessJScrollBar.java ++javax/swing/Headless/HeadlessJScrollPane.java ++javax/swing/Headless/HeadlessJSeparator.java ++javax/swing/Headless/HeadlessJSlider.java ++javax/swing/Headless/HeadlessJSpinner.java ++javax/swing/Headless/HeadlessJSplitPane.java ++javax/swing/Headless/HeadlessJTabbedPane.java ++javax/swing/Headless/HeadlessJTable.java ++javax/swing/Headless/HeadlessJTextArea.java 
++javax/swing/Headless/HeadlessJTextField.java ++javax/swing/Headless/HeadlessJTextPane.java ++javax/swing/Headless/HeadlessJToggleButton.java ++javax/swing/Headless/HeadlessJToolBar.java ++javax/swing/Headless/HeadlessJToolBar_Separator.java ++javax/swing/Headless/HeadlessJToolTip.java ++javax/swing/Headless/HeadlessJTree.java ++javax/swing/Headless/HeadlessJViewport.java ++javax/swing/Headless/HeadlessJWindow.java ++javax/swing/Headless/HeadlessLookAndFeel.java ++javax/swing/Headless/HeadlessMenuSelectionManager.java ++javax/swing/Headless/HeadlessOverlayLayout.java ++javax/swing/Headless/HeadlessPopupFactory.java ++javax/swing/Headless/HeadlessScrollPaneLayout.java ++javax/swing/Headless/HeadlessSizeRequirements.java ++javax/swing/Headless/HeadlessSizeSequence.java ++javax/swing/Headless/HeadlessSpinnerListModel.java ++javax/swing/Headless/HeadlessSpinnerNumberModel.java ++javax/swing/Headless/HeadlessTimer.java ++javax/swing/JButton/4368790/bug4368790.java ++javax/swing/JButton/6604281/bug6604281.java ++javax/swing/JButton/JButtonPaintNPE/JButtonPaintNPE.java ++javax/swing/JCheckBox/8032667/bug8032667_image_diff.java ++javax/swing/JColorChooser/Test4165217.java ++javax/swing/JColorChooser/Test4177735.java ++javax/swing/JColorChooser/Test4193384.java ++javax/swing/JColorChooser/Test4234761.java ++javax/swing/JColorChooser/Test4461329.java ++javax/swing/JColorChooser/Test4711996.java ++javax/swing/JColorChooser/Test6199676.java ++javax/swing/JColorChooser/Test6524757.java ++javax/swing/JColorChooser/Test6541987.java ++javax/swing/JColorChooser/Test6559154.java ++javax/swing/JColorChooser/Test6707406.java ++javax/swing/JColorChooser/Test6827032.java ++javax/swing/JColorChooser/Test7194184.java ++javax/swing/JComboBox/4199622/bug4199622.java ++javax/swing/JComboBox/4515752/DefaultButtonTest.java ++javax/swing/JComboBox/4523758/bug4523758.java ++javax/swing/JComboBox/4743225/bug4743225.java ++javax/swing/JComboBox/6236162/bug6236162.java 
++javax/swing/JComboBox/6337518/bug6337518.java ++javax/swing/JComboBox/6406264/bug6406264.java ++javax/swing/JComboBox/6559152/bug6559152.java ++javax/swing/JComboBox/6607130/bug6607130.java ++javax/swing/JComboBox/6632953/bug6632953.java ++javax/swing/JComboBox/7031551/bug7031551.java ++javax/swing/JComboBox/7082443/bug7082443.java ++javax/swing/JComboBox/7195179/Test7195179.java ++javax/swing/JComboBox/8015300/Test8015300.java ++javax/swing/JComboBox/8019180/Test8019180.java ++javax/swing/JComboBox/8032878/bug8032878.java ++javax/swing/JComboBox/8033069/bug8033069NoScrollBar.java ++javax/swing/JComboBox/8033069/bug8033069ScrollBar.java ++javax/swing/JComboBox/8057893/bug8057893.java ++javax/swing/JComboBox/8072767/bug8072767.java ++javax/swing/JComboBox/8136998/bug8136998.java ++javax/swing/JComboBox/ConsumedKeyTest/ConsumedKeyTest.java ++javax/swing/JComboBox/ShowPopupAfterHidePopupTest/ShowPopupAfterHidePopupTest.java ++javax/swing/JComponent/4337267/bug4337267.java ++javax/swing/JComponent/6683775/bug6683775.java ++javax/swing/JComponent/6989617/bug6989617.java ++javax/swing/JComponent/7154030/bug7154030.java ++javax/swing/JComponent/8043610/bug8043610.java ++javax/swing/JDialog/6639507/bug6639507.java ++javax/swing/JDialog/WrongBackgroundColor/WrongBackgroundColor.java ++javax/swing/JEditorPane/4492274/bug4492274.java ++javax/swing/JEditorPane/6882559/bug6882559.java ++javax/swing/JEditorPane/6917744/bug6917744.java ++javax/swing/JEditorPane/8158734/bug8158734.java ++javax/swing/JEditorPane/8195095/ImageViewTest.java ++javax/swing/JEditorPane/bug4714674.java ++javax/swing/JFileChooser/4524490/bug4524490.java ++javax/swing/JFileChooser/4847375/bug4847375.java ++javax/swing/JFileChooser/6342301/bug6342301.java ++javax/swing/JFileChooser/6396844/TwentyThousandTest.java ++javax/swing/JFileChooser/6484091/bug6484091.java ++javax/swing/JFileChooser/6489130/bug6489130.java ++javax/swing/JFileChooser/6520101/bug6520101.java 
++javax/swing/JFileChooser/6550546/bug6550546.java ++javax/swing/JFileChooser/6570445/bug6570445.java ++javax/swing/JFileChooser/6688203/bug6688203.java ++javax/swing/JFileChooser/6713352/bug6713352.java ++javax/swing/JFileChooser/6738668/bug6738668.java ++javax/swing/JFileChooser/6741890/bug6741890.java ++javax/swing/JFileChooser/6817933/Test6817933.java ++javax/swing/JFileChooser/6840086/bug6840086.java ++javax/swing/JFileChooser/6868611/bug6868611.java ++javax/swing/JFileChooser/6945316/bug6945316.java ++javax/swing/JFileChooser/7036025/bug7036025.java ++javax/swing/JFileChooser/7199708/bug7199708.java ++javax/swing/JFileChooser/8002077/bug8002077.java ++javax/swing/JFileChooser/8013442/Test8013442.java ++javax/swing/JFileChooser/8021253/bug8021253.java ++javax/swing/JFileChooser/8046391/bug8046391.java ++javax/swing/JFileChooser/8062561/bug8062561.java ++javax/swing/JFileChooser/8080628/bug8080628.java ++javax/swing/JFormattedTextField/Test6462562.java ++javax/swing/JFrame/4962534/bug4962534.html ++javax/swing/JFrame/8016356/bug8016356.java ++javax/swing/JFrame/8255880/RepaintOnFrameIconifiedStateChangeTest.java ++javax/swing/JFrame/AlwaysOnTop/AlwaysOnTopImeTest.java ++javax/swing/JFrame/HangNonVolatileBuffer/HangNonVolatileBuffer.java ++javax/swing/JFrame/NSTexturedJFrame/NSTexturedJFrame.java ++javax/swing/JInternalFrame/5066752/bug5066752.java ++javax/swing/JInternalFrame/6647340/bug6647340.java ++javax/swing/JInternalFrame/6725409/bug6725409.java ++javax/swing/JInternalFrame/8020708/bug8020708.java ++javax/swing/JInternalFrame/InternalFrameIsNotCollectedTest.java ++javax/swing/JInternalFrame/Test6325652.java ++javax/swing/JInternalFrame/Test6505027.java ++javax/swing/JInternalFrame/Test6802868.java ++javax/swing/JLabel/6501991/bug6501991.java ++javax/swing/JLabel/6596966/bug6596966.java ++javax/swing/JLabel/7004134/bug7004134.java ++javax/swing/JLayer/6824395/bug6824395.java ++javax/swing/JLayer/6872503/bug6872503.java 
++javax/swing/JLayer/6875153/bug6875153.java ++javax/swing/JLayer/6875716/bug6875716.java ++javax/swing/JLayer/6994419/bug6994419.java ++javax/swing/JLayer/SerializationTest/SerializationTest.java ++javax/swing/JList/6462008/bug6462008.java ++javax/swing/JList/6510999/bug6510999.java ++javax/swing/JList/6823603/bug6823603.java ++javax/swing/JMenu/4417601/bug4417601.java ++javax/swing/JMenu/4515762/bug4515762.java ++javax/swing/JMenu/4692443/bug4692443.java ++javax/swing/JMenu/6359669/bug6359669.java ++javax/swing/JMenu/6470128/bug6470128.java ++javax/swing/JMenu/6538132/bug6538132.java ++javax/swing/JMenu/8071705/bug8071705.java ++javax/swing/JMenu/8072900/WrongSelectionOnMouseOver.java ++javax/swing/JMenuBar/4750590/bug4750590.java ++javax/swing/JMenuBar/MisplacedBorder/MisplacedBorder.java ++javax/swing/JMenuItem/4171437/bug4171437.java ++javax/swing/JMenuItem/4654927/bug4654927.java ++javax/swing/JMenuItem/6209975/bug6209975.java ++javax/swing/JMenuItem/6249972/bug6249972.java ++javax/swing/JMenuItem/6438430/bug6438430.java ++javax/swing/JMenuItem/6883341/bug6883341.java ++javax/swing/JMenuItem/7036148/bug7036148.java ++javax/swing/JMenuItem/ActionListenerCalledTwice/ActionListenerCalledTwiceTest.java ++javax/swing/JMenu/JMenuSelectedColorTest.java ++javax/swing/JOptionPane/6428694/bug6428694.java ++javax/swing/JOptionPane/6464022/bug6464022.java ++javax/swing/JOptionPane/7138665/bug7138665.java ++javax/swing/JPopupMenu/4458079/bug4458079.java ++javax/swing/JPopupMenu/4634626/bug4634626.java ++javax/swing/JPopupMenu/4966112/bug4966112.java ++javax/swing/JPopupMenu/6217905/bug6217905.java ++javax/swing/JPopupMenu/6415145/bug6415145.java ++javax/swing/JPopupMenu/6495920/bug6495920.java ++javax/swing/JPopupMenu/6515446/bug6515446.java ++javax/swing/JPopupMenu/6544309/bug6544309.java ++javax/swing/JPopupMenu/6580930/bug6580930.java ++javax/swing/JPopupMenu/6583251/bug6583251.java ++javax/swing/JPopupMenu/6675802/bug6675802.java 
++javax/swing/JPopupMenu/6690791/bug6690791.java ++javax/swing/JPopupMenu/6691503/bug6691503.java ++javax/swing/JPopupMenu/6694823/bug6694823.java ++javax/swing/JPopupMenu/6800513/bug6800513.java ++javax/swing/JPopupMenu/6827786/bug6827786.java ++javax/swing/JPopupMenu/6987844/bug6987844.java ++javax/swing/JPopupMenu/7154841/bug7154841.java ++javax/swing/JPopupMenu/7156657/bug7156657.java ++javax/swing/JPopupMenu/Separator/6547087/bug6547087.java ++javax/swing/JProgressBar/7141573/bug7141573.java ++javax/swing/JProgressBar/8161664/ProgressBarMemoryLeakTest.java ++javax/swing/JRadioButton/8033699/bug8033699.java ++javax/swing/JRadioButton/8041561/bug8041561.java ++javax/swing/JRadioButton/8075609/bug8075609.java ++javax/swing/JRootPane/4670486/bug4670486.java ++javax/swing/JScrollBar/4708809/bug4708809.java ++javax/swing/JScrollBar/4865918/bug4865918.java ++javax/swing/JScrollBar/6542335/bug6542335.java ++javax/swing/JScrollBar/6924059/bug6924059.java ++javax/swing/JScrollBar/7163696/Test7163696.java ++javax/swing/JScrollBar/bug4202954/bug4202954.java ++javax/swing/JScrollPane/6274267/bug6274267.java ++javax/swing/JScrollPane/6559589/bug6559589.java ++javax/swing/JScrollPane/6612531/bug6612531.java ++javax/swing/JScrollPane/HorizontalMouseWheelOnShiftPressed/HorizontalMouseWheelOnShiftPressed.java ++javax/swing/JScrollPane/Test6526631.java ++javax/swing/JSlider/4252173/bug4252173.java ++javax/swing/JSlider/6278700/bug6278700.java ++javax/swing/JSlider/6348946/bug6348946.java ++javax/swing/JSlider/6401380/bug6401380.java ++javax/swing/JSlider/6579827/bug6579827.java ++javax/swing/JSlider/6794831/bug6794831.java ++javax/swing/JSlider/6794836/bug6794836.java ++javax/swing/JSlider/6848475/bug6848475.java ++javax/swing/JSlider/6918861/bug6918861.java ++javax/swing/JSlider/6923305/bug6923305.java ++javax/swing/JSpinner/4973721/bug4973721.java ++javax/swing/JSpinner/5012888/bug5012888.java ++javax/swing/JSpinner/6463712/bug6463712.java 
++javax/swing/JSpinner/6532833/bug6532833.java ++javax/swing/JSpinner/8008657/bug8008657.java ++javax/swing/JSplitPane/4201995/bug4201995.java ++javax/swing/JSplitPane/4816114/bug4816114.java ++javax/swing/JSplitPane/4885629/bug4885629.java ++javax/swing/JTabbedPane/4361477/bug4361477.java ++javax/swing/JTabbedPane/4624207/bug4624207.java ++javax/swing/JTabbedPane/6416920/bug6416920.java ++javax/swing/JTabbedPane/6495408/bug6495408.java ++javax/swing/JTabbedPane/6670274/bug6670274.java ++javax/swing/JTabbedPane/7010561/bug7010561.java ++javax/swing/JTabbedPane/7024235/Test7024235.java ++javax/swing/JTabbedPane/7161568/bug7161568.java ++javax/swing/JTabbedPane/7170310/bug7170310.java ++javax/swing/JTabbedPane/8007563/Test8007563.java ++javax/swing/JTable/4220171/bug4220171.java ++javax/swing/JTable/4235420/bug4235420.java ++javax/swing/JTable/6263446/bug6263446.java ++javax/swing/JTable/6735286/bug6735286.java ++javax/swing/JTable/6768387/bug6768387.java ++javax/swing/JTable/6777378/bug6777378.java ++javax/swing/JTable/6788484/bug6788484.java ++javax/swing/JTable/6913768/bug6913768.java ++javax/swing/JTable/6937798/bug6937798.java ++javax/swing/JTable/7027139/bug7027139.java ++javax/swing/JTable/7055065/bug7055065.java ++javax/swing/JTable/7068740/bug7068740.java ++javax/swing/JTable/7188612/JTableAccessibleGetLocationOnScreen.java ++javax/swing/JTable/8005019/bug8005019.java ++javax/swing/JTable/8031971/bug8031971.java ++javax/swing/JTable/8032874/bug8032874.java ++javax/swing/JTableHeader/6884066/bug6884066.java ++javax/swing/JTableHeader/6889007/bug6889007.java ++javax/swing/JTable/Test6888156.java ++javax/swing/JTextArea/4697612/bug4697612.java ++javax/swing/JTextArea/6925473/bug6925473.java ++javax/swing/JTextArea/6940863/bug6940863.java ++javax/swing/JTextArea/7049024/bug7049024.java ++javax/swing/JTextArea/Test6593649.java ++javax/swing/JTextArea/TextViewOOM/TextViewOOM.java ++javax/swing/JTextField/8036819/bug8036819.java 
++javax/swing/JTextField/I18NViewNoWrapMinSpan/I18NViewNoWrapMinSpan.java ++javax/swing/JTextPane/JTextPaneDocumentAlignment.java ++javax/swing/JTextPane/JTextPaneDocumentWrapping.java ++javax/swing/JToolBar/4247996/bug4247996.java ++javax/swing/JToolBar/4529206/bug4529206.java ++javax/swing/JToolTip/4846413/bug4846413.java ++javax/swing/JTree/4330357/bug4330357.java ++javax/swing/JTree/4633594/JTreeFocusTest.java ++javax/swing/JTree/4908142/bug4908142.java ++javax/swing/JTree/4927934/bug4927934.java ++javax/swing/JTree/6263446/bug6263446.java ++javax/swing/JTree/6505523/bug6505523.java ++javax/swing/JTree/6578666/bug6578666.java ++javax/swing/JTree/8003400/Test8003400.java ++javax/swing/JTree/8003830/bug8003830.java ++javax/swing/JTree/8004298/bug8004298.java ++javax/swing/JTree/8013571/Test8013571.java ++javax/swing/JTree/8072676/TreeClipTest.java ++javax/swing/JViewport/6953396/bug6953396.java ++javax/swing/JViewport/7107099/bug7107099.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/PerPixelTranslucentCanvas.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/PerPixelTranslucentGradient.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/PerPixelTranslucent.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/PerPixelTranslucentSwing.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/SetShapeAndClickSwing.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/ShapedPerPixelTranslucentGradient.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/ShapedTranslucentPerPixelTranslucentGradient.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/TranslucentJComboBox.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/TranslucentPerPixelTranslucentGradient.java ++javax/swing/JWindow/ShapedAndTranslucentWindows/TranslucentWindowClickSwing.java ++javax/swing/KeyboardManager/8013370/Test8013370.java ++javax/swing/LookAndFeel/6474153/bug6474153.java ++javax/swing/LookAndFeel/8145547/DemandGTK2.sh ++javax/swing/LookAndFeel/8145547/DemandGTK3.sh 
++javax/swing/LookAndFeel/8145547/DemandGTK.java ++javax/swing/MultiUIDefaults/4300666/bug4300666.java ++javax/swing/MultiUIDefaults/4331767/bug4331767.java ++javax/swing/MultiUIDefaults/Test6860438.java ++javax/swing/plaf/aqua/CustomComboBoxFocusTest.java ++javax/swing/plaf/basic/BasicComboBoxEditor/Test8015336.java ++javax/swing/plaf/basic/BasicComboPopup/8154069/Bug8154069.java ++javax/swing/plaf/basic/BasicHTML/4251579/bug4251579.java ++javax/swing/plaf/basic/BasicLabelUI/bug7172652.java ++javax/swing/plaf/basic/BasicMenuUI/4983388/bug4983388.java ++javax/swing/plaf/basic/BasicScrollPaneUI/Test6632810.java ++javax/swing/plaf/basic/BasicSplitPaneUI/Test6657026.java ++javax/swing/plaf/basic/BasicTabbedPaneUI/Test6943780.java ++javax/swing/plaf/basic/BasicTreeUI/8023474/bug8023474.java ++javax/swing/plaf/basic/Test6984643.java ++javax/swing/plaf/gtk/crash/RenderBadPictureCrash.java ++javax/swing/plaf/metal/MetalBorders/Test6657026.java ++javax/swing/plaf/metal/MetalBumps/Test6657026.java ++javax/swing/plaf/metal/MetalInternalFrameUI/Test6657026.java ++javax/swing/plaf/metal/MetalLookAndFeel/5073047/bug5073047.java ++javax/swing/plaf/metal/MetalLookAndFeel/Test8039750.java ++javax/swing/plaf/metal/MetalSliderUI/Test6657026.java ++javax/swing/plaf/nimbus/8041642/bug8041642.java ++javax/swing/plaf/nimbus/8041642/ScrollBarThumbVisibleTest.java ++javax/swing/plaf/nimbus/8041725/bug8041725.java ++javax/swing/plaf/nimbus/8057791/bug8057791.java ++javax/swing/plaf/nimbus/ColorCustomizationTest.java ++javax/swing/plaf/nimbus/Test6741426.java ++javax/swing/plaf/nimbus/Test6849805.java ++javax/swing/plaf/nimbus/Test6919629.java ++javax/swing/plaf/nimbus/Test7048204.java ++javax/swing/plaf/synth/6771547/SynthTest.java ++javax/swing/plaf/synth/7032791/bug7032791.java ++javax/swing/plaf/synth/7143614/bug7143614.java ++javax/swing/plaf/synth/7158712/bug7158712.java ++javax/swing/plaf/synth/SynthButtonUI/6276188/bug6276188.java 
++javax/swing/plaf/synth/SynthToolBarUI/6739756/bug6739756.java ++javax/swing/plaf/synth/Test6660049.java ++javax/swing/plaf/synth/Test6933784.java ++javax/swing/plaf/synth/Test8015926.java ++javax/swing/plaf/synth/Test8043627.java ++javax/swing/plaf/windows/WindowsRootPaneUI/WrongAltProcessing/WrongAltProcessing.java ++javax/swing/Popup/6514582/bug6514582.java ++javax/swing/PopupFactory/6276087/NonOpaquePopupMenuTest.java ++javax/swing/PopupFactory/8048506/bug8048506.java ++javax/swing/Popup/TaskbarPositionTest.java ++javax/swing/reliability/TaskUndJFrameProperties.java ++javax/swing/reliability/TaskZoomJFrameChangeState.java ++javax/swing/reliability/TaskZoomJFrameRepaint.java ++javax/swing/RepaintManager/6608456/bug6608456.java ++javax/swing/RepaintManager/7013453/bug7013453.java ++javax/swing/RepaintManager/DisplayListenerLeak/DisplayListenerLeak.java ++javax/swing/RepaintManager/IconifyTest/IconifyTest.java ++javax/swing/Security/6657138/bug6657138.java ++javax/swing/Security/6657138/ComponentTest.java ++javax/swing/Security/6938813/bug6938813.java ++javax/swing/SpringLayout/4726194/bug4726194.java ++javax/swing/SwingUtilities/4917669/bug4917669.java ++javax/swing/SwingUtilities/6797139/bug6797139.java ++javax/swing/SwingUtilities/7088744/bug7088744.java ++javax/swing/SwingUtilities/7146377/bug7146377.java ++javax/swing/SwingUtilities/7170657/bug7170657.java ++javax/swing/SwingUtilities/8032219/DrawRect.java ++javax/swing/SwingUtilities/TestBadBreak/TestBadBreak.java ++javax/swing/SwingWorker/6432565/bug6432565.java ++javax/swing/SwingWorker/6493680/bug6493680.java ++javax/swing/SwingWorker/6880336/NestedWorkers.java ++javax/swing/system/6799345/TestShutdown.java ++javax/swing/text/AbstractDocument/6968363/Test6968363.java ++javax/swing/text/AbstractDocument/8030118/Test8030118.java ++javax/swing/text/CSSBorder/6796710/bug6796710.java ++javax/swing/text/DefaultCaret/6938583/bug6938583.java ++javax/swing/text/DefaultCaret/7083457/bug7083457.java 
++javax/swing/text/DefaultEditorKit/4278839/bug4278839.java ++javax/swing/text/DefaultHighlighter/6771184/bug6771184.java ++javax/swing/text/DefaultStyledDocument/6636983/bug6636983.java ++javax/swing/text/FlowView/LayoutTest.java ++javax/swing/text/html/7189299/bug7189299.java ++javax/swing/text/html/8005391/bug8005391.java ++javax/swing/text/html/8034955/bug8034955.java ++javax/swing/text/html/8218674/TooltipImageTest.java ++javax/swing/text/html/CSS/4530474/bug4530474.java ++javax/swing/text/html/HRuleView/Test5062055.java ++javax/swing/text/html/HTMLDocument/8058120/bug8058120.java ++javax/swing/text/html/HTMLEditorKit/4242228/bug4242228.java ++javax/swing/text/html/HTMLEditorKit/5043626/bug5043626.java ++javax/swing/text/html/parser/8074956/bug8074956.java ++javax/swing/text/html/parser/Parser/6836089/bug6836089.java ++javax/swing/text/html/parser/Parser/6990651/bug6990651.java ++javax/swing/text/html/parser/Parser/7003777/bug7003777.java ++javax/swing/text/html/parser/Parser/7011777/bug7011777.java ++javax/swing/text/html/parser/Parser/7165725/bug7165725.java ++javax/swing/text/html/parser/Parser/8028616/bug8028616.java ++javax/swing/text/html/parser/Parser/8078268/bug8078268.java ++javax/swing/text/html/parser/Parser/HtmlCommentTagParseTest/HtmlCommentTagParseTest.java ++javax/swing/text/html/parser/Test8017492.java ++javax/swing/text/html/Test4783068.java ++javax/swing/text/html/TestJLabelWithHTMLText.java ++javax/swing/text/JTextComponent/5074573/bug5074573.java ++javax/swing/text/LayoutQueue/Test6588003.java ++javax/swing/text/NavigationFilter/6735293/bug6735293.java ++javax/swing/text/StyledEditorKit/4506788/bug4506788.java ++javax/swing/text/StyledEditorKit/8016833/bug8016833.java ++javax/swing/text/Utilities/bug7045593.java ++javax/swing/text/View/8014863/bug8014863.java ++javax/swing/text/View/8015853/bug8015853.java ++javax/swing/text/View/8048110/bug8048110.java ++javax/swing/text/WrappedPlainView/6857057/bug6857057.java 
++javax/swing/ToolTipManager/7123767/bug7123767.java ++javax/swing/ToolTipManager/Test6256140.java ++javax/swing/ToolTipManager/Test6657026.java ++javax/swing/tree/DefaultTreeCellRenderer/7142955/bug7142955.java ++javax/swing/UIDefaults/6622002/bug6622002.java ++javax/swing/UIDefaults/6795356/bug6795356.java ++javax/swing/UIDefaults/6795356/SwingLazyValueTest.java ++javax/swing/UIDefaults/6795356/TableTest.java ++javax/swing/UIDefaults/7180976/Pending.java ++javax/swing/UIManager/Test6657026.java ++javax/swing/UITest/UITest.java ++sun/awt/AppContext/8012933/Test8012933.java ++sun/awt/AppContext/MultiThread/MultiThreadTest.java ++sun/awt/datatransfer/DataFlavorComparatorTest1.java ++sun/awt/datatransfer/DataFlavorComparatorTest.java ++sun/awt/datatransfer/SuplementaryCharactersTransferTest.java ++sun/awt/dnd/8024061/bug8024061.java ++sun/awt/font/ClassLoaderLeakTest.java ++sun/awt/image/bug8038000.java ++sun/awt/image/DrawByteBinary.java ++sun/awt/image/ImageRepresentation/LUTCompareTest.java ++sun/awt/image/ImageWatched/AddNoLeak.java ++sun/awt/image/ImagingLib/SamePackingTypeTest.java ++sun/java2d/AcceleratedXORModeTest.java ++sun/java2d/ClassCastExceptionForInvalidSurface.java ++sun/java2d/cmm/ColorConvertOp/AlphaTest.java ++sun/java2d/cmm/ColorConvertOp/ColConvCCMTest.java ++sun/java2d/cmm/ColorConvertOp/ColConvDCMTest.java ++sun/java2d/cmm/ColorConvertOp/ColCvtAlpha.java ++sun/java2d/cmm/ColorConvertOp/ColCvtIntARGB.java ++sun/java2d/cmm/ColorConvertOp/ColorConvertTest.java ++sun/java2d/cmm/ColorConvertOp/ColorSpaceCvtCrashTest/ColorSpaceCvtCrashTest.java ++sun/java2d/cmm/ColorConvertOp/ConstructorsNullTest/ConstructorsNullTest.html ++sun/java2d/cmm/ColorConvertOp/GrayTest.java ++sun/java2d/cmm/ColorConvertOp/InvalidRenderIntentTest.java ++sun/java2d/cmm/ColorConvertOp/MTColConvTest.java ++sun/java2d/cmm/ColorConvertOp/MTSafetyTest.java ++sun/java2d/cmm/ColorConvertOp/RGBColorConvertTest.java ++sun/java2d/cmm/ProfileOp/DisposalCrashTest.java 
++sun/java2d/cmm/ProfileOp/MTReadProfileTest.java ++sun/java2d/cmm/ProfileOp/ReadProfileTest.java ++sun/java2d/cmm/ProfileOp/ReadWriteProfileTest.java ++sun/java2d/cmm/ProfileOp/SetDataTest.java ++sun/java2d/DirectX/AcceleratedScaleTest/AcceleratedScaleTest.java ++sun/java2d/DirectX/AccelPaintsTest/AccelPaintsTest.java ++sun/java2d/DirectX/DrawBitmaskToSurfaceTest.java ++sun/java2d/DirectX/IAEforEmptyFrameTest/IAEforEmptyFrameTest.java ++sun/java2d/DirectX/InfiniteValidationLoopTest/InfiniteValidationLoopTest.java ++sun/java2d/DirectX/OnScreenRenderingResizeTest/OnScreenRenderingResizeTest.java ++sun/java2d/DirectX/OpaqueImageToSurfaceBlitTest/OpaqueImageToSurfaceBlitTest.java ++sun/java2d/DirectX/OverriddenInsetsTest/OverriddenInsetsTest.java ++sun/java2d/DirectX/RenderingToCachedGraphicsTest/RenderingToCachedGraphicsTest.java ++sun/java2d/DirectX/StrikeDisposalCrashTest/StrikeDisposalCrashTest.java ++sun/java2d/DirectX/SwingOnScreenScrollingTest/SwingOnScreenScrollingTest.java ++sun/java2d/DirectX/TransformedPaintTest/TransformedPaintTest.java ++sun/java2d/DrawCachedImageAndTransform.java ++sun/java2d/DrawXORModeTest.java ++sun/java2d/GdiRendering/InsetClipping.java ++sun/java2d/loops/Bug7049339.java ++sun/java2d/loops/RenderToCustomBufferTest.java ++sun/java2d/marlin/ArrayCacheSizeTest.java ++sun/java2d/marlin/CeilAndFloorTests.java ++sun/java2d/marlin/CrashNaNTest.java ++sun/java2d/marlin/CrashPaintTest.java ++sun/java2d/marlin/CrashTest.java ++sun/java2d/marlin/DefaultRenderingEngine.java ++sun/java2d/marlin/TextClipErrorTest.java ++sun/java2d/OpenGL/bug7181438.java ++sun/java2d/OpenGL/CopyAreaOOB.java ++sun/java2d/OpenGL/CustomCompositeTest.java ++sun/java2d/OpenGL/DrawBufImgOp.java ++sun/java2d/OpenGL/DrawHugeImageTest.java ++sun/java2d/OpenGL/GradientPaints.java ++sun/java2d/pipe/hw/RSLAPITest/RSLAPITest.java ++sun/java2d/pipe/hw/RSLContextInvalidationTest/RSLContextInvalidationTest.java ++sun/java2d/pipe/InterpolationQualityTest.java 
++sun/java2d/pipe/MutableColorTest/MutableColorTest.java ++sun/java2d/pipe/RegionOps.java ++sun/java2d/pipe/Test7027667.java ++sun/java2d/pipe/Test8004821.java ++sun/java2d/pisces/Renderer/Test7019861.java ++sun/java2d/pisces/Renderer/TestNPE.java ++sun/java2d/pisces/Test7036754.java ++sun/java2d/SunGraphics2D/DrawImageBilinear.java ++sun/java2d/SunGraphics2D/EmptyClipRenderingTest.java ++sun/java2d/SunGraphics2D/PolyVertTest.java ++sun/java2d/SunGraphics2D/SimplePrimQuality.java ++sun/java2d/SunGraphics2D/SourceClippingBlitTest/SourceClippingBlitTest.java ++sun/java2d/SunGraphicsEnvironment/TestSGEuseAlternateFontforJALocales.java ++sun/java2d/X11SurfaceData/DrawImageBgTest/DrawImageBgTest.java ++sun/java2d/X11SurfaceData/SharedMemoryPixmapsTest/SharedMemoryPixmapsTest.sh ++sun/java2d/XRenderBlitsTest.java ++sun/security/provider/SeedGenerator/Awt_Hang_Test.java ++ ++#x86 also failed ++ ++runtime/6929067/Test6929067.sh ++runtime/InitialThreadOverflow/testme.sh ++runtime/classFileParserBug/TestEmptyBootstrapMethodsAttr.java ++com/sun/jdi/JdbReadTwiceTest.sh ++com/sun/jndi/ldap/LdapDnsProviderTest.java ++com/sun/management/DiagnosticCommandMBean/DcmdMBeanPermissionsTest.java ++java/beans/Introspector/7064279/Test7064279.java ++java/beans/Introspector/Test7172865.java ++java/beans/Introspector/Test7195106.java ++java/net/InetAddress/IsReachableViaLoopbackTest.java ++java/net/InetAddress/getOriginalHostName.java ++java/nio/file/Files/probeContentType/ForceLoad.java ++java/nio/file/Files/probeContentType/ParallelProbes.java ++java/rmi/transport/rapidExportUnexport/RapidExportUnexport.java ++java/security/KeyStore/PKCS12/KeytoolReaderP12Test.java ++java/security/cert/CertPathBuilder/akiExt/AKISerialNumber.java ++java/security/cert/CertPathBuilder/targetConstraints/BuildEEBasicConstraints.java ++java/security/cert/pkix/policyChanges/TestPolicy.java ++java/text/BreakIterator/BreakIteratorTest.java ++java/util/logging/LocalizedLevelName.java 
++java/util/logging/SimpleFormatterFormat.java ++javax/management/remote/mandatory/connection/RMIConnector_NPETest.java ++javax/net/ssl/SSLSession/CheckMyTrustedKeystore.java ++javax/print/CheckDupFlavor.java ++javax/print/attribute/AttributeTest.java ++javax/print/attribute/GetCopiesSupported.java ++javax/print/attribute/SidesPageRangesTest.java ++javax/print/attribute/SupportedPrintableAreas.java ++javax/security/auth/login/JAASConfigSyntaxCheck/JAASConfigSyntaxTest.java ++javax/security/auth/login/LoginContext/DynamicConfigurationTest.java ++javax/xml/bind/jxc/8073519/SchemagenErrorReporting.java ++security/infra/java/security/cert/CertPathValidator/certification/ActalisCA.java ++security/infra/java/security/cert/CertPathValidator/certification/AmazonCA.java ++security/infra/java/security/cert/CertPathValidator/certification/BuypassCA.java ++security/infra/java/security/cert/CertPathValidator/certification/ComodoCA.java ++security/infra/java/security/cert/CertPathValidator/certification/DTrustCA.java ++security/infra/java/security/cert/CertPathValidator/certification/EntrustCA.java ++security/infra/java/security/cert/CertPathValidator/certification/GlobalSignR6CA.java ++security/infra/java/security/cert/CertPathValidator/certification/GoDaddyCA.java ++security/infra/java/security/cert/CertPathValidator/certification/HaricaCA.java ++security/infra/java/security/cert/CertPathValidator/certification/LuxTrustCA.java ++security/infra/java/security/cert/CertPathValidator/certification/QuoVadisCA.java ++security/infra/java/security/cert/CertPathValidator/certification/SSLCA.java ++security/infra/java/security/cert/CertPathValidator/certification/TeliaSoneraCA.java ++sun/management/jmxremote/startstop/JMXStartStopTest.java ++sun/rmi/transport/proxy/EagerHttpFallback.java ++sun/security/krb5/auto/ReplayCacheTestProc.java ++sun/security/lib/cacerts/VerifyCACerts.java ++sun/security/ssl/X509TrustManagerImpl/Symantec/Distrust.java 
++sun/security/tools/jarsigner/TsacertOptionTest.java ++sun/security/tools/jarsigner/warnings/AliasNotInStoreTest.java ++sun/security/tools/jarsigner/warnings/BadExtendedKeyUsageTest.java ++sun/security/tools/jarsigner/warnings/BadNetscapeCertTypeTest.java ++sun/security/tools/jarsigner/warnings/ChainNotValidatedTest.java ++sun/security/tools/jarsigner/warnings/HasExpiredCertTest.java ++sun/security/tools/jarsigner/warnings/HasExpiringCertTest.java ++sun/security/tools/jarsigner/warnings/HasUnsignedEntryTest.java ++sun/security/tools/jarsigner/warnings/MultipleWarningsTest.java ++sun/security/tools/jarsigner/warnings/NoTimestampTest.java ++sun/security/tools/jarsigner/warnings/NotSignedByAliasTest.java ++sun/security/tools/jarsigner/warnings/NotYetValidCertTest.java ++sun/security/tools/jarsigner/weaksize.sh ++sun/tools/jps/TestJpsJarRelative.java ++sun/tools/native2ascii/NativeErrors.java ++sun/util/logging/SourceClassName.java ++tools/launcher/TestSpecialArgs.java ++vmTestbase/nsk/jdb/monitor/monitor002/monitor002.java ++com/sun/crypto/provider/Cipher/DES/PerformanceTest.java ++com/sun/jdi/ArrayLengthDumpTest.sh ++com/sun/jdi/BreakpointWithFullGC.sh ++com/sun/jdi/CatchAllTest.sh ++com/sun/jdi/CatchCaughtTest.sh ++com/sun/jdi/CatchPatternTest.sh ++com/sun/jdi/CommandCommentDelimiter.sh ++com/sun/jdi/DeoptimizeWalk.sh ++com/sun/jdi/EvalArgs.sh ++com/sun/jdi/EvalArraysAsList.sh ++com/sun/jdi/EvalInterfaceStatic.sh ++com/sun/jdi/GetLocalVariables3Test.sh ++com/sun/jdi/GetLocalVariables4Test.sh ++com/sun/jdi/JdbLockTest.sh ++com/sun/jdi/JdbMethodExitTest.sh ++com/sun/jdi/JdbMissStep.sh ++com/sun/jdi/JdbVarargsTest.sh ++com/sun/jdi/MixedSuspendTest.sh ++com/sun/jdi/NotAField.sh ++com/sun/jdi/NullLocalVariable.sh ++com/sun/jdi/PopAndInvokeTest.java ++com/sun/jdi/Redefine-g.sh ++com/sun/jdi/RedefineAbstractClass.sh ++com/sun/jdi/RedefineAddPrivateMethod.sh ++com/sun/jdi/RedefineAnnotation.sh ++com/sun/jdi/RedefineChangeClassOrder.sh ++com/sun/jdi/RedefineClasses.sh 
++com/sun/jdi/RedefineClearBreakpoint.sh ++com/sun/jdi/RedefineException.sh ++com/sun/jdi/RedefineFinal.sh ++com/sun/jdi/RedefineImplementor.sh ++com/sun/jdi/RedefineIntConstantToLong.sh ++com/sun/jdi/RedefineMulti.sh ++com/sun/jdi/RedefinePop.sh ++com/sun/jdi/RedefineStep.sh ++com/sun/jdi/RedefineTTYLineNumber.sh ++com/sun/jdi/StringConvertTest.sh ++com/sun/jdi/WatchFramePop.sh ++com/sun/security/auth/callback/DialogCallbackHandler/Default.java ++com/sun/security/auth/callback/TextCallbackHandler/Default.java ++com/sun/security/auth/callback/TextCallbackHandler/Password.java ++com/sun/security/sasl/gsskerb/AuthOnly.java ++com/sun/security/sasl/gsskerb/ConfSecurityLayer.java ++com/sun/security/sasl/gsskerb/NoSecurityLayer.java ++com/sun/tracing/BasicFunctionality.java ++java/io/ByteArrayOutputStream/MaxCapacity.java ++java/io/CharArrayReader/OverflowInRead.java ++java/io/File/GetXSpace.java ++java/io/IOException/LastErrorString.java ++java/io/StringBufferInputStream/OverflowInRead.java ++java/io/SystemInAvailable.java ++java/lang/StringBuilder/HugeCapacity.java ++java/lang/instrument/ParallelTransformerLoader.sh ++java/lang/invoke/LFCaching/LFGarbageCollectedTest.java ++java/nio/channels/FileChannel/FileExtensionAndMap.java ++java/util/AbstractList/CheckForComodification.java ++java/util/ResourceBundle/RestrictedBundleTest.java ++java/util/ResourceBundle/Test4300693.java ++java/util/WeakHashMap/GCDuringIteration.java ++java/util/zip/3GBZipFiles.sh ++javax/net/ssl/SSLEngine/Basics.java ++javax/net/ssl/SSLEngine/CheckStatus.java ++javax/net/ssl/SSLEngine/ConnectionTest.java ++javax/net/ssl/SSLEngine/EngineCloseOnAlert.java ++javax/net/ssl/SSLEngine/IllegalHandshakeMessage.java ++javax/net/ssl/SSLEngine/IllegalRecordVersion.java ++javax/net/ssl/SSLEngine/TestAllSuites.java ++javax/security/auth/kerberos/KerberosHashEqualsTest.java ++javax/security/auth/kerberos/KerberosTixDateTest.java ++jdk/jfr/event/oldobject/TestObjectDescription.java 
++jdk/jfr/event/sampling/TestNative.java ++jdk/security/infra/java/security/cert/CertPathValidator/certification/HaricaCA.java ++sun/net/www/protocol/https/HttpsURLConnection/CloseKeepAliveCached.java ++sun/security/provider/PolicyFile/GrantAllPermToExtWhenNoPolicy.java ++sun/security/provider/PolicyParser/ExtDirs.java ++sun/security/provider/PolicyParser/ExtDirsChange.java ++sun/security/provider/PolicyParser/ExtDirsDefaultPolicy.java ++sun/security/provider/PolicyParser/PrincipalExpansionError.java ++sun/security/ssl/SSLSocketImpl/ClientTimeout.java ++sun/security/ssl/SSLSocketImpl/NonAutoClose.java ++sun/security/ssl/SSLSocketImpl/SetClientMode.java ++sun/security/tools/jarsigner/warnings/BadKeyUsageTest.java ++sun/security/tools/keytool/i18n.sh +diff -uNr openjdk/THIRD_PARTY_README afu8u/THIRD_PARTY_README +--- openjdk/THIRD_PARTY_README 2023-04-19 05:53:02.000000000 +0800 ++++ afu8u/THIRD_PARTY_README 2025-05-06 10:53:44.691633659 +0800 +@@ -7,7 +7,7 @@ + + --- begin of LICENSE --- + +-Copyright (c) 2000-2011 France Télécom ++Copyright (c) 2000-2011 France T??l??com + All rights reserved. + + Redistribution and use in source and binary forms, with or without +@@ -1035,7 +1035,7 @@ + --- begin of LICENSE --- + + Copyright notice +-Copyright © 2011 Ecma International ++Copyright ?? 2011 Ecma International + Ecma International + Rue du Rhone 114 + CH-1204 Geneva +@@ -2527,16 +2527,16 @@ + Unicode Terms of Use + + For the general privacy policy governing access to this site, see the Unicode +-Privacy Policy. For trademark usage, see the Unicode® Consortium Name and ++Privacy Policy. For trademark usage, see the Unicode?? Consortium Name and + Trademark Usage Policy. + + A. Unicode Copyright. +- 1. Copyright © 1991-2013 Unicode, Inc. All rights reserved. ++ 1. Copyright ?? 1991-2013 Unicode, Inc. All rights reserved. + + 2. Certain documents and files on this website contain a legend indicating + that "Modification is permitted." 
Any person is hereby authorized, + without fee, to modify such documents and files to create derivative +- works conforming to the Unicode® Standard, subject to Terms and ++ works conforming to the Unicode?? Standard, subject to Terms and + Conditions herein. + + 3. Any person is hereby authorized, without fee, to view, use, reproduce, +@@ -2602,14 +2602,14 @@ + + E.Trademarks & Logos. + 1. The Unicode Word Mark and the Unicode Logo are trademarks of Unicode, +- Inc. “The Unicode Consortium” and “Unicode, Inc.” are trade names of ++ Inc. ???The Unicode Consortium??? and ???Unicode, Inc.??? are trade names of + Unicode, Inc. Use of the information and materials found on this +- website indicates your acknowledgement of Unicode, Inc.’s exclusive ++ website indicates your acknowledgement of Unicode, Inc.???s exclusive + worldwide rights in the Unicode Word Mark, the Unicode Logo, and the + Unicode trade names. + +- 2. The Unicode Consortium Name and Trademark Usage Policy (“Trademark +- Policy”) are incorporated herein by reference and you agree to abide by ++ 2. The Unicode Consortium Name and Trademark Usage Policy (???Trademark ++ Policy???) are incorporated herein by reference and you agree to abide by + the provisions of the Trademark Policy, which may be changed from time + to time in the sole discretion of Unicode, Inc. + +@@ -2632,12 +2632,12 @@ + + 2. Modification by Unicode. Unicode shall have the right to modify this + Agreement at any time by posting it to this site. The user may not +- assign any part of this Agreement without Unicode’s prior written ++ assign any part of this Agreement without Unicode???s prior written + consent. + + 3. Taxes. The user agrees to pay any taxes arising from access to this + website or use of the information herein, except for those based on +- Unicode’s net income. ++ Unicode???s net income. + + 4. Severability. 
If any provision of this Agreement is declared invalid or + unenforceable, the remaining provisions of this Agreement shall remain +@@ -2666,7 +2666,7 @@ + + COPYRIGHT AND PERMISSION NOTICE + +-Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the ++Copyright ?? 1991-2012 Unicode, Inc. All rights reserved. Distributed under the + Terms of Use in http://www.unicode.org/copyright.html. + + Permission is hereby granted, free of charge, to any person obtaining a copy +diff -uNr openjdk/version_patch.sh afu8u/version_patch.sh +--- openjdk/version_patch.sh 1970-01-01 08:00:00.000000000 +0800 ++++ afu8u/version_patch.sh 2025-05-06 10:53:48.079633773 +0800 +@@ -0,0 +1,14 @@ ++#!/bin/bash ++gitnum=`git log| head -n 1 |cut -b 8-15` ++bdate=`date +%Y-%m-%d` ++#gitdate=`git log --pretty=format:"%ai"|head -n 1|cut -d " " -f 1` ++topdir=`pwd` ++updatever=212 ++patch=SP2 ++username=`echo $USER` ++buildtag=$username.`basename $topdir`.$gitnum.$bdate.$patch ++ ++#sed -i '500a USER_RELEASE_SUFFIX=`'$buildtag'`' ./common/autoconf/jdk-options.m4 ++sed -i '499c USER_RELEASE_SUFFIX="'$buildtag'"' ./common/autoconf/jdk-options.m4 ++sed -i '500c JDK_UPDATE_VERSION='$updatever'' ./common/autoconf/jdk-options.m4 ++ diff --git a/java-1.8.0-openjdk.spec b/java-1.8.0-openjdk.spec index c161aea..c64f9be 100644 --- a/java-1.8.0-openjdk.spec +++ b/java-1.8.0-openjdk.spec @@ -13,7 +13,7 @@ # Only produce a release build on x86_64: # $ rhpkg mockbuild --without slowdebug --without fastdebug # -%global anolis_release 2 +%global anolis_release 3 # Enable fastdebug builds by default on relevant arches. %bcond_without fastdebug # Enable slowdebug builds by default on relevant arches. 
@@ -95,11 +95,11 @@ # similarly for other %%{_jvmdir}/{jre,java} and %%{_javadocdir}/{java,java-zip} # Set of architectures for which we build slowdebug builds -%global debug_arches loongarch64 +%global debug_arches loongarch64 sw_64 # Set of architectures for which we build fastdebug builds -%global fastdebug_arches loongarch64 +%global fastdebug_arches loongarch64 sw_64 # Set of architectures with a Just-In-Time (JIT) compiler -%global jit_arches loongarch64 +%global jit_arches loongarch64 sw_64 # Set of architectures which use the Zero assembler port (!jit_arches) %global zero_arches %{arm} # Set of architectures which run a full bootstrap cycle @@ -107,7 +107,7 @@ # Set of architectures which support SystemTap tapsets %global systemtap_arches %{jit_arches} # Set of architectures which support the serviceability agent -%global sa_arches loongarch64 +%global sa_arches loongarch64 sw_64 # Set of architectures which support class data sharing # See https://bugzilla.redhat.com/show_bug.cgi?id=513605 # MetaspaceShared::generate_vtable_methods is not implemented for the PPC JIT @@ -214,6 +214,10 @@ %global archinstall loongarch64 %global stapinstall loongarch64 %endif +%ifarch sw_64 +%global archinstall sw64 +%global stapinstall sw64 +%endif # Need to support noarch for srpm build %ifarch noarch %global archinstall %{nil} @@ -265,13 +269,13 @@ # eg # jdk8u60-b27 -> jdk8u60 or # aarch64-jdk8u60-b27 -> aarch64-jdk8u60 (dont forget spec escape % by %%) %global whole_update %(VERSION=%{version_tag}; echo ${VERSION%%-*}) # eg jdk8u60 -> 60 or aarch64-jdk8u60 -> 60 -%ifnarch loongarch64 +%ifnarch loongarch64 sw_64 %global updatever %(VERSION=%{whole_update}; echo ${VERSION##*u}) %else %global updatever 372 %endif # eg jdk8u60-b27 -> b27 -%ifnarch loongarch64 +%ifnarch loongarch64 sw_64 %global buildver %(VERSION=%{version_tag}; echo ${VERSION##*-}) %else %global buildver b07 @@ -1412,6 +1416,16 @@ Patch201: jdk8043805-allow_using_system_installed_libjpeg.patch # Init 
support for LoongArch64 Patch2000: LoongArch64.patch +############################################# +# +# sw_64 patches +# +# This section includes patches which are +# added by sw_64. +############################################# +# Init support for sw_64 +Patch3000: 3000-sw_64.patch + ############################################# # # Shenandoah fixes @@ -1779,7 +1793,7 @@ cp %{SOURCE2} . # # the configure macro will do this too, but it also passes a few flags not # supported by openjdk configure script -%ifnarch loongarch64 +%ifnarch loongarch64 sw_64 cp %{SOURCE100} %{top_level_dir_name}/common/autoconf/build-aux/ cp %{SOURCE101} %{top_level_dir_name}/common/autoconf/build-aux/ %endif @@ -1803,7 +1817,7 @@ sh %{SOURCE12} %patch3 %patch5 -%ifnarch loongarch64 +%ifnarch loongarch64 sw_64 # s390 build fixes %patch102 %patch103 @@ -1835,6 +1849,9 @@ pushd %{top_level_dir_name} %ifarch loongarch64 %patch2000 -p1 %endif +%ifarch sw_64 +%patch3000 -p1 +%endif popd # RPM-only fixes @@ -1909,7 +1926,7 @@ export NUM_PROC=${NUM_PROC:-1} [ ${NUM_PROC} -gt %{?_smp_ncpus_max} ] && export NUM_PROC=%{?_smp_ncpus_max} %endif -%ifarch loongarch64 +%ifarch loongarch64 sw_64 export ARCH_DATA_MODEL=64 %endif @@ -1993,7 +2010,7 @@ function buildjdk() { make \ JAVAC_FLAGS=-g \ -%ifnarch loongarch64 +%ifnarch loongarch64 sw_64 LOG=trace \ %endif SCTP_WERROR= \ @@ -2160,8 +2177,10 @@ $JAVA_HOME/bin/javac -d . %{SOURCE16} $JAVA_HOME/bin/java $(echo $(basename %{SOURCE16})|sed "s|\.java||") "%{oj_vendor}" %{oj_vendor_url} %{oj_vendor_bug_url} # Check translations are available for new timezones +%ifnarch sw_64 $JAVA_HOME/bin/javac -d . %{SOURCE18} $JAVA_HOME/bin/java $(echo $(basename %{SOURCE18})|sed "s|\.java||") JRE +%endif # Check debug symbols are present and can identify code find "$JAVA_HOME" -iname '*.so' -print0 | while read -d $'\0' lib @@ -2213,7 +2232,7 @@ done # Using line number 1 might cause build problems. 
See: # https://bugzilla.redhat.com/show_bug.cgi?id=1539664 # https://bugzilla.redhat.com/show_bug.cgi?id=1538767 -%ifnarch loongarch64 +%ifnarch loongarch64 sw_64 gdb -q "$JAVA_HOME/bin/java" < - 1:1.8.0.372.b07-3 +- Add sw_64 ISA support + * Tue Nov 07 2023 Leslie Zhai - 1:1.8.0.372.b07-2 - Change openjdk_revision - Drop jdk8275535-rh2053256-ldap_auth.patch -- Gitee From 3785ec1bd6afa99085ebf79594da4bc074ec790c Mon Sep 17 00:00:00 2001 From: swcompiler Date: Fri, 9 May 2025 08:56:14 +0800 Subject: [PATCH 2/2] exclude x86_64 aarch64 --- java-1.8.0-openjdk.spec | 3 +++ 1 file changed, 3 insertions(+) diff --git a/java-1.8.0-openjdk.spec b/java-1.8.0-openjdk.spec index c64f9be..f58366e 100644 --- a/java-1.8.0-openjdk.spec +++ b/java-1.8.0-openjdk.spec @@ -13,6 +13,9 @@ # Only produce a release build on x86_64: # $ rhpkg mockbuild --without slowdebug --without fastdebug # + +ExcludeArch: x86_64 aarch64 + %global anolis_release 3 # Enable fastdebug builds by default on relevant arches. %bcond_without fastdebug -- Gitee