From 79b89e7ab15ae29f9a49cccf52f2898560ec6e80 Mon Sep 17 00:00:00 2001 From: binaryfz Date: Mon, 30 Nov 2020 10:57:59 +0800 Subject: [PATCH 1/9] add third_party --- third_party/libnativehelper/ALog-priv.h | 76 + third_party/libnativehelper/Android.bp | 123 ++ third_party/libnativehelper/JNIHelp.cpp | 407 +++++ third_party/libnativehelper/JniConstants.cpp | 287 ++++ third_party/libnativehelper/JniConstants.h | 83 + third_party/libnativehelper/JniInvocation.cpp | 321 ++++ .../libnativehelper/MODULE_LICENSE_APACHE2 | 0 third_party/libnativehelper/NOTICE | 190 +++ third_party/libnativehelper/README | 11 + .../nativehelper/nativehelper_utils.h | 55 + .../nativehelper/scoped_bytes.h | 83 + .../nativehelper/scoped_local_frame.h | 39 + .../nativehelper/scoped_local_ref.h | 91 ++ .../nativehelper/scoped_primitive_array.h | 147 ++ .../nativehelper/scoped_string_chars.h | 73 + .../nativehelper/scoped_utf_chars.h | 94 ++ .../include/nativehelper/JNIHelp.h | 300 ++++ .../include/nativehelper/JniInvocation.h | 80 + .../include/nativehelper/ScopedBytes.h | 23 + .../include/nativehelper/ScopedLocalFrame.h | 22 + .../include/nativehelper/ScopedLocalRef.h | 23 + .../nativehelper/ScopedPrimitiveArray.h | 23 + .../include/nativehelper/ScopedStringChars.h | 23 + .../include/nativehelper/ScopedUtfChars.h | 23 + .../include/nativehelper/module_api.h | 23 + .../include/nativehelper/toStringArray.h | 78 + third_party/libnativehelper/include_jni/jni.h | 1143 +++++++++++++ .../libnativehelper/libnativehelper.map.txt | 40 + .../nativehelper/detail/signature_checker.h | 1441 +++++++++++++++++ .../nativehelper/jni_macros.h | 285 ++++ third_party/libnativehelper/tests/Android.bp | 74 + .../libnativehelper/tests/AndroidTest.xml | 26 + .../tests/JniInvocation_test.cpp | 95 ++ .../JniSafeRegisterNativeMethods_test.cpp | 1282 +++++++++++++++ .../tests/jni_gtest/Android.bp | 12 + .../jni_gtest/base/nativehelper/jni_gtest.h | 126 ++ .../tests/libnativehelper_api_test.c | 26 + third_party/libnativehelper/toStringArray.cpp | 53 + 38 files changed, 7301 insertions(+) create mode 100644 third_party/libnativehelper/ALog-priv.h create mode 100644 third_party/libnativehelper/Android.bp create mode 100644 third_party/libnativehelper/JNIHelp.cpp create mode 100644 third_party/libnativehelper/JniConstants.cpp create mode 100644 third_party/libnativehelper/JniConstants.h create mode 100644 third_party/libnativehelper/JniInvocation.cpp create mode 100644 third_party/libnativehelper/MODULE_LICENSE_APACHE2 create mode 100644 third_party/libnativehelper/NOTICE create mode 100644 third_party/libnativehelper/README create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/nativehelper_utils.h create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/scoped_bytes.h create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/scoped_local_frame.h create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/scoped_local_ref.h create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/scoped_primitive_array.h create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/scoped_string_chars.h create mode 100644 third_party/libnativehelper/header_only_include/nativehelper/scoped_utf_chars.h create mode 100644 third_party/libnativehelper/include/nativehelper/JNIHelp.h create mode 100644 third_party/libnativehelper/include/nativehelper/JniInvocation.h create mode 100644 third_party/libnativehelper/include/nativehelper/ScopedBytes.h 
create mode 100644 third_party/libnativehelper/include/nativehelper/ScopedLocalFrame.h create mode 100644 third_party/libnativehelper/include/nativehelper/ScopedLocalRef.h create mode 100644 third_party/libnativehelper/include/nativehelper/ScopedPrimitiveArray.h create mode 100644 third_party/libnativehelper/include/nativehelper/ScopedStringChars.h create mode 100644 third_party/libnativehelper/include/nativehelper/ScopedUtfChars.h create mode 100644 third_party/libnativehelper/include/nativehelper/module_api.h create mode 100644 third_party/libnativehelper/include/nativehelper/toStringArray.h create mode 100644 third_party/libnativehelper/include_jni/jni.h create mode 100644 third_party/libnativehelper/libnativehelper.map.txt create mode 100644 third_party/libnativehelper/platform_include/nativehelper/detail/signature_checker.h create mode 100644 third_party/libnativehelper/platform_include/nativehelper/jni_macros.h create mode 100644 third_party/libnativehelper/tests/Android.bp create mode 100644 third_party/libnativehelper/tests/AndroidTest.xml create mode 100644 third_party/libnativehelper/tests/JniInvocation_test.cpp create mode 100644 third_party/libnativehelper/tests/JniSafeRegisterNativeMethods_test.cpp create mode 100644 third_party/libnativehelper/tests/jni_gtest/Android.bp create mode 100644 third_party/libnativehelper/tests/jni_gtest/base/nativehelper/jni_gtest.h create mode 100644 third_party/libnativehelper/tests/libnativehelper_api_test.c create mode 100644 third_party/libnativehelper/toStringArray.cpp diff --git a/third_party/libnativehelper/ALog-priv.h b/third_party/libnativehelper/ALog-priv.h new file mode 100644 index 0000000000..5e24fc8230 --- /dev/null +++ b/third_party/libnativehelper/ALog-priv.h @@ -0,0 +1,76 @@ +/* + * Copyright 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NATIVEHELPER_ALOGPRIV_H_ +#define NATIVEHELPER_ALOGPRIV_H_ + +#include + +#ifndef LOG_NDEBUG +#ifdef NDEBUG +#define LOG_NDEBUG 1 +#else +#define LOG_NDEBUG 0 +#endif +#endif + + +/* + * Basic log message macros intended to emulate the behavior of log/log.h + * in system core. This should be dependent only on ndk exposed logging + * functionality. + */ + +#ifndef ALOG +#define ALOG(priority, tag, fmt...) \ + __android_log_print(ANDROID_##priority, tag, fmt) +#endif + +#ifndef ALOGV +#if LOG_NDEBUG +#define ALOGV(...) ((void)0) +#else +#define ALOGV(...) ((void)ALOG(LOG_VERBOSE, LOG_TAG, __VA_ARGS__)) +#endif +#endif + +#ifndef ALOGD +#define ALOGD(...) ((void)ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)) +#endif + +#ifndef ALOGI +#define ALOGI(...) ((void)ALOG(LOG_INFO, LOG_TAG, __VA_ARGS__)) +#endif + +#ifndef ALOGW +#define ALOGW(...) ((void)ALOG(LOG_WARN, LOG_TAG, __VA_ARGS__)) +#endif + +#ifndef ALOGE +#define ALOGE(...) ((void)ALOG(LOG_ERROR, LOG_TAG, __VA_ARGS__)) +#endif + +/* + * Log a fatal error if cond is true. The condition test is inverted from + * assert(3) semantics. 
The test and message are not stripped from release + * builds + */ +#ifndef ALOG_ALWAYS_FATAL_IF +#define ALOG_ALWAYS_FATAL_IF(cond, ...) \ + if (cond) __android_log_assert(#cond, LOG_TAG, __VA_ARGS__) +#endif + +#endif // NATIVEHELPER_ALOGPRIV_H_ diff --git a/third_party/libnativehelper/Android.bp b/third_party/libnativehelper/Android.bp new file mode 100644 index 0000000000..f2340f1c55 --- /dev/null +++ b/third_party/libnativehelper/Android.bp @@ -0,0 +1,123 @@ +// Copyright (C) 2009 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +cc_library_headers { + name: "jni_headers", + host_supported: true, + export_include_dirs: ["include_jni"], + vendor_available: true, + target: { + windows: { + enabled: true, + }, + }, +} + +cc_library_headers { + name: "libnativehelper_header_only", + host_supported: true, + export_include_dirs: ["header_only_include"], + target: { + windows: { + enabled: true, + }, + }, +} + +cc_library_headers { + name: "jni_platform_headers", + host_supported: true, + export_include_dirs: ["platform_include"], + target: { + windows: { + enabled: true, + }, + }, +} + +cc_library { + name: "libnativehelper", + host_supported: true, + srcs: [ + "JNIHelp.cpp", + "JniConstants.cpp", + "JniInvocation.cpp", + "toStringArray.cpp", + ], + shared_libs: [ + "libbase", + "liblog", + ], + cflags: [ + "-Werror", + "-fvisibility=protected", + ], + export_include_dirs: [ + "include", + "header_only_include", + "platform_include" + ], + stubs: { + symbol_file: "libnativehelper.map.txt", + versions: ["1"], + }, + target: { + windows: { + enabled: true, + }, + }, +} + +// +// NDK-only build for the target (device), using libc++. +// - Relies only on NDK exposed functionality. +// - This doesn't include JniInvocation. +// + +cc_library_shared { + name: "libnativehelper_compat_libc++", + export_include_dirs: [ + "header_only_include", + "include", + ], + cflags: ["-Werror"], + include_dirs: [ + "libnativehelper/header_only_include", + "libnativehelper/platform_include", + ], + srcs: [ + "JNIHelp.cpp", + "JniConstants.cpp", + "toStringArray.cpp", + ], + shared_libs: [ + "liblog", + ], + sdk_version: "19", + stl: "c++_static", +} + +ndk_headers { + name: "ndk_jni.h", + from: "include_jni", + to: "", + srcs: ["include_jni/jni.h"], + license: "NOTICE", +} + +// +// Tests. +// + +subdirs = ["tests"] diff --git a/third_party/libnativehelper/JNIHelp.cpp b/third_party/libnativehelper/JNIHelp.cpp new file mode 100644 index 0000000000..8432f7a37c --- /dev/null +++ b/third_party/libnativehelper/JNIHelp.cpp @@ -0,0 +1,407 @@ +/* + * Copyright (C) 2006 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "JNIHelp" + +#include "nativehelper/JNIHelp.h" + +#include "ALog-priv.h" + +#include + +#include "JniConstants.h" +#include "nativehelper/ScopedLocalRef.h" + +/** + * Equivalent to ScopedLocalRef, but for C_JNIEnv instead. (And slightly more powerful.) + */ +template +class scoped_local_ref { +public: + explicit scoped_local_ref(C_JNIEnv* env, T localRef = NULL) + : mEnv(env), mLocalRef(localRef) + { + } + + ~scoped_local_ref() { + reset(); + } + + void reset(T localRef = NULL) { + if (mLocalRef != NULL) { + (*mEnv)->DeleteLocalRef(reinterpret_cast(mEnv), mLocalRef); + mLocalRef = localRef; + } + } + + T get() const { + return mLocalRef; + } + +private: + C_JNIEnv* const mEnv; + T mLocalRef; + + DISALLOW_COPY_AND_ASSIGN(scoped_local_ref); +}; + +static jclass findClass(C_JNIEnv* env, const char* className) { + JNIEnv* e = reinterpret_cast(env); + return (*env)->FindClass(e, className); +} + +MODULE_API int jniRegisterNativeMethods(C_JNIEnv* env, const char* className, + const JNINativeMethod* gMethods, int numMethods) +{ + JNIEnv* e = reinterpret_cast(env); + + ALOGV("Registering %s's %d native methods...", className, numMethods); + + scoped_local_ref c(env, findClass(env, className)); + ALOG_ALWAYS_FATAL_IF(c.get() == NULL, + "Native registration unable to find class '%s'; aborting...", + className); + + int result = e->RegisterNatives(c.get(), gMethods, numMethods); + ALOG_ALWAYS_FATAL_IF(result < 0, "RegisterNatives failed for '%s'; aborting...", + className); + + return 0; +} + +/* + * Returns a human-readable summary of an exception object. The buffer will + * be populated with the "binary" class name and, if present, the + * exception message. 
+ */ +static bool getExceptionSummary(C_JNIEnv* env, jthrowable exception, std::string& result) { + JNIEnv* e = reinterpret_cast(env); + + /* get the name of the exception's class */ + scoped_local_ref exceptionClass(env, (*env)->GetObjectClass(e, exception)); // can't fail + scoped_local_ref classClass(env, + (*env)->GetObjectClass(e, exceptionClass.get())); // java.lang.Class, can't fail + jmethodID classGetNameMethod = + (*env)->GetMethodID(e, classClass.get(), "getName", "()Ljava/lang/String;"); + scoped_local_ref classNameStr(env, + (jstring) (*env)->CallObjectMethod(e, exceptionClass.get(), classGetNameMethod)); + if (classNameStr.get() == NULL) { + (*env)->ExceptionClear(e); + result = ""; + return false; + } + const char* classNameChars = (*env)->GetStringUTFChars(e, classNameStr.get(), NULL); + if (classNameChars == NULL) { + (*env)->ExceptionClear(e); + result = ""; + return false; + } + result += classNameChars; + (*env)->ReleaseStringUTFChars(e, classNameStr.get(), classNameChars); + + /* if the exception has a detail message, get that */ + jmethodID getMessage = + (*env)->GetMethodID(e, exceptionClass.get(), "getMessage", "()Ljava/lang/String;"); + scoped_local_ref messageStr(env, + (jstring) (*env)->CallObjectMethod(e, exception, getMessage)); + if (messageStr.get() == NULL) { + return true; + } + + result += ": "; + + const char* messageChars = (*env)->GetStringUTFChars(e, messageStr.get(), NULL); + if (messageChars != NULL) { + result += messageChars; + (*env)->ReleaseStringUTFChars(e, messageStr.get(), messageChars); + } else { + result += ""; + (*env)->ExceptionClear(e); // clear OOM + } + + return true; +} + +/* + * Returns an exception (with stack trace) as a string. + */ +static bool getStackTrace(C_JNIEnv* env, jthrowable exception, std::string& result) { + JNIEnv* e = reinterpret_cast(env); + + scoped_local_ref stringWriterClass(env, findClass(env, "java/io/StringWriter")); + if (stringWriterClass.get() == NULL) { + return false; + } + + jmethodID stringWriterCtor = (*env)->GetMethodID(e, stringWriterClass.get(), "", "()V"); + jmethodID stringWriterToStringMethod = + (*env)->GetMethodID(e, stringWriterClass.get(), "toString", "()Ljava/lang/String;"); + + scoped_local_ref printWriterClass(env, findClass(env, "java/io/PrintWriter")); + if (printWriterClass.get() == NULL) { + return false; + } + + jmethodID printWriterCtor = + (*env)->GetMethodID(e, printWriterClass.get(), "", "(Ljava/io/Writer;)V"); + + scoped_local_ref stringWriter(env, + (*env)->NewObject(e, stringWriterClass.get(), stringWriterCtor)); + if (stringWriter.get() == NULL) { + return false; + } + + scoped_local_ref printWriter(env, + (*env)->NewObject(e, printWriterClass.get(), printWriterCtor, stringWriter.get())); + if (printWriter.get() == NULL) { + return false; + } + + scoped_local_ref exceptionClass(env, (*env)->GetObjectClass(e, exception)); // can't fail + jmethodID printStackTraceMethod = + (*env)->GetMethodID(e, exceptionClass.get(), "printStackTrace", "(Ljava/io/PrintWriter;)V"); + (*env)->CallVoidMethod(e, exception, printStackTraceMethod, printWriter.get()); + + if ((*env)->ExceptionCheck(e)) { + return false; + } + + scoped_local_ref messageStr(env, + (jstring) (*env)->CallObjectMethod(e, stringWriter.get(), stringWriterToStringMethod)); + if (messageStr.get() == NULL) { + return false; + } + + const char* utfChars = (*env)->GetStringUTFChars(e, messageStr.get(), NULL); + if (utfChars == NULL) { + return false; + } + + result = utfChars; + + (*env)->ReleaseStringUTFChars(e, 
messageStr.get(), utfChars); + return true; +} + +MODULE_API int jniThrowException(C_JNIEnv* env, const char* className, const char* msg) { + JNIEnv* e = reinterpret_cast(env); + + if ((*env)->ExceptionCheck(e)) { + /* TODO: consider creating the new exception with this as "cause" */ + scoped_local_ref exception(env, (*env)->ExceptionOccurred(e)); + (*env)->ExceptionClear(e); + + if (exception.get() != NULL) { + std::string text; + getExceptionSummary(env, exception.get(), text); + ALOGW("Discarding pending exception (%s) to throw %s", text.c_str(), className); + } + } + + scoped_local_ref exceptionClass(env, findClass(env, className)); + if (exceptionClass.get() == NULL) { + ALOGE("Unable to find exception class %s", className); + /* ClassNotFoundException now pending */ + return -1; + } + + if ((*env)->ThrowNew(e, exceptionClass.get(), msg) != JNI_OK) { + ALOGE("Failed throwing '%s' '%s'", className, msg); + /* an exception, most likely OOM, will now be pending */ + return -1; + } + + return 0; +} + +MODULE_API int jniThrowExceptionFmt(C_JNIEnv* env, const char* className, const char* fmt, va_list args) { + char msgBuf[512]; + vsnprintf(msgBuf, sizeof(msgBuf), fmt, args); + return jniThrowException(env, className, msgBuf); +} + +MODULE_API int jniThrowNullPointerException(C_JNIEnv* env, const char* msg) { + return jniThrowException(env, "java/lang/NullPointerException", msg); +} + +MODULE_API int jniThrowRuntimeException(C_JNIEnv* env, const char* msg) { + return jniThrowException(env, "java/lang/RuntimeException", msg); +} + +MODULE_API int jniThrowIOException(C_JNIEnv* env, int errnum) { + char buffer[80]; + const char* message = jniStrError(errnum, buffer, sizeof(buffer)); + return jniThrowException(env, "java/io/IOException", message); +} + +static std::string jniGetStackTrace(C_JNIEnv* env, jthrowable exception) { + JNIEnv* e = reinterpret_cast(env); + + scoped_local_ref currentException(env, (*env)->ExceptionOccurred(e)); + if (exception == NULL) { + exception = currentException.get(); + if (exception == NULL) { + return ""; + } + } + + if (currentException.get() != NULL) { + (*env)->ExceptionClear(e); + } + + std::string trace; + if (!getStackTrace(env, exception, trace)) { + (*env)->ExceptionClear(e); + getExceptionSummary(env, exception, trace); + } + + if (currentException.get() != NULL) { + (*env)->Throw(e, currentException.get()); // rethrow + } + + return trace; +} + +MODULE_API void jniLogException(C_JNIEnv* env, int priority, const char* tag, jthrowable exception) { + std::string trace(jniGetStackTrace(env, exception)); + __android_log_write(priority, tag, trace.c_str()); +} + +// Note: glibc has a nonstandard strerror_r that returns char* rather than POSIX's int. +// char *strerror_r(int errnum, char *buf, size_t n); +// +// Some versions of bionic support the glibc style call. Since the set of defines that determine +// which version is used is byzantine in its complexity we will just use this C++ template hack to +// select the correct jniStrError implementation based on the libc being used. +namespace impl { + +using GNUStrError = char* (*)(int,char*,size_t); +using POSIXStrError = int (*)(int,char*,size_t); + +inline const char* realJniStrError(GNUStrError func, int errnum, char* buf, size_t buflen) { + return func(errnum, buf, buflen); +} + +inline const char* realJniStrError(POSIXStrError func, int errnum, char* buf, size_t buflen) { + int rc = func(errnum, buf, buflen); + if (rc != 0) { + // (POSIX only guarantees a value other than 0. 
The safest + // way to implement this function is to use C++ and overload on the + // type of strerror_r to accurately distinguish GNU from POSIX.) + snprintf(buf, buflen, "errno %d", errnum); + } + return buf; +} + +} // namespace impl + +MODULE_API const char* jniStrError(int errnum, char* buf, size_t buflen) { +#ifdef _WIN32 + strerror_s(buf, buflen, errnum); + return buf; +#else + // The magic of C++ overloading selects the correct implementation based on the declared type of + // strerror_r. The inline will ensure that we don't have any indirect calls. + return impl::realJniStrError(strerror_r, errnum, buf, buflen); +#endif +} + +MODULE_API jobject jniCreateFileDescriptor(C_JNIEnv* env, int fd) { + JNIEnv* e = reinterpret_cast(env); + jobject fileDescriptor = e->NewObject(JniConstants::GetFileDescriptorClass(e), + JniConstants::GetFileDescriptorInitMethod(e)); + // NOTE: NewObject ensures that an OutOfMemoryError will be seen by the Java + // caller if the alloc fails, so we just return nullptr when that happens. + if (fileDescriptor != nullptr) { + jniSetFileDescriptorOfFD(env, fileDescriptor, fd); + } + return fileDescriptor; +} + +MODULE_API int jniGetFDFromFileDescriptor(C_JNIEnv* env, jobject fileDescriptor) { + JNIEnv* e = reinterpret_cast(env); + if (fileDescriptor != nullptr) { + return e->GetIntField(fileDescriptor, + JniConstants::GetFileDescriptorDescriptorField(e)); + } else { + return -1; + } +} + +MODULE_API void jniSetFileDescriptorOfFD(C_JNIEnv* env, jobject fileDescriptor, int value) { + JNIEnv* e = reinterpret_cast(env); + if (fileDescriptor == nullptr) { + jniThrowNullPointerException(e, "null FileDescriptor"); + } else { + e->SetIntField(fileDescriptor, JniConstants::GetFileDescriptorDescriptorField(e), value); + } +} + +MODULE_API jlong jniGetOwnerIdFromFileDescriptor(C_JNIEnv* env, jobject fileDescriptor) { + JNIEnv* e = reinterpret_cast(env); + return e->GetLongField(fileDescriptor, JniConstants::GetFileDescriptorOwnerIdField(e)); +} + +MODULE_API jarray jniGetNioBufferBaseArray(C_JNIEnv* env, jobject nioBuffer) { + JNIEnv* e = reinterpret_cast(env); + jclass nioAccessClass = JniConstants::GetNioAccessClass(e); + jmethodID getBaseArrayMethod = JniConstants::GetNioAccessGetBaseArrayMethod(e); + jobject object = e->CallStaticObjectMethod(nioAccessClass, getBaseArrayMethod, nioBuffer); + return static_cast(object); +} + +MODULE_API int jniGetNioBufferBaseArrayOffset(C_JNIEnv* env, jobject nioBuffer) { + JNIEnv* e = reinterpret_cast(env); + jclass nioAccessClass = JniConstants::GetNioAccessClass(e); + jmethodID getBaseArrayOffsetMethod = JniConstants::GetNioAccessGetBaseArrayOffsetMethod(e); + return e->CallStaticIntMethod(nioAccessClass, getBaseArrayOffsetMethod, nioBuffer); +} + +MODULE_API jlong jniGetNioBufferPointer(C_JNIEnv* env, jobject nioBuffer) { + JNIEnv* e = reinterpret_cast(env); + jlong baseAddress = e->GetLongField(nioBuffer, JniConstants::GetNioBufferAddressField(e)); + if (baseAddress != 0) { + const int position = e->GetIntField(nioBuffer, JniConstants::GetNioBufferPositionField(e)); + const int shift = + e->GetIntField(nioBuffer, JniConstants::GetNioBufferElementSizeShiftField(e)); + baseAddress += position << shift; + } + return baseAddress; +} + +MODULE_API jlong jniGetNioBufferFields(C_JNIEnv* env, jobject nioBuffer, + jint* position, jint* limit, jint* elementSizeShift) { + JNIEnv* e = reinterpret_cast(env); + *position = e->GetIntField(nioBuffer, JniConstants::GetNioBufferPositionField(e)); + *limit = e->GetIntField(nioBuffer, 
JniConstants::GetNioBufferLimitField(e)); + *elementSizeShift = + e->GetIntField(nioBuffer, JniConstants::GetNioBufferElementSizeShiftField(e)); + return e->GetLongField(nioBuffer, JniConstants::GetNioBufferAddressField(e)); +} + +MODULE_API jobject jniGetReferent(C_JNIEnv* env, jobject ref) { + JNIEnv* e = reinterpret_cast(env); + return e->CallObjectMethod(ref, JniConstants::GetReferenceGetMethod(e)); +} + +MODULE_API jstring jniCreateString(C_JNIEnv* env, const jchar* unicodeChars, jsize len) { + JNIEnv* e = reinterpret_cast(env); + return e->NewString(unicodeChars, len); +} + +MODULE_API void jniUninitializeConstants() { + JniConstants::Uninitialize(); +} diff --git a/third_party/libnativehelper/JniConstants.cpp b/third_party/libnativehelper/JniConstants.cpp new file mode 100644 index 0000000000..6d6ce791ec --- /dev/null +++ b/third_party/libnativehelper/JniConstants.cpp @@ -0,0 +1,287 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "JniConstants" +#include "ALog-priv.h" + +#include "JniConstants.h" + +#include +#include +#include + +#include "nativehelper/ScopedLocalRef.h" + +namespace { + +// Mutex protecting the initialization of cached class references. +std::mutex g_class_refs_mutex; + +// Atomic boolean flag for double locked checking that class references are +// initialized before use. +std::atomic g_class_refs_initialized(false); + +// Cached global references to class instances. +// +// These are GC heap references that are initialized under the protection of +// |g_class_refs_mutex| as they should only be initialized once to avoid losing a +// global reference. Initialization happens lazily when an accessor tries to +// retrieve one of these classes. + +jclass g_file_descriptor_class = nullptr; // java.io.FileDescriptor +jclass g_nio_access_class = nullptr; // java.nio.Access +jclass g_nio_buffer_class = nullptr; // java.nio.Buffer +jclass g_reference_class = nullptr; // java.lang.ref.Reference +jclass g_string_class = nullptr; // java.lang.String + +// Cached field and method ids. +// +// These are non-GC heap values. They are initialized lazily and racily. We +// avoid holding a mutex here because the JNI API supports concurrent calls to +// Get{Field,Method}ID and also because finding an id may recursively call into +// Get{Field,Method}ID. +// +// The recursion issue occurs here for the fields in the FileDescriptor class +// since retrieving a field id requires the class to be initialized. Class +// initialization leads to the initialization of static fields. The +// FileDescriptor class has static fields that are FileDescriptor instances. The +// initialization of these static FileDescriptor fields follows a convoluted +// path that that leads to a call to jniGetFDFromFileDescriptor() which then +// needs to call GetFieldID() which is in the call stack. If thread-safety were +// desirable here, a recursive mutex would be required. 
+// +// These field and method ids have default values of nullptr. They are reset + back to nullptr in JniConstants::Uninitialize(), along with the class + references, when a new runtime instance is created via JNI_CreateJavaVM(). The + reset happens before the new runtime instance is returned to the caller and + under the protection of the |g_class_refs_mutex|. + +jfieldID g_file_descriptor_descriptor_field = nullptr; // java.io.FileDescriptor.descriptor +jfieldID g_file_descriptor_owner_id_field = nullptr; // java.io.FileDescriptor.ownerId +jmethodID g_file_descriptor_init_method = nullptr; // void java.io.FileDescriptor.<init>() +jmethodID g_nio_access_get_base_array_method = nullptr; // Object java.nio.NIOAccess.getBaseArray() +jmethodID g_nio_access_get_base_array_offset_method = nullptr; // int java.nio.NIOAccess.getBaseArrayOffset() +jfieldID g_nio_buffer_address_field = nullptr; // long java.nio.Buffer.address +jfieldID g_nio_buffer_element_size_shift_field = nullptr; // int java.nio.Buffer._elementSizeShift +jfieldID g_nio_buffer_limit_field = nullptr; // int java.nio.Buffer.limit +jfieldID g_nio_buffer_position_field = nullptr; // int java.nio.Buffer.position +jmethodID g_nio_buffer_array_method = nullptr; // Object java.nio.Buffer.array() +jmethodID g_nio_buffer_array_offset_method = nullptr; // int java.nio.Buffer.arrayOffset() +jmethodID g_reference_get_method = nullptr; // Object java.lang.ref.Reference.get() + +jclass FindClass(JNIEnv* env, const char* name) { + ScopedLocalRef<jclass> klass(env, env->FindClass(name)); + ALOG_ALWAYS_FATAL_IF(klass.get() == nullptr, "failed to find class '%s'", name); + return reinterpret_cast<jclass>(env->NewGlobalRef(klass.get())); +} + +jfieldID FindField(JNIEnv* env, jclass klass, const char* name, const char* desc) { + jfieldID result = env->GetFieldID(klass, name, desc); + ALOG_ALWAYS_FATAL_IF(result == nullptr, "failed to find field '%s:%s'", name, desc); + return result; +} + +jmethodID FindMethod(JNIEnv* env, jclass klass, const char* name, const char* signature) { + jmethodID result = env->GetMethodID(klass, name, signature); + ALOG_ALWAYS_FATAL_IF(result == nullptr, "failed to find method '%s%s'", name, signature); + return result; +} + +jmethodID FindStaticMethod(JNIEnv* env, jclass klass, const char* name, const char* signature) { + jmethodID result = env->GetStaticMethodID(klass, name, signature); + ALOG_ALWAYS_FATAL_IF(result == nullptr, "failed to find static method '%s%s'", name, signature); + return result; +} + +} // namespace + +jclass JniConstants::GetFileDescriptorClass(JNIEnv* env) { + EnsureClassReferencesInitialized(env); + return g_file_descriptor_class; +} + +jclass JniConstants::GetNioAccessClass(JNIEnv* env) { + EnsureClassReferencesInitialized(env); + return g_nio_access_class; +} + +jclass JniConstants::GetNioBufferClass(JNIEnv* env) { + EnsureClassReferencesInitialized(env); + return g_nio_buffer_class; +} + +jclass JniConstants::GetReferenceClass(JNIEnv* env) { + EnsureClassReferencesInitialized(env); + return g_reference_class; +} + +jclass JniConstants::GetStringClass(JNIEnv* env) { + EnsureClassReferencesInitialized(env); + return g_string_class; +} + +jfieldID JniConstants::GetFileDescriptorDescriptorField(JNIEnv* env) { + if (g_file_descriptor_descriptor_field == nullptr) { + jclass klass = GetFileDescriptorClass(env); + g_file_descriptor_descriptor_field = FindField(env, klass, "descriptor", "I"); + } + return g_file_descriptor_descriptor_field; +} + +jfieldID JniConstants::GetFileDescriptorOwnerIdField(JNIEnv* env) { +
if (g_file_descriptor_owner_id_field == nullptr) { + jclass klass = GetFileDescriptorClass(env); + g_file_descriptor_owner_id_field = FindField(env, klass, "ownerId", "J"); + } + return g_file_descriptor_owner_id_field; +} + +jmethodID JniConstants::GetFileDescriptorInitMethod(JNIEnv* env) { + if (g_file_descriptor_init_method == nullptr) { + jclass klass = GetFileDescriptorClass(env); + g_file_descriptor_init_method = FindMethod(env, klass, "<init>", "()V"); + } + return g_file_descriptor_init_method; +} + +jmethodID JniConstants::GetNioAccessGetBaseArrayMethod(JNIEnv* env) { + if (g_nio_access_get_base_array_method == nullptr) { + jclass klass = GetNioAccessClass(env); + g_nio_access_get_base_array_method = + FindStaticMethod(env, klass, "getBaseArray", + "(Ljava/nio/Buffer;)Ljava/lang/Object;"); + } + return g_nio_access_get_base_array_method; +} + +jmethodID JniConstants::GetNioAccessGetBaseArrayOffsetMethod(JNIEnv* env) { + if (g_nio_access_get_base_array_offset_method == nullptr) { + jclass klass = GetNioAccessClass(env); + g_nio_access_get_base_array_offset_method = + FindStaticMethod(env, klass, "getBaseArrayOffset", "(Ljava/nio/Buffer;)I"); + } + return g_nio_access_get_base_array_offset_method; +} + +jfieldID JniConstants::GetNioBufferAddressField(JNIEnv* env) { + if (g_nio_buffer_address_field == nullptr) { + jclass klass = GetNioBufferClass(env); + g_nio_buffer_address_field = FindField(env, klass, "address", "J"); + } + return g_nio_buffer_address_field; +} + +jfieldID JniConstants::GetNioBufferElementSizeShiftField(JNIEnv* env) { + if (g_nio_buffer_element_size_shift_field == nullptr) { + jclass klass = GetNioBufferClass(env); + g_nio_buffer_element_size_shift_field = FindField(env, klass, "_elementSizeShift", "I"); + } + return g_nio_buffer_element_size_shift_field; +} + +jfieldID JniConstants::GetNioBufferLimitField(JNIEnv* env) { + if (g_nio_buffer_limit_field == nullptr) { + jclass klass = GetNioBufferClass(env); + g_nio_buffer_limit_field = FindField(env, klass, "limit", "I"); + } + return g_nio_buffer_limit_field; +} + +jfieldID JniConstants::GetNioBufferPositionField(JNIEnv* env) { + if (g_nio_buffer_position_field == nullptr) { + jclass klass = GetNioBufferClass(env); + g_nio_buffer_position_field = FindField(env, klass, "position", "I"); + } + return g_nio_buffer_position_field; +} + +jmethodID JniConstants::GetNioBufferArrayMethod(JNIEnv* env) { + if (g_nio_buffer_array_method == nullptr) { + jclass klass = GetNioBufferClass(env); + g_nio_buffer_array_method = FindMethod(env, klass, "array", "()Ljava/lang/Object;"); + } + return g_nio_buffer_array_method; +} + +jmethodID JniConstants::GetNioBufferArrayOffsetMethod(JNIEnv* env) { + if (g_nio_buffer_array_offset_method == nullptr) { + jclass klass = GetNioBufferClass(env); + g_nio_buffer_array_offset_method = FindMethod(env, klass, "arrayOffset", "()I"); + } + return g_nio_buffer_array_offset_method; +} + +jmethodID JniConstants::GetReferenceGetMethod(JNIEnv* env) { + if (g_reference_get_method == nullptr) { + jclass klass = GetReferenceClass(env); + g_reference_get_method = FindMethod(env, klass, "get", "()Ljava/lang/Object;"); + } + return g_reference_get_method; +} + +void JniConstants::EnsureClassReferencesInitialized(JNIEnv* env) { + // Fast check if class references are initialized. + if (g_class_refs_initialized.load(std::memory_order_acquire)) { + return; + } + + // Slower check with initialization if necessary.
+ std::lock_guard<std::mutex> guard(g_class_refs_mutex); + if (g_class_refs_initialized.load(std::memory_order_relaxed)) { + return; + } + + // Class constants should be initialized only once because they are global + // references. Field ids and Method ids can be initialized later since they + // are not references and races only have trivial performance + // consequences. + g_file_descriptor_class = FindClass(env, "java/io/FileDescriptor"); + g_nio_access_class = FindClass(env, "java/nio/NIOAccess"); + g_nio_buffer_class = FindClass(env, "java/nio/Buffer"); + g_reference_class = FindClass(env, "java/lang/ref/Reference"); + g_string_class = FindClass(env, "java/lang/String"); + g_class_refs_initialized.store(true, std::memory_order_release); +} + +void JniConstants::Uninitialize() { + // This method is called when a new runtime instance is created. There is no + // notification of a runtime instance being destroyed in the JNI interface + // so we piggyback on creation. Since only one runtime is supported at a + // time, we know the constants are invalid when JNI_CreateJavaVM() is + // called. + // + // Clean shutdown would require calling DeleteGlobalRef() for each of the + // class references. + std::lock_guard<std::mutex> guard(g_class_refs_mutex); + g_file_descriptor_class = nullptr; + g_file_descriptor_descriptor_field = nullptr; + g_file_descriptor_owner_id_field = nullptr; + g_file_descriptor_init_method = nullptr; + g_nio_access_class = nullptr; + g_nio_access_get_base_array_method = nullptr; + g_nio_access_get_base_array_offset_method = nullptr; + g_nio_buffer_class = nullptr; + g_nio_buffer_address_field = nullptr; + g_nio_buffer_element_size_shift_field = nullptr; + g_nio_buffer_limit_field = nullptr; + g_nio_buffer_position_field = nullptr; + g_nio_buffer_array_method = nullptr; + g_nio_buffer_array_offset_method = nullptr; + g_reference_class = nullptr; + g_reference_get_method = nullptr; + g_string_class = nullptr; + g_class_refs_initialized.store(false, std::memory_order_release); +} diff --git a/third_party/libnativehelper/JniConstants.h b/third_party/libnativehelper/JniConstants.h new file mode 100644 index 0000000000..b2d03bea4f --- /dev/null +++ b/third_party/libnativehelper/JniConstants.h @@ -0,0 +1,83 @@ +/* + * Copyright 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "jni.h" + +struct JniConstants { + // Global reference to java.io.FileDescriptor. + static jclass GetFileDescriptorClass(JNIEnv* env); + + // java.io.FileDescriptor.descriptor. + static jfieldID GetFileDescriptorDescriptorField(JNIEnv* env); + + // java.io.FileDescriptor.ownerId. + static jfieldID GetFileDescriptorOwnerIdField(JNIEnv* env); + + // void java.io.FileDescriptor.<init>(). + static jmethodID GetFileDescriptorInitMethod(JNIEnv* env); + + // Global reference to java.nio.NIOAccess.
+ static jclass GetNioAccessClass(JNIEnv* env); + + // Object java.nio.NIOAccess.getBaseArray(Buffer); + static jmethodID GetNioAccessGetBaseArrayMethod(JNIEnv* env); + + // int java.nio.NIOAccess.getBaseArrayOffset(Buffer); + static jmethodID GetNioAccessGetBaseArrayOffsetMethod(JNIEnv* env); + + // Global reference to java.nio.Buffer. + static jclass GetNioBufferClass(JNIEnv* env); + + // long java.nio.Buffer.address + static jfieldID GetNioBufferAddressField(JNIEnv* env); + + // int java.nio.Buffer._elementSizeShift + static jfieldID GetNioBufferElementSizeShiftField(JNIEnv* env); + + // int java.nio.Buffer.limit; + static jfieldID GetNioBufferLimitField(JNIEnv* env); + + // int java.nio.Buffer.position; + static jfieldID GetNioBufferPositionField(JNIEnv* env); + + // Object java.nio.Buffer.array() + static jmethodID GetNioBufferArrayMethod(JNIEnv* env); + + // int java.nio.Buffer.arrayOffset() + static jmethodID GetNioBufferArrayOffsetMethod(JNIEnv* env); + + // Global reference to java.lang.ref.Reference. + static jclass GetReferenceClass(JNIEnv* env); + + // Object java.lang.ref.Reference.get() + static jmethodID GetReferenceGetMethod(JNIEnv* env); + + // Global reference to java.lang.String. + static jclass GetStringClass(JNIEnv* env); + + // Ensure class constants are initialized before use. Field and method + // constants are lazily initialized via getters. + static void EnsureClassReferencesInitialized(JNIEnv* env); + + // Ensure any cached heap objects from previous VM instances are + // invalidated. There is no notification here that a VM is destroyed so this + // method must be called when a new VM is created (and calls from any + // earlier VMs are completed). The caching of heap objects in this class is + // one reason why there is a limit of one VM instance per process. + static void Uninitialize(); +}; diff --git a/third_party/libnativehelper/JniInvocation.cpp b/third_party/libnativehelper/JniInvocation.cpp new file mode 100644 index 0000000000..6d992e69c3 --- /dev/null +++ b/third_party/libnativehelper/JniInvocation.cpp @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "nativehelper/JniInvocation.h" + +#ifdef _WIN32 +#include +#else +#include +#endif +#include +#include + +#include + +#define LOG_TAG "JniInvocation" +#include + +#ifdef __ANDROID__ +#include +#endif + +#include "android-base/errors.h" +#include "JniConstants.h" + +namespace { + +template +void UNUSED(const T&) {} + +bool IsDebuggable() { +#ifdef __ANDROID__ + char debuggable[PROP_VALUE_MAX] = {0}; + __system_property_get("ro.debuggable", debuggable); + return strcmp(debuggable, "1") == 0; +#else + return false; +#endif +} + +int GetLibrarySystemProperty(char* buffer) { +#ifdef __ANDROID__ + return __system_property_get("persist.sys.dalvik.vm.lib.2", buffer); +#else + UNUSED(buffer); + return 0; +#endif +} + +#ifdef _WIN32 +#define FUNC_POINTER FARPROC +#else +#define FUNC_POINTER void* +#endif + +void* OpenLibrary(const char* filename) { +#ifdef _WIN32 + return LoadLibrary(filename); +#else + // Load with RTLD_NODELETE in order to ensure that libart.so is not unmapped when it is closed. + // This is due to the fact that it is possible that some threads might have yet to finish + // exiting even after JNI_DeleteJavaVM returns, which can lead to segfaults if the library is + // unloaded. + const int kDlopenFlags = RTLD_NOW | RTLD_NODELETE; + return dlopen(filename, kDlopenFlags); +#endif +} + +int CloseLibrary(void* handle) { +#ifdef _WIN32 + return FreeLibrary(static_cast(handle)); +#else + return dlclose(handle); +#endif +} + +FUNC_POINTER GetSymbol(void* handle, const char* symbol) { +#ifdef _WIN32 + return GetProcAddress(static_cast(handle), symbol); +#else + return dlsym(handle, symbol); +#endif +} + +std::string GetError() { +#ifdef _WIN32 + return android::base::SystemErrorCodeToString(GetLastError()); +#else + return std::string(dlerror()); +#endif +} + +} // namespace + +struct JniInvocationImpl final { + public: + JniInvocationImpl(); + ~JniInvocationImpl(); + + bool Init(const char* library); + + // static const char* GetLibrary(const char* library, char* buffer); + + static const char* GetLibrary(const char* library, + char* buffer, + bool (*is_debuggable)() = IsDebuggable, + int (*get_library_system_property)(char* buffer) = GetLibrarySystemProperty); + + static JniInvocationImpl& GetJniInvocation(); + + jint JNI_GetDefaultJavaVMInitArgs(void* vmargs); + jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args); + jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize size, jsize* vm_count); + + private: + JniInvocationImpl(const JniInvocationImpl&) = delete; + JniInvocationImpl& operator=(const JniInvocationImpl&) = delete; + + bool FindSymbol(FUNC_POINTER* pointer, const char* symbol); + + static JniInvocationImpl* jni_invocation_; + + // Handle to library opened with dlopen(). Library exports + // JNI_GetDefaultJavaVMInitArgs, JNI_CreateJavaVM, JNI_GetCreatedJavaVMs. + void* handle_; + jint (*JNI_GetDefaultJavaVMInitArgs_)(void*); + jint (*JNI_CreateJavaVM_)(JavaVM**, JNIEnv**, void*); + jint (*JNI_GetCreatedJavaVMs_)(JavaVM**, jsize, jsize*); + + friend class JNIInvocation_Debuggable_Test; + friend class JNIInvocation_NonDebuggable_Test; +}; + +// Check JniInvocationImpl size is same as fields, e.g. no vtable present. 
+static_assert(sizeof(JniInvocationImpl) == 4 * sizeof(uintptr_t)); + +JniInvocationImpl* JniInvocationImpl::jni_invocation_ = NULL; + +JniInvocationImpl::JniInvocationImpl() : + handle_(NULL), + JNI_GetDefaultJavaVMInitArgs_(NULL), + JNI_CreateJavaVM_(NULL), + JNI_GetCreatedJavaVMs_(NULL) { + LOG_ALWAYS_FATAL_IF(jni_invocation_ != NULL, "JniInvocation instance already initialized"); + jni_invocation_ = this; +} + +JniInvocationImpl::~JniInvocationImpl() { + jni_invocation_ = NULL; + if (handle_ != NULL) { + CloseLibrary(handle_); + } +} + +static const char* kLibraryFallback = "libart.so"; + +const char* JniInvocationImpl::GetLibrary(const char* library, + char* buffer, + bool (*is_debuggable)(), + int (*get_library_system_property)(char* buffer)) { +#ifdef __ANDROID__ + const char* default_library; + + if (!is_debuggable()) { + // Not a debuggable build. + // Do not allow arbitrary library. Ignore the library parameter. This + // will also ignore the default library, but initialize to fallback + // for cleanliness. + library = kLibraryFallback; + default_library = kLibraryFallback; + } else { + // Debuggable build. + // Accept the library parameter. For the case it is NULL, load the default + // library from the system property. + if (buffer != NULL) { + if (get_library_system_property(buffer) > 0) { + default_library = buffer; + } else { + default_library = kLibraryFallback; + } + } else { + // No buffer given, just use default fallback. + default_library = kLibraryFallback; + } + } +#else + UNUSED(buffer); + UNUSED(is_debuggable); + UNUSED(get_library_system_property); + const char* default_library = kLibraryFallback; +#endif + if (library == NULL) { + library = default_library; + } + + return library; +} + +bool JniInvocationImpl::Init(const char* library) { +#ifdef __ANDROID__ + char buffer[PROP_VALUE_MAX]; +#else + char* buffer = NULL; +#endif + library = GetLibrary(library, buffer); + handle_ = OpenLibrary(library); + if (handle_ == NULL) { + if (strcmp(library, kLibraryFallback) == 0) { + // Nothing else to try. + ALOGE("Failed to dlopen %s: %s", library, GetError().c_str()); + return false; + } + // Note that this is enough to get something like the zygote + // running, we can't property_set here to fix this for the future + // because we are root and not the system user. See + // RuntimeInit.commonInit for where we fix up the property to + // avoid future fallbacks. 
http://b/11463182 + ALOGW("Falling back from %s to %s after dlopen error: %s", + library, kLibraryFallback, GetError().c_str()); + library = kLibraryFallback; + handle_ = OpenLibrary(library); + if (handle_ == NULL) { + ALOGE("Failed to dlopen %s: %s", library, GetError().c_str()); + return false; + } + } + if (!FindSymbol(reinterpret_cast(&JNI_GetDefaultJavaVMInitArgs_), + "JNI_GetDefaultJavaVMInitArgs")) { + return false; + } + if (!FindSymbol(reinterpret_cast(&JNI_CreateJavaVM_), + "JNI_CreateJavaVM")) { + return false; + } + if (!FindSymbol(reinterpret_cast(&JNI_GetCreatedJavaVMs_), + "JNI_GetCreatedJavaVMs")) { + return false; + } + return true; +} + +jint JniInvocationImpl::JNI_GetDefaultJavaVMInitArgs(void* vmargs) { + return JNI_GetDefaultJavaVMInitArgs_(vmargs); +} + +jint JniInvocationImpl::JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { + return JNI_CreateJavaVM_(p_vm, p_env, vm_args); +} + +jint JniInvocationImpl::JNI_GetCreatedJavaVMs(JavaVM** vms, jsize size, jsize* vm_count) { + return JNI_GetCreatedJavaVMs_(vms, size, vm_count); +} + +bool JniInvocationImpl::FindSymbol(FUNC_POINTER* pointer, const char* symbol) { + *pointer = GetSymbol(handle_, symbol); + if (*pointer == NULL) { + ALOGE("Failed to find symbol %s: %s\n", symbol, GetError().c_str()); + CloseLibrary(handle_); + handle_ = NULL; + return false; + } + return true; +} + +JniInvocationImpl& JniInvocationImpl::GetJniInvocation() { + LOG_ALWAYS_FATAL_IF(jni_invocation_ == NULL, + "Failed to create JniInvocation instance before using JNI invocation API"); + return *jni_invocation_; +} + +MODULE_API jint JNI_GetDefaultJavaVMInitArgs(void* vm_args) { + return JniInvocationImpl::GetJniInvocation().JNI_GetDefaultJavaVMInitArgs(vm_args); +} + +MODULE_API jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { + // Ensure any cached heap objects from previous VM instances are + // invalidated. There is no notification here that a VM is destroyed. These + // cached objects limit us to one VM instance per process. + JniConstants::Uninitialize(); + return JniInvocationImpl::GetJniInvocation().JNI_CreateJavaVM(p_vm, p_env, vm_args); +} + +MODULE_API jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize size, jsize* vm_count) { + return JniInvocationImpl::GetJniInvocation().JNI_GetCreatedJavaVMs(vms, size, vm_count); +} + +MODULE_API JniInvocationImpl* JniInvocationCreate() { + return new JniInvocationImpl(); +} + +MODULE_API void JniInvocationDestroy(JniInvocationImpl* instance) { + delete instance; +} + +MODULE_API int JniInvocationInit(JniInvocationImpl* instance, const char* library) { + return instance->Init(library) ? 
1 : 0; +} + +MODULE_API const char* JniInvocationGetLibrary(const char* library, char* buffer) { + return JniInvocationImpl::GetLibrary(library, buffer); +} + +MODULE_API const char* JniInvocation::GetLibrary(const char* library, + char* buffer, + bool (*is_debuggable)(), + int (*get_library_system_property)(char* buffer)) { + return JniInvocationImpl::GetLibrary(library, buffer, is_debuggable, get_library_system_property); +} diff --git a/third_party/libnativehelper/MODULE_LICENSE_APACHE2 b/third_party/libnativehelper/MODULE_LICENSE_APACHE2 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/third_party/libnativehelper/NOTICE b/third_party/libnativehelper/NOTICE new file mode 100644 index 0000000000..c5b1efa7aa --- /dev/null +++ b/third_party/libnativehelper/NOTICE @@ -0,0 +1,190 @@ + + Copyright (c) 2005-2008, The Android Open Source Project + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + diff --git a/third_party/libnativehelper/README b/third_party/libnativehelper/README new file mode 100644 index 0000000000..5a5f5d45c2 --- /dev/null +++ b/third_party/libnativehelper/README @@ -0,0 +1,11 @@ +Support functions for Android's class libraries + + +These are VM-agnostic native functions that implement methods for system +class libraries. All code here: + + - MUST not be associated with an android.* class (that code lives in + frameworks/base/). + - SHOULD be written in C rather than C++ where possible. + +Some helper functions are defined in include/nativehelper/JNIHelp.h. diff --git a/third_party/libnativehelper/header_only_include/nativehelper/nativehelper_utils.h b/third_party/libnativehelper/header_only_include/nativehelper/nativehelper_utils.h new file mode 100644 index 0000000000..d7289f9fa2 --- /dev/null +++ b/third_party/libnativehelper/header_only_include/nativehelper/nativehelper_utils.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2007 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef NATIVEHELPER_MACROS_H_ +#define NATIVEHELPER_MACROS_H_ + +#if defined(__cplusplus) + +#if !defined(DISALLOW_COPY_AND_ASSIGN) +// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private: +// declarations in a class. +#define DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + void operator=(const TypeName&) = delete +#endif // !defined(DISALLOW_COPY_AND_ASSIGN) + +#ifndef NATIVEHELPER_JNIHELP_H_ +// This seems a header-only include. Provide NPE throwing. 
+static inline int jniThrowNullPointerException(JNIEnv* env, const char* msg) {
+    if (env->ExceptionCheck()) {
+        // Drop any pending exception.
+        env->ExceptionClear();
+    }
+
+    jclass e_class = env->FindClass("java/lang/NullPointerException");
+    if (e_class == nullptr) {
+        return -1;
+    }
+
+    if (env->ThrowNew(e_class, msg) != JNI_OK) {
+        env->DeleteLocalRef(e_class);
+        return -1;
+    }
+
+    env->DeleteLocalRef(e_class);
+    return 0;
+}
+#endif  // NATIVEHELPER_JNIHELP_H_
+
+#endif  // defined(__cplusplus)
+
+#endif  // NATIVEHELPER_MACROS_H_
diff --git a/third_party/libnativehelper/header_only_include/nativehelper/scoped_bytes.h b/third_party/libnativehelper/header_only_include/nativehelper/scoped_bytes.h
new file mode 100644
index 0000000000..f53931e5d0
--- /dev/null
+++ b/third_party/libnativehelper/header_only_include/nativehelper/scoped_bytes.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SCOPED_BYTES_H_
+#define SCOPED_BYTES_H_
+
+#include "jni.h"
+#include "nativehelper_utils.h"
+
+/**
+ * ScopedBytesRO and ScopedBytesRW attempt to paper over the differences between byte[]s and
+ * ByteBuffers. This in turn helps paper over the differences between non-direct ByteBuffers backed
+ * by byte[]s, direct ByteBuffers backed by byte[]s, and direct ByteBuffers not backed by byte[]s.
+ * (On Android, this last group only contains MappedByteBuffers.)
+ */
+template<bool readOnly>
+class ScopedBytes {
+public:
+    ScopedBytes(JNIEnv* env, jobject object)
+        : mEnv(env), mObject(object), mByteArray(NULL), mPtr(NULL)
+    {
+        if (mObject == NULL) {
+            jniThrowNullPointerException(mEnv, NULL);
+        } else {
+            jclass byteArrayClass = env->FindClass("[B");
+            if (mEnv->IsInstanceOf(mObject, byteArrayClass)) {
+                mByteArray = reinterpret_cast<jbyteArray>(mObject);
+                mPtr = mEnv->GetByteArrayElements(mByteArray, NULL);
+            } else {
+                mPtr = reinterpret_cast<jbyte*>(mEnv->GetDirectBufferAddress(mObject));
+            }
+            mEnv->DeleteLocalRef(byteArrayClass);
+        }
+    }
+
+    ~ScopedBytes() {
+        if (mByteArray != NULL) {
+            mEnv->ReleaseByteArrayElements(mByteArray, mPtr, readOnly ? JNI_ABORT : 0);
+        }
+    }
+
+private:
+    JNIEnv* const mEnv;
+    const jobject mObject;
+    jbyteArray mByteArray;
+
+protected:
+    jbyte* mPtr;
+
+private:
+    DISALLOW_COPY_AND_ASSIGN(ScopedBytes);
+};
+
+class ScopedBytesRO : public ScopedBytes<true> {
+public:
+    ScopedBytesRO(JNIEnv* env, jobject object) : ScopedBytes<true>(env, object) {}
+    const jbyte* get() const {
+        return mPtr;
+    }
+};
+
+class ScopedBytesRW : public ScopedBytes<false> {
+public:
+    ScopedBytesRW(JNIEnv* env, jobject object) : ScopedBytes<false>(env, object) {}
+    jbyte* get() {
+        return mPtr;
+    }
+};
+
+#endif  // SCOPED_BYTES_H_
diff --git a/third_party/libnativehelper/header_only_include/nativehelper/scoped_local_frame.h b/third_party/libnativehelper/header_only_include/nativehelper/scoped_local_frame.h
new file mode 100644
index 0000000000..91180fe2d3
--- /dev/null
+++ b/third_party/libnativehelper/header_only_include/nativehelper/scoped_local_frame.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SCOPED_LOCAL_FRAME_H_
+#define SCOPED_LOCAL_FRAME_H_
+
+#include "jni.h"
+#include "nativehelper_utils.h"
+
+class ScopedLocalFrame {
+public:
+    explicit ScopedLocalFrame(JNIEnv* env) : mEnv(env) {
+        mEnv->PushLocalFrame(128);
+    }
+
+    ~ScopedLocalFrame() {
+        mEnv->PopLocalFrame(NULL);
+    }
+
+private:
+    JNIEnv* const mEnv;
+
+    DISALLOW_COPY_AND_ASSIGN(ScopedLocalFrame);
+};
+
+#endif  // SCOPED_LOCAL_FRAME_H_
diff --git a/third_party/libnativehelper/header_only_include/nativehelper/scoped_local_ref.h b/third_party/libnativehelper/header_only_include/nativehelper/scoped_local_ref.h
new file mode 100644
index 0000000000..3eb21d9d25
--- /dev/null
+++ b/third_party/libnativehelper/header_only_include/nativehelper/scoped_local_ref.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SCOPED_LOCAL_REF_H_
+#define SCOPED_LOCAL_REF_H_
+
+#include <stddef.h>
+
+#include "jni.h"
+#include "nativehelper_utils.h"
+
+// A smart pointer that deletes a JNI local reference when it goes out of scope.
+template +class ScopedLocalRef { +public: + ScopedLocalRef(JNIEnv* env, T localRef) : mEnv(env), mLocalRef(localRef) { + } + + ScopedLocalRef(ScopedLocalRef&& s) noexcept : mEnv(s.mEnv), mLocalRef(s.release()) { + } + + explicit ScopedLocalRef(JNIEnv* env) : mEnv(env), mLocalRef(nullptr) { + } + + ~ScopedLocalRef() { + reset(); + } + + void reset(T ptr = NULL) { + if (ptr != mLocalRef) { + if (mLocalRef != NULL) { + mEnv->DeleteLocalRef(mLocalRef); + } + mLocalRef = ptr; + } + } + + T release() __attribute__((warn_unused_result)) { + T localRef = mLocalRef; + mLocalRef = NULL; + return localRef; + } + + T get() const { + return mLocalRef; + } + + + // We do not expose an empty constructor as it can easily lead to errors + // using common idioms, e.g.: + // ScopedLocalRef<...> ref; + // ref.reset(...); + + // Move assignment operator. + ScopedLocalRef& operator=(ScopedLocalRef&& s) noexcept { + reset(s.release()); + mEnv = s.mEnv; + return *this; + } + + // Allows "if (scoped_ref == nullptr)" + bool operator==(std::nullptr_t) const { + return mLocalRef == nullptr; + } + + // Allows "if (scoped_ref != nullptr)" + bool operator!=(std::nullptr_t) const { + return mLocalRef != nullptr; + } + +private: + JNIEnv* mEnv; + T mLocalRef; + + DISALLOW_COPY_AND_ASSIGN(ScopedLocalRef); +}; + +#endif // SCOPED_LOCAL_REF_H_ diff --git a/third_party/libnativehelper/header_only_include/nativehelper/scoped_primitive_array.h b/third_party/libnativehelper/header_only_include/nativehelper/scoped_primitive_array.h new file mode 100644 index 0000000000..d6840c264e --- /dev/null +++ b/third_party/libnativehelper/header_only_include/nativehelper/scoped_primitive_array.h @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_PRIMITIVE_ARRAY_H_ +#define SCOPED_PRIMITIVE_ARRAY_H_ + +#include "jni.h" +#include "nativehelper_utils.h" + +#ifdef POINTER_TYPE +#error POINTER_TYPE is defined. +#else +#define POINTER_TYPE(T) T* /* NOLINT */ +#endif + +#ifdef REFERENCE_TYPE +#error REFERENCE_TYPE is defined. +#else +#define REFERENCE_TYPE(T) T& /* NOLINT */ +#endif + +// ScopedBooleanArrayRO, ScopedByteArrayRO, ScopedCharArrayRO, ScopedDoubleArrayRO, +// ScopedFloatArrayRO, ScopedIntArrayRO, ScopedLongArrayRO, and ScopedShortArrayRO provide +// convenient read-only access to Java arrays from JNI code. This is cheaper than read-write +// access and should be used by default. 
+#define INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(PRIMITIVE_TYPE, NAME) \ + class Scoped ## NAME ## ArrayRO { \ + public: \ + explicit Scoped ## NAME ## ArrayRO(JNIEnv* env) \ + : mEnv(env), mJavaArray(NULL), mRawArray(NULL), mSize(0) {} \ + Scoped ## NAME ## ArrayRO(JNIEnv* env, PRIMITIVE_TYPE ## Array javaArray) \ + : mEnv(env) { \ + if (javaArray == NULL) { \ + mJavaArray = NULL; \ + mSize = 0; \ + mRawArray = NULL; \ + jniThrowNullPointerException(mEnv, NULL); \ + } else { \ + reset(javaArray); \ + } \ + } \ + ~Scoped ## NAME ## ArrayRO() { \ + if (mRawArray != NULL && mRawArray != mBuffer) { \ + mEnv->Release ## NAME ## ArrayElements(mJavaArray, mRawArray, JNI_ABORT); \ + } \ + } \ + void reset(PRIMITIVE_TYPE ## Array javaArray) { \ + mJavaArray = javaArray; \ + mSize = mEnv->GetArrayLength(mJavaArray); \ + if (mSize <= buffer_size) { \ + mEnv->Get ## NAME ## ArrayRegion(mJavaArray, 0, mSize, mBuffer); \ + mRawArray = mBuffer; \ + } else { \ + mRawArray = mEnv->Get ## NAME ## ArrayElements(mJavaArray, NULL); \ + } \ + } \ + const PRIMITIVE_TYPE* get() const { return mRawArray; } \ + PRIMITIVE_TYPE ## Array getJavaArray() const { return mJavaArray; } \ + const PRIMITIVE_TYPE& operator[](size_t n) const { return mRawArray[n]; } \ + size_t size() const { return mSize; } \ + private: \ + static const jsize buffer_size = 1024; \ + JNIEnv* const mEnv; \ + PRIMITIVE_TYPE ## Array mJavaArray; \ + POINTER_TYPE(PRIMITIVE_TYPE) mRawArray; \ + jsize mSize; \ + PRIMITIVE_TYPE mBuffer[buffer_size]; \ + DISALLOW_COPY_AND_ASSIGN(Scoped ## NAME ## ArrayRO); \ + } + +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jboolean, Boolean); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jbyte, Byte); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jchar, Char); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jdouble, Double); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jfloat, Float); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jint, Int); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jlong, Long); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO(jshort, Short); + +#undef INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RO + +// ScopedBooleanArrayRW, ScopedByteArrayRW, ScopedCharArrayRW, ScopedDoubleArrayRW, +// ScopedFloatArrayRW, ScopedIntArrayRW, ScopedLongArrayRW, and ScopedShortArrayRW provide +// convenient read-write access to Java arrays from JNI code. These are more expensive, +// since they entail a copy back onto the Java heap, and should only be used when necessary. 
+#define INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(PRIMITIVE_TYPE, NAME) \ + class Scoped ## NAME ## ArrayRW { \ + public: \ + explicit Scoped ## NAME ## ArrayRW(JNIEnv* env) \ + : mEnv(env), mJavaArray(NULL), mRawArray(NULL) {} \ + Scoped ## NAME ## ArrayRW(JNIEnv* env, PRIMITIVE_TYPE ## Array javaArray) \ + : mEnv(env), mJavaArray(javaArray), mRawArray(NULL) { \ + if (mJavaArray == NULL) { \ + jniThrowNullPointerException(mEnv, NULL); \ + } else { \ + mRawArray = mEnv->Get ## NAME ## ArrayElements(mJavaArray, NULL); \ + } \ + } \ + ~Scoped ## NAME ## ArrayRW() { \ + if (mRawArray) { \ + mEnv->Release ## NAME ## ArrayElements(mJavaArray, mRawArray, 0); \ + } \ + } \ + void reset(PRIMITIVE_TYPE ## Array javaArray) { \ + mJavaArray = javaArray; \ + mRawArray = mEnv->Get ## NAME ## ArrayElements(mJavaArray, NULL); \ + } \ + const PRIMITIVE_TYPE* get() const { return mRawArray; } \ + PRIMITIVE_TYPE ## Array getJavaArray() const { return mJavaArray; } \ + const PRIMITIVE_TYPE& operator[](size_t n) const { return mRawArray[n]; } \ + POINTER_TYPE(PRIMITIVE_TYPE) get() { return mRawArray; } \ + REFERENCE_TYPE(PRIMITIVE_TYPE) operator[](size_t n) { return mRawArray[n]; } \ + size_t size() const { return mEnv->GetArrayLength(mJavaArray); } \ + private: \ + JNIEnv* const mEnv; \ + PRIMITIVE_TYPE ## Array mJavaArray; \ + POINTER_TYPE(PRIMITIVE_TYPE) mRawArray; \ + DISALLOW_COPY_AND_ASSIGN(Scoped ## NAME ## ArrayRW); \ + } + +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jboolean, Boolean); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jbyte, Byte); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jchar, Char); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jdouble, Double); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jfloat, Float); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jint, Int); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jlong, Long); +INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW(jshort, Short); + +#undef INSTANTIATE_SCOPED_PRIMITIVE_ARRAY_RW +#undef POINTER_TYPE +#undef REFERENCE_TYPE + +#endif // SCOPED_PRIMITIVE_ARRAY_H_ diff --git a/third_party/libnativehelper/header_only_include/nativehelper/scoped_string_chars.h b/third_party/libnativehelper/header_only_include/nativehelper/scoped_string_chars.h new file mode 100644 index 0000000000..4debb2af1d --- /dev/null +++ b/third_party/libnativehelper/header_only_include/nativehelper/scoped_string_chars.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_STRING_CHARS_H_ +#define SCOPED_STRING_CHARS_H_ + +#include "jni.h" +#include "nativehelper_utils.h" + +// A smart pointer that provides access to a jchar* given a JNI jstring. +// Unlike GetStringChars, we throw NullPointerException rather than abort if +// passed a null jstring, and get will return NULL. 
+// This makes the correct idiom very simple: +// +// ScopedStringChars name(env, java_name); +// if (name.get() == NULL) { +// return NULL; +// } +class ScopedStringChars { + public: + ScopedStringChars(JNIEnv* env, jstring s) : env_(env), string_(s), size_(0) { + if (s == NULL) { + chars_ = NULL; + jniThrowNullPointerException(env, NULL); + } else { + chars_ = env->GetStringChars(string_, NULL); + if (chars_ != NULL) { + size_ = env->GetStringLength(string_); + } + } + } + + ~ScopedStringChars() { + if (chars_ != NULL) { + env_->ReleaseStringChars(string_, chars_); + } + } + + const jchar* get() const { + return chars_; + } + + size_t size() const { + return size_; + } + + const jchar& operator[](size_t n) const { + return chars_[n]; + } + + private: + JNIEnv* const env_; + const jstring string_; + const jchar* chars_; + size_t size_; + + DISALLOW_COPY_AND_ASSIGN(ScopedStringChars); +}; + +#endif // SCOPED_STRING_CHARS_H_ diff --git a/third_party/libnativehelper/header_only_include/nativehelper/scoped_utf_chars.h b/third_party/libnativehelper/header_only_include/nativehelper/scoped_utf_chars.h new file mode 100644 index 0000000000..bab7cb7d39 --- /dev/null +++ b/third_party/libnativehelper/header_only_include/nativehelper/scoped_utf_chars.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_UTF_CHARS_H_ +#define SCOPED_UTF_CHARS_H_ + +#include + +#include "jni.h" +#include "nativehelper_utils.h" + +// A smart pointer that provides read-only access to a Java string's UTF chars. +// Unlike GetStringUTFChars, we throw NullPointerException rather than abort if +// passed a null jstring, and c_str will return nullptr. +// This makes the correct idiom very simple: +// +// ScopedUtfChars name(env, java_name); +// if (name.c_str() == nullptr) { +// return nullptr; +// } +class ScopedUtfChars { + public: + ScopedUtfChars(JNIEnv* env, jstring s) : env_(env), string_(s) { + if (s == nullptr) { + utf_chars_ = nullptr; + jniThrowNullPointerException(env, nullptr); + } else { + utf_chars_ = env->GetStringUTFChars(s, nullptr); + } + } + + ScopedUtfChars(ScopedUtfChars&& rhs) noexcept : + env_(rhs.env_), string_(rhs.string_), utf_chars_(rhs.utf_chars_) { + rhs.env_ = nullptr; + rhs.string_ = nullptr; + rhs.utf_chars_ = nullptr; + } + + ~ScopedUtfChars() { + if (utf_chars_) { + env_->ReleaseStringUTFChars(string_, utf_chars_); + } + } + + ScopedUtfChars& operator=(ScopedUtfChars&& rhs) noexcept { + if (this != &rhs) { + // Delete the currently owned UTF chars. + this->~ScopedUtfChars(); + + // Move the rhs ScopedUtfChars and zero it out. 
+ env_ = rhs.env_; + string_ = rhs.string_; + utf_chars_ = rhs.utf_chars_; + rhs.env_ = nullptr; + rhs.string_ = nullptr; + rhs.utf_chars_ = nullptr; + } + return *this; + } + + const char* c_str() const { + return utf_chars_; + } + + size_t size() const { + return strlen(utf_chars_); + } + + const char& operator[](size_t n) const { + return utf_chars_[n]; + } + + private: + JNIEnv* env_; + jstring string_; + const char* utf_chars_; + + DISALLOW_COPY_AND_ASSIGN(ScopedUtfChars); +}; + +#endif // SCOPED_UTF_CHARS_H_ diff --git a/third_party/libnativehelper/include/nativehelper/JNIHelp.h b/third_party/libnativehelper/include/nativehelper/JNIHelp.h new file mode 100644 index 0000000000..bab5dd5766 --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/JNIHelp.h @@ -0,0 +1,300 @@ +/* + * Copyright (C) 2007 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * JNI helper functions. + * + * This file may be included by C or C++ code, which is trouble because jni.h + * uses different typedefs for JNIEnv in each language. + */ +#ifndef NATIVEHELPER_JNIHELP_H_ +#define NATIVEHELPER_JNIHELP_H_ + +#include +#include + +#include +#include "module_api.h" + +#ifndef NELEM +# define NELEM(x) ((int) (sizeof(x) / sizeof((x)[0]))) +#endif + +/* + * Register one or more native methods with a particular class. + * "className" looks like "java/lang/String". Aborts on failure. + * TODO: fix all callers and change the return type to void. + */ +MODULE_API int jniRegisterNativeMethods(C_JNIEnv* env, + const char* className, + const JNINativeMethod* gMethods, + int numMethods); + +/* + * Throw an exception with the specified class and an optional message. + * + * The "className" argument will be passed directly to FindClass, which + * takes strings with slashes (e.g. "java/lang/Object"). + * + * If an exception is currently pending, we log a warning message and + * clear it. + * + * Returns 0 on success, nonzero if something failed (e.g. the exception + * class couldn't be found, so *an* exception will still be pending). + * + * Currently aborts the VM if it can't throw the exception. + */ +MODULE_API int jniThrowException(C_JNIEnv* env, const char* className, const char* msg); + +/* + * Throw an exception with the specified class and formatted error message. + * + * The "className" argument will be passed directly to FindClass, which + * takes strings with slashes (e.g. "java/lang/Object"). + * + * If an exception is currently pending, we log a warning message and + * clear it. + * + * Returns 0 on success, nonzero if something failed (e.g. the exception + * class couldn't be found, so *an* exception will still be pending). + * + * Currently aborts the VM if it can't throw the exception. + */ +MODULE_API int jniThrowExceptionFmt(C_JNIEnv* env, const char* className, const char* fmt, va_list args); + +/* + * Throw a java.lang.NullPointerException, with an optional message. 
+ */ +MODULE_API int jniThrowNullPointerException(C_JNIEnv* env, const char* msg); + +/* + * Throw a java.lang.RuntimeException, with an optional message. + */ +MODULE_API int jniThrowRuntimeException(C_JNIEnv* env, const char* msg); + +/* + * Throw a java.io.IOException, generating the message from errno. + */ +MODULE_API int jniThrowIOException(C_JNIEnv* env, int errnum); + +/* + * Return a pointer to a locale-dependent error string explaining errno + * value 'errnum'. The returned pointer may or may not be equal to 'buf'. + * This function is thread-safe (unlike strerror) and portable (unlike + * strerror_r). + */ +MODULE_API const char* jniStrError(int errnum, char* buf, size_t buflen); + +/* + * Returns a new java.io.FileDescriptor for the given int fd. + */ +MODULE_API jobject jniCreateFileDescriptor(C_JNIEnv* env, int fd); + +/* + * Returns the int fd from a java.io.FileDescriptor. + */ +MODULE_API int jniGetFDFromFileDescriptor(C_JNIEnv* env, jobject fileDescriptor); + +/* + * Sets the int fd in a java.io.FileDescriptor. Throws java.lang.NullPointerException + * if fileDescriptor is null. + */ +MODULE_API void jniSetFileDescriptorOfFD(C_JNIEnv* env, + jobject fileDescriptor, + int value); + +/* + * Returns the long ownerId from a java.io.FileDescriptor. + */ +MODULE_API jlong jniGetOwnerIdFromFileDescriptor(C_JNIEnv* env, jobject fileDescriptor); + +/* + * Gets the managed heap array backing a java.nio.Buffer instance. + * + * Returns nullptr if there is no array backing. + * + * This method performs a JNI call to java.nio.NIOAccess.getBaseArray(). + */ +MODULE_API jarray jniGetNioBufferBaseArray(C_JNIEnv* env, jobject nioBuffer); + +/* + * Gets the offset in bytes from the start of the managed heap array backing the buffer. + * + * Returns 0 if there is no array backing. + * + * This method performs a JNI call to java.nio.NIOAccess.getBaseArrayOffset(). + */ +MODULE_API jint jniGetNioBufferBaseArrayOffset(C_JNIEnv* env, jobject nioBuffer); + +/* + * Gets field information from a java.nio.Buffer instance. + * + * Reads the |position|, |limit|, and |elementSizeShift| fields from the buffer instance. + * + * Returns the |address| field of the java.nio.Buffer instance which is only valid (non-zero) when + * the buffer is backed by a direct buffer. + */ +MODULE_API jlong jniGetNioBufferFields(C_JNIEnv* env, + jobject nioBuffer, + /*out*/jint* position, + /*out*/jint* limit, + /*out*/jint* elementSizeShift); + +/* + * Gets the current position from a java.nio.Buffer as a pointer to memory in a fixed buffer. + * + * Returns 0 if |nioBuffer| is not backed by a direct buffer. + * + * This method reads the |address|, |position|, and |elementSizeShift| fields from the + * java.nio.Buffer instance to calculate the pointer address for the current position. + */ +MODULE_API jlong jniGetNioBufferPointer(C_JNIEnv* env, jobject nioBuffer); + +/* + * Returns the reference from a java.lang.ref.Reference. + */ +MODULE_API jobject jniGetReferent(C_JNIEnv* env, jobject ref); + +/* + * Returns a Java String object created from UTF-16 data either from jchar or, + * if called from C++11, char16_t (a bitwise identical distinct type). + */ +MODULE_API jstring jniCreateString(C_JNIEnv* env, const jchar* unicodeChars, jsize len); + +/* + * Log a message and an exception. + * If exception is NULL, logs the current exception in the JNI environment. 
+ */ +MODULE_API void jniLogException(C_JNIEnv* env, int priority, const char* tag, jthrowable exception); + +/* + * Clear the cache of constants libnativehelper is using. + */ +MODULE_API void jniUninitializeConstants(); + +/* + * For C++ code, we provide inlines that map to the C functions. g++ always + * inlines these, even on non-optimized builds. + */ +#if defined(__cplusplus) + +inline int jniRegisterNativeMethods(JNIEnv* env, const char* className, const JNINativeMethod* gMethods, int numMethods) { + return jniRegisterNativeMethods(&env->functions, className, gMethods, numMethods); +} + +inline int jniThrowException(JNIEnv* env, const char* className, const char* msg) { + return jniThrowException(&env->functions, className, msg); +} + +/* + * Equivalent to jniThrowException but with a printf-like format string and + * variable-length argument list. This is only available in C++. + */ +inline int jniThrowExceptionFmt(JNIEnv* env, const char* className, const char* fmt, ...) { + va_list args; + va_start(args, fmt); + return jniThrowExceptionFmt(&env->functions, className, fmt, args); + va_end(args); +} + +inline int jniThrowNullPointerException(JNIEnv* env, const char* msg) { + return jniThrowNullPointerException(&env->functions, msg); +} + +inline int jniThrowRuntimeException(JNIEnv* env, const char* msg) { + return jniThrowRuntimeException(&env->functions, msg); +} + +inline int jniThrowIOException(JNIEnv* env, int errnum) { + return jniThrowIOException(&env->functions, errnum); +} + +inline jobject jniCreateFileDescriptor(JNIEnv* env, int fd) { + return jniCreateFileDescriptor(&env->functions, fd); +} + +inline int jniGetFDFromFileDescriptor(JNIEnv* env, jobject fileDescriptor) { + return jniGetFDFromFileDescriptor(&env->functions, fileDescriptor); +} + +inline void jniSetFileDescriptorOfFD(JNIEnv* env, jobject fileDescriptor, int value) { + jniSetFileDescriptorOfFD(&env->functions, fileDescriptor, value); +} + +inline jlong jniGetOwnerIdFromFileDescriptor(JNIEnv* env, jobject fileDescriptor) { + return jniGetOwnerIdFromFileDescriptor(&env->functions, fileDescriptor); +} + +inline jarray jniGetNioBufferBaseArray(JNIEnv* env, jobject nioBuffer) { + return jniGetNioBufferBaseArray(&env->functions, nioBuffer); +} + +inline jint jniGetNioBufferBaseArrayOffset(JNIEnv* env, jobject nioBuffer) { + return jniGetNioBufferBaseArrayOffset(&env->functions, nioBuffer); +} + +inline jlong jniGetNioBufferFields(JNIEnv* env, jobject nioBuffer, + jint* position, jint* limit, jint* elementSizeShift) { + return jniGetNioBufferFields(&env->functions, nioBuffer, + position, limit, elementSizeShift); +} + +inline jlong jniGetNioBufferPointer(JNIEnv* env, jobject nioBuffer) { + return jniGetNioBufferPointer(&env->functions, nioBuffer); +} + +inline jobject jniGetReferent(JNIEnv* env, jobject ref) { + return jniGetReferent(&env->functions, ref); +} + +inline jstring jniCreateString(JNIEnv* env, const jchar* unicodeChars, jsize len) { + return jniCreateString(&env->functions, unicodeChars, len); +} + +inline jstring jniCreateString(JNIEnv* env, const char16_t* unicodeChars, jsize len) { + return jniCreateString(&env->functions, reinterpret_cast(unicodeChars), len); +} + +inline void jniLogException(JNIEnv* env, int priority, const char* tag, jthrowable exception = NULL) { + jniLogException(&env->functions, priority, tag, exception); +} + +#if !defined(DISALLOW_COPY_AND_ASSIGN) +// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private: +// declarations in a class. 
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+    TypeName(const TypeName&) = delete;  \
+    void operator=(const TypeName&) = delete
+#endif  // !defined(DISALLOW_COPY_AND_ASSIGN)
+
+#endif  // defined(__cplusplus)
+
+/*
+ * TEMP_FAILURE_RETRY is defined by some, but not all, versions of
+ * <unistd.h>. (Alas, it is not as standard as we'd hoped!) So, if it's
+ * not already defined, then define it here.
+ */
+#ifndef TEMP_FAILURE_RETRY
+/* Used to retry syscalls that can return EINTR. */
+#define TEMP_FAILURE_RETRY(exp) ({         \
+    typeof (exp) _rc;                      \
+    do {                                   \
+        _rc = (exp);                       \
+    } while (_rc == -1 && errno == EINTR); \
+    _rc; })
+#endif
+
+#endif  /* NATIVEHELPER_JNIHELP_H_ */
diff --git a/third_party/libnativehelper/include/nativehelper/JniInvocation.h b/third_party/libnativehelper/include/nativehelper/JniInvocation.h
new file mode 100644
index 0000000000..0d87aa98ae
--- /dev/null
+++ b/third_party/libnativehelper/include/nativehelper/JniInvocation.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JNI_INVOCATION_H_included
+#define JNI_INVOCATION_H_included
+
+#include <jni.h>
+#include "module_api.h"
+
+struct JniInvocationImpl;
+
+MODULE_API struct JniInvocationImpl* JniInvocationCreate();
+MODULE_API void JniInvocationDestroy(struct JniInvocationImpl* instance);
+MODULE_API int JniInvocationInit(struct JniInvocationImpl* instance, const char* library);
+MODULE_API const char* JniInvocationGetLibrary(const char* library, char* buffer);
+
+#ifdef __cplusplus
+
+// JniInvocation adds a layer of indirection for applications using
+// the JNI invocation API to allow the JNI implementation to be
+// selected dynamically. Apps can specify a specific implementation to
+// be used by calling InitJniInvocation. If this is not done, the
+// library will be chosen based on the value of the Android system property
+// persist.sys.dalvik.vm.lib on the device, and otherwise fall back to
+// a hard-coded default implementation.
+class JniInvocation final {
+ public:
+  JniInvocation() {
+    impl_ = JniInvocationCreate();
+  }
+
+  ~JniInvocation() {
+    JniInvocationDestroy(impl_);
+  }
+
+  // Initialize the JNI invocation API. library should specify a valid
+  // shared library for opening via dlopen providing a JNI invocation
+  // implementation, or null to allow defaulting via
+  // persist.sys.dalvik.vm.lib.
+  bool Init(const char* library) {
+    return JniInvocationInit(impl_, library) != 0;
+  }
+
+  // Exposes which library is actually loaded from the given name. The
+  // buffer of size PROPERTY_VALUE_MAX will be used to load the system
+  // property for the default library, if necessary. If no buffer is
+  // provided, the fallback value will be used.
+ static const char* GetLibrary(const char* library, char* buffer) { + return JniInvocationGetLibrary(library, buffer); + } + + private: + JniInvocation(const JniInvocation&) = delete; + JniInvocation& operator=(const JniInvocation&) = delete; + + static const char* GetLibrary(const char* library, char* buffer, bool (*is_debuggable)(), + int (*get_library_system_property)(char* buffer)); + + JniInvocationImpl* impl_; + + friend class JNIInvocation_Debuggable_Test; + friend class JNIInvocation_NonDebuggable_Test; +}; + +#endif // __cplusplus + +#endif // JNI_INVOCATION_H_included diff --git a/third_party/libnativehelper/include/nativehelper/ScopedBytes.h b/third_party/libnativehelper/include/nativehelper/ScopedBytes.h new file mode 100644 index 0000000000..7cb2ad09c6 --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/ScopedBytes.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_BYTES_H_included +#define SCOPED_BYTES_H_included + +#include "JNIHelp.h" +#include + +#endif // SCOPED_BYTES_H_included diff --git a/third_party/libnativehelper/include/nativehelper/ScopedLocalFrame.h b/third_party/libnativehelper/include/nativehelper/ScopedLocalFrame.h new file mode 100644 index 0000000000..57873f271e --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/ScopedLocalFrame.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_LOCAL_FRAME_H_included +#define SCOPED_LOCAL_FRAME_H_included + +#include + +#endif // SCOPED_LOCAL_FRAME_H_included diff --git a/third_party/libnativehelper/include/nativehelper/ScopedLocalRef.h b/third_party/libnativehelper/include/nativehelper/ScopedLocalRef.h new file mode 100644 index 0000000000..0fb03d75ac --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/ScopedLocalRef.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_LOCAL_REF_H_included +#define SCOPED_LOCAL_REF_H_included + +#include "JNIHelp.h" +#include + +#endif // SCOPED_LOCAL_REF_H_included diff --git a/third_party/libnativehelper/include/nativehelper/ScopedPrimitiveArray.h b/third_party/libnativehelper/include/nativehelper/ScopedPrimitiveArray.h new file mode 100644 index 0000000000..626b64f18f --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/ScopedPrimitiveArray.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_PRIMITIVE_ARRAY_H_included +#define SCOPED_PRIMITIVE_ARRAY_H_included + +#include "JNIHelp.h" +#include + +#endif // SCOPED_PRIMITIVE_ARRAY_H_included diff --git a/third_party/libnativehelper/include/nativehelper/ScopedStringChars.h b/third_party/libnativehelper/include/nativehelper/ScopedStringChars.h new file mode 100644 index 0000000000..59c405c09f --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/ScopedStringChars.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SCOPED_STRING_CHARS_H_included +#define SCOPED_STRING_CHARS_H_included + +#include "JNIHelp.h" +#include + +#endif // SCOPED_STRING_CHARS_H_included diff --git a/third_party/libnativehelper/include/nativehelper/ScopedUtfChars.h b/third_party/libnativehelper/include/nativehelper/ScopedUtfChars.h new file mode 100644 index 0000000000..f123115feb --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/ScopedUtfChars.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2010 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SCOPED_UTF_CHARS_H_included +#define SCOPED_UTF_CHARS_H_included + +#include "JNIHelp.h" +#include + +#endif // SCOPED_UTF_CHARS_H_included diff --git a/third_party/libnativehelper/include/nativehelper/module_api.h b/third_party/libnativehelper/include/nativehelper/module_api.h new file mode 100644 index 0000000000..8b109e3935 --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/module_api.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#ifdef __cplusplus +#define MODULE_API extern "C" +#else +#define MODULE_API +#endif // __cplusplus diff --git a/third_party/libnativehelper/include/nativehelper/toStringArray.h b/third_party/libnativehelper/include/nativehelper/toStringArray.h new file mode 100644 index 0000000000..1965d6a7fe --- /dev/null +++ b/third_party/libnativehelper/include/nativehelper/toStringArray.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TO_STRING_ARRAY_H_included +#define TO_STRING_ARRAY_H_included + +#include + +#include +#include "module_api.h" + +// Public API for libnativehelper library. 
+MODULE_API jobjectArray newStringArray(JNIEnv* env, size_t count);
+MODULE_API jobjectArray toStringArray(JNIEnv* env, const char* const* strings);
+
+#ifdef __cplusplus
+
+#include <string>
+#include <vector>
+#include "ScopedLocalRef.h"
+
+template <typename Counter, typename Getter>
+jobjectArray toStringArray(JNIEnv* env, Counter* counter, Getter* getter) {
+    size_t count = (*counter)();
+    jobjectArray result = newStringArray(env, count);
+    if (result == NULL) {
+        return NULL;
+    }
+    for (size_t i = 0; i < count; ++i) {
+        ScopedLocalRef<jstring> s(env, env->NewStringUTF((*getter)(i)));
+        if (env->ExceptionCheck()) {
+            return NULL;
+        }
+        env->SetObjectArrayElement(result, i, s.get());
+        if (env->ExceptionCheck()) {
+            return NULL;
+        }
+    }
+    return result;
+}
+
+struct VectorCounter {
+    const std::vector<std::string>& strings;
+    explicit VectorCounter(const std::vector<std::string>& strings) : strings(strings) {}
+    size_t operator()() {
+        return strings.size();
+    }
+};
+struct VectorGetter {
+    const std::vector<std::string>& strings;
+    explicit VectorGetter(const std::vector<std::string>& strings) : strings(strings) {}
+    const char* operator()(size_t i) {
+        return strings[i].c_str();
+    }
+};
+
+inline jobjectArray toStringArray(JNIEnv* env, const std::vector<std::string>& strings) {
+    VectorCounter counter(strings);
+    VectorGetter getter(strings);
+    return toStringArray(env, &counter, &getter);
+}
+
+#endif  // __cplusplus
+
+#endif  // TO_STRING_ARRAY_H_included
diff --git a/third_party/libnativehelper/include_jni/jni.h b/third_party/libnativehelper/include_jni/jni.h
new file mode 100644
index 0000000000..4c343139e0
--- /dev/null
+++ b/third_party/libnativehelper/include_jni/jni.h
@@ -0,0 +1,1143 @@
+/*
+ * Copyright (C) 2006 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * JNI specification, as defined by Sun:
+ * http://java.sun.com/javase/6/docs/technotes/guides/jni/spec/jniTOC.html
+ *
+ * Everything here is expected to be VM-neutral.
+ */
+
+#ifndef JNI_H_
+#define JNI_H_
+
+#include <stdarg.h>
+#include <stdint.h>
+
+/* Primitive types that match up with Java equivalents.
*/ +typedef uint8_t jboolean; /* unsigned 8 bits */ +typedef int8_t jbyte; /* signed 8 bits */ +typedef uint16_t jchar; /* unsigned 16 bits */ +typedef int16_t jshort; /* signed 16 bits */ +typedef int32_t jint; /* signed 32 bits */ +typedef int64_t jlong; /* signed 64 bits */ +typedef float jfloat; /* 32-bit IEEE 754 */ +typedef double jdouble; /* 64-bit IEEE 754 */ + +/* "cardinal indices and sizes" */ +typedef jint jsize; + +#ifdef __cplusplus +/* + * Reference types, in C++ + */ +class _jobject {}; +class _jclass : public _jobject {}; +class _jstring : public _jobject {}; +class _jarray : public _jobject {}; +class _jobjectArray : public _jarray {}; +class _jbooleanArray : public _jarray {}; +class _jbyteArray : public _jarray {}; +class _jcharArray : public _jarray {}; +class _jshortArray : public _jarray {}; +class _jintArray : public _jarray {}; +class _jlongArray : public _jarray {}; +class _jfloatArray : public _jarray {}; +class _jdoubleArray : public _jarray {}; +class _jthrowable : public _jobject {}; + +typedef _jobject* jobject; +typedef _jclass* jclass; +typedef _jstring* jstring; +typedef _jarray* jarray; +typedef _jobjectArray* jobjectArray; +typedef _jbooleanArray* jbooleanArray; +typedef _jbyteArray* jbyteArray; +typedef _jcharArray* jcharArray; +typedef _jshortArray* jshortArray; +typedef _jintArray* jintArray; +typedef _jlongArray* jlongArray; +typedef _jfloatArray* jfloatArray; +typedef _jdoubleArray* jdoubleArray; +typedef _jthrowable* jthrowable; +typedef _jobject* jweak; + + +#else /* not __cplusplus */ + +/* + * Reference types, in C. + */ +typedef void* jobject; +typedef jobject jclass; +typedef jobject jstring; +typedef jobject jarray; +typedef jarray jobjectArray; +typedef jarray jbooleanArray; +typedef jarray jbyteArray; +typedef jarray jcharArray; +typedef jarray jshortArray; +typedef jarray jintArray; +typedef jarray jlongArray; +typedef jarray jfloatArray; +typedef jarray jdoubleArray; +typedef jobject jthrowable; +typedef jobject jweak; + +#endif /* not __cplusplus */ + +struct _jfieldID; /* opaque structure */ +typedef struct _jfieldID* jfieldID; /* field IDs */ + +struct _jmethodID; /* opaque structure */ +typedef struct _jmethodID* jmethodID; /* method IDs */ + +struct JNIInvokeInterface; + +typedef union jvalue { + jboolean z; + jbyte b; + jchar c; + jshort s; + jint i; + jlong j; + jfloat f; + jdouble d; + jobject l; +} jvalue; + +typedef enum jobjectRefType { + JNIInvalidRefType = 0, + JNILocalRefType = 1, + JNIGlobalRefType = 2, + JNIWeakGlobalRefType = 3 +} jobjectRefType; + +typedef struct { + const char* name; + const char* signature; + void* fnPtr; +} JNINativeMethod; + +struct _JNIEnv; +struct _JavaVM; +typedef const struct JNINativeInterface* C_JNIEnv; + +#if defined(__cplusplus) +typedef _JNIEnv JNIEnv; +typedef _JavaVM JavaVM; +#else +typedef const struct JNINativeInterface* JNIEnv; +typedef const struct JNIInvokeInterface* JavaVM; +#endif + +/* + * Table of interface function pointers. 
+ */ +struct JNINativeInterface { + void* reserved0; + void* reserved1; + void* reserved2; + void* reserved3; + + jint (*GetVersion)(JNIEnv *); + + jclass (*DefineClass)(JNIEnv*, const char*, jobject, const jbyte*, + jsize); + jclass (*FindClass)(JNIEnv*, const char*); + + jmethodID (*FromReflectedMethod)(JNIEnv*, jobject); + jfieldID (*FromReflectedField)(JNIEnv*, jobject); + /* spec doesn't show jboolean parameter */ + jobject (*ToReflectedMethod)(JNIEnv*, jclass, jmethodID, jboolean); + + jclass (*GetSuperclass)(JNIEnv*, jclass); + jboolean (*IsAssignableFrom)(JNIEnv*, jclass, jclass); + + /* spec doesn't show jboolean parameter */ + jobject (*ToReflectedField)(JNIEnv*, jclass, jfieldID, jboolean); + + jint (*Throw)(JNIEnv*, jthrowable); + jint (*ThrowNew)(JNIEnv *, jclass, const char *); + jthrowable (*ExceptionOccurred)(JNIEnv*); + void (*ExceptionDescribe)(JNIEnv*); + void (*ExceptionClear)(JNIEnv*); + void (*FatalError)(JNIEnv*, const char*); + + jint (*PushLocalFrame)(JNIEnv*, jint); + jobject (*PopLocalFrame)(JNIEnv*, jobject); + + jobject (*NewGlobalRef)(JNIEnv*, jobject); + void (*DeleteGlobalRef)(JNIEnv*, jobject); + void (*DeleteLocalRef)(JNIEnv*, jobject); + jboolean (*IsSameObject)(JNIEnv*, jobject, jobject); + + jobject (*NewLocalRef)(JNIEnv*, jobject); + jint (*EnsureLocalCapacity)(JNIEnv*, jint); + + jobject (*AllocObject)(JNIEnv*, jclass); + jobject (*NewObject)(JNIEnv*, jclass, jmethodID, ...); + jobject (*NewObjectV)(JNIEnv*, jclass, jmethodID, va_list); + jobject (*NewObjectA)(JNIEnv*, jclass, jmethodID, const jvalue*); + + jclass (*GetObjectClass)(JNIEnv*, jobject); + jboolean (*IsInstanceOf)(JNIEnv*, jobject, jclass); + jmethodID (*GetMethodID)(JNIEnv*, jclass, const char*, const char*); + + jobject (*CallObjectMethod)(JNIEnv*, jobject, jmethodID, ...); + jobject (*CallObjectMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jobject (*CallObjectMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jboolean (*CallBooleanMethod)(JNIEnv*, jobject, jmethodID, ...); + jboolean (*CallBooleanMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jboolean (*CallBooleanMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jbyte (*CallByteMethod)(JNIEnv*, jobject, jmethodID, ...); + jbyte (*CallByteMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jbyte (*CallByteMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jchar (*CallCharMethod)(JNIEnv*, jobject, jmethodID, ...); + jchar (*CallCharMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jchar (*CallCharMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jshort (*CallShortMethod)(JNIEnv*, jobject, jmethodID, ...); + jshort (*CallShortMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jshort (*CallShortMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jint (*CallIntMethod)(JNIEnv*, jobject, jmethodID, ...); + jint (*CallIntMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jint (*CallIntMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jlong (*CallLongMethod)(JNIEnv*, jobject, jmethodID, ...); + jlong (*CallLongMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jlong (*CallLongMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jfloat (*CallFloatMethod)(JNIEnv*, jobject, jmethodID, ...); + jfloat (*CallFloatMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jfloat (*CallFloatMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + jdouble (*CallDoubleMethod)(JNIEnv*, jobject, jmethodID, ...); + jdouble (*CallDoubleMethodV)(JNIEnv*, jobject, jmethodID, va_list); + jdouble (*CallDoubleMethodA)(JNIEnv*, jobject, 
jmethodID, const jvalue*); + void (*CallVoidMethod)(JNIEnv*, jobject, jmethodID, ...); + void (*CallVoidMethodV)(JNIEnv*, jobject, jmethodID, va_list); + void (*CallVoidMethodA)(JNIEnv*, jobject, jmethodID, const jvalue*); + + jobject (*CallNonvirtualObjectMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jobject (*CallNonvirtualObjectMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jobject (*CallNonvirtualObjectMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jboolean (*CallNonvirtualBooleanMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jboolean (*CallNonvirtualBooleanMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jboolean (*CallNonvirtualBooleanMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jbyte (*CallNonvirtualByteMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jbyte (*CallNonvirtualByteMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jbyte (*CallNonvirtualByteMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jchar (*CallNonvirtualCharMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jchar (*CallNonvirtualCharMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jchar (*CallNonvirtualCharMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jshort (*CallNonvirtualShortMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jshort (*CallNonvirtualShortMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jshort (*CallNonvirtualShortMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jint (*CallNonvirtualIntMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jint (*CallNonvirtualIntMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jint (*CallNonvirtualIntMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jlong (*CallNonvirtualLongMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jlong (*CallNonvirtualLongMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jlong (*CallNonvirtualLongMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jfloat (*CallNonvirtualFloatMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jfloat (*CallNonvirtualFloatMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jfloat (*CallNonvirtualFloatMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + jdouble (*CallNonvirtualDoubleMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + jdouble (*CallNonvirtualDoubleMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + jdouble (*CallNonvirtualDoubleMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + void (*CallNonvirtualVoidMethod)(JNIEnv*, jobject, jclass, + jmethodID, ...); + void (*CallNonvirtualVoidMethodV)(JNIEnv*, jobject, jclass, + jmethodID, va_list); + void (*CallNonvirtualVoidMethodA)(JNIEnv*, jobject, jclass, + jmethodID, const jvalue*); + + jfieldID (*GetFieldID)(JNIEnv*, jclass, const char*, const char*); + + jobject (*GetObjectField)(JNIEnv*, jobject, jfieldID); + jboolean (*GetBooleanField)(JNIEnv*, jobject, jfieldID); + jbyte (*GetByteField)(JNIEnv*, jobject, jfieldID); + jchar (*GetCharField)(JNIEnv*, jobject, jfieldID); + jshort (*GetShortField)(JNIEnv*, jobject, jfieldID); + jint (*GetIntField)(JNIEnv*, jobject, jfieldID); + jlong (*GetLongField)(JNIEnv*, jobject, jfieldID); + jfloat (*GetFloatField)(JNIEnv*, jobject, jfieldID); + jdouble (*GetDoubleField)(JNIEnv*, jobject, jfieldID); + + void (*SetObjectField)(JNIEnv*, jobject, jfieldID, jobject); + void (*SetBooleanField)(JNIEnv*, jobject, jfieldID, 
jboolean); + void (*SetByteField)(JNIEnv*, jobject, jfieldID, jbyte); + void (*SetCharField)(JNIEnv*, jobject, jfieldID, jchar); + void (*SetShortField)(JNIEnv*, jobject, jfieldID, jshort); + void (*SetIntField)(JNIEnv*, jobject, jfieldID, jint); + void (*SetLongField)(JNIEnv*, jobject, jfieldID, jlong); + void (*SetFloatField)(JNIEnv*, jobject, jfieldID, jfloat); + void (*SetDoubleField)(JNIEnv*, jobject, jfieldID, jdouble); + + jmethodID (*GetStaticMethodID)(JNIEnv*, jclass, const char*, const char*); + + jobject (*CallStaticObjectMethod)(JNIEnv*, jclass, jmethodID, ...); + jobject (*CallStaticObjectMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jobject (*CallStaticObjectMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jboolean (*CallStaticBooleanMethod)(JNIEnv*, jclass, jmethodID, ...); + jboolean (*CallStaticBooleanMethodV)(JNIEnv*, jclass, jmethodID, + va_list); + jboolean (*CallStaticBooleanMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jbyte (*CallStaticByteMethod)(JNIEnv*, jclass, jmethodID, ...); + jbyte (*CallStaticByteMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jbyte (*CallStaticByteMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jchar (*CallStaticCharMethod)(JNIEnv*, jclass, jmethodID, ...); + jchar (*CallStaticCharMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jchar (*CallStaticCharMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jshort (*CallStaticShortMethod)(JNIEnv*, jclass, jmethodID, ...); + jshort (*CallStaticShortMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jshort (*CallStaticShortMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jint (*CallStaticIntMethod)(JNIEnv*, jclass, jmethodID, ...); + jint (*CallStaticIntMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jint (*CallStaticIntMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jlong (*CallStaticLongMethod)(JNIEnv*, jclass, jmethodID, ...); + jlong (*CallStaticLongMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jlong (*CallStaticLongMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jfloat (*CallStaticFloatMethod)(JNIEnv*, jclass, jmethodID, ...); + jfloat (*CallStaticFloatMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jfloat (*CallStaticFloatMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + jdouble (*CallStaticDoubleMethod)(JNIEnv*, jclass, jmethodID, ...); + jdouble (*CallStaticDoubleMethodV)(JNIEnv*, jclass, jmethodID, va_list); + jdouble (*CallStaticDoubleMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + void (*CallStaticVoidMethod)(JNIEnv*, jclass, jmethodID, ...); + void (*CallStaticVoidMethodV)(JNIEnv*, jclass, jmethodID, va_list); + void (*CallStaticVoidMethodA)(JNIEnv*, jclass, jmethodID, const jvalue*); + + jfieldID (*GetStaticFieldID)(JNIEnv*, jclass, const char*, + const char*); + + jobject (*GetStaticObjectField)(JNIEnv*, jclass, jfieldID); + jboolean (*GetStaticBooleanField)(JNIEnv*, jclass, jfieldID); + jbyte (*GetStaticByteField)(JNIEnv*, jclass, jfieldID); + jchar (*GetStaticCharField)(JNIEnv*, jclass, jfieldID); + jshort (*GetStaticShortField)(JNIEnv*, jclass, jfieldID); + jint (*GetStaticIntField)(JNIEnv*, jclass, jfieldID); + jlong (*GetStaticLongField)(JNIEnv*, jclass, jfieldID); + jfloat (*GetStaticFloatField)(JNIEnv*, jclass, jfieldID); + jdouble (*GetStaticDoubleField)(JNIEnv*, jclass, jfieldID); + + void (*SetStaticObjectField)(JNIEnv*, jclass, jfieldID, jobject); + void (*SetStaticBooleanField)(JNIEnv*, jclass, jfieldID, jboolean); + void (*SetStaticByteField)(JNIEnv*, jclass, jfieldID, jbyte); + void 
(*SetStaticCharField)(JNIEnv*, jclass, jfieldID, jchar); + void (*SetStaticShortField)(JNIEnv*, jclass, jfieldID, jshort); + void (*SetStaticIntField)(JNIEnv*, jclass, jfieldID, jint); + void (*SetStaticLongField)(JNIEnv*, jclass, jfieldID, jlong); + void (*SetStaticFloatField)(JNIEnv*, jclass, jfieldID, jfloat); + void (*SetStaticDoubleField)(JNIEnv*, jclass, jfieldID, jdouble); + + jstring (*NewString)(JNIEnv*, const jchar*, jsize); + jsize (*GetStringLength)(JNIEnv*, jstring); + const jchar* (*GetStringChars)(JNIEnv*, jstring, jboolean*); + void (*ReleaseStringChars)(JNIEnv*, jstring, const jchar*); + jstring (*NewStringUTF)(JNIEnv*, const char*); + jsize (*GetStringUTFLength)(JNIEnv*, jstring); + /* JNI spec says this returns const jbyte*, but that's inconsistent */ + const char* (*GetStringUTFChars)(JNIEnv*, jstring, jboolean*); + void (*ReleaseStringUTFChars)(JNIEnv*, jstring, const char*); + jsize (*GetArrayLength)(JNIEnv*, jarray); + jobjectArray (*NewObjectArray)(JNIEnv*, jsize, jclass, jobject); + jobject (*GetObjectArrayElement)(JNIEnv*, jobjectArray, jsize); + void (*SetObjectArrayElement)(JNIEnv*, jobjectArray, jsize, jobject); + + jbooleanArray (*NewBooleanArray)(JNIEnv*, jsize); + jbyteArray (*NewByteArray)(JNIEnv*, jsize); + jcharArray (*NewCharArray)(JNIEnv*, jsize); + jshortArray (*NewShortArray)(JNIEnv*, jsize); + jintArray (*NewIntArray)(JNIEnv*, jsize); + jlongArray (*NewLongArray)(JNIEnv*, jsize); + jfloatArray (*NewFloatArray)(JNIEnv*, jsize); + jdoubleArray (*NewDoubleArray)(JNIEnv*, jsize); + + jboolean* (*GetBooleanArrayElements)(JNIEnv*, jbooleanArray, jboolean*); + jbyte* (*GetByteArrayElements)(JNIEnv*, jbyteArray, jboolean*); + jchar* (*GetCharArrayElements)(JNIEnv*, jcharArray, jboolean*); + jshort* (*GetShortArrayElements)(JNIEnv*, jshortArray, jboolean*); + jint* (*GetIntArrayElements)(JNIEnv*, jintArray, jboolean*); + jlong* (*GetLongArrayElements)(JNIEnv*, jlongArray, jboolean*); + jfloat* (*GetFloatArrayElements)(JNIEnv*, jfloatArray, jboolean*); + jdouble* (*GetDoubleArrayElements)(JNIEnv*, jdoubleArray, jboolean*); + + void (*ReleaseBooleanArrayElements)(JNIEnv*, jbooleanArray, + jboolean*, jint); + void (*ReleaseByteArrayElements)(JNIEnv*, jbyteArray, + jbyte*, jint); + void (*ReleaseCharArrayElements)(JNIEnv*, jcharArray, + jchar*, jint); + void (*ReleaseShortArrayElements)(JNIEnv*, jshortArray, + jshort*, jint); + void (*ReleaseIntArrayElements)(JNIEnv*, jintArray, + jint*, jint); + void (*ReleaseLongArrayElements)(JNIEnv*, jlongArray, + jlong*, jint); + void (*ReleaseFloatArrayElements)(JNIEnv*, jfloatArray, + jfloat*, jint); + void (*ReleaseDoubleArrayElements)(JNIEnv*, jdoubleArray, + jdouble*, jint); + + void (*GetBooleanArrayRegion)(JNIEnv*, jbooleanArray, + jsize, jsize, jboolean*); + void (*GetByteArrayRegion)(JNIEnv*, jbyteArray, + jsize, jsize, jbyte*); + void (*GetCharArrayRegion)(JNIEnv*, jcharArray, + jsize, jsize, jchar*); + void (*GetShortArrayRegion)(JNIEnv*, jshortArray, + jsize, jsize, jshort*); + void (*GetIntArrayRegion)(JNIEnv*, jintArray, + jsize, jsize, jint*); + void (*GetLongArrayRegion)(JNIEnv*, jlongArray, + jsize, jsize, jlong*); + void (*GetFloatArrayRegion)(JNIEnv*, jfloatArray, + jsize, jsize, jfloat*); + void (*GetDoubleArrayRegion)(JNIEnv*, jdoubleArray, + jsize, jsize, jdouble*); + + /* spec shows these without const; some jni.h do, some don't */ + void (*SetBooleanArrayRegion)(JNIEnv*, jbooleanArray, + jsize, jsize, const jboolean*); + void (*SetByteArrayRegion)(JNIEnv*, jbyteArray, + jsize, jsize, const jbyte*); + 
void (*SetCharArrayRegion)(JNIEnv*, jcharArray, + jsize, jsize, const jchar*); + void (*SetShortArrayRegion)(JNIEnv*, jshortArray, + jsize, jsize, const jshort*); + void (*SetIntArrayRegion)(JNIEnv*, jintArray, + jsize, jsize, const jint*); + void (*SetLongArrayRegion)(JNIEnv*, jlongArray, + jsize, jsize, const jlong*); + void (*SetFloatArrayRegion)(JNIEnv*, jfloatArray, + jsize, jsize, const jfloat*); + void (*SetDoubleArrayRegion)(JNIEnv*, jdoubleArray, + jsize, jsize, const jdouble*); + + jint (*RegisterNatives)(JNIEnv*, jclass, const JNINativeMethod*, + jint); + jint (*UnregisterNatives)(JNIEnv*, jclass); + jint (*MonitorEnter)(JNIEnv*, jobject); + jint (*MonitorExit)(JNIEnv*, jobject); + jint (*GetJavaVM)(JNIEnv*, JavaVM**); + + void (*GetStringRegion)(JNIEnv*, jstring, jsize, jsize, jchar*); + void (*GetStringUTFRegion)(JNIEnv*, jstring, jsize, jsize, char*); + + void* (*GetPrimitiveArrayCritical)(JNIEnv*, jarray, jboolean*); + void (*ReleasePrimitiveArrayCritical)(JNIEnv*, jarray, void*, jint); + + const jchar* (*GetStringCritical)(JNIEnv*, jstring, jboolean*); + void (*ReleaseStringCritical)(JNIEnv*, jstring, const jchar*); + + jweak (*NewWeakGlobalRef)(JNIEnv*, jobject); + void (*DeleteWeakGlobalRef)(JNIEnv*, jweak); + + jboolean (*ExceptionCheck)(JNIEnv*); + + jobject (*NewDirectByteBuffer)(JNIEnv*, void*, jlong); + void* (*GetDirectBufferAddress)(JNIEnv*, jobject); + jlong (*GetDirectBufferCapacity)(JNIEnv*, jobject); + + /* added in JNI 1.6 */ + jobjectRefType (*GetObjectRefType)(JNIEnv*, jobject); +}; + +/* + * C++ object wrapper. + * + * This is usually overlaid on a C struct whose first element is a + * JNINativeInterface*. We rely somewhat on compiler behavior. + */ +struct _JNIEnv { + /* do not rename this; it does not seem to be entirely opaque */ + const struct JNINativeInterface* functions; + +#if defined(__cplusplus) + + jint GetVersion() + { return functions->GetVersion(this); } + + jclass DefineClass(const char *name, jobject loader, const jbyte* buf, + jsize bufLen) + { return functions->DefineClass(this, name, loader, buf, bufLen); } + + jclass FindClass(const char* name) + { return functions->FindClass(this, name); } + + jmethodID FromReflectedMethod(jobject method) + { return functions->FromReflectedMethod(this, method); } + + jfieldID FromReflectedField(jobject field) + { return functions->FromReflectedField(this, field); } + + jobject ToReflectedMethod(jclass cls, jmethodID methodID, jboolean isStatic) + { return functions->ToReflectedMethod(this, cls, methodID, isStatic); } + + jclass GetSuperclass(jclass clazz) + { return functions->GetSuperclass(this, clazz); } + + jboolean IsAssignableFrom(jclass clazz1, jclass clazz2) + { return functions->IsAssignableFrom(this, clazz1, clazz2); } + + jobject ToReflectedField(jclass cls, jfieldID fieldID, jboolean isStatic) + { return functions->ToReflectedField(this, cls, fieldID, isStatic); } + + jint Throw(jthrowable obj) + { return functions->Throw(this, obj); } + + jint ThrowNew(jclass clazz, const char* message) + { return functions->ThrowNew(this, clazz, message); } + + jthrowable ExceptionOccurred() + { return functions->ExceptionOccurred(this); } + + void ExceptionDescribe() + { functions->ExceptionDescribe(this); } + + void ExceptionClear() + { functions->ExceptionClear(this); } + + void FatalError(const char* msg) + { functions->FatalError(this, msg); } + + jint PushLocalFrame(jint capacity) + { return functions->PushLocalFrame(this, capacity); } + + jobject PopLocalFrame(jobject result) + { return 
functions->PopLocalFrame(this, result); } + + jobject NewGlobalRef(jobject obj) + { return functions->NewGlobalRef(this, obj); } + + void DeleteGlobalRef(jobject globalRef) + { functions->DeleteGlobalRef(this, globalRef); } + + void DeleteLocalRef(jobject localRef) + { functions->DeleteLocalRef(this, localRef); } + + jboolean IsSameObject(jobject ref1, jobject ref2) + { return functions->IsSameObject(this, ref1, ref2); } + + jobject NewLocalRef(jobject ref) + { return functions->NewLocalRef(this, ref); } + + jint EnsureLocalCapacity(jint capacity) + { return functions->EnsureLocalCapacity(this, capacity); } + + jobject AllocObject(jclass clazz) + { return functions->AllocObject(this, clazz); } + + jobject NewObject(jclass clazz, jmethodID methodID, ...) + { + va_list args; + va_start(args, methodID); + jobject result = functions->NewObjectV(this, clazz, methodID, args); + va_end(args); + return result; + } + + jobject NewObjectV(jclass clazz, jmethodID methodID, va_list args) + { return functions->NewObjectV(this, clazz, methodID, args); } + + jobject NewObjectA(jclass clazz, jmethodID methodID, const jvalue* args) + { return functions->NewObjectA(this, clazz, methodID, args); } + + jclass GetObjectClass(jobject obj) + { return functions->GetObjectClass(this, obj); } + + jboolean IsInstanceOf(jobject obj, jclass clazz) + { return functions->IsInstanceOf(this, obj, clazz); } + + jmethodID GetMethodID(jclass clazz, const char* name, const char* sig) + { return functions->GetMethodID(this, clazz, name, sig); } + +#define CALL_TYPE_METHOD(_jtype, _jname) \ + _jtype Call##_jname##Method(jobject obj, jmethodID methodID, ...) \ + { \ + _jtype result; \ + va_list args; \ + va_start(args, methodID); \ + result = functions->Call##_jname##MethodV(this, obj, methodID, \ + args); \ + va_end(args); \ + return result; \ + } +#define CALL_TYPE_METHODV(_jtype, _jname) \ + _jtype Call##_jname##MethodV(jobject obj, jmethodID methodID, \ + va_list args) \ + { return functions->Call##_jname##MethodV(this, obj, methodID, args); } +#define CALL_TYPE_METHODA(_jtype, _jname) \ + _jtype Call##_jname##MethodA(jobject obj, jmethodID methodID, \ + const jvalue* args) \ + { return functions->Call##_jname##MethodA(this, obj, methodID, args); } + +#define CALL_TYPE(_jtype, _jname) \ + CALL_TYPE_METHOD(_jtype, _jname) \ + CALL_TYPE_METHODV(_jtype, _jname) \ + CALL_TYPE_METHODA(_jtype, _jname) + + CALL_TYPE(jobject, Object) + CALL_TYPE(jboolean, Boolean) + CALL_TYPE(jbyte, Byte) + CALL_TYPE(jchar, Char) + CALL_TYPE(jshort, Short) + CALL_TYPE(jint, Int) + CALL_TYPE(jlong, Long) + CALL_TYPE(jfloat, Float) + CALL_TYPE(jdouble, Double) + + void CallVoidMethod(jobject obj, jmethodID methodID, ...) + { + va_list args; + va_start(args, methodID); + functions->CallVoidMethodV(this, obj, methodID, args); + va_end(args); + } + void CallVoidMethodV(jobject obj, jmethodID methodID, va_list args) + { functions->CallVoidMethodV(this, obj, methodID, args); } + void CallVoidMethodA(jobject obj, jmethodID methodID, const jvalue* args) + { functions->CallVoidMethodA(this, obj, methodID, args); } + +#define CALL_NONVIRT_TYPE_METHOD(_jtype, _jname) \ + _jtype CallNonvirtual##_jname##Method(jobject obj, jclass clazz, \ + jmethodID methodID, ...) 
\ + { \ + _jtype result; \ + va_list args; \ + va_start(args, methodID); \ + result = functions->CallNonvirtual##_jname##MethodV(this, obj, \ + clazz, methodID, args); \ + va_end(args); \ + return result; \ + } +#define CALL_NONVIRT_TYPE_METHODV(_jtype, _jname) \ + _jtype CallNonvirtual##_jname##MethodV(jobject obj, jclass clazz, \ + jmethodID methodID, va_list args) \ + { return functions->CallNonvirtual##_jname##MethodV(this, obj, clazz, \ + methodID, args); } +#define CALL_NONVIRT_TYPE_METHODA(_jtype, _jname) \ + _jtype CallNonvirtual##_jname##MethodA(jobject obj, jclass clazz, \ + jmethodID methodID, const jvalue* args) \ + { return functions->CallNonvirtual##_jname##MethodA(this, obj, clazz, \ + methodID, args); } + +#define CALL_NONVIRT_TYPE(_jtype, _jname) \ + CALL_NONVIRT_TYPE_METHOD(_jtype, _jname) \ + CALL_NONVIRT_TYPE_METHODV(_jtype, _jname) \ + CALL_NONVIRT_TYPE_METHODA(_jtype, _jname) + + CALL_NONVIRT_TYPE(jobject, Object) + CALL_NONVIRT_TYPE(jboolean, Boolean) + CALL_NONVIRT_TYPE(jbyte, Byte) + CALL_NONVIRT_TYPE(jchar, Char) + CALL_NONVIRT_TYPE(jshort, Short) + CALL_NONVIRT_TYPE(jint, Int) + CALL_NONVIRT_TYPE(jlong, Long) + CALL_NONVIRT_TYPE(jfloat, Float) + CALL_NONVIRT_TYPE(jdouble, Double) + + void CallNonvirtualVoidMethod(jobject obj, jclass clazz, + jmethodID methodID, ...) + { + va_list args; + va_start(args, methodID); + functions->CallNonvirtualVoidMethodV(this, obj, clazz, methodID, args); + va_end(args); + } + void CallNonvirtualVoidMethodV(jobject obj, jclass clazz, + jmethodID methodID, va_list args) + { functions->CallNonvirtualVoidMethodV(this, obj, clazz, methodID, args); } + void CallNonvirtualVoidMethodA(jobject obj, jclass clazz, + jmethodID methodID, const jvalue* args) + { functions->CallNonvirtualVoidMethodA(this, obj, clazz, methodID, args); } + + jfieldID GetFieldID(jclass clazz, const char* name, const char* sig) + { return functions->GetFieldID(this, clazz, name, sig); } + + jobject GetObjectField(jobject obj, jfieldID fieldID) + { return functions->GetObjectField(this, obj, fieldID); } + jboolean GetBooleanField(jobject obj, jfieldID fieldID) + { return functions->GetBooleanField(this, obj, fieldID); } + jbyte GetByteField(jobject obj, jfieldID fieldID) + { return functions->GetByteField(this, obj, fieldID); } + jchar GetCharField(jobject obj, jfieldID fieldID) + { return functions->GetCharField(this, obj, fieldID); } + jshort GetShortField(jobject obj, jfieldID fieldID) + { return functions->GetShortField(this, obj, fieldID); } + jint GetIntField(jobject obj, jfieldID fieldID) + { return functions->GetIntField(this, obj, fieldID); } + jlong GetLongField(jobject obj, jfieldID fieldID) + { return functions->GetLongField(this, obj, fieldID); } + jfloat GetFloatField(jobject obj, jfieldID fieldID) + { return functions->GetFloatField(this, obj, fieldID); } + jdouble GetDoubleField(jobject obj, jfieldID fieldID) + { return functions->GetDoubleField(this, obj, fieldID); } + + void SetObjectField(jobject obj, jfieldID fieldID, jobject value) + { functions->SetObjectField(this, obj, fieldID, value); } + void SetBooleanField(jobject obj, jfieldID fieldID, jboolean value) + { functions->SetBooleanField(this, obj, fieldID, value); } + void SetByteField(jobject obj, jfieldID fieldID, jbyte value) + { functions->SetByteField(this, obj, fieldID, value); } + void SetCharField(jobject obj, jfieldID fieldID, jchar value) + { functions->SetCharField(this, obj, fieldID, value); } + void SetShortField(jobject obj, jfieldID fieldID, jshort value) + { 
functions->SetShortField(this, obj, fieldID, value); } + void SetIntField(jobject obj, jfieldID fieldID, jint value) + { functions->SetIntField(this, obj, fieldID, value); } + void SetLongField(jobject obj, jfieldID fieldID, jlong value) + { functions->SetLongField(this, obj, fieldID, value); } + void SetFloatField(jobject obj, jfieldID fieldID, jfloat value) + { functions->SetFloatField(this, obj, fieldID, value); } + void SetDoubleField(jobject obj, jfieldID fieldID, jdouble value) + { functions->SetDoubleField(this, obj, fieldID, value); } + + jmethodID GetStaticMethodID(jclass clazz, const char* name, const char* sig) + { return functions->GetStaticMethodID(this, clazz, name, sig); } + +#define CALL_STATIC_TYPE_METHOD(_jtype, _jname) \ + _jtype CallStatic##_jname##Method(jclass clazz, jmethodID methodID, \ + ...) \ + { \ + _jtype result; \ + va_list args; \ + va_start(args, methodID); \ + result = functions->CallStatic##_jname##MethodV(this, clazz, \ + methodID, args); \ + va_end(args); \ + return result; \ + } +#define CALL_STATIC_TYPE_METHODV(_jtype, _jname) \ + _jtype CallStatic##_jname##MethodV(jclass clazz, jmethodID methodID, \ + va_list args) \ + { return functions->CallStatic##_jname##MethodV(this, clazz, methodID, \ + args); } +#define CALL_STATIC_TYPE_METHODA(_jtype, _jname) \ + _jtype CallStatic##_jname##MethodA(jclass clazz, jmethodID methodID, \ + const jvalue* args) \ + { return functions->CallStatic##_jname##MethodA(this, clazz, methodID, \ + args); } + +#define CALL_STATIC_TYPE(_jtype, _jname) \ + CALL_STATIC_TYPE_METHOD(_jtype, _jname) \ + CALL_STATIC_TYPE_METHODV(_jtype, _jname) \ + CALL_STATIC_TYPE_METHODA(_jtype, _jname) + + CALL_STATIC_TYPE(jobject, Object) + CALL_STATIC_TYPE(jboolean, Boolean) + CALL_STATIC_TYPE(jbyte, Byte) + CALL_STATIC_TYPE(jchar, Char) + CALL_STATIC_TYPE(jshort, Short) + CALL_STATIC_TYPE(jint, Int) + CALL_STATIC_TYPE(jlong, Long) + CALL_STATIC_TYPE(jfloat, Float) + CALL_STATIC_TYPE(jdouble, Double) + + void CallStaticVoidMethod(jclass clazz, jmethodID methodID, ...) 
+ { + va_list args; + va_start(args, methodID); + functions->CallStaticVoidMethodV(this, clazz, methodID, args); + va_end(args); + } + void CallStaticVoidMethodV(jclass clazz, jmethodID methodID, va_list args) + { functions->CallStaticVoidMethodV(this, clazz, methodID, args); } + void CallStaticVoidMethodA(jclass clazz, jmethodID methodID, const jvalue* args) + { functions->CallStaticVoidMethodA(this, clazz, methodID, args); } + + jfieldID GetStaticFieldID(jclass clazz, const char* name, const char* sig) + { return functions->GetStaticFieldID(this, clazz, name, sig); } + + jobject GetStaticObjectField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticObjectField(this, clazz, fieldID); } + jboolean GetStaticBooleanField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticBooleanField(this, clazz, fieldID); } + jbyte GetStaticByteField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticByteField(this, clazz, fieldID); } + jchar GetStaticCharField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticCharField(this, clazz, fieldID); } + jshort GetStaticShortField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticShortField(this, clazz, fieldID); } + jint GetStaticIntField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticIntField(this, clazz, fieldID); } + jlong GetStaticLongField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticLongField(this, clazz, fieldID); } + jfloat GetStaticFloatField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticFloatField(this, clazz, fieldID); } + jdouble GetStaticDoubleField(jclass clazz, jfieldID fieldID) + { return functions->GetStaticDoubleField(this, clazz, fieldID); } + + void SetStaticObjectField(jclass clazz, jfieldID fieldID, jobject value) + { functions->SetStaticObjectField(this, clazz, fieldID, value); } + void SetStaticBooleanField(jclass clazz, jfieldID fieldID, jboolean value) + { functions->SetStaticBooleanField(this, clazz, fieldID, value); } + void SetStaticByteField(jclass clazz, jfieldID fieldID, jbyte value) + { functions->SetStaticByteField(this, clazz, fieldID, value); } + void SetStaticCharField(jclass clazz, jfieldID fieldID, jchar value) + { functions->SetStaticCharField(this, clazz, fieldID, value); } + void SetStaticShortField(jclass clazz, jfieldID fieldID, jshort value) + { functions->SetStaticShortField(this, clazz, fieldID, value); } + void SetStaticIntField(jclass clazz, jfieldID fieldID, jint value) + { functions->SetStaticIntField(this, clazz, fieldID, value); } + void SetStaticLongField(jclass clazz, jfieldID fieldID, jlong value) + { functions->SetStaticLongField(this, clazz, fieldID, value); } + void SetStaticFloatField(jclass clazz, jfieldID fieldID, jfloat value) + { functions->SetStaticFloatField(this, clazz, fieldID, value); } + void SetStaticDoubleField(jclass clazz, jfieldID fieldID, jdouble value) + { functions->SetStaticDoubleField(this, clazz, fieldID, value); } + + jstring NewString(const jchar* unicodeChars, jsize len) + { return functions->NewString(this, unicodeChars, len); } + + jsize GetStringLength(jstring string) + { return functions->GetStringLength(this, string); } + + const jchar* GetStringChars(jstring string, jboolean* isCopy) + { return functions->GetStringChars(this, string, isCopy); } + + void ReleaseStringChars(jstring string, const jchar* chars) + { functions->ReleaseStringChars(this, string, chars); } + + jstring NewStringUTF(const char* bytes) + { return functions->NewStringUTF(this, bytes); } + + 
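+    // Usage sketch (the class and method names are hypothetical, not part of
+    // this header): a native method normally pairs each Get call with its
+    // matching Release call through these wrappers, for example:
+    //
+    //   extern "C" JNIEXPORT jstring JNICALL
+    //   Java_com_example_Echo_echo(JNIEnv* env, jclass, jstring text) {
+    //       const char* utf = env->GetStringUTFChars(text, nullptr);
+    //       if (utf == nullptr) {
+    //           return nullptr;              // OutOfMemoryError is already pending.
+    //       }
+    //       jstring copy = env->NewStringUTF(utf);
+    //       env->ReleaseStringUTFChars(text, utf);
+    //       return copy;
+    //   }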
jsize GetStringUTFLength(jstring string) + { return functions->GetStringUTFLength(this, string); } + + const char* GetStringUTFChars(jstring string, jboolean* isCopy) + { return functions->GetStringUTFChars(this, string, isCopy); } + + void ReleaseStringUTFChars(jstring string, const char* utf) + { functions->ReleaseStringUTFChars(this, string, utf); } + + jsize GetArrayLength(jarray array) + { return functions->GetArrayLength(this, array); } + + jobjectArray NewObjectArray(jsize length, jclass elementClass, + jobject initialElement) + { return functions->NewObjectArray(this, length, elementClass, + initialElement); } + + jobject GetObjectArrayElement(jobjectArray array, jsize index) + { return functions->GetObjectArrayElement(this, array, index); } + + void SetObjectArrayElement(jobjectArray array, jsize index, jobject value) + { functions->SetObjectArrayElement(this, array, index, value); } + + jbooleanArray NewBooleanArray(jsize length) + { return functions->NewBooleanArray(this, length); } + jbyteArray NewByteArray(jsize length) + { return functions->NewByteArray(this, length); } + jcharArray NewCharArray(jsize length) + { return functions->NewCharArray(this, length); } + jshortArray NewShortArray(jsize length) + { return functions->NewShortArray(this, length); } + jintArray NewIntArray(jsize length) + { return functions->NewIntArray(this, length); } + jlongArray NewLongArray(jsize length) + { return functions->NewLongArray(this, length); } + jfloatArray NewFloatArray(jsize length) + { return functions->NewFloatArray(this, length); } + jdoubleArray NewDoubleArray(jsize length) + { return functions->NewDoubleArray(this, length); } + + jboolean* GetBooleanArrayElements(jbooleanArray array, jboolean* isCopy) + { return functions->GetBooleanArrayElements(this, array, isCopy); } + jbyte* GetByteArrayElements(jbyteArray array, jboolean* isCopy) + { return functions->GetByteArrayElements(this, array, isCopy); } + jchar* GetCharArrayElements(jcharArray array, jboolean* isCopy) + { return functions->GetCharArrayElements(this, array, isCopy); } + jshort* GetShortArrayElements(jshortArray array, jboolean* isCopy) + { return functions->GetShortArrayElements(this, array, isCopy); } + jint* GetIntArrayElements(jintArray array, jboolean* isCopy) + { return functions->GetIntArrayElements(this, array, isCopy); } + jlong* GetLongArrayElements(jlongArray array, jboolean* isCopy) + { return functions->GetLongArrayElements(this, array, isCopy); } + jfloat* GetFloatArrayElements(jfloatArray array, jboolean* isCopy) + { return functions->GetFloatArrayElements(this, array, isCopy); } + jdouble* GetDoubleArrayElements(jdoubleArray array, jboolean* isCopy) + { return functions->GetDoubleArrayElements(this, array, isCopy); } + + void ReleaseBooleanArrayElements(jbooleanArray array, jboolean* elems, + jint mode) + { functions->ReleaseBooleanArrayElements(this, array, elems, mode); } + void ReleaseByteArrayElements(jbyteArray array, jbyte* elems, + jint mode) + { functions->ReleaseByteArrayElements(this, array, elems, mode); } + void ReleaseCharArrayElements(jcharArray array, jchar* elems, + jint mode) + { functions->ReleaseCharArrayElements(this, array, elems, mode); } + void ReleaseShortArrayElements(jshortArray array, jshort* elems, + jint mode) + { functions->ReleaseShortArrayElements(this, array, elems, mode); } + void ReleaseIntArrayElements(jintArray array, jint* elems, + jint mode) + { functions->ReleaseIntArrayElements(this, array, elems, mode); } + void ReleaseLongArrayElements(jlongArray array, jlong* 
elems, + jint mode) + { functions->ReleaseLongArrayElements(this, array, elems, mode); } + void ReleaseFloatArrayElements(jfloatArray array, jfloat* elems, + jint mode) + { functions->ReleaseFloatArrayElements(this, array, elems, mode); } + void ReleaseDoubleArrayElements(jdoubleArray array, jdouble* elems, + jint mode) + { functions->ReleaseDoubleArrayElements(this, array, elems, mode); } + + void GetBooleanArrayRegion(jbooleanArray array, jsize start, jsize len, + jboolean* buf) + { functions->GetBooleanArrayRegion(this, array, start, len, buf); } + void GetByteArrayRegion(jbyteArray array, jsize start, jsize len, + jbyte* buf) + { functions->GetByteArrayRegion(this, array, start, len, buf); } + void GetCharArrayRegion(jcharArray array, jsize start, jsize len, + jchar* buf) + { functions->GetCharArrayRegion(this, array, start, len, buf); } + void GetShortArrayRegion(jshortArray array, jsize start, jsize len, + jshort* buf) + { functions->GetShortArrayRegion(this, array, start, len, buf); } + void GetIntArrayRegion(jintArray array, jsize start, jsize len, + jint* buf) + { functions->GetIntArrayRegion(this, array, start, len, buf); } + void GetLongArrayRegion(jlongArray array, jsize start, jsize len, + jlong* buf) + { functions->GetLongArrayRegion(this, array, start, len, buf); } + void GetFloatArrayRegion(jfloatArray array, jsize start, jsize len, + jfloat* buf) + { functions->GetFloatArrayRegion(this, array, start, len, buf); } + void GetDoubleArrayRegion(jdoubleArray array, jsize start, jsize len, + jdouble* buf) + { functions->GetDoubleArrayRegion(this, array, start, len, buf); } + + void SetBooleanArrayRegion(jbooleanArray array, jsize start, jsize len, + const jboolean* buf) + { functions->SetBooleanArrayRegion(this, array, start, len, buf); } + void SetByteArrayRegion(jbyteArray array, jsize start, jsize len, + const jbyte* buf) + { functions->SetByteArrayRegion(this, array, start, len, buf); } + void SetCharArrayRegion(jcharArray array, jsize start, jsize len, + const jchar* buf) + { functions->SetCharArrayRegion(this, array, start, len, buf); } + void SetShortArrayRegion(jshortArray array, jsize start, jsize len, + const jshort* buf) + { functions->SetShortArrayRegion(this, array, start, len, buf); } + void SetIntArrayRegion(jintArray array, jsize start, jsize len, + const jint* buf) + { functions->SetIntArrayRegion(this, array, start, len, buf); } + void SetLongArrayRegion(jlongArray array, jsize start, jsize len, + const jlong* buf) + { functions->SetLongArrayRegion(this, array, start, len, buf); } + void SetFloatArrayRegion(jfloatArray array, jsize start, jsize len, + const jfloat* buf) + { functions->SetFloatArrayRegion(this, array, start, len, buf); } + void SetDoubleArrayRegion(jdoubleArray array, jsize start, jsize len, + const jdouble* buf) + { functions->SetDoubleArrayRegion(this, array, start, len, buf); } + + jint RegisterNatives(jclass clazz, const JNINativeMethod* methods, + jint nMethods) + { return functions->RegisterNatives(this, clazz, methods, nMethods); } + + jint UnregisterNatives(jclass clazz) + { return functions->UnregisterNatives(this, clazz); } + + jint MonitorEnter(jobject obj) + { return functions->MonitorEnter(this, obj); } + + jint MonitorExit(jobject obj) + { return functions->MonitorExit(this, obj); } + + jint GetJavaVM(JavaVM** vm) + { return functions->GetJavaVM(this, vm); } + + void GetStringRegion(jstring str, jsize start, jsize len, jchar* buf) + { functions->GetStringRegion(this, str, start, len, buf); } + + void GetStringUTFRegion(jstring str, 
jsize start, jsize len, char* buf) + { return functions->GetStringUTFRegion(this, str, start, len, buf); } + + void* GetPrimitiveArrayCritical(jarray array, jboolean* isCopy) + { return functions->GetPrimitiveArrayCritical(this, array, isCopy); } + + void ReleasePrimitiveArrayCritical(jarray array, void* carray, jint mode) + { functions->ReleasePrimitiveArrayCritical(this, array, carray, mode); } + + const jchar* GetStringCritical(jstring string, jboolean* isCopy) + { return functions->GetStringCritical(this, string, isCopy); } + + void ReleaseStringCritical(jstring string, const jchar* carray) + { functions->ReleaseStringCritical(this, string, carray); } + + jweak NewWeakGlobalRef(jobject obj) + { return functions->NewWeakGlobalRef(this, obj); } + + void DeleteWeakGlobalRef(jweak obj) + { functions->DeleteWeakGlobalRef(this, obj); } + + jboolean ExceptionCheck() + { return functions->ExceptionCheck(this); } + + jobject NewDirectByteBuffer(void* address, jlong capacity) + { return functions->NewDirectByteBuffer(this, address, capacity); } + + void* GetDirectBufferAddress(jobject buf) + { return functions->GetDirectBufferAddress(this, buf); } + + jlong GetDirectBufferCapacity(jobject buf) + { return functions->GetDirectBufferCapacity(this, buf); } + + /* added in JNI 1.6 */ + jobjectRefType GetObjectRefType(jobject obj) + { return functions->GetObjectRefType(this, obj); } +#endif /*__cplusplus*/ +}; + + +/* + * JNI invocation interface. + */ +struct JNIInvokeInterface { + void* reserved0; + void* reserved1; + void* reserved2; + + jint (*DestroyJavaVM)(JavaVM*); + jint (*AttachCurrentThread)(JavaVM*, JNIEnv**, void*); + jint (*DetachCurrentThread)(JavaVM*); + jint (*GetEnv)(JavaVM*, void**, jint); + jint (*AttachCurrentThreadAsDaemon)(JavaVM*, JNIEnv**, void*); +}; + +/* + * C++ version. + */ +struct _JavaVM { + const struct JNIInvokeInterface* functions; + +#if defined(__cplusplus) + jint DestroyJavaVM() + { return functions->DestroyJavaVM(this); } + jint AttachCurrentThread(JNIEnv** p_env, void* thr_args) + { return functions->AttachCurrentThread(this, p_env, thr_args); } + jint DetachCurrentThread() + { return functions->DetachCurrentThread(this); } + jint GetEnv(void** env, jint version) + { return functions->GetEnv(this, env, version); } + jint AttachCurrentThreadAsDaemon(JNIEnv** p_env, void* thr_args) + { return functions->AttachCurrentThreadAsDaemon(this, p_env, thr_args); } +#endif /*__cplusplus*/ +}; + +struct JavaVMAttachArgs { + jint version; /* must be >= JNI_VERSION_1_2 */ + const char* name; /* NULL or name of thread as modified UTF-8 str */ + jobject group; /* global ref of a ThreadGroup object, or NULL */ +}; +typedef struct JavaVMAttachArgs JavaVMAttachArgs; + +/* + * JNI 1.2+ initialization. (As of 1.6, the pre-1.2 structures are no + * longer supported.) + */ +typedef struct JavaVMOption { + const char* optionString; + void* extraInfo; +} JavaVMOption; + +typedef struct JavaVMInitArgs { + jint version; /* use JNI_VERSION_1_2 or later */ + + jint nOptions; + JavaVMOption* options; + jboolean ignoreUnrecognized; +} JavaVMInitArgs; + +#ifdef __cplusplus +extern "C" { +#endif +/* + * VM initialization functions. + * + * Note these are the only symbols exported for JNI by the VM. 
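+ *
+ * A minimal embedding sketch (the option string below is only a placeholder):
+ *
+ *   JavaVMOption opt = { "-Djava.class.path=/path/to/classes", nullptr };
+ *   JavaVMInitArgs args;
+ *   args.version = JNI_VERSION_1_6;
+ *   args.nOptions = 1;
+ *   args.options = &opt;
+ *   args.ignoreUnrecognized = JNI_FALSE;
+ *
+ *   JavaVM* vm = nullptr;
+ *   JNIEnv* env = nullptr;
+ *   if (JNI_CreateJavaVM(&vm, &env, &args) != JNI_OK) {
+ *       // VM creation failed; no JNIEnv is available.
+ *   }
+ *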
+ */ +jint JNI_GetDefaultJavaVMInitArgs(void*); +jint JNI_CreateJavaVM(JavaVM**, JNIEnv**, void*); +jint JNI_GetCreatedJavaVMs(JavaVM**, jsize, jsize*); + +#define JNIIMPORT +#define JNIEXPORT __attribute__ ((visibility ("default"))) +#define JNICALL + +/* + * Prototypes for functions exported by loadable shared libs. These are + * called by JNI, not provided by JNI. + */ +JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved); +JNIEXPORT void JNI_OnUnload(JavaVM* vm, void* reserved); + +#ifdef __cplusplus +} +#endif + + +/* + * Manifest constants. + */ +#define JNI_FALSE 0 +#define JNI_TRUE 1 + +#define JNI_VERSION_1_1 0x00010001 +#define JNI_VERSION_1_2 0x00010002 +#define JNI_VERSION_1_4 0x00010004 +#define JNI_VERSION_1_6 0x00010006 + +#define JNI_OK (0) /* no error */ +#define JNI_ERR (-1) /* generic error */ +#define JNI_EDETACHED (-2) /* thread detached from the VM */ +#define JNI_EVERSION (-3) /* JNI version error */ +#define JNI_ENOMEM (-4) /* Out of memory */ +#define JNI_EEXIST (-5) /* VM already created */ +#define JNI_EINVAL (-6) /* Invalid argument */ + +#define JNI_COMMIT 1 /* copy content, do not free buffer */ +#define JNI_ABORT 2 /* free buffer w/o copying back */ + +#endif /* JNI_H_ */ diff --git a/third_party/libnativehelper/libnativehelper.map.txt b/third_party/libnativehelper/libnativehelper.map.txt new file mode 100644 index 0000000000..62c980d4e5 --- /dev/null +++ b/third_party/libnativehelper/libnativehelper.map.txt @@ -0,0 +1,40 @@ +# This library should only export C linkage definitions. +# +# VERSION string that follows is derived from _. +LIBNATIVEHELPER_1 { + global: + JNI_GetDefaultJavaVMInitArgs; + JNI_CreateJavaVM; + JNI_GetCreatedJavaVMs; + + jniRegisterNativeMethods; + jniThrowException; + jniThrowExceptionFmt; + jniThrowNullPointerException; + jniThrowRuntimeException; + jniThrowIOException; + jniStrError; + jniCreateFileDescriptor; + jniGetFDFromFileDescriptor; + jniSetFileDescriptorOfFD; + jniGetOwnerIdFromFileDescriptor; + jniGetNioBufferBaseArray; + jniGetNioBufferBaseArrayOffset; + jniGetNioBufferPointer; + jniGetNioBufferFields; + jniGetReferent; + jniCreateString; + jniLogException; + jniUninitializeConstants; + + JniInvocationCreate; + JniInvocationDestroy; + JniInvocationInit; + JniInvocationGetLibrary; + + newStringArray; + toStringArray; + + local: + *; +}; diff --git a/third_party/libnativehelper/platform_include/nativehelper/detail/signature_checker.h b/third_party/libnativehelper/platform_include/nativehelper/detail/signature_checker.h new file mode 100644 index 0000000000..7c2a7fc1b9 --- /dev/null +++ b/third_party/libnativehelper/platform_include/nativehelper/detail/signature_checker.h @@ -0,0 +1,1441 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/* + * WARNING: Do not include and use these directly. Use jni_macros.h instead! + * The "detail" namespace should be a strong hint not to depend on the internals, + * which could change at any time. 
+ *
+ * This implements the underlying mechanism for compile-time JNI signature/ctype checking
+ * and inference.
+ *
+ * This file provides the constexpr basic blocks such as strings, arrays, vectors
+ * as well as the JNI-specific parsing functionality.
+ *
+ * Everything is implemented via generic-style (templates without metaprogramming)
+ * wherever possible. Traditional template metaprogramming is used sparingly.
+ *
+ * Everything in this file except ostream<< is constexpr.
+ */
+
+#pragma once
+
+#include <ostream>      // std::ostream
+#include <jni.h>        // jni typedefs, JniNativeMethod.
+#include <type_traits>  // std::common_type, std::remove_cv
+
+namespace nativehelper {
+namespace detail {
+
+// If CHECK evaluates to false then X_ASSERT will halt compilation.
+//
+// Asserts meant to be used only within constexpr context.
+#if defined(JNI_SIGNATURE_CHECKER_DISABLE_ASSERTS)
+# define X_ASSERT(CHECK) do { if ((false)) { (CHECK) ? void(0) : void(0); } } while (false)
+#else
+# define X_ASSERT(CHECK) \
+    ( (CHECK) ? void(0) : jni_assertion_failure(#CHECK) )
+#endif
+
+// The runtime 'jni_assert' will never get called from a constexpr context;
+// instead compilation will abort with a stack trace.
+//
+// Inspect the frame above this one to see the exact nature of the failure.
+inline void jni_assertion_failure(const char* /*msg*/) __attribute__((noreturn));
+inline void jni_assertion_failure(const char* /*msg*/) {
+  std::terminate();
+}
+
+// An immutable constexpr string view, similar to std::string_view but for C++14.
+// For a mutable string see instead ConstexprVector.
+//
+// As it is a read-only view into a string, it is not guaranteed to be zero-terminated.
+struct ConstexprStringView {
+  // Implicit conversion from string literal:
+  //   ConstexprStringView str = "hello_world";
+  template <size_t N>
+  constexpr ConstexprStringView(const char (& lit)[N])  // NOLINT: explicit.
+      : _array(lit), _size(N - 1) {
+    // Using an array of characters is not allowed because the inferred size would be wrong.
+    // Use the other constructor instead for that.
+    X_ASSERT(lit[N - 1] == '\0');
+  }
+
+  constexpr ConstexprStringView(const char* ptr, size_t size)
+      : _array(ptr), _size(size) {
+    // See the below constructor instead.
+    X_ASSERT(ptr != nullptr);
+  }
+
+  // No-arg constructor: Create empty view.
+  constexpr ConstexprStringView() : _array(""), _size(0u) {}
+
+  constexpr size_t size() const {
+    return _size;
+  }
+
+  constexpr bool empty() const {
+    return size() == 0u;
+  }
+
+  constexpr char operator[](size_t i) const {
+    X_ASSERT(i <= size());
+    return _array[i];
+  }
+
+  // Create substring from this[start..start+len).
+  constexpr ConstexprStringView substr(size_t start, size_t len) const {
+    X_ASSERT(start <= size());
+    X_ASSERT(len <= size() - start);
+
+    return ConstexprStringView(&_array[start], len);
+  }
+
+  // Create maximum length substring that begins at 'start'.
+  constexpr ConstexprStringView substr(size_t start) const {
+    X_ASSERT(start <= size());
+    return substr(start, size() - start);
+  }
+
+  using const_iterator = const char*;
+
+  constexpr const_iterator begin() const {
+    return &_array[0];
+  }
+
+  constexpr const_iterator end() const {
+    return &_array[size()];
+  }
+
+ private:
+  const char* _array;  // Never-null for simplicity.
+ size_t _size; +}; + +constexpr bool +operator==(const ConstexprStringView& lhs, const ConstexprStringView& rhs) { + if (lhs.size() != rhs.size()) { + return false; + } + for (size_t i = 0; i < lhs.size(); ++i) { + if (lhs[i] != rhs[i]) { + return false; + } + } + return true; +} + +constexpr bool +operator!=(const ConstexprStringView& lhs, const ConstexprStringView& rhs) { + return !(lhs == rhs); +} + +inline std::ostream& operator<<(std::ostream& os, const ConstexprStringView& str) { + for (char c : str) { + os << c; + } + return os; +} + +constexpr bool IsValidJniDescriptorStart(char shorty) { + constexpr char kValidJniStarts[] = + {'V', 'Z', 'B', 'C', 'S', 'I', 'J', 'F', 'D', 'L', '[', '(', ')'}; + + for (char c : kValidJniStarts) { + if (c == shorty) { + return true; + } + } + + return false; +} + +// A constexpr "vector" that supports storing a variable amount of Ts +// in an array-like interface. +// +// An up-front kMaxSize must be given since constexpr does not support +// dynamic allocations. +template +struct ConstexprVector { + public: + constexpr explicit ConstexprVector() : _size(0u), _array{} { + } + + private: + // Custom iterator to support ptr-one-past-end into the union array without + // undefined behavior. + template + struct VectorIterator { + Elem* ptr; + + constexpr VectorIterator& operator++() { + ++ptr; + return *this; + } + + constexpr VectorIterator operator++(int) const { + VectorIterator tmp(*this); + ++tmp; + return tmp; + } + + constexpr /*T&*/ auto& operator*() { + // Use 'auto' here since using 'T' is incorrect with const_iterator. + return ptr->_value; + } + + constexpr const /*T&*/ auto& operator*() const { + // Use 'auto' here for consistency with above. + return ptr->_value; + } + + constexpr bool operator==(const VectorIterator& other) const { + return ptr == other.ptr; + } + + constexpr bool operator!=(const VectorIterator& other) const { + return !(*this == other); + } + }; + + // Do not require that T is default-constructible by using a union. + struct MaybeElement { + union { + T _value; + }; + }; + + public: + using iterator = VectorIterator; + using const_iterator = VectorIterator; + + constexpr iterator begin() { + return {&_array[0]}; + } + + constexpr iterator end() { + return {&_array[size()]}; + } + + constexpr const_iterator begin() const { + return {&_array[0]}; + } + + constexpr const_iterator end() const { + return {&_array[size()]}; + } + + constexpr void push_back(const T& value) { + X_ASSERT(_size + 1 <= kMaxSize); + + _array[_size]._value = value; + _size++; + } + + // A pop operation could also be added since constexpr T's + // have default destructors, it would just be _size--. + // We do not need a pop() here though. + + constexpr const T& operator[](size_t i) const { + return _array[i]._value; + } + + constexpr T& operator[](size_t i) { + return _array[i]._value; + } + + constexpr size_t size() const { + return _size; + } + private: + + size_t _size; + MaybeElement _array[kMaxSize]; +}; + +// Parsed and validated "long" form of a single JNI descriptor. +// e.g. one of "J", "Ljava/lang/Object;" etc. +struct JniDescriptorNode { + ConstexprStringView longy; + + constexpr JniDescriptorNode(ConstexprStringView longy) : longy(longy) { // NOLINT(google-explicit-constructor) + X_ASSERT(!longy.empty()); + } + constexpr JniDescriptorNode() : longy() {} + + constexpr char shorty() { + // Must be initialized with the non-default constructor. 
+ X_ASSERT(!longy.empty()); + return longy[0]; + } +}; + +inline std::ostream& operator<<(std::ostream& os, const JniDescriptorNode& node) { + os << node.longy; + return os; +} + +// Equivalent of C++17 std::optional. +// +// An optional is essentially a type safe +// union { +// void Nothing, +// T Some; +// }; +// +template +struct ConstexprOptional { + // Create a default optional with no value. + constexpr ConstexprOptional() : _has_value(false), _nothing() { + } + + // Create an optional with a value. + constexpr ConstexprOptional(const T& value) // NOLINT(google-explicit-constructor) + : _has_value(true), _value(value) { + } + + constexpr explicit operator bool() const { + return _has_value; + } + + constexpr bool has_value() const { + return _has_value; + } + + constexpr const T& value() const { + X_ASSERT(has_value()); + return _value; + } + + constexpr const T* operator->() const { + return &(value()); + } + + constexpr const T& operator*() const { + return value(); + } + + private: + bool _has_value; + // The "Nothing" is likely unnecessary but improves readability. + struct Nothing {}; + union { + Nothing _nothing; + T _value; + }; +}; + +template +constexpr bool +operator==(const ConstexprOptional& lhs, const ConstexprOptional& rhs) { + if (lhs && rhs) { + return lhs.value() == rhs.value(); + } + return lhs.has_value() == rhs.has_value(); +} + +template +constexpr bool +operator!=(const ConstexprOptional& lhs, const ConstexprOptional& rhs) { + return !(lhs == rhs); +} + +template +inline std::ostream& operator<<(std::ostream& os, const ConstexprOptional& val) { + if (val) { + os << val.value(); + } + return os; +} + +// Equivalent of std::nullopt +// Allows implicit conversion to any empty ConstexprOptional. +// Mostly useful for macros that need to return an empty constexpr optional. +struct NullConstexprOptional { + template + constexpr operator ConstexprOptional() const { // NOLINT(google-explicit-constructor) + return ConstexprOptional(); + } +}; + +inline std::ostream& operator<<(std::ostream& os, NullConstexprOptional) { + return os; +} + +#if !defined(PARSE_FAILURES_NONFATAL) +// Unfortunately we cannot have custom messages here, as it just prints a stack trace with the +// macros expanded. This is at least more flexible than static_assert which requires a string +// literal. +// NOTE: The message string literal must be on same line as the macro to be seen during a +// compilation error. +#define PARSE_FAILURE(msg) X_ASSERT(! #msg) +#define PARSE_ASSERT_MSG(cond, msg) X_ASSERT(#msg && (cond)) +#define PARSE_ASSERT(cond) X_ASSERT(cond) +#else +#define PARSE_FAILURE(msg) return NullConstexprOptional{}; +#define PARSE_ASSERT_MSG(cond, msg) if (!(cond)) { PARSE_FAILURE(msg); } +#define PARSE_ASSERT(cond) if (!(cond)) { PARSE_FAILURE(""); } +#endif + +// This is a placeholder function and should not be called directly. +constexpr void ParseFailure(const char* msg) { + (void) msg; // intentionally no-op. +} + +// Temporary parse data when parsing a function descriptor. +struct ParseTypeDescriptorResult { + // A single argument descriptor, e.g. "V" or "Ljava/lang/Object;" + ConstexprStringView token; + // The remainder of the function descriptor yet to be parsed. 
  ConstexprStringView remainder;
+
+  constexpr bool has_token() const {
+    return token.size() > 0u;
+  }
+
+  constexpr bool has_remainder() const {
+    return remainder.size() > 0u;
+  }
+
+  constexpr JniDescriptorNode as_node() const {
+    X_ASSERT(has_token());
+    return {token};
+  }
+};
+
+// Parse a single type descriptor out of a function type descriptor substring,
+// and return the token and the remainder string.
+//
+// If parsing fails (i.e. illegal syntax), then:
+//   parses are fatal -> assertion is triggered (default behavior),
+//   parses are nonfatal -> returns nullopt (test behavior).
+constexpr ConstexprOptional<ParseTypeDescriptorResult>
+ParseSingleTypeDescriptor(ConstexprStringView single_type,
+                          bool allow_void = false) {
+  constexpr NullConstexprOptional kUnreachable = {};
+
+  // Nothing else left.
+  if (single_type.size() == 0) {
+    return ParseTypeDescriptorResult{};
+  }
+
+  ConstexprStringView token;
+  ConstexprStringView remainder = single_type.substr(/*start*/1u);
+
+  char c = single_type[0];
+  PARSE_ASSERT(IsValidJniDescriptorStart(c));
+
+  enum State {
+    kSingleCharacter,
+    kArray,
+    kObject
+  };
+
+  State state = kSingleCharacter;
+
+  // Parse the first character to figure out if we should parse the rest.
+  switch (c) {
+    case '!': {
+      constexpr bool fast_jni_is_deprecated = false;
+      PARSE_ASSERT(fast_jni_is_deprecated);
+      break;
+    }
+    case 'V':
+      if (!allow_void) {
+        constexpr bool void_type_descriptor_only_allowed_in_return_type = false;
+        PARSE_ASSERT(void_type_descriptor_only_allowed_in_return_type);
+      }
+      [[clang::fallthrough]];
+    case 'Z':
+    case 'B':
+    case 'C':
+    case 'S':
+    case 'I':
+    case 'J':
+    case 'F':
+    case 'D':
+      token = single_type.substr(/*start*/0u, /*len*/1u);
+      break;
+    case 'L':
+      state = kObject;
+      break;
+    case '[':
+      state = kArray;
+      break;
+    default: {
+      // See JNI Chapter 3: Type Signatures.
+      PARSE_FAILURE("Expected a valid type descriptor character.");
+      return kUnreachable;
+    }
+  }
+
+  // Possibly parse an arbitrarily long remainder substring.
+  switch (state) {
+    case kSingleCharacter:
+      return {{token, remainder}};
+    case kArray: {
+      // Recursively parse the array component, as it's just any non-void type descriptor.
+      ConstexprOptional<ParseTypeDescriptorResult>
+          maybe_res = ParseSingleTypeDescriptor(remainder, /*allow_void*/false);
+      PARSE_ASSERT(maybe_res);  // Downstream parsing has asserted, bail out.
+
+      ParseTypeDescriptorResult res = maybe_res.value();
+
+      // Reject illegal array type descriptors such as "[".
+      PARSE_ASSERT_MSG(res.has_token(),
+                       "All array types must be followed by their component type (e.g. '[I', '[[Z', etc.)");
+
+      token = single_type.substr(/*start*/0u, res.token.size() + 1u);
+
+      return {{token, res.remainder}};
+    }
+    case kObject: {
+      // Parse the fully qualified class, e.g. Lfoo/bar/baz;
+      // Note checking that each part of the class name is a valid class identifier
+      // is too complicated (JLS 3.8).
+      // This simple check simply scans until the next ';'.
+      bool found_semicolon = false;
+      size_t semicolon_len = 0;
+      for (size_t i = 0; i < single_type.size(); ++i) {
+        switch (single_type[i]) {
+          case ')':
+          case '(':
+          case '[':
+            PARSE_FAILURE("Object identifiers cannot have ()[ in them.");
+            break;
+        }
+        if (single_type[i] == ';') {
+          semicolon_len = i + 1;
+          found_semicolon = true;
+          break;
+        }
+      }
+
+      PARSE_ASSERT(found_semicolon);
+
+      token = single_type.substr(/*start*/0u, semicolon_len);
+      remainder = single_type.substr(/*start*/semicolon_len);
+
+      bool class_name_is_empty = token.size() <= 2u;  // e.g.
"L;" + PARSE_ASSERT(!class_name_is_empty); + + return {{token, remainder}}; + } + default: + X_ASSERT(false); + } + + X_ASSERT(false); + return kUnreachable; +} + +// Abstract data type to represent container for Ret(Args,...). +template +struct FunctionSignatureDescriptor { + ConstexprVector args; + T ret; + + static constexpr size_t max_size = kMaxSize; +}; + + +template +inline std::ostream& operator<<( + std::ostream& os, + const FunctionSignatureDescriptor& signature) { + size_t count = 0; + os << "args={"; + for (auto& arg : signature.args) { + os << arg; + + if (count != signature.args.size() - 1) { + os << ","; + } + + ++count; + } + os << "}, ret="; + os << signature.ret; + return os; +} + +// Ret(Args...) of JniDescriptorNode. +template +using JniSignatureDescriptor = FunctionSignatureDescriptor; + +// Parse a JNI function signature descriptor into a JniSignatureDescriptor. +// +// If parsing fails (i.e. illegal syntax), then: +// parses are fatal -> assertion is triggered (default behavior), +// parses are nonfatal -> returns nullopt (test behavior). +template +constexpr ConstexprOptional> +ParseSignatureAsList(ConstexprStringView signature) { + // The list of JNI descriptors cannot possibly exceed the number of characters + // in the JNI string literal. We leverage this to give an upper bound of the strlen. + // This is a bit wasteful but in constexpr there *must* be a fixed upper size for data structures. + ConstexprVector jni_desc_node_list; + JniDescriptorNode return_jni_desc; + + enum State { + kInitial = 0, + kParsingParameters = 1, + kParsingReturnType = 2, + kCompleted = 3, + }; + + State state = kInitial; + + while (!signature.empty()) { + switch (state) { + case kInitial: { + char c = signature[0]; + PARSE_ASSERT_MSG(c == '(', + "First character of a JNI signature must be a '('"); + state = kParsingParameters; + signature = signature.substr(/*start*/1u); + break; + } + case kParsingParameters: { + char c = signature[0]; + if (c == ')') { + state = kParsingReturnType; + signature = signature.substr(/*start*/1u); + break; + } + + ConstexprOptional + res = ParseSingleTypeDescriptor(signature, /*allow_void*/false); + PARSE_ASSERT(res); + + jni_desc_node_list.push_back(res->as_node()); + + signature = res->remainder; + break; + } + case kParsingReturnType: { + ConstexprOptional + res = ParseSingleTypeDescriptor(signature, /*allow_void*/true); + PARSE_ASSERT(res); + + return_jni_desc = res->as_node(); + signature = res->remainder; + state = kCompleted; + break; + } + default: { + // e.g. "()VI" is illegal because the V terminates the signature. + PARSE_FAILURE("Signature had left over tokens after parsing return type"); + break; + } + } + } + + switch (state) { + case kCompleted: + // Everything is ok. + break; + case kParsingParameters: + PARSE_FAILURE("Signature was missing ')'"); + break; + case kParsingReturnType: + PARSE_FAILURE("Missing return type"); + case kInitial: + PARSE_FAILURE("Cannot have an empty signature"); + default: + X_ASSERT(false); + } + + return {{jni_desc_node_list, return_jni_desc}}; +} + +// What kind of JNI does this type belong to? +enum NativeKind { + kNotJni, // Illegal parameter used inside of a function type. + kNormalJniCallingConventionParameter, + kNormalNative, + kFastNative, // Also valid in normal. + kCriticalNative, // Also valid in fast/normal. +}; + +// Is this type final, i.e. it cannot be subtyped? +enum TypeFinal { + kNotFinal, + kFinal // e.g. any primitive or any "final" class such as String. 
+}; + +// What position is the JNI type allowed to be in? +// Ignored when in a CriticalNative context. +enum NativePositionAllowed { + kNotAnyPosition, + kReturnPosition, + kZerothPosition, + kFirstOrLaterPosition, + kSecondOrLaterPosition, +}; + +constexpr NativePositionAllowed ConvertPositionToAllowed(size_t position) { + switch (position) { + case 0: + return kZerothPosition; + case 1: + return kFirstOrLaterPosition; + default: + return kSecondOrLaterPosition; + } +} + +// Type traits for a JNI parameter type. See below for specializations. +template +struct jni_type_trait { + static constexpr NativeKind native_kind = kNotJni; + static constexpr const char type_descriptor[] = "(illegal)"; + static constexpr NativePositionAllowed position_allowed = kNotAnyPosition; + static constexpr TypeFinal type_finality = kNotFinal; + static constexpr const char type_name[] = "(illegal)"; +}; + +// Access the jni_type_trait from a non-templated constexpr function. +// Identical non-static fields to jni_type_trait, see Reify(). +struct ReifiedJniTypeTrait { + NativeKind native_kind; + ConstexprStringView type_descriptor; + NativePositionAllowed position_allowed; + TypeFinal type_finality; + ConstexprStringView type_name; + + template + static constexpr ReifiedJniTypeTrait Reify() { + // This should perhaps be called 'Type Erasure' except we don't use virtuals, + // so it's not quite the same idiom. + using TR = jni_type_trait; + return {TR::native_kind, + TR::type_descriptor, + TR::position_allowed, + TR::type_finality, + TR::type_name}; + } + + // Find the most similar ReifiedJniTypeTrait corresponding to the type descriptor. + // + // Any type can be found by using the exact canonical type descriptor as listed + // in the jni type traits definitions. + // + // Non-final JNI types have limited support for inexact similarity: + // [[* | [L* -> jobjectArray + // L* -> jobject + // + // Otherwise return a nullopt. + static constexpr ConstexprOptional + MostSimilarTypeDescriptor(ConstexprStringView type_descriptor); +}; + +constexpr bool +operator==(const ReifiedJniTypeTrait& lhs, const ReifiedJniTypeTrait& rhs) { + return lhs.native_kind == rhs.native_kind + && rhs.type_descriptor == lhs.type_descriptor && + lhs.position_allowed == rhs.position_allowed + && rhs.type_finality == lhs.type_finality && + lhs.type_name == rhs.type_name; +} + +inline std::ostream& operator<<(std::ostream& os, const ReifiedJniTypeTrait& rjtt) { + // os << "ReifiedJniTypeTrait<" << rjft.type_name << ">"; + os << rjtt.type_name; + return os; +} + +// Template specialization for any JNI typedefs. 
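+// Each specialization records a type's canonical JNI descriptor, the strictest
+// native kind it may still be used with (primitives remain legal even for
+// @CriticalNative, object types do not), whether the type is final, and the
+// earliest argument position it may occupy. For example, the table expanded by
+// DEFINE_JNI_TYPE_TRAIT below gives jni_type_trait<jint>::type_descriptor == "I"
+// and maps jstring to "Ljava/lang/String;".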
+#define JNI_TYPE_TRAIT(jtype, the_type_descriptor, the_native_kind, the_type_finality, the_position) \ +template <> \ +struct jni_type_trait< jtype > { \ + static constexpr NativeKind native_kind = the_native_kind; \ + static constexpr const char type_descriptor[] = the_type_descriptor; \ + static constexpr NativePositionAllowed position_allowed = the_position; \ + static constexpr TypeFinal type_finality = the_type_finality; \ + static constexpr const char type_name[] = #jtype; \ +}; + +#define DEFINE_JNI_TYPE_TRAIT(TYPE_TRAIT_FN) \ +TYPE_TRAIT_FN(jboolean, "Z", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jbyte, "B", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jchar, "C", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jshort, "S", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jint, "I", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jlong, "J", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jfloat, "F", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jdouble, "D", kCriticalNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jobject, "Ljava/lang/Object;", kFastNative, kNotFinal, kFirstOrLaterPosition) \ +TYPE_TRAIT_FN(jclass, "Ljava/lang/Class;", kFastNative, kFinal, kFirstOrLaterPosition) \ +TYPE_TRAIT_FN(jstring, "Ljava/lang/String;", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jarray, "Ljava/lang/Object;", kFastNative, kNotFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jobjectArray, "[Ljava/lang/Object;", kFastNative, kNotFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jbooleanArray, "[Z", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jbyteArray, "[B", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jcharArray, "[C", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jshortArray, "[S", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jintArray, "[I", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jlongArray, "[J", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jfloatArray, "[F", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jdoubleArray, "[D", kFastNative, kFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(jthrowable, "Ljava/lang/Throwable;", kFastNative, kNotFinal, kSecondOrLaterPosition) \ +TYPE_TRAIT_FN(JNIEnv*, "", kNormalJniCallingConventionParameter, kFinal, kZerothPosition) \ +TYPE_TRAIT_FN(void, "V", kCriticalNative, kFinal, kReturnPosition) \ + +DEFINE_JNI_TYPE_TRAIT(JNI_TYPE_TRAIT) + +// See ReifiedJniTypeTrait for documentation. +constexpr ConstexprOptional +ReifiedJniTypeTrait::MostSimilarTypeDescriptor(ConstexprStringView type_descriptor) { +#define MATCH_EXACT_TYPE_DESCRIPTOR_FN(type, type_desc, native_kind, ...) \ + if (type_descriptor == type_desc && native_kind >= kNormalNative) { \ + return { Reify() }; \ + } + + // Attempt to look up by the precise type match first. + DEFINE_JNI_TYPE_TRAIT(MATCH_EXACT_TYPE_DESCRIPTOR_FN); + + // Otherwise, we need to do an imprecise match: + char shorty = type_descriptor.size() >= 1 ? type_descriptor[0] : '\0'; + if (shorty == 'L') { + // Something more specific like Ljava/lang/Throwable, string, etc + // is already matched by the macro-expanded conditions above. 
+ return {Reify()}; + } else if (type_descriptor.size() >= 2) { + auto shorty_shorty = type_descriptor.substr(/*start*/0, /*size*/2u); + if (shorty_shorty == "[[" || shorty_shorty == "[L") { + // JNI arrays are covariant, so any type T[] (T!=primitive) is castable to Object[]. + return {Reify()}; + } + } + + // To handle completely invalid values. + return NullConstexprOptional{}; +} + +// Is this actual JNI position consistent with the expected position? +constexpr bool IsValidJniParameterPosition(NativeKind native_kind, + NativePositionAllowed position, + NativePositionAllowed expected_position) { + X_ASSERT(expected_position != kNotAnyPosition); + + if (native_kind == kCriticalNative) { + // CriticalNatives ignore positions since the first 2 special + // parameters are stripped. + return true; + } + + // Is this a return-only position? + if (expected_position == kReturnPosition) { + if (position != kReturnPosition) { + // void can only be in the return position. + return false; + } + // Don't do the other non-return position checks for a return-only position. + return true; + } + + // JNIEnv* can only be in the first spot. + if (position == kZerothPosition && expected_position != kZerothPosition) { + return false; + // jobject, jclass can be 1st or anywhere afterwards. + } else if (position == kFirstOrLaterPosition && expected_position != kFirstOrLaterPosition) { + return false; + // All other parameters must be in 2nd+ spot, or in the return type. + } else if (position == kSecondOrLaterPosition || position == kReturnPosition) { + if (expected_position != kFirstOrLaterPosition && expected_position != kSecondOrLaterPosition) { + return false; + } + } + + return true; +} + +// Check if a jni parameter type is valid given its position and native_kind. +template +constexpr bool IsValidJniParameter(NativeKind native_kind, NativePositionAllowed position) { + // const,volatile does not affect JNI compatibility since it does not change ABI. + using expected_trait = jni_type_trait::type>; + NativeKind expected_native_kind = expected_trait::native_kind; + + // Most types 'T' are not valid for JNI. + if (expected_native_kind == NativeKind::kNotJni) { + return false; + } + + // The rest of the types might be valid, but it depends on the context (native_kind) + // and also on their position within the parameters. + + // Position-check first. + NativePositionAllowed expected_position = expected_trait::position_allowed; + if (!IsValidJniParameterPosition(native_kind, position, expected_position)) { + return false; + } + + // Ensure the type appropriate is for the native kind. + if (expected_native_kind == kNormalJniCallingConventionParameter) { + // It's always wrong to use a JNIEnv* anywhere but the 0th spot. + if (native_kind == kCriticalNative) { + // CriticalNative does not allow using a JNIEnv*. + return false; + } + + return true; // OK: JniEnv* used in 0th position. + } else if (expected_native_kind == kCriticalNative) { + // CriticalNative arguments are always valid JNI types anywhere used. + return true; + } else if (native_kind == kCriticalNative) { + // The expected_native_kind was non-critical but we are in a critical context. + // Illegal type. + return false; + } + + // Everything else is fine, e.g. fast/normal native + fast/normal native parameters. + return true; +} + +// Is there sufficient number of parameters given the kind of JNI that it is? 
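+// Normal and @FastNative methods always take the implicit JNIEnv* plus a
+// jobject/jclass as their first two C++ parameters, so fewer than two
+// parameters can never form a valid signature for them; @CriticalNative
+// methods have that prefix stripped, so any parameter count is accepted.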
+constexpr bool IsJniParameterCountValid(NativeKind native_kind, size_t count) { + if (native_kind == kNormalNative || native_kind == kFastNative) { + return count >= 2u; + } else if (native_kind == kCriticalNative) { + return true; + } + + constexpr bool invalid_parameter = false; + X_ASSERT(invalid_parameter); + return false; +} + +// Basic template interface. See below for partial specializations. +// +// Each instantiation will have a 'value' field that determines whether or not +// all of the Args are valid JNI arguments given their native_kind. +template +struct is_valid_jni_argument_type { + // static constexpr bool value = ?; +}; + +template +struct is_valid_jni_argument_type { + static constexpr bool value = true; +}; + +template +struct is_valid_jni_argument_type { + static constexpr bool value = + IsValidJniParameter(native_kind, ConvertPositionToAllowed(position)); +}; + +template +struct is_valid_jni_argument_type { + static constexpr bool value = + IsValidJniParameter(native_kind, ConvertPositionToAllowed(position)) + && is_valid_jni_argument_type::value; +}; + +// This helper is required to decompose the function type into a list of arg types. +template +struct is_valid_jni_function_type_helper; + +template +struct is_valid_jni_function_type_helper { + static constexpr bool value = + IsJniParameterCountValid(native_kind, sizeof...(Args)) + && IsValidJniParameter(native_kind, kReturnPosition) + && is_valid_jni_argument_type::value; +}; + +// Is this function type 'T' a valid C++ function type given the native_kind? +template +constexpr bool IsValidJniFunctionType() { + return is_valid_jni_function_type_helper::value; + // TODO: we could replace template metaprogramming with constexpr by + // using FunctionTypeMetafunction. +} + +// Many parts of std::array is not constexpr until C++17. +template +struct ConstexprArray { + // Intentionally public to conform to std::array. + // This means all constructors are implicit. + // *NOT* meant to be used directly, use the below functions instead. + // + // The reason std::array has it is to support direct-list-initialization, + // e.g. "ConstexprArray{T{...}, T{...}, T{...}, ...};" + // + // Note that otherwise this would need a very complicated variadic + // argument constructor to only support list of Ts. + T _array[N]; + + constexpr size_t size() const { + return N; + } + + using iterator = T*; + using const_iterator = const T*; + + constexpr iterator begin() { + return &_array[0]; + } + + constexpr iterator end() { + return &_array[N]; + } + + constexpr const_iterator begin() const { + return &_array[0]; + } + + constexpr const_iterator end() const { + return &_array[N]; + } + + constexpr T& operator[](size_t i) { + return _array[i]; + } + + constexpr const T& operator[](size_t i) const { + return _array[i]; + } +}; + +// Why do we need this? +// auto x = {1,2,3} creates an initializer_list, +// but they can't be returned because it contains pointers to temporaries. +// auto x[] = {1,2,3} doesn't even work because auto for arrays is not supported. +// +// an alternative would be to pull up std::common_t directly into the call site +// std::common_type_t array[] = {1,2,3} +// but that's even more cludgier. +// +// As the other "stdlib-wannabe" functions, it's weaker than the library +// fundamentals std::make_array but good enough for our use. +template +constexpr auto MakeArray(Args&& ... args) { + return ConstexprArray::type, + sizeof...(Args)>{args...}; +} + +// See below. 
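+// (FunctionTypeMetafunction, specialized below for function types, decomposes a
+// signature such as jint(JNIEnv*, jobject, jlong) into its components so that a
+// metafunction can be mapped over the return type and every argument type.)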
+template +struct FunctionTypeMetafunction { +}; + +// Enables the "map" operation over the function component types. +template +struct FunctionTypeMetafunction { + // Count how many arguments there are, and add 1 for the return type. + static constexpr size_t + count = sizeof...(Args) + 1u; // args and return type. + + // Return an array where the metafunction 'Func' has been applied + // to every argument type. The metafunction must be returning a common type. + template class Func> + static constexpr auto map_args() { + return map_args_impl(holder < Args > {}...); + } + + // Apply the metafunction 'Func' over the return type. + template class Func> + static constexpr auto map_return() { + return Func{}(); + } + + private: + template + struct holder { + }; + + template class Func, typename Arg0, typename... ArgsRest> + static constexpr auto map_args_impl(holder, holder...) { + // One does not simply call MakeArray with 0 template arguments... + auto array = MakeArray( + Func{}()... + ); + + return array; + } + + template class Func> + static constexpr auto map_args_impl() { + // This overload provides support for MakeArray() with 0 arguments. + using ComponentType = decltype(Func{}()); + + return ConstexprArray{}; + } +}; + +// Apply ReifiedJniTypeTrait::Reify for every function component type. +template +struct ReifyJniTypeMetafunction { + constexpr ReifiedJniTypeTrait operator()() const { + auto res = ReifiedJniTypeTrait::Reify(); + X_ASSERT(res.native_kind != kNotJni); + return res; + } +}; + +// Ret(Args...) where every component is a ReifiedJniTypeTrait. +template +using ReifiedJniSignature = FunctionSignatureDescriptor; + +// Attempts to convert the function type T into a list of ReifiedJniTypeTraits +// that correspond to the function components. +// +// If conversion fails (i.e. non-jni compatible types), then: +// parses are fatal -> assertion is triggered (default behavior), +// parses are nonfatal -> returns nullopt (test behavior). +template ::count> +constexpr ConstexprOptional> +MaybeMakeReifiedJniSignature() { + if (!IsValidJniFunctionType()) { + PARSE_FAILURE("The function signature has one or more types incompatible with JNI."); + } + + ReifiedJniTypeTrait return_jni_trait = + FunctionTypeMetafunction::template map_return(); + + constexpr size_t + kSkipArgumentPrefix = (native_kind != kCriticalNative) ? 2u : 0u; + ConstexprVector args; + auto args_list = + FunctionTypeMetafunction::template map_args(); + size_t args_index = 0; + for (auto& arg : args_list) { + // Ignore the 'JNIEnv*, jobject' / 'JNIEnv*, jclass' prefix, + // as its not part of the function descriptor string. + if (args_index >= kSkipArgumentPrefix) { + args.push_back(arg); + } + + ++args_index; + } + + return {{args, return_jni_trait}}; +} + +#define COMPARE_DESCRIPTOR_CHECK(expr) if (!(expr)) return false +#define COMPARE_DESCRIPTOR_FAILURE_MSG(msg) if ((true)) return false + +// Compares a user-defined JNI descriptor (of a single argument or return value) +// to a reified jni type trait that was derived from the C++ function type. +// +// If comparison fails (i.e. non-jni compatible types), then: +// parses are fatal -> assertion is triggered (default behavior), +// parses are nonfatal -> returns false (test behavior). 
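+//
+// For example, "Ljava/lang/String;" is only accepted against jstring (final types
+// must match their canonical descriptor exactly), while a descriptor for an
+// arbitrary class such as "Lcom/example/Foo;" (hypothetical) is weakly accepted
+// against jobject, and any "[..." descriptor is weakly accepted against jarray.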
+constexpr bool +CompareJniDescriptorNodeErased(JniDescriptorNode user_defined_descriptor, + ReifiedJniTypeTrait derived) { + + ConstexprOptional user_reified_opt = + ReifiedJniTypeTrait::MostSimilarTypeDescriptor(user_defined_descriptor.longy); + + if (!user_reified_opt.has_value()) { + COMPARE_DESCRIPTOR_FAILURE_MSG( + "Could not find any JNI C++ type corresponding to the type descriptor"); + } + + char user_shorty = user_defined_descriptor.longy.size() > 0 ? + user_defined_descriptor.longy[0] : + '\0'; + + ReifiedJniTypeTrait user = user_reified_opt.value(); + if (user == derived) { + // If we had a similar match, immediately return success. + return true; + } else if (derived.type_name == "jthrowable") { + if (user_shorty == 'L') { + // Weakly allow any objects to correspond to a jthrowable. + // We do not know the managed type system so we have to be permissive here. + return true; + } else { + COMPARE_DESCRIPTOR_FAILURE_MSG( + "jthrowable must correspond to an object type descriptor"); + } + } else if (derived.type_name == "jarray") { + if (user_shorty == '[') { + // a jarray is the base type for all other array types. Allow. + return true; + } else { + // Ljava/lang/Object; is the root for all array types. + // Already handled above in 'if user == derived'. + COMPARE_DESCRIPTOR_FAILURE_MSG( + "jarray must correspond to array type descriptor"); + } + } + // Otherwise, the comparison has failed and the rest of this is only to + // pick the most appropriate error message. + // + // Note: A weaker form of comparison would allow matching 'Ljava/lang/String;' + // against 'jobject', etc. However the policy choice here is to enforce the strictest + // comparison that we can to utilize the type system to its fullest. + + if (derived.type_finality == kFinal || user.type_finality == kFinal) { + // Final types, e.g. "I", "Ljava/lang/String;" etc must match exactly + // the C++ jni descriptor string ('I' -> jint, 'Ljava/lang/String;' -> jstring). + COMPARE_DESCRIPTOR_FAILURE_MSG( + "The JNI descriptor string must be the exact type equivalent of the " + "C++ function signature."); + } else if (user_shorty == '[') { + COMPARE_DESCRIPTOR_FAILURE_MSG( + "The array JNI descriptor must correspond to j${type}Array or jarray"); + } else if (user_shorty == 'L') { + COMPARE_DESCRIPTOR_FAILURE_MSG( + "The object JNI descriptor must correspond to jobject."); + } else { + X_ASSERT(false); // We should never get here, but either way this means the types did not match + COMPARE_DESCRIPTOR_FAILURE_MSG( + "The JNI type descriptor string does not correspond to the C++ JNI type."); + } +} + +// Matches a user-defined JNI function descriptor against the C++ function type. +// +// If matches fails, then: +// parses are fatal -> assertion is triggered (default behavior), +// parses are nonfatal -> returns false (test behavior). +template +constexpr bool +MatchJniDescriptorWithFunctionType(ConstexprStringView user_function_descriptor) { + constexpr size_t kReifiedMaxSize = FunctionTypeMetafunction::count; + + ConstexprOptional> + reified_signature_opt = + MaybeMakeReifiedJniSignature(); + if (!reified_signature_opt) { + // Assertion handling done by MaybeMakeReifiedJniSignature. + return false; + } + + ConstexprOptional> user_jni_sig_desc_opt = + ParseSignatureAsList(user_function_descriptor); + + if (!user_jni_sig_desc_opt) { + // Assertion handling done by ParseSignatureAsList. 
+ return false; + } + + ReifiedJniSignature + reified_signature = reified_signature_opt.value(); + JniSignatureDescriptor + user_jni_sig_desc = user_jni_sig_desc_opt.value(); + + if (reified_signature.args.size() != user_jni_sig_desc.args.size()) { + COMPARE_DESCRIPTOR_FAILURE_MSG( + "Number of parameters in JNI descriptor string" + "did not match number of parameters in C++ function type"); + } else if (!CompareJniDescriptorNodeErased(user_jni_sig_desc.ret, + reified_signature.ret)) { + // Assertion handling done by CompareJniDescriptorNodeErased. + return false; + } else { + for (size_t i = 0; i < user_jni_sig_desc.args.size(); ++i) { + if (!CompareJniDescriptorNodeErased(user_jni_sig_desc.args[i], + reified_signature.args[i])) { + // Assertion handling done by CompareJniDescriptorNodeErased. + return false; + } + } + } + + return true; +} + +// Supports inferring the JNI function descriptor string from the C++ +// function type when all type components are final. +template +struct InferJniDescriptor { + static constexpr size_t kMaxSize = FunctionTypeMetafunction::count; + + // Convert the C++ function type into a JniSignatureDescriptor which holds + // the canonical (according to jni_traits) descriptors for each component. + // The C++ type -> JNI mapping must be nonambiguous (see jni_macros.h for exact rules). + // + // If conversion fails (i.e. C++ signatures is illegal for JNI, or the types are ambiguous): + // if parsing is fatal -> assertion failure (default behavior) + // if parsing is nonfatal -> returns nullopt (test behavior). + static constexpr ConstexprOptional> FromFunctionType() { + constexpr size_t kReifiedMaxSize = kMaxSize; + ConstexprOptional> + reified_signature_opt = + MaybeMakeReifiedJniSignature(); + if (!reified_signature_opt) { + // Assertion handling done by MaybeMakeReifiedJniSignature. + return NullConstexprOptional{}; + } + + ReifiedJniSignature + reified_signature = reified_signature_opt.value(); + + JniSignatureDescriptor signature_descriptor; + + if (reified_signature.ret.type_finality != kFinal) { + // e.g. jint, jfloatArray, jstring, jclass are ok. jobject, jthrowable, jarray are not. + PARSE_FAILURE("Bad return type. Only unambigous (final) types can be used to infer a signature."); // NOLINT + } + signature_descriptor.ret = + JniDescriptorNode{reified_signature.ret.type_descriptor}; + + for (size_t i = 0; i < reified_signature.args.size(); ++i) { + const ReifiedJniTypeTrait& arg_trait = reified_signature.args[i]; + if (arg_trait.type_finality != kFinal) { + PARSE_FAILURE("Bad parameter type. Only unambigous (final) types can be used to infer a signature."); // NOLINT + } + signature_descriptor.args.push_back(JniDescriptorNode{ + arg_trait.type_descriptor}); + } + + return {signature_descriptor}; + } + + // Calculate the exact string size that the JNI descriptor will be + // at runtime. + // + // Without this we cannot allocate enough space within static storage + // to fit the compile-time evaluated string. + static constexpr size_t CalculateStringSize() { + ConstexprOptional> + signature_descriptor_opt = + FromFunctionType(); + if (!signature_descriptor_opt) { + // Assertion handling done by FromFunctionType. + return 0u; + } + + JniSignatureDescriptor signature_descriptor = + signature_descriptor_opt.value(); + + size_t acc_size = 1u; // All sigs start with '('. + + // Now add every parameter. 
+ for (size_t j = 0; j < signature_descriptor.args.size(); ++j) { + const JniDescriptorNode& arg_descriptor = signature_descriptor.args[j]; + // for (const JniDescriptorNode& arg_descriptor : signature_descriptor.args) { + acc_size += arg_descriptor.longy.size(); + } + + acc_size += 1u; // Add space for ')'. + + // Add space for the return value. + acc_size += signature_descriptor.ret.longy.size(); + + return acc_size; + } + + static constexpr size_t kMaxStringSize = CalculateStringSize(); + using ConstexprStringDescriptorType = ConstexprArray; + + // Convert the JniSignatureDescriptor we get in FromFunctionType() + // into a flat constexpr char array. + // + // This is done by repeated string concatenation at compile-time. + static constexpr ConstexprStringDescriptorType GetString() { + ConstexprStringDescriptorType c_str{}; + + ConstexprOptional> + signature_descriptor_opt = + FromFunctionType(); + if (!signature_descriptor_opt.has_value()) { + // Assertion handling done by FromFunctionType. + c_str[0] = '\0'; + return c_str; + } + + JniSignatureDescriptor signature_descriptor = + signature_descriptor_opt.value(); + + size_t pos = 0u; + c_str[pos++] = '('; + + // Copy all parameter descriptors. + for (size_t j = 0; j < signature_descriptor.args.size(); ++j) { + const JniDescriptorNode& arg_descriptor = signature_descriptor.args[j]; + ConstexprStringView longy = arg_descriptor.longy; + for (size_t i = 0; i < longy.size(); ++i) { + c_str[pos++] = longy[i]; + } + } + + c_str[pos++] = ')'; + + // Copy return descriptor. + ConstexprStringView longy = signature_descriptor.ret.longy; + for (size_t i = 0; i < longy.size(); ++i) { + c_str[pos++] = longy[i]; + } + + X_ASSERT(pos == kMaxStringSize); + + c_str[pos] = '\0'; + + return c_str; + } + + // Turn a pure constexpr string into one that can be accessed at non-constexpr + // time. Note that the 'static constexpr' storage must be in the scope of a + // function (prior to C++17) to avoid linking errors. + static const char* GetStringAtRuntime() { + static constexpr ConstexprStringDescriptorType str = GetString(); + return &str[0]; + } +}; + +// Expression to return JNINativeMethod, performs checking on signature+fn. +#define MAKE_CHECKED_JNI_NATIVE_METHOD(native_kind, name_, signature_, fn) \ + ([]() { \ + using namespace nativehelper::detail; /* NOLINT(google-build-using-namespace) */ \ + static_assert( \ + MatchJniDescriptorWithFunctionType(signature_),\ + "JNI signature doesn't match C++ function type."); \ + /* Suppress implicit cast warnings by explicitly casting. */ \ + return JNINativeMethod { \ + const_cast(name_), \ + const_cast(signature_), \ + reinterpret_cast(&(fn))}; \ + })() + +// Expression to return JNINativeMethod, infers signature from fn. +#define MAKE_INFERRED_JNI_NATIVE_METHOD(native_kind, name_, fn) \ + ([]() { \ + using namespace nativehelper::detail; /* NOLINT(google-build-using-namespace) */ \ + /* Suppress implicit cast warnings by explicitly casting. 
*/ \ + return JNINativeMethod { \ + const_cast(name_), \ + const_cast( \ + InferJniDescriptor::GetStringAtRuntime()), \ + reinterpret_cast(&(fn))}; \ + })() + +} // namespace detail +} // namespace nativehelper + diff --git a/third_party/libnativehelper/platform_include/nativehelper/jni_macros.h b/third_party/libnativehelper/platform_include/nativehelper/jni_macros.h new file mode 100644 index 0000000000..da01e6fae6 --- /dev/null +++ b/third_party/libnativehelper/platform_include/nativehelper/jni_macros.h @@ -0,0 +1,285 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Compile-time, zero-cost checking of JNI signatures against their C++ function type. + * This can trigger compile-time assertions if any of the input is invalid: + * (a) The signature specified does not conform to the JNI function descriptor syntax. + * (b) The C++ function is itself an invalid JNI function (e.g. missing JNIEnv*, etc). + * (c) The descriptor does not match the C++ function (e.g. "()V" will not match jint(jint)). + * + * The fundamental macros are as following: + * MAKE_JNI_[FAST_|CRITICAL_]NATIVE_METHOD - Create a checked JNINativeMethod{name, sig, func}. + * MAKE_JNI_[FAST_|CRITICAL_]NATIVE_METHOD_AUTOSIG - Same as above, but infer the JNI signature. + * + * Usage examples: + * // path/to/package/KlassName.java + * class KlassName { + * native jobject normal(int x); + * @FastNative native jobject fast(int x); + * @CriticalNative native int critical(long ptr); + * } + * // path_to_package_KlassName.cpp + * jobject KlassName_normal(JNIEnv*,jobject,jint) {...} + * jobject KlassName_fast(JNIEnv*,jobject,jint) {...} + * jint KlassName_critical(jlong) {...} + * + * // Manually specify each signature: + * JNINativeMethod[] gMethods = { + * MAKE_JNI_NATIVE_METHOD("normal", "(I)Ljava/lang/Object;", KlassName_normal), + * MAKE_JNI_FAST_NATIVE_METHOD("fast", "(I)Ljava/lang/Object;", KlassName_fast), + * MAKE_JNI_CRITICAL_NATIVE_METHOD("critical", "(Z)I", KlassName_critical), + * }; + * + * // Automatically infer the signature: + * JNINativeMethod[] gMethodsAutomaticSignature = { + * MAKE_JNI_NATIVE_METHOD_AUTOSIG("normal", KlassName_normal), + * MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG("fast", KlassName_fast), + * MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG("critical", KlassName_critical), + * }; + * + * // and then call JNIEnv::RegisterNatives with gMethods as usual. + * + * For convenience the following macros are defined: + * [FAST_|CRITICAL_]NATIVE_METHOD - Return JNINativeMethod for class, func name, and signature. + * OVERLOADED_[FAST_|CRITICAL_]NATIVE_METHOD - Same as above but allows a separate func identifier. + * [FAST_|CRITICAL_]NATIVE_METHOD_AUTOSIG - Return JNINativeMethod, sig inferred from function. + * + * The FAST_ prefix corresponds to functions annotated with @FastNative, + * and the CRITICAL_ prefix corresponds to functions annotated with @CriticalNative. + * See dalvik.annotation.optimization.CriticalNative for more details. 
+ * + * ======================================= + * Checking rules + * ======================================= + * + * --------------------------------------- + * JNI descriptor syntax for functions + * + * Refer to "Chapter 3: JNI Types and Data Structures" of the JNI specification + * under the subsection "Type Signatures" table entry "method type". + * + * JNI signatures not conforming to the above syntax are rejected. + * --------------------------------------- + * C++ function types + * + * A normal or @FastNative JNI function type must be of the form + * + * ReturnType (JNIEnv*, jclass|jobject, [ArgTypes...]) {} + * + * A @CriticalNative JNI function type: + * + * must be of the form... ReturnType ([ArgTypes...]){} + * and must not contain any Reference Types. + * + * Refer to "Chapter 3: JNI Types and Data Structures" of the JNI specification + * under the subsection "Primitive Types" and "Reference Types" for the list + * of valid argument/return types. + * + * C++ function types not conforming to the above requirements are rejected. + * --------------------------------------- + * Matching of C++ function type against JNI function descriptor. + * + * Assuming all of the above conditions are met for signature and C++ type validity, + * then matching between the signature and the type validity can occur: + * + * Given a signature (Args...)Ret and the + * C++ function type of the form "CRet fn(JNIEnv*, jclass|jobject, CArgs...)", + * or for @CriticalNative of the form "CRet fn(CArgs...)" + * + * The number of Args... and the number of CArgs... must be equal. + * + * If so, attempt to match every component from the signature and function type + * against each other: + * + * ReturnType: + * V <-> void + * ArgumentType + * + * ArgumentType: + * PrimitiveType + * ReferenceType [except for @CriticalNative] + * + * PrimitiveType: + * Z <-> jboolean + * B <-> jbyte + * C <-> jchar + * S <-> jshort + * I <-> jint + * J <-> jlong + * F <-> jfloat + * D <-> jdouble + * + * ReferenceType: + * Ljava/lang/String; <-> jstring + * Ljava/lang/Class; <-> jclass + * L*; <- jobject + * Ljava/lang/Throwable; -> jthrowable + * L*; <- jthrowable + * [ PrimitiveType <-> ${CPrimitiveType}Array + * [ ReferenceType <-> jobjectArray + * [* <- jarray + * + * Wherein <-> represents a strong match (if the left or right pattern occurs, + * then left must match right, otherwise matching fails). <- and -> represent + * weak matches (that is, other match rules can be still attempted). + * + * Sidenote: Whilst a jobject could also represent a jclass, jstring, etc, + * the stricter approach is taken: the most exact C++ type must be used. + */ + +#ifndef NATIVEHELPER_JNI_MACROS_H +#define NATIVEHELPER_JNI_MACROS_H + +// The below basic macros do not perform automatic stringification, +// invoked e.g. as MAKE_JNI_NATIVE_METHOD("some_name", "()V", void_fn) + +// An expression that evaluates to JNINativeMethod { name, signature, function }, +// and applies the above compile-time checking for signature+function. +// The equivalent Java Language code must not be annotated with @FastNative/@CriticalNative. +#define MAKE_JNI_NATIVE_METHOD(name, signature, function) \ + _NATIVEHELPER_JNI_MAKE_METHOD(kNormalNative, name, signature, function) + +// An expression that evaluates to JNINativeMethod { name, signature, function }, +// and applies the above compile-time checking for signature+function. +// The equivalent Java Language code must be annotated with @FastNative. 
+#define MAKE_JNI_FAST_NATIVE_METHOD(name, signature, function) \
+  _NATIVEHELPER_JNI_MAKE_METHOD(kFastNative, name, signature, function)
+
+// An expression that evaluates to JNINativeMethod { name, signature, function },
+// and applies the above compile-time checking for signature+function.
+// The equivalent Java Language code must be annotated with @CriticalNative.
+#define MAKE_JNI_CRITICAL_NATIVE_METHOD(name, signature, function) \
+  _NATIVEHELPER_JNI_MAKE_METHOD(kCriticalNative, name, signature, function)
+
+// Signature-inferring macros are also available,
+// which also check the C++ function types for validity:
+
+// An expression that evaluates to JNINativeMethod { name, infersig(function), function }
+// by inferring the signature at compile-time. Only works when the C++ function type
+// corresponds to one unambiguous JNI parameter (e.g. 'jintArray' -> '[I' but 'jobject' -> ???).
+//
+// The equivalent Java Language code must not be annotated with @FastNative/@CriticalNative.
+#define MAKE_JNI_NATIVE_METHOD_AUTOSIG(name, function) \
+  _NATIVEHELPER_JNI_MAKE_METHOD_AUTOSIG(kNormalNative, name, function)
+
+// An expression that evaluates to JNINativeMethod { name, infersig(function), function }
+// by inferring the signature at compile-time. Only works when the C++ function type
+// corresponds to one unambiguous JNI parameter (e.g. 'jintArray' -> '[I' but 'jobject' -> ???).
+//
+// The equivalent Java Language code must be annotated with @FastNative.
+#define MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG(name, function) \
+  _NATIVEHELPER_JNI_MAKE_METHOD_AUTOSIG(kFastNative, name, function)
+
+// An expression that evaluates to JNINativeMethod { name, infersig(function), function }
+// by inferring the signature at compile-time.
+//
+// The equivalent Java Language code must be annotated with @CriticalNative.
+#define MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG(name, function) \
+  _NATIVEHELPER_JNI_MAKE_METHOD_AUTOSIG(kCriticalNative, name, function)
+
+// Convenience macros when the functions follow the naming convention:
+//    .java file                  .cpp file
+//  JavaLanguageName <-> ${ClassName}_${JavaLanguageName}
+//
+// Stringification is done automatically, invoked as:
+//   NATIVE_[FAST_|CRITICAL]_METHOD(ClassName, JavaLanguageName, Signature)
+//
+// Intended to construct a JNINativeMethod.
+//   (Assumes the C name is the ClassName_JavaMethodName).
+//
+// The Java Language code must be annotated with one of (none,@FastNative,@CriticalNative)
+// for the (none,FAST_,CRITICAL_) variants of these macros.
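+//
+// Illustrative example (hypothetical class and method names):
+//   Java:  class Klass { native int sum(int a, int b); }
+//   C++:   static jint Klass_sum(JNIEnv*, jobject, jint, jint);
+//   Then NATIVE_METHOD(Klass, sum, "(II)I") expands to
+//   MAKE_JNI_NATIVE_METHOD("sum", "(II)I", Klass_sum).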
+ +#define NATIVE_METHOD(className, functionName, signature) \ + MAKE_JNI_NATIVE_METHOD(#functionName, signature, className ## _ ## functionName) + +#define OVERLOADED_NATIVE_METHOD(className, functionName, signature, identifier) \ + MAKE_JNI_NATIVE_METHOD(#functionName, signature, className ## _ ## identifier) + +#define NATIVE_METHOD_AUTOSIG(className, functionName) \ + MAKE_JNI_NATIVE_METHOD_AUTOSIG(#functionName, className ## _ ## functionName) + +#define FAST_NATIVE_METHOD(className, functionName, signature) \ + MAKE_JNI_FAST_NATIVE_METHOD(#functionName, signature, className ## _ ## functionName) + +#define OVERLOADED_FAST_NATIVE_METHOD(className, functionName, signature, identifier) \ + MAKE_JNI_FAST_NATIVE_METHOD(#functionName, signature, className ## _ ## identifier) + +#define FAST_NATIVE_METHOD_AUTOSIG(className, functionName) \ + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG(#functionName, className ## _ ## functionName) + +#define CRITICAL_NATIVE_METHOD(className, functionName, signature) \ + MAKE_JNI_CRITICAL_NATIVE_METHOD(#functionName, signature, className ## _ ## functionName) + +#define OVERLOADED_CRITICAL_NATIVE_METHOD(className, functionName, signature, identifier) \ + MAKE_JNI_CRITICAL_NATIVE_METHOD(#functionName, signature, className ## _ ## identifier) + +#define CRITICAL_NATIVE_METHOD_AUTOSIG(className, functionName) \ + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG(#functionName, className ## _ ## functionName) + +//////////////////////////////////////////////////////// +// IMPLEMENTATION ONLY. +// DO NOT USE DIRECTLY. +//////////////////////////////////////////////////////// + +#if defined(__cplusplus) && __cplusplus >= 201402L +#include "nativehelper/detail/signature_checker.h" // for MAKE_CHECKED_JNI_NATIVE_METHOD +#endif + +// Expands to an expression whose type is JNINativeMethod. +// This is for older versions of C++ or C, so it has no compile-time checking. +#define _NATIVEHELPER_JNI_MAKE_METHOD_OLD(kind, name, sig, fn) \ + ( \ + (JNINativeMethod) { \ + (name), \ + (sig), \ + _NATIVEHELPER_JNI_MACRO_CAST(reinterpret_cast, void *)(fn) \ + } \ + ) + +// C++14 or better, use compile-time checking. +#if defined(__cplusplus) && __cplusplus >= 201402L +// Expands to a compound expression whose type is JNINativeMethod. +#define _NATIVEHELPER_JNI_MAKE_METHOD(kind, name, sig, fn) \ + MAKE_CHECKED_JNI_NATIVE_METHOD(kind, name, sig, fn) + +// Expands to a compound expression whose type is JNINativeMethod. +#define _NATIVEHELPER_JNI_MAKE_METHOD_AUTOSIG(kind, name, function) \ + MAKE_INFERRED_JNI_NATIVE_METHOD(kind, name, function) + +#else +// Older versions of C++ or C code get the regular macro that's unchecked. +// Expands to a compound expression whose type is JNINativeMethod. +#define _NATIVEHELPER_JNI_MAKE_METHOD(kind, name, sig, fn) \ + _NATIVEHELPER_JNI_MAKE_METHOD_OLD(kind, name, sig, fn) + +// Need C++14 or newer to use the AUTOSIG macros. +#define _NATIVEHELPER_JNI_MAKE_METHOD_AUTOSIG(kind, name, function) \ + static_assert(false, "Cannot infer JNI signatures prior to C++14 for function " #function); + +#endif // C++14 check + +// C-style cast for C, C++-style cast for C++ to avoid warnings/errors. 
+#if defined(__cplusplus) +#define _NATIVEHELPER_JNI_MACRO_CAST(which_cast, to) \ + which_cast +#else +#define _NATIVEHELPER_JNI_MACRO_CAST(which_cast, to) \ + (to) +#endif + +#endif // NATIVEHELPER_JNI_MACROS_H diff --git a/third_party/libnativehelper/tests/Android.bp b/third_party/libnativehelper/tests/Android.bp new file mode 100644 index 0000000000..8bbeed7d19 --- /dev/null +++ b/third_party/libnativehelper/tests/Android.bp @@ -0,0 +1,74 @@ +// Build the unit tests. + +cc_test { + name: "JniInvocation_test", + test_suites: ["device-tests"], + host_supported: true, + srcs: ["JniInvocation_test.cpp"], + cflags: ["-Wall", "-Werror"], + // Link to the non-stub version of the library to access some internal + // functions. + bootstrap: true, + shared_libs: ["libnativehelper"], +} + +cc_test { + name: "JniSafeRegisterNativeMethods_test", + host_supported: true, + srcs: ["JniSafeRegisterNativeMethods_test.cpp"], + + cflags: [ + // Base set of cflags used by all things ART. + "-fno-rtti", + "-ggdb3", + "-Wall", + "-Werror", + "-Wextra", + "-Wstrict-aliasing", + "-fstrict-aliasing", + "-Wunreachable-code", + "-Wredundant-decls", + "-Wshadow", + "-Wunused", + "-fvisibility=protected", + + // Warn about thread safety violations with clang. + "-Wthread-safety", + "-Wthread-safety-negative", + + // Warn if switch fallthroughs aren't annotated. + "-Wimplicit-fallthrough", + + // Enable float equality warnings. + "-Wfloat-equal", + + // Enable warning of converting ints to void*. + "-Wint-to-void-pointer-cast", + + // Enable warning of wrong unused annotations. + "-Wused-but-marked-unused", + + // Enable warning for deprecated language features. + "-Wdeprecated", + + // Enable warning for unreachable break & return. + "-Wunreachable-code-break", + "-Wunreachable-code-return", + + // Enable thread annotations for std::mutex, etc. + "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS", + ], + + tidy: true, + + shared_libs: ["libnativehelper"], +} + +cc_test { + name: "libnativehelper_api_test", + host_supported: true, + cflags: ["-Wall", "-Werror"], + srcs: ["libnativehelper_api_test.c"], // C Compilation test. + tidy: true, + shared_libs: ["libnativehelper"], +} diff --git a/third_party/libnativehelper/tests/AndroidTest.xml b/third_party/libnativehelper/tests/AndroidTest.xml new file mode 100644 index 0000000000..0f377f249f --- /dev/null +++ b/third_party/libnativehelper/tests/AndroidTest.xml @@ -0,0 +1,26 @@ + + + + + + \ No newline at end of file diff --git a/third_party/libnativehelper/tests/JniInvocation_test.cpp b/third_party/libnativehelper/tests/JniInvocation_test.cpp new file mode 100644 index 0000000000..bb62e3933a --- /dev/null +++ b/third_party/libnativehelper/tests/JniInvocation_test.cpp @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "NativeBridge_test" + +#include +#include + + +#include "string.h" + +#if defined(__ANDROID__) && defined(__BIONIC__) +#define HAVE_TEST_STUFF 1 +#else +#undef HAVE_TEST_STUFF +#endif + +#ifdef HAVE_TEST_STUFF + +// PROPERTY_VALUE_MAX. +#include "cutils/properties.h" + +#endif + +#ifdef HAVE_TEST_STUFF +static const char* kTestNonNull = "libartd.so"; +static const char* kTestNonNull2 = "libartd2.so"; +static const char* kExpected = "libart.so"; +#endif + +TEST(JNIInvocation, Debuggable) { +#ifdef HAVE_TEST_STUFF + auto is_debuggable = []() { return true; }; + auto get_library_system_property = [](char* buffer) -> int { + strcpy(buffer, kTestNonNull2); + return sizeof(kTestNonNull2); + }; + + char buffer[PROPERTY_VALUE_MAX]; + const char* result = + JniInvocation::GetLibrary(NULL, buffer, is_debuggable, get_library_system_property); + EXPECT_FALSE(result == NULL); + if (result != NULL) { + EXPECT_TRUE(strcmp(result, kTestNonNull2) == 0); + EXPECT_FALSE(strcmp(result, kExpected) == 0); + } + + result = + JniInvocation::GetLibrary(kTestNonNull, buffer, is_debuggable, get_library_system_property); + EXPECT_FALSE(result == NULL); + if (result != NULL) { + EXPECT_TRUE(strcmp(result, kTestNonNull) == 0); + EXPECT_FALSE(strcmp(result, kTestNonNull2) == 0); + } +#else + GTEST_LOG_(WARNING) << "Host testing unsupported. Please run target tests."; +#endif +} + +TEST(JNIInvocation, NonDebuggable) { +#ifdef HAVE_TEST_STUFF + auto is_debuggable = []() { return false; }; + + char buffer[PROPERTY_VALUE_MAX]; + const char* result = JniInvocation::GetLibrary(NULL, buffer, is_debuggable, nullptr); + EXPECT_FALSE(result == NULL); + if (result != NULL) { + EXPECT_TRUE(strcmp(result, kExpected) == 0); + EXPECT_FALSE(strcmp(result, kTestNonNull) == 0); + EXPECT_FALSE(strcmp(result, kTestNonNull2) == 0); + } + + result = JniInvocation::GetLibrary(kTestNonNull, buffer, is_debuggable, nullptr); + EXPECT_FALSE(result == NULL); + if (result != NULL) { + EXPECT_TRUE(strcmp(result, kExpected) == 0); + EXPECT_FALSE(strcmp(result, kTestNonNull) == 0); + } +#else + GTEST_LOG_(WARNING) << "Host testing unsupported. Please run target tests."; +#endif +} diff --git a/third_party/libnativehelper/tests/JniSafeRegisterNativeMethods_test.cpp b/third_party/libnativehelper/tests/JniSafeRegisterNativeMethods_test.cpp new file mode 100644 index 0000000000..716b5f789c --- /dev/null +++ b/third_party/libnativehelper/tests/JniSafeRegisterNativeMethods_test.cpp @@ -0,0 +1,1282 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wused-but-marked-unused" +#pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec" +#pragma clang diagnostic ignored "-Wdeprecated" +#include +#pragma clang diagnostic pop +#include + +#define PARSE_FAILURES_NONFATAL // return empty optionals wherever possible instead of asserting. 
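+// Note: this macro is defined before including jni_macros.h (below) so that the
+// signature checker reports parse/match failures as empty results the tests can
+// inspect, instead of triggering its default fatal assertions.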
+#include "nativehelper/jni_macros.h" + +// Provide static storage to these values so they can be used in a runtime context. +// This has to be defined local to the test translation unit to avoid ODR violations prior to C++17. +#define STORAGE_FN_FOR_JNI_TRAITS(jtype, ...) \ +constexpr char nativehelper::detail::jni_type_trait::type_descriptor[]; \ +constexpr char nativehelper::detail::jni_type_trait::type_name[]; + +DEFINE_JNI_TYPE_TRAIT(STORAGE_FN_FOR_JNI_TRAITS) + +template +std::string stringify_helper(const T& val) { + std::stringstream ss; + ss << val; + return ss.str(); +} + +#define EXPECT_STRINGIFY_EQ(x, y) EXPECT_EQ(stringify_helper(x), stringify_helper(y)) + +TEST(JniSafeRegisterNativeMethods, StringParsing) { + using namespace nativehelper::detail; // NOLINT + + // Super basic bring-up tests for core functionality. + + { + constexpr ConstexprStringView v_str = "V"; + EXPECT_EQ(1u, v_str.size()); + EXPECT_EQ(false, v_str.empty()); + + std::stringstream ss; + ss << v_str; + EXPECT_EQ("V", ss.str()); + } + + { + auto parse = ParseSingleTypeDescriptor("", /*allow_void*/true); + EXPECT_EQ("", parse->token); + EXPECT_EQ("", parse->remainder); + } + + { + auto parse = ParseSingleTypeDescriptor("V", /*allow_void*/true); + EXPECT_EQ("V", parse->token); + EXPECT_EQ("", parse->remainder); + } + + { + auto parse = ParseSingleTypeDescriptor("[I"); + EXPECT_EQ("[I", parse->token); + EXPECT_EQ("", parse->remainder); + } + + { + auto parse = ParseSingleTypeDescriptor("LObject;"); + EXPECT_EQ("LObject;", parse->token); + EXPECT_EQ("", parse->remainder); + } + + { + auto parse = ParseSingleTypeDescriptor("LBadObject);"); + EXPECT_FALSE(parse.has_value()); + } + + { + auto parse = ParseSingleTypeDescriptor("LBadObject(;"); + EXPECT_FALSE(parse.has_value()); + } + + { + auto parse = ParseSingleTypeDescriptor("LBadObject[;"); + EXPECT_FALSE(parse.has_value()); + } + + // Stringify is used for convenience to make writing out tests easier. + // Transforms as "(XYZ)W" -> "args={X,Y,Z}, ret=W" + +#define PARSE_SIGNATURE_AS_LIST(str) (ParseSignatureAsList(str)) + + { + constexpr auto jni_descriptor = PARSE_SIGNATURE_AS_LIST("()V"); + EXPECT_STRINGIFY_EQ("args={}, ret=V", jni_descriptor); + } + + { + constexpr auto + jni_descriptor = PARSE_SIGNATURE_AS_LIST("()Ljava/lang/Object;"); + EXPECT_STRINGIFY_EQ("args={}, ret=Ljava/lang/Object;", jni_descriptor); + } + + { + constexpr auto jni_descriptor = PARSE_SIGNATURE_AS_LIST("()[I"); + EXPECT_STRINGIFY_EQ("args={}, ret=[I", jni_descriptor); + } + +#define EXPECT_OK_SIGNATURE_PARSE(signature, args, ret) \ + do { \ + constexpr auto jni_descriptor = PARSE_SIGNATURE_AS_LIST(signature); \ + EXPECT_EQ(true, jni_descriptor.has_value()); \ + EXPECT_STRINGIFY_EQ("args={" args "}, ret=" ret, jni_descriptor); \ + } while (0) + + // Exhaustive tests for successful parsing. 
+ + EXPECT_OK_SIGNATURE_PARSE("()V", /*args*/"", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("()Z", /*args*/"", /*ret*/"Z"); + EXPECT_OK_SIGNATURE_PARSE("()B", /*args*/"", /*ret*/"B"); + EXPECT_OK_SIGNATURE_PARSE("()C", /*args*/"", /*ret*/"C"); + EXPECT_OK_SIGNATURE_PARSE("()S", /*args*/"", /*ret*/"S"); + EXPECT_OK_SIGNATURE_PARSE("()I", /*args*/"", /*ret*/"I"); + EXPECT_OK_SIGNATURE_PARSE("()F", /*args*/"", /*ret*/"F"); + EXPECT_OK_SIGNATURE_PARSE("()J", /*args*/"", /*ret*/"J"); + EXPECT_OK_SIGNATURE_PARSE("()D", /*args*/"", /*ret*/"D"); + EXPECT_OK_SIGNATURE_PARSE("()Ljava/lang/Object;", /*args*/"", /*ret*/"Ljava/lang/Object;"); + EXPECT_OK_SIGNATURE_PARSE("()[Ljava/lang/Object;", /*args*/"", /*ret*/"[Ljava/lang/Object;"); + EXPECT_OK_SIGNATURE_PARSE("()[I", /*args*/"", /*ret*/"[I"); + EXPECT_OK_SIGNATURE_PARSE("()[[I", /*args*/"", /*ret*/"[[I"); + EXPECT_OK_SIGNATURE_PARSE("()[[[I", /*args*/"", /*ret*/"[[[I"); + + + EXPECT_OK_SIGNATURE_PARSE("(Z)V", /*args*/"Z", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(B)V", /*args*/"B", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(C)D", /*args*/"C", /*ret*/"D"); + EXPECT_OK_SIGNATURE_PARSE("(S)V", /*args*/"S", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(I)V", /*args*/"I", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(F)V", /*args*/"F", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(J)F", /*args*/"J", /*ret*/"F"); + EXPECT_OK_SIGNATURE_PARSE("(D)V", /*args*/"D", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(Ljava/lang/Object;)V", "Ljava/lang/Object;", "V"); + EXPECT_OK_SIGNATURE_PARSE("([Ljava/lang/Object;)V", + "[Ljava/lang/Object;", + "V"); + EXPECT_OK_SIGNATURE_PARSE("([I)V", /*ret*/"[I", "V"); + EXPECT_OK_SIGNATURE_PARSE("([[I)V", /*ret*/"[[I", "V"); + EXPECT_OK_SIGNATURE_PARSE("([[[I)V", /*ret*/"[[[I", "V"); + + EXPECT_OK_SIGNATURE_PARSE("(ZIJ)V", /*args*/"Z,I,J", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(B[IJ)V", /*args*/"B,[I,J", /*ret*/"V"); + EXPECT_OK_SIGNATURE_PARSE("(Ljava/lang/Object;B)D", + /*args*/"Ljava/lang/Object;,B", + /*ret*/"D"); + EXPECT_OK_SIGNATURE_PARSE("(Ljava/lang/Object;Ljava/lang/String;IF)D", + /*args*/"Ljava/lang/Object;,Ljava/lang/String;,I,F", + /*ret*/"D"); + EXPECT_OK_SIGNATURE_PARSE("([[[Ljava/lang/Object;Ljava/lang/String;IF)D", + /*args*/"[[[Ljava/lang/Object;,Ljava/lang/String;,I,F", + /*ret*/"D"); + + /* + * Test Failures in Parsing + */ + +#define EXPECT_FAILED_SIGNATURE_PARSE(jni_descriptor) \ + EXPECT_STRINGIFY_EQ(ConstexprOptional>{},\ + ParseSignatureAsList(jni_descriptor)) + + // For the failures to work we must turn off 'PARSE_FAILURES_FATAL'. + // Otherwise they immediately cause a crash, which is actually the desired behavior + // when this is used by the end-user in REGISTER_NATIVE_METHOD. 
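+  // (The fatal behavior is disabled in this file via the PARSE_FAILURES_NONFATAL
+  // definition near the top, before jni_macros.h is included.)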
+ { + EXPECT_FAILED_SIGNATURE_PARSE(""); + EXPECT_FAILED_SIGNATURE_PARSE("A"); + EXPECT_FAILED_SIGNATURE_PARSE(")"); + EXPECT_FAILED_SIGNATURE_PARSE("V"); + EXPECT_FAILED_SIGNATURE_PARSE("("); + EXPECT_FAILED_SIGNATURE_PARSE("(A"); + EXPECT_FAILED_SIGNATURE_PARSE("()"); + EXPECT_FAILED_SIGNATURE_PARSE("()A"); + EXPECT_FAILED_SIGNATURE_PARSE("()VV"); + EXPECT_FAILED_SIGNATURE_PARSE("()L"); + EXPECT_FAILED_SIGNATURE_PARSE("()L;"); + EXPECT_FAILED_SIGNATURE_PARSE("()BAD;"); + EXPECT_FAILED_SIGNATURE_PARSE("()Ljava/lang/Object"); + EXPECT_FAILED_SIGNATURE_PARSE("()Ljava/lang/Object;X"); + + EXPECT_FAILED_SIGNATURE_PARSE("(V)V"); + EXPECT_FAILED_SIGNATURE_PARSE("(ILcat)V"); + EXPECT_FAILED_SIGNATURE_PARSE("([dog)V"); + EXPECT_FAILED_SIGNATURE_PARSE("(IV)V"); + EXPECT_FAILED_SIGNATURE_PARSE("([V)V"); + EXPECT_FAILED_SIGNATURE_PARSE("([[V)V"); + EXPECT_FAILED_SIGNATURE_PARSE("()v"); + EXPECT_FAILED_SIGNATURE_PARSE("()i"); + EXPECT_FAILED_SIGNATURE_PARSE("()f"); + } + +} + +#define EXPECT_IS_VALID_JNI_ARGUMENT_TYPE(expected, expr) \ + { constexpr bool is_valid = (expr); \ + EXPECT_EQ(expected, is_valid) << #expr; \ + } + +// Basic smoke tests for parameter validity. +// See below for more exhaustive tests. +TEST(JniSafeRegisterNativeMethods, ParameterTypes) { + using namespace nativehelper::detail; // NOLINT + EXPECT_TRUE(IsJniParameterCountValid(kCriticalNative, 0u)); + EXPECT_TRUE(IsJniParameterCountValid(kCriticalNative, 1u)); + EXPECT_TRUE(IsJniParameterCountValid(kCriticalNative, 2u)); + EXPECT_TRUE(IsJniParameterCountValid(kCriticalNative, 3u)); + EXPECT_TRUE(IsJniParameterCountValid(kCriticalNative, 4u)); + + EXPECT_FALSE(IsJniParameterCountValid(kNormalNative, 0u)); + EXPECT_FALSE(IsJniParameterCountValid(kNormalNative, 1u)); + EXPECT_TRUE(IsJniParameterCountValid(kNormalNative, 2u)); + EXPECT_TRUE(IsJniParameterCountValid(kNormalNative, 3u)); + EXPECT_TRUE(IsJniParameterCountValid(kNormalNative, 4u)); + + EXPECT_TRUE((IsValidJniParameter(kNormalNative, kReturnPosition))); + EXPECT_IS_VALID_JNI_ARGUMENT_TYPE(true,(is_valid_jni_argument_type::value)); + EXPECT_IS_VALID_JNI_ARGUMENT_TYPE(true,(is_valid_jni_argument_type::value)); + EXPECT_IS_VALID_JNI_ARGUMENT_TYPE(true,(is_valid_jni_argument_type::value)); + EXPECT_IS_VALID_JNI_ARGUMENT_TYPE(false,(is_valid_jni_argument_type::value)); +} + +struct TestReturnAnything { + template + operator T() const { // NOLINT + return T{}; + } +}; + +namespace test_jni { + void empty_fn() {} +} +struct TestJni { + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" + + // Always bad. + static void bad_cptr(const char* ptr) {} + static void* bad_ret_ptr() { return nullptr; } + static JNIEnv* bad_ret_env() { return nullptr; } + static void bad_wrongplace_env(jobject, JNIEnv*) {} + static void bad_wrongplace_env2(jobject, jobject, JNIEnv*) {} + static void v_e(JNIEnv*) {} + static void v_ei(JNIEnv*, jint l) {} + static void v_el(JNIEnv*, jlong l) {} + static void v_et(JNIEnv*, jstring) {} + static jobject o_none() { return nullptr; } + static void bad_noref_jint_norm(JNIEnv*, jclass, jint&) {} + static void bad_noref_jint_crit(jint&) {} + + // Good depending on the context: + + // CriticalNative + static void empty_fn() {} + static jint int_fn() { return 0; } + + static void v_() {} + // Note: volatile,const don't participate in the function signature + // but we still have these here to clarify that it is indeed allowed. 
+ static void v_vol_i(volatile jint) {} + static void v_const_i(const jint) {} + static void v_i(jint) {} + static void v_l(jlong) {} + static void v_lib(jlong, jint, jboolean) {} + static jshort s_lib(jlong, jint, jboolean) { return 0; } + + // Normal or FastNative. + static void v_eo(JNIEnv*, jobject) {} + static void v_eoo(JNIEnv*, jobject, jobject) {} + static void v_ek(JNIEnv*, jclass) {} + static void v_eolib(JNIEnv*, jobject, jlong, jint, jboolean) {} + static jshort s_eolAibA(JNIEnv*, jobject, jlongArray, jint, jbooleanArray) { return 0; } + +#define DEC_TEST_FN_IMPL(name, ret_t, ...) \ + static ret_t name (__VA_ARGS__) { return TestReturnAnything{}; } + +#define DEC_TEST_FN(name, correct, ret_t, ...) \ + DEC_TEST_FN_IMPL(normal_ ## name, ret_t, JNIEnv*, jobject, __VA_ARGS__) \ + DEC_TEST_FN_IMPL(normal2_ ## name, ret_t, JNIEnv*, jclass, __VA_ARGS__) \ + DEC_TEST_FN_IMPL(critical_ ## name, ret_t, __VA_ARGS__) + +#define DEC_TEST_FN0(name, correct, ret_t) \ + DEC_TEST_FN_IMPL(normal_ ## name, ret_t, JNIEnv*, jobject) \ + DEC_TEST_FN_IMPL(normal2_ ## name, ret_t, JNIEnv*, jclass) \ + DEC_TEST_FN_IMPL(critical_ ## name, ret_t) + +#define JNI_TEST_FN(FN, FN0) \ + FN0(a0,CRITICAL,void) \ + FN0(a ,CRITICAL,jboolean) \ + FN0(a1,CRITICAL,jbyte) \ + FN0(g, CRITICAL,jchar) \ + FN0(c, CRITICAL,jshort) \ + FN0(b, CRITICAL,jint) \ + FN0(f, CRITICAL,jlong) \ + FN0(d, CRITICAL,jfloat) \ + FN0(e, CRITICAL,jdouble) \ + FN0(f2,NORMAL ,jobject) \ + FN0(f3,NORMAL ,jclass) \ + FN0(fr,NORMAL ,jstring) \ + FN0(fa,NORMAL ,jarray) \ + FN0(fb,NORMAL ,jobjectArray) \ + FN0(fc,NORMAL ,jbooleanArray) \ + FN0(fd,NORMAL ,jcharArray) \ + FN0(fe,NORMAL ,jshortArray) \ + FN0(ff,NORMAL ,jintArray) \ + FN0(fg,NORMAL ,jlongArray) \ + FN0(fk,NORMAL ,jfloatArray) \ + FN0(fi,NORMAL ,jdoubleArray) \ + FN0(fl,NORMAL ,jthrowable) \ + FN(aa, CRITICAL,jboolean,jboolean) \ + FN(ax, CRITICAL,jbyte,jbyte) \ + FN(ag, CRITICAL,jchar,jchar) \ + FN(ac, CRITICAL,jshort,jshort) \ + FN(ac2,CRITICAL,jshort,jshort,jchar) \ + FN(ab, CRITICAL,jint,jint) \ + FN(af, CRITICAL,jlong,jlong) \ + FN(ad, CRITICAL,jfloat,jfloat) \ + FN(ae, CRITICAL,jdouble,jdouble) \ + FN(af2,NORMAL ,jobject,jobject) \ + FN(af3,NORMAL ,jclass,jclass) \ + FN(afr,NORMAL ,jstring,jstring) \ + FN(afa,NORMAL ,jarray,jarray) \ + FN(afb,NORMAL ,jobjectArray,jobjectArray) \ + FN(afc,NORMAL ,jbooleanArray,jbooleanArray) \ + FN(afd,NORMAL ,jcharArray,jcharArray) \ + FN(afe,NORMAL ,jshortArray,jshortArray) \ + FN(aff,NORMAL ,jintArray,jintArray) \ + FN(afg,NORMAL ,jlongArray,jlongArray) \ + FN(afk,NORMAL ,jfloatArray,jfloatArray) \ + FN(afi,NORMAL ,jdoubleArray,jdoubleArray) \ + FN(agi,NORMAL ,jdoubleArray,jdoubleArray,jobject) \ + FN(afl,NORMAL ,jthrowable,jthrowable) \ + \ + FN0(z0,ILLEGAL ,JNIEnv*) \ + FN(z1, ILLEGAL ,void, JNIEnv*) \ + FN(z2, ILLEGAL ,JNIEnv*, JNIEnv*) \ + FN(z3, ILLEGAL ,void, void*) \ + FN0(z4,ILLEGAL ,void*) \ + +#define JNI_TEST_FN_BOTH(x) JNI_TEST_FN(x,x) + +// we generate a return statement because some functions are non-void. +// disable the useless warning about returning from a non-void function. +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wreturn-type" + JNI_TEST_FN(DEC_TEST_FN, DEC_TEST_FN0); +#pragma clang diagnostic pop + + // TODO: probably should be an x-macro table + // and that way we can add critical/normal to it as well + // and also the type descriptor, and reuse this for multiple tests. + +#pragma clang diagnostic pop +}; +// Note: Using function-local structs does not work. 
+// Template parameters must have linkage, which function-local structs lack. + +TEST(JniSafeRegisterNativeMethods, FunctionTypes) { + using namespace nativehelper::detail; // NOLINT + // The exact error messages are not tested but they would be seen in the compiler + // stack trace when used from a constexpr context. + +#define IS_VALID_JNI_FUNCTION_TYPE(native_kind, func) \ + (IsValidJniFunctionType()) +#define IS_VALID_NORMAL_JNI_FUNCTION_TYPE(func) IS_VALID_JNI_FUNCTION_TYPE(kNormalNative, func) +#define IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(func) IS_VALID_JNI_FUNCTION_TYPE(kCriticalNative, func) + +#define EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(func) \ + do { \ + EXPECT_FALSE(IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(func)); \ + EXPECT_FALSE(IS_VALID_NORMAL_JNI_FUNCTION_TYPE(func)); \ + } while (false) + +#define EXPECT_NORMAL_JNI_FUNCTION_TYPE(func) \ + do { \ + EXPECT_FALSE(IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(func)); \ + EXPECT_TRUE(IS_VALID_NORMAL_JNI_FUNCTION_TYPE(func)); \ + } while (false) + +#define EXPECT_CRITICAL_JNI_FUNCTION_TYPE(func) \ + do { \ + EXPECT_TRUE(IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(func)); \ + EXPECT_FALSE(IS_VALID_NORMAL_JNI_FUNCTION_TYPE(func)); \ + } while (false) + + { + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_cptr); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_ret_ptr); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_ret_env); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_wrongplace_env); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_wrongplace_env2); + + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::empty_fn); + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(test_jni::empty_fn); + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::int_fn); + + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::v_); + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::v_vol_i); + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::v_const_i); + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::v_i); + EXPECT_CRITICAL_JNI_FUNCTION_TYPE(TestJni::v_l); + + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::v_e); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::v_ei); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::v_el); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::v_et); + + EXPECT_NORMAL_JNI_FUNCTION_TYPE(TestJni::v_eo); + EXPECT_NORMAL_JNI_FUNCTION_TYPE(TestJni::v_ek); + + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::o_none); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_noref_jint_norm); + EXPECT_ILLEGAL_JNI_FUNCTION_TYPE(TestJni::bad_noref_jint_crit); + } + + enum class TestJniKind { + ILLEGAL, + NORMAL, + CRITICAL + }; + + // ILLEGAL signatures are always illegal. + bool kExpected_ILLEGAL_against_NORMAL = false; + bool kExpected_ILLEGAL_against_CRITICAL = false; + // NORMAL signatures are only legal for Normal JNI. + bool kExpected_NORMAL_against_NORMAL = true; + bool kExpected_NORMAL_against_CRITICAL = false; + // CRITICAL signatures are legal for both Normal+Critical JNI. + bool kExpected_CRITICAL_against_CRITICAL = true; + bool kExpected_CRITICAL_against_NORMAL = true; + // Note that we munge normal and critical type signatures separately + // and that a normal_ prefixed is always a bad critical signature, + // and a critical_ prefixed signature is always a bad normal signature. + // See JNI_TEST_FN_MAKE_TEST for the implementation of this logic. + +#undef EXPECTED_FOR +#define EXPECTED_FOR(jni_kind, context) \ + (kExpected_ ## jni_kind ## _against_ ## context) + + { +#define JNI_TEST_FN_MAKE_TEST(name, jni_kind, ...) 
\ + do { \ + EXPECT_EQ(EXPECTED_FOR(jni_kind, NORMAL), \ + IS_VALID_NORMAL_JNI_FUNCTION_TYPE(TestJni::normal_ ## name)); \ + EXPECT_FALSE(IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(TestJni::normal_ ## name)); \ + EXPECT_EQ(EXPECTED_FOR(jni_kind, NORMAL), \ + IS_VALID_NORMAL_JNI_FUNCTION_TYPE(TestJni::normal2_ ## name)); \ + EXPECT_FALSE(IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(TestJni::normal2_ ## name)); \ + EXPECT_EQ(EXPECTED_FOR(jni_kind, CRITICAL), \ + IS_VALID_CRITICAL_JNI_FUNCTION_TYPE(TestJni::critical_ ## name)); \ + EXPECT_FALSE(IS_VALID_NORMAL_JNI_FUNCTION_TYPE(TestJni::critical_ ## name)); \ + } while (false); + + JNI_TEST_FN_BOTH(JNI_TEST_FN_MAKE_TEST); + } +} + +#define EXPECT_CONSTEXPR_EQ(lhs, rhs) \ + { constexpr auto lhs_val = (lhs); \ + constexpr auto rhs_val = (rhs); \ + EXPECT_EQ(lhs_val, rhs_val) << "LHS: " << #lhs << ", RHS: " << #rhs; \ + } + +TEST(JniSafeRegisterNativeMethods, FunctionTypeDescriptorConversion) { + using namespace nativehelper::detail; // NOLINT + { + constexpr auto cvrt = MaybeMakeReifiedJniSignature(); + ASSERT_TRUE(cvrt.has_value()); + EXPECT_CONSTEXPR_EQ(2u, cvrt->max_size); + EXPECT_CONSTEXPR_EQ(1u, cvrt->args.size()); + EXPECT_STRINGIFY_EQ("args={jint}, ret=void", cvrt.value()); + } + + { + constexpr auto cvrt = MaybeMakeReifiedJniSignature(); + EXPECT_FALSE(cvrt.has_value()); + } + + { + constexpr auto cvrt = MaybeMakeReifiedJniSignature(); + ASSERT_TRUE(cvrt.has_value()); + EXPECT_EQ(2u, cvrt->args.size()); + EXPECT_STRINGIFY_EQ("args={jdoubleArray,jobject}, ret=jdoubleArray", cvrt.value()); + } + + { + constexpr auto cvrt = MaybeMakeReifiedJniSignature(); + ASSERT_TRUE(cvrt.has_value()); + EXPECT_EQ(2u, cvrt->args.size()); + EXPECT_STRINGIFY_EQ("args={jshort,jchar}, ret=jshort", cvrt.value()); + } + + // TODO: use JNI_TEST_FN to generate these tests automatically. 
+} + +struct test_function_traits { + static int int_returning_function() { return 0; } +}; + +template +struct apply_return_type { + constexpr int operator()() const { + return sizeof(T) == sizeof(int); + } +}; + +#define FN_ARGS_PAIR(fn) decltype(fn), (fn) + +TEST(JniSafeRegisterNativeMethods, FunctionTraits) { + using namespace nativehelper::detail; // NOLINT + using traits_for_int_ret = + FunctionTypeMetafunction; + int applied = traits_for_int_ret::map_return(); + EXPECT_EQ(1, applied); + + auto arr = traits_for_int_ret::map_args(); + EXPECT_EQ(0u, arr.size()); +} + +struct IntHolder { + int value; +}; + +constexpr int GetTestValue(const IntHolder& i) { + return i.value; +} + +constexpr int GetTestValue(int i) { + return i; +} + +template +constexpr size_t SumUpVector(const nativehelper::detail::ConstexprVector& vec) { + size_t s = 0; + for (const T& elem : vec) { + s += static_cast(GetTestValue(elem)); + } + return s; +} + +template +constexpr auto make_test_int_vector() { + using namespace nativehelper::detail; // NOLINT + ConstexprVector vec_int; + vec_int.push_back(T{1}); + vec_int.push_back(T{2}); + vec_int.push_back(T{3}); + vec_int.push_back(T{4}); + vec_int.push_back(T{5}); + return vec_int; +} + +TEST(JniSafeRegisterNativeMethods, ConstexprOptional) { + using namespace nativehelper::detail; // NOLINT + + ConstexprOptional int_opt; + EXPECT_FALSE(int_opt.has_value()); + + int_opt = ConstexprOptional(12345); + EXPECT_EQ(12345, int_opt.value()); + EXPECT_EQ(12345, *int_opt); +} + +TEST(JniSafeRegisterNativeMethods, ConstexprVector) { + using namespace nativehelper::detail; // NOLINT + { + constexpr ConstexprVector vec_int = make_test_int_vector(); + constexpr size_t the_sum = SumUpVector(vec_int); + EXPECT_EQ(15u, the_sum); + } + + { + constexpr ConstexprVector vec_int = make_test_int_vector(); + constexpr size_t the_sum = SumUpVector(vec_int); + EXPECT_EQ(15u, the_sum); + } +} + +// Need this intermediate function to make a JniDescriptorNode from a string literal. +// C++ doesn't do implicit conversion through two+ type constructors. 
+constexpr nativehelper::detail::JniDescriptorNode MakeNode( + nativehelper::detail::ConstexprStringView str) { + return nativehelper::detail::JniDescriptorNode{str}; +} + +#define EXPECT_EQUALISH_JNI_DESCRIPTORS_IMPL(user_desc, derived, cond) \ + do { \ + constexpr bool res = \ + CompareJniDescriptorNodeErased(MakeNode(user_desc), \ + ReifiedJniTypeTrait::Reify()); \ + (void)res; \ + EXPECT_ ## cond(CompareJniDescriptorNodeErased(MakeNode(user_desc), \ + ReifiedJniTypeTrait::Reify())); \ + } while (0); + +#define EXPECT_EQUALISH_JNI_DESCRIPTORS(user_desc, derived_desc) \ + EXPECT_EQUALISH_JNI_DESCRIPTORS_IMPL(user_desc, derived_desc, TRUE) + +#define EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS(user_desc, derived_desc) \ + EXPECT_EQUALISH_JNI_DESCRIPTORS_IMPL(user_desc, derived_desc, FALSE) + +TEST(JniSafeRegisterNativeMethods, CompareJniDescriptorNodeErased) { + using namespace nativehelper::detail; // NOLINT + EXPECT_EQUALISH_JNI_DESCRIPTORS("V", void); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("V", jint); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Z", jboolean); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Z", void); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Z", jobject); + EXPECT_EQUALISH_JNI_DESCRIPTORS("J", jlong); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("J", jobject); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("J", jthrowable); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("J", jint); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/String;", jstring); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Class;", jclass); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Object;", jobject); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Integer;", jobject); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("[Z", jthrowable); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("[Z", jobjectArray); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Integer;", jintArray); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Integer;", jarray); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Integer;", jarray); + + // Stricter checks. + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Object;", jobjectArray); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/String;", jobject); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Class;", jobject); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("[Z", jobject); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("[Ljava/lang/Object;", jobject); + EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Object;", jarray); + + // Permissive checks that are weaker than normal. + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Exception;", jobject); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Error;", jobject); + EXPECT_EQUALISH_JNI_DESCRIPTORS("[Z", jarray); + EXPECT_EQUALISH_JNI_DESCRIPTORS("[I", jarray); + EXPECT_EQUALISH_JNI_DESCRIPTORS("[[Z", jarray); + EXPECT_EQUALISH_JNI_DESCRIPTORS("[[Ljava/lang/Object;", jarray); + + // jthrowable-related checks. 
+ EXPECT_NOT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Throwable;", jobject); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Throwable;", jthrowable); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Exception;", jthrowable); + EXPECT_EQUALISH_JNI_DESCRIPTORS("Ljava/lang/Error;", jthrowable); +} + +#define EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH(type_desc, type) \ + do { \ + constexpr auto res = ReifiedJniTypeTrait::MostSimilarTypeDescriptor(type_desc); \ + EXPECT_TRUE((ReifiedJniTypeTrait::MostSimilarTypeDescriptor(type_desc)).has_value()); \ + if (res.has_value()) EXPECT_EQ(ReifiedJniTypeTrait::Reify(), res.value()); \ + } while (false) + +#define EXPECT_SIMILAR_TYPE_DESCRIPTOR_NO_MATCH(type_desc) \ + do { \ + auto res = ReifiedJniTypeTrait::MostSimilarTypeDescriptor(type_desc); \ + EXPECT_FALSE(res.has_value()); \ + } while (false) + +#define JNI_TYPE_TRAIT_MUST_BE_SAME_FN(type_name, type_desc, ...) \ + /* skip jarray because it aliases Ljava/lang/Object; */ \ + do { \ + constexpr auto str_type_name = ConstexprStringView(#type_name); \ + if (str_type_name != "jarray" && str_type_name != "JNIEnv*") { \ + EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH(type_desc, type_name); \ + } \ + } while(false); + +TEST(JniSafeRegisterNativeMethods, MostSimilarTypeDescriptor) { + using namespace nativehelper::detail; // NOLINT + EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH("Z", jboolean); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH("[[I", jobjectArray); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH("[[Z", jobjectArray); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH("[Ljava/lang/String;", jobjectArray); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_MATCH("[Ljava/lang/Integer;", jobjectArray); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_NO_MATCH("illegal"); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_NO_MATCH("?"); + EXPECT_SIMILAR_TYPE_DESCRIPTOR_NO_MATCH(""); + + DEFINE_JNI_TYPE_TRAIT(JNI_TYPE_TRAIT_MUST_BE_SAME_FN); +} + +#define ENFORCE_CONSTEXPR(expr) \ + static_assert(__builtin_constant_p(expr), "Expression must be constexpr") + +#define EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION_IMPL(cond, native_kind, func, desc) \ + do { \ + ENFORCE_CONSTEXPR((MatchJniDescriptorWithFunctionType< \ + native_kind, \ + decltype(func), \ + func, \ + sizeof(desc)>(desc))); \ + EXPECT_ ## cond((MatchJniDescriptorWithFunctionType< \ + native_kind, \ + decltype(func), \ + func, \ + sizeof(desc)>(desc))); \ + } while(0) + +#define EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(native_kind, func, desc) \ + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION_IMPL(TRUE, native_kind, func, desc) + +#define EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(native_kind, func, desc) \ + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION_IMPL(FALSE, native_kind, func, desc) + +TEST(JniSafeRegisterNativeMethods, MatchJniDescriptorWithFunctionType) { + using namespace nativehelper::detail; // NOLINT + // Bad C++ signature. + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::bad_cptr, "()V"); + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, TestJni::bad_cptr, "()V"); + + // JNI type descriptor is not legal (by itself). + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::v_, "BAD"); + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, TestJni::v_eo, "BAD"); + + // Number of parameters in signature vs C++ function does not match. + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::v_i, "()V"); + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, TestJni::v_eoo, "()V"); + + // Return types don't match. 
+ EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::v_, "()Z"); + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kFastNative, TestJni::v_eo, "()Z"); + + // Argument types don't match. + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::v_i, "(Z)V"); + EXPECT_NO_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, + TestJni::v_eoo, + "(Ljava/lang/Class;)V"); + + // OK. + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::v_i, "(I)V"); + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, + TestJni::v_eoo, + "(Ljava/lang/Object;)V"); + + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::v_lib, "(JIZ)V"); + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, TestJni::v_eolib, "(JIZ)V"); + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kCriticalNative, TestJni::s_lib, "(JIZ)S"); + EXPECT_MATCH_JNI_DESCRIPTOR_AGAINST_FUNCTION(kNormalNative, TestJni::s_eolAibA, "([JI[Z)S"); +} + +TEST(JniSafeRegisterNativeMethods, Infer) { + using namespace nativehelper::detail; // NOLINT + { + using Infer_v_eolib_t = InferJniDescriptor; + EXPECT_CONSTEXPR_EQ(6u, Infer_v_eolib_t::kMaxStringSize); + std::string x = Infer_v_eolib_t::GetStringAtRuntime(); + EXPECT_STRINGIFY_EQ("(JIZ)V", x.c_str()); + } + + { + using Infer_v_eolib_t = InferJniDescriptor; + EXPECT_STRINGIFY_EQ("args={[J,I,[Z}, ret=S", Infer_v_eolib_t::FromFunctionType().value()); + EXPECT_CONSTEXPR_EQ(8u, Infer_v_eolib_t::kMaxStringSize); + std::string x = Infer_v_eolib_t::GetStringAtRuntime(); + EXPECT_STRINGIFY_EQ("([JI[Z)S", x.c_str()); + } +} + +// Test the macro definition only. See other tests above for signature-match testing. +TEST(JniSafeRegisterNativeMethods, MakeCheckedJniNativeMethod) { + // Ensure the temporary variables don't conflict with other local vars of same name. + JNINativeMethod tmp_native_method; // shadow test. + (void) tmp_native_method; + bool is_signature_valid = true; // shadow test. + (void) is_signature_valid; + + // Ensure it works with critical. + { + JNINativeMethod m = + MAKE_CHECKED_JNI_NATIVE_METHOD(kCriticalNative, + "v_lib", + "(JIZ)V", + TestJni::v_lib); + (void)m; + } + + // Ensure it works with normal. + { + JNINativeMethod m = + MAKE_CHECKED_JNI_NATIVE_METHOD(kNormalNative, + "v_eolib", + "(JIZ)V", + TestJni::v_eolib); + (void)m; + } + + // Make sure macros properly expand inside of an array. + { + JNINativeMethod m_array[] = { + MAKE_CHECKED_JNI_NATIVE_METHOD(kCriticalNative, + "v_lib", + "(JIZ)V", + TestJni::v_lib), + MAKE_CHECKED_JNI_NATIVE_METHOD(kNormalNative, + "v_eolib", + "(JIZ)V", + TestJni::v_eolib), + }; + (void)m_array; + } + { + JNINativeMethod m_array_direct[] { + MAKE_CHECKED_JNI_NATIVE_METHOD(kCriticalNative, + "v_lib", + "(JIZ)V", + TestJni::v_lib), + MAKE_CHECKED_JNI_NATIVE_METHOD(kNormalNative, + "v_eolib", + "(JIZ)V", + TestJni::v_eolib), + }; + (void)m_array_direct; + } + +} + +static auto sTestCheckedAtFileScope = + MAKE_CHECKED_JNI_NATIVE_METHOD(kCriticalNative, + "v_lib", + "(JIZ)V", + TestJni::v_lib); + +static auto sTestInferredAtFileScope = + MAKE_INFERRED_JNI_NATIVE_METHOD(kCriticalNative, + "v_lib", + TestJni::v_lib); + +TEST(JniSafeRegisterNativeMethods, TestInferredJniNativeMethod) { + (void) sTestCheckedAtFileScope; + (void) sTestInferredAtFileScope; + + // Ensure it works with critical. + { + JNINativeMethod m = + MAKE_INFERRED_JNI_NATIVE_METHOD(kCriticalNative, + "v_lib", + TestJni::v_lib); + (void)m; + } + + // Ensure it works with normal. 
+ { + JNINativeMethod m = + MAKE_INFERRED_JNI_NATIVE_METHOD(kNormalNative, + "v_eolib", + TestJni::v_eolib); + (void)m; + } +} + +static void TestJniMacros_v_lib(jlong, jint, jboolean) {} +static void TestJniMacros_v_lib_od(jlong, jint, jboolean) {} +static void TestJniMacros_v_eolib(JNIEnv*, jobject, jlong, jint, jboolean) {} +static void TestJniMacros_v_eolib_od(JNIEnv*, jobject, jlong, jint, jboolean) {} + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-parameter" + +static jint android_os_Parcel_dataSize(jlong) { return 0; } +static jint android_os_Parcel_dataAvail(jlong) { return 0; } +static jint android_os_Parcel_dataPosition(jlong) { return 0; } +static jint android_os_Parcel_dataCapacity(jlong) { return 0; } +static jlong android_os_Parcel_setDataSize(JNIEnv*, jclass, jlong, jint) { return 0; } +static void android_os_Parcel_setDataPosition(jlong, jint) {} +static void android_os_Parcel_setDataCapacity(JNIEnv*, jclass, jlong, jint) {} +static jboolean android_os_Parcel_pushAllowFds(jlong, jboolean) { return true; } +static void android_os_Parcel_restoreAllowFds(jlong, jboolean) {} +static void android_os_Parcel_writeByteArray(JNIEnv*, jclass, jlong, jbyteArray, jint, jint) {} + +static void android_os_Parcel_writeBlob(JNIEnv*, jclass, jlong, jbyteArray, jint, jint) {} +static void android_os_Parcel_writeInt(JNIEnv*, jclass, jlong, jint) {} +static void android_os_Parcel_writeLong(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jlong val) {} +static void android_os_Parcel_writeFloat(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jfloat val) {} +static void android_os_Parcel_writeDouble(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jdouble val) {} +static void android_os_Parcel_writeString(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jstring val) {} +static void android_os_Parcel_writeStrongBinder(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jobject object) {} +static jlong android_os_Parcel_writeFileDescriptor(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jobject object) { return 0; } +static jbyteArray android_os_Parcel_createByteArray(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return nullptr; } + +static jboolean android_os_Parcel_readByteArray(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jbyteArray dest, + jint destLen) { return false; } +static jbyteArray android_os_Parcel_readBlob(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return nullptr; } + +static jint android_os_Parcel_readInt(jlong nativePtr) { return 0; } + +static jlong android_os_Parcel_readLong(jlong nativePtr) { return 0; } + +static jfloat android_os_Parcel_readFloat(jlong nativePtr) { return 0.0f; } +static jdouble android_os_Parcel_readDouble(jlong nativePtr) { return 0.0; } + +static jstring android_os_Parcel_readString(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return nullptr; } + +static jobject android_os_Parcel_readStrongBinder(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return nullptr; } + + +static jobject android_os_Parcel_readFileDescriptor(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return nullptr; } + +static jobject android_os_Parcel_openFileDescriptor(JNIEnv* env, + jclass clazz, + jstring name, + jint mode) { return 0; } + + +static jobject android_os_Parcel_dupFileDescriptor(JNIEnv* env, + jclass clazz, + jobject orig) { return 0; } + + +static void android_os_Parcel_closeFileDescriptor(JNIEnv* env, + jclass clazz, + jobject object) {} + + +static void android_os_Parcel_clearFileDescriptor(JNIEnv* env, + jclass 
clazz, + jobject object) {} + + +static jlong android_os_Parcel_create(JNIEnv* env, jclass clazz) { return 0; } + + +static jlong android_os_Parcel_freeBuffer(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return 0; } + + +static void android_os_Parcel_destroy(JNIEnv* env, jclass clazz, jlong nativePtr) {} + + +static jbyteArray android_os_Parcel_marshall(JNIEnv* env, + jclass clazz, + jlong nativePtr) { return 0; } + + +static jlong android_os_Parcel_unmarshall(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jbyteArray data, + jint offset, + jint length) { return 0; } + + +static jint android_os_Parcel_compareData(JNIEnv* env, + jclass clazz, + jlong thisNativePtr, + jlong otherNativePtr) { return 0; } + + +static jlong android_os_Parcel_appendFrom(JNIEnv* env, + jclass clazz, + jlong thisNativePtr, + jlong otherNativePtr, + jint offset, + jint length) { return 0; } + + +static jboolean android_os_Parcel_hasFileDescriptors(jlong nativePtr) { return 0; } + + +static void android_os_Parcel_writeInterfaceToken(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jstring name) {} + + +static void android_os_Parcel_enforceInterface(JNIEnv* env, + jclass clazz, + jlong nativePtr, + jstring name) {} + + +static jlong android_os_Parcel_getGlobalAllocSize(JNIEnv* env, jclass clazz) { return 0; } + + +static jlong android_os_Parcel_getGlobalAllocCount(JNIEnv* env, jclass clazz) { return 0; } + + +static jlong android_os_Parcel_getBlobAshmemSize(jlong nativePtr) { return 0; } + +#pragma clang diagnostic pop + +TEST(JniSafeRegisterNativeMethods, ParcelExample) { + // Test a wide range of automatic signature inferencing. + // This is taken from real code in android_os_Parcel.cpp. + + const JNINativeMethod gParcelMethods[] = { + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeDataSize", android_os_Parcel_dataSize), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeDataAvail", android_os_Parcel_dataAvail), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeDataPosition", android_os_Parcel_dataPosition), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeDataCapacity", android_os_Parcel_dataCapacity), + // @FastNative + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG( + "nativeSetDataSize", android_os_Parcel_setDataSize), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeSetDataPosition", android_os_Parcel_setDataPosition), + // @FastNative + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG( + "nativeSetDataCapacity", android_os_Parcel_setDataCapacity), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativePushAllowFds", android_os_Parcel_pushAllowFds), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeRestoreAllowFds", android_os_Parcel_restoreAllowFds), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeWriteByteArray", android_os_Parcel_writeByteArray), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeWriteBlob", android_os_Parcel_writeBlob), + // @FastNative + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG( + "nativeWriteInt", android_os_Parcel_writeInt), + // @FastNative + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG( + "nativeWriteLong", android_os_Parcel_writeLong), + // @FastNative + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG( + "nativeWriteFloat", android_os_Parcel_writeFloat), + // @FastNative + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG( + "nativeWriteDouble", android_os_Parcel_writeDouble), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeWriteString", android_os_Parcel_writeString), + MAKE_JNI_NATIVE_METHOD( + 
"nativeWriteStrongBinder", "(JLandroid/os/IBinder;)V", android_os_Parcel_writeStrongBinder), + MAKE_JNI_NATIVE_METHOD( + "nativeWriteFileDescriptor", "(JLjava/io/FileDescriptor;)J", android_os_Parcel_writeFileDescriptor), + + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeCreateByteArray", android_os_Parcel_createByteArray), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeReadByteArray", android_os_Parcel_readByteArray), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeReadBlob", android_os_Parcel_readBlob), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeReadInt", android_os_Parcel_readInt), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeReadLong", android_os_Parcel_readLong), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeReadFloat", android_os_Parcel_readFloat), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeReadDouble", android_os_Parcel_readDouble), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeReadString", android_os_Parcel_readString), + MAKE_JNI_NATIVE_METHOD( + "nativeReadStrongBinder", "(J)Landroid/os/IBinder;", android_os_Parcel_readStrongBinder), + MAKE_JNI_NATIVE_METHOD( + "nativeReadFileDescriptor", "(J)Ljava/io/FileDescriptor;", android_os_Parcel_readFileDescriptor), + MAKE_JNI_NATIVE_METHOD( + "openFileDescriptor", "(Ljava/lang/String;I)Ljava/io/FileDescriptor;", android_os_Parcel_openFileDescriptor), + MAKE_JNI_NATIVE_METHOD( + "dupFileDescriptor", "(Ljava/io/FileDescriptor;)Ljava/io/FileDescriptor;", android_os_Parcel_dupFileDescriptor), + MAKE_JNI_NATIVE_METHOD( + "closeFileDescriptor", "(Ljava/io/FileDescriptor;)V", android_os_Parcel_closeFileDescriptor), + MAKE_JNI_NATIVE_METHOD( + "clearFileDescriptor", "(Ljava/io/FileDescriptor;)V", android_os_Parcel_clearFileDescriptor), + + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeCreate", android_os_Parcel_create), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeFreeBuffer", android_os_Parcel_freeBuffer), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeDestroy", android_os_Parcel_destroy), + + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeMarshall", android_os_Parcel_marshall), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeUnmarshall", android_os_Parcel_unmarshall), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeCompareData", android_os_Parcel_compareData), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeAppendFrom", android_os_Parcel_appendFrom), + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeHasFileDescriptors", android_os_Parcel_hasFileDescriptors), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeWriteInterfaceToken", android_os_Parcel_writeInterfaceToken), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "nativeEnforceInterface", android_os_Parcel_enforceInterface), + + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "getGlobalAllocSize", android_os_Parcel_getGlobalAllocSize), + MAKE_JNI_NATIVE_METHOD_AUTOSIG( + "getGlobalAllocCount", android_os_Parcel_getGlobalAllocCount), + + // @CriticalNative + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG( + "nativeGetBlobAshmemSize", android_os_Parcel_getBlobAshmemSize), + }; + + const JNINativeMethod gParcelMethodsExpected[] = { + // @CriticalNative + {"nativeDataSize", "(J)I", (void*)android_os_Parcel_dataSize}, + // @CriticalNative + {"nativeDataAvail", "(J)I", (void*)android_os_Parcel_dataAvail}, + // @CriticalNative + {"nativeDataPosition", "(J)I", (void*)android_os_Parcel_dataPosition}, + // @CriticalNative + {"nativeDataCapacity", "(J)I", (void*)android_os_Parcel_dataCapacity}, + // @FastNative + {"nativeSetDataSize", "(JI)J", 
(void*)android_os_Parcel_setDataSize}, + // @CriticalNative + {"nativeSetDataPosition", "(JI)V", (void*)android_os_Parcel_setDataPosition}, + // @FastNative + {"nativeSetDataCapacity", "(JI)V", (void*)android_os_Parcel_setDataCapacity}, + + // @CriticalNative + {"nativePushAllowFds", "(JZ)Z", (void*)android_os_Parcel_pushAllowFds}, + // @CriticalNative + {"nativeRestoreAllowFds", "(JZ)V", (void*)android_os_Parcel_restoreAllowFds}, + + {"nativeWriteByteArray", "(J[BII)V", (void*)android_os_Parcel_writeByteArray}, + {"nativeWriteBlob", "(J[BII)V", (void*)android_os_Parcel_writeBlob}, + // @FastNative + {"nativeWriteInt", "(JI)V", (void*)android_os_Parcel_writeInt}, + // @FastNative + {"nativeWriteLong", "(JJ)V", (void*)android_os_Parcel_writeLong}, + // @FastNative + {"nativeWriteFloat", "(JF)V", (void*)android_os_Parcel_writeFloat}, + // @FastNative + {"nativeWriteDouble", "(JD)V", (void*)android_os_Parcel_writeDouble}, + {"nativeWriteString", "(JLjava/lang/String;)V", (void*)android_os_Parcel_writeString}, + {"nativeWriteStrongBinder", "(JLandroid/os/IBinder;)V", (void*)android_os_Parcel_writeStrongBinder}, + {"nativeWriteFileDescriptor", "(JLjava/io/FileDescriptor;)J", (void*)android_os_Parcel_writeFileDescriptor}, + + {"nativeCreateByteArray", "(J)[B", (void*)android_os_Parcel_createByteArray}, + {"nativeReadByteArray", "(J[BI)Z", (void*)android_os_Parcel_readByteArray}, + {"nativeReadBlob", "(J)[B", (void*)android_os_Parcel_readBlob}, + // @CriticalNative + {"nativeReadInt", "(J)I", (void*)android_os_Parcel_readInt}, + // @CriticalNative + {"nativeReadLong", "(J)J", (void*)android_os_Parcel_readLong}, + // @CriticalNative + {"nativeReadFloat", "(J)F", (void*)android_os_Parcel_readFloat}, + // @CriticalNative + {"nativeReadDouble", "(J)D", (void*)android_os_Parcel_readDouble}, + {"nativeReadString", "(J)Ljava/lang/String;", (void*)android_os_Parcel_readString}, + {"nativeReadStrongBinder", "(J)Landroid/os/IBinder;", (void*)android_os_Parcel_readStrongBinder}, + {"nativeReadFileDescriptor", "(J)Ljava/io/FileDescriptor;", (void*)android_os_Parcel_readFileDescriptor}, + + {"openFileDescriptor", "(Ljava/lang/String;I)Ljava/io/FileDescriptor;", (void*)android_os_Parcel_openFileDescriptor}, + {"dupFileDescriptor", "(Ljava/io/FileDescriptor;)Ljava/io/FileDescriptor;", (void*)android_os_Parcel_dupFileDescriptor}, + {"closeFileDescriptor", "(Ljava/io/FileDescriptor;)V", (void*)android_os_Parcel_closeFileDescriptor}, + {"clearFileDescriptor", "(Ljava/io/FileDescriptor;)V", (void*)android_os_Parcel_clearFileDescriptor}, + + {"nativeCreate", "()J", (void*)android_os_Parcel_create}, + {"nativeFreeBuffer", "(J)J", (void*)android_os_Parcel_freeBuffer}, + {"nativeDestroy", "(J)V", (void*)android_os_Parcel_destroy}, + + {"nativeMarshall", "(J)[B", (void*)android_os_Parcel_marshall}, + {"nativeUnmarshall", "(J[BII)J", (void*)android_os_Parcel_unmarshall}, + {"nativeCompareData", "(JJ)I", (void*)android_os_Parcel_compareData}, + {"nativeAppendFrom", "(JJII)J", (void*)android_os_Parcel_appendFrom}, + // @CriticalNative + {"nativeHasFileDescriptors", "(J)Z", (void*)android_os_Parcel_hasFileDescriptors}, + {"nativeWriteInterfaceToken", "(JLjava/lang/String;)V", (void*)android_os_Parcel_writeInterfaceToken}, + {"nativeEnforceInterface", "(JLjava/lang/String;)V", (void*)android_os_Parcel_enforceInterface}, + + {"getGlobalAllocSize", "()J", (void*)android_os_Parcel_getGlobalAllocSize}, + {"getGlobalAllocCount", "()J", (void*)android_os_Parcel_getGlobalAllocCount}, + + // @CriticalNative + 
{"nativeGetBlobAshmemSize", "(J)J", (void*)android_os_Parcel_getBlobAshmemSize}, + }; + + ASSERT_EQ(sizeof(gParcelMethodsExpected)/sizeof(JNINativeMethod), + sizeof(gParcelMethods)/sizeof(JNINativeMethod)); + + + for (size_t i = 0; i < sizeof(gParcelMethods) / sizeof(JNINativeMethod); ++i) { + const JNINativeMethod& actual = gParcelMethods[i]; + const JNINativeMethod& expected = gParcelMethodsExpected[i]; + + EXPECT_STREQ(expected.name, actual.name); + EXPECT_STREQ(expected.signature, actual.signature) << expected.name; + EXPECT_EQ(expected.fnPtr, actual.fnPtr) << expected.name; + } +} + +TEST(JniSafeRegisterNativeMethods, JniMacros) { + JNINativeMethod tmp_native_method; // shadow variable check. + (void)tmp_native_method; + using Infer_t = int; // shadow using check. + Infer_t unused; + (void)unused; + + MAKE_JNI_CRITICAL_NATIVE_METHOD("v_lib", "(JIZ)V", TestJniMacros_v_lib); + MAKE_JNI_CRITICAL_NATIVE_METHOD_AUTOSIG("v_lib", TestJniMacros_v_lib); + CRITICAL_NATIVE_METHOD(TestJniMacros, v_lib, "(JIZ)V"); + OVERLOADED_CRITICAL_NATIVE_METHOD(TestJniMacros, v_lib, "(JIZ)V", v_lib_od); + CRITICAL_NATIVE_METHOD_AUTOSIG(TestJniMacros, v_lib); + + MAKE_JNI_FAST_NATIVE_METHOD("v_eolib", "(JIZ)V", TestJniMacros_v_eolib); + MAKE_JNI_FAST_NATIVE_METHOD_AUTOSIG("v_eolib", TestJniMacros_v_eolib); + FAST_NATIVE_METHOD(TestJniMacros, v_eolib, "(JIZ)V"); + OVERLOADED_FAST_NATIVE_METHOD(TestJniMacros, v_eolib, "(JIZ)V", v_eolib_od); + FAST_NATIVE_METHOD_AUTOSIG(TestJniMacros, v_eolib); + + MAKE_JNI_NATIVE_METHOD("v_eolib", "(JIZ)V", TestJniMacros_v_eolib); + MAKE_JNI_NATIVE_METHOD_AUTOSIG("v_eolib", TestJniMacros_v_eolib); + NATIVE_METHOD(TestJniMacros, v_eolib, "(JIZ)V"); + OVERLOADED_NATIVE_METHOD(TestJniMacros, v_eolib, "(JIZ)V", v_eolib_od); + NATIVE_METHOD_AUTOSIG(TestJniMacros, v_eolib); + + _NATIVEHELPER_JNI_MAKE_METHOD_OLD(kNormalNative, "v_eolib", "(JIZ)V", TestJniMacros_v_eolib); + tmp_native_method = + _NATIVEHELPER_JNI_MAKE_METHOD_OLD(kNormalNative, "v_eolib", "(JIZ)V", TestJniMacros_v_eolib); +} diff --git a/third_party/libnativehelper/tests/jni_gtest/Android.bp b/third_party/libnativehelper/tests/jni_gtest/Android.bp new file mode 100644 index 0000000000..d6a0a6fe77 --- /dev/null +++ b/third_party/libnativehelper/tests/jni_gtest/Android.bp @@ -0,0 +1,12 @@ +// Do not use directly. Use the defaults below. +cc_library_headers { + name: "jni_gtest_headers", + host_supported: true, + export_include_dirs: ["base"], +} + +cc_defaults { + name: "jni_gtest_defaults", + header_libs: ["jni_gtest_headers"], + shared_libs: ["libnativehelper"], +} diff --git a/third_party/libnativehelper/tests/jni_gtest/base/nativehelper/jni_gtest.h b/third_party/libnativehelper/tests/jni_gtest/base/nativehelper/jni_gtest.h new file mode 100644 index 0000000000..975d56caf3 --- /dev/null +++ b/third_party/libnativehelper/tests/jni_gtest/base/nativehelper/jni_gtest.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef LIBNATIVEHELPER_TESTS_JNI_GTEST_H
+#define LIBNATIVEHELPER_TESTS_JNI_GTEST_H
+
+#include <string.h>
+
+#include <memory>
+
+#include <gtest/gtest.h>
+#include <jni.h>
+
+namespace android {
+
+// Example test setup following googletest docs:
+//
+//   template <typename Provider>
+//   class TemplatedTest : public JNITestBase<Provider> {
+//     ...
+//   }
+//
+//   typedef ::testing::Types<MockJNIProvider> Providers;
+//   TYPED_TEST_CASE(TemplatedTest, Providers);
+//
+//   TYPED_TEST() {
+//     // Test code. Use "this->" to access TemplatedTest members.
+//   }
+
+
+// Provider is a concept that must follow this structure:
+//
+// class JNIProvider {
+//  public:
+//   JNIProvider();
+//
+//   void SetUp();
+//   JNIEnv* CreateJNIEnv();
+//
+//   void DestroyJNIEnv(JNIEnv* env);
+//   void TearDown();
+// }
+
+template <typename Provider, typename Test = ::testing::Test>
+class JNITestBase : public Test {
+protected:
+    JNITestBase() : provider_(), env_(nullptr), java_vm_(nullptr) {
+    }
+
+    void SetUp() override {
+        Test::SetUp();
+        provider_.SetUp();
+        env_ = provider_.CreateJNIEnv();
+        ASSERT_TRUE(env_ != nullptr);
+    }
+
+    void TearDown() override {
+        provider_.DestroyJNIEnv(env_);
+        provider_.TearDown();
+        Test::TearDown();
+    }
+
+protected:
+    Provider provider_;
+
+    JNIEnv* env_;
+    JavaVM* java_vm_;
+};
+
+// A mockable implementation of the Provider concept. It is the responsibility
+// of the test to stub out any needed functions (all function pointers will be
+// null initially).
+//
+// TODO: Consider googlemock.
+class MockJNIProvider {
+public:
+    MockJNIProvider() {
+    }
+
+    void SetUp() {
+        // Nothing to do here.
+    }
+
+    // TODO: Spawn threads to allow more envs?
+    JNIEnv* CreateJNIEnv() {
+        return CreateMockedJNIEnv().release();
+    }
+
+    void DestroyJNIEnv(JNIEnv* env) {
+        delete env->functions;
+        delete env;
+    }
+
+    void TearDown() {
+        // Nothing to do here.
+    }
+
+protected:
+    std::unique_ptr<JNIEnv> CreateMockedJNIEnv() {
+        JNINativeInterface* inf = new JNINativeInterface();
+        memset(inf, 0, sizeof(JNINativeInterface));
+
+        std::unique_ptr<JNIEnv> ret(new JNIEnv{0});
+        ret->functions = inf;
+
+        return ret;
+    }
+};
+
+}  // namespace android
+
+#endif  // LIBNATIVEHELPER_TESTS_JNI_GTEST_H
diff --git a/third_party/libnativehelper/tests/libnativehelper_api_test.c b/third_party/libnativehelper/tests/libnativehelper_api_test.c
new file mode 100644
index 0000000000..fde635606b
--- /dev/null
+++ b/third_party/libnativehelper/tests/libnativehelper_api_test.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// All header files with MODULE_API decorated function declarations.
+#include "nativehelper/JNIHelp.h"
+#include "nativehelper/JniInvocation.h"
+#include "nativehelper/toStringArray.h"
+
+int main() {
+  // The test here is that the headers are properly guarded to support
+  // compilation with a C compiler.
+ return 0; +} diff --git a/third_party/libnativehelper/toStringArray.cpp b/third_party/libnativehelper/toStringArray.cpp new file mode 100644 index 0000000000..b1f0f42868 --- /dev/null +++ b/third_party/libnativehelper/toStringArray.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "JniConstants.h" + +namespace { + +struct ArrayCounter { + const char* const* strings; + explicit ArrayCounter(const char* const* strings) : strings(strings) {} + size_t operator()() { + size_t count = 0; + while (strings[count] != nullptr) { + ++count; + } + return count; + } +}; + +struct ArrayGetter { + const char* const* strings; + explicit ArrayGetter(const char* const* strings) : strings(strings) {} + const char* operator()(size_t i) { + return strings[i]; + } +}; + +} // namespace + +MODULE_API jobjectArray newStringArray(JNIEnv* env, size_t count) { + return env->NewObjectArray(count, JniConstants::GetStringClass(env), nullptr); +} + +MODULE_API jobjectArray toStringArray(JNIEnv* env, const char* const* strings) { + ArrayCounter counter(strings); + ArrayGetter getter(strings); + return toStringArray(env, &counter, &getter); +} -- Gitee From 8645b9e62895bb5f9b08d5f4519cc361605516e3 Mon Sep 17 00:00:00 2001 From: binaryfz Date: Mon, 30 Nov 2020 10:59:39 +0800 Subject: [PATCH 2/9] update mulanPSL V2 --- license/LICENSE | 248 +++++++++--------- ...Third_Party_Open_Source_Software_Notice.md | 92 +++++++ 2 files changed, 219 insertions(+), 121 deletions(-) create mode 100644 license/Third_Party_Open_Source_Software_Notice.md diff --git a/license/LICENSE b/license/LICENSE index 66d2cd5053..9e32cdef16 100644 --- a/license/LICENSE +++ b/license/LICENSE @@ -1,121 +1,127 @@ -ľÀ¼¿íËÉÐí¿ÉÖ¤£¬ µÚ1°æ - -2019Äê8Ô http://license.coscl.org.cn/MulanPSL - -Äú¶Ô¡°Èí¼þ¡±µÄ¸´ÖÆ¡¢Ê¹Óá¢Ð޸ļ°·Ö·¢ÊÜľÀ¼¿íËÉÐí¿ÉÖ¤£¬µÚ1°æ£¨¡°±¾Ðí¿ÉÖ¤¡±£©µÄÈçÏÂÌõ¿îµÄÔ¼Êø£º - -0. ¶¨Òå - -¡°Èí¼þ¡±ÊÇÖ¸ÓÉ¡°¹±Ïס±¹¹³ÉµÄÐí¿ÉÔÚ¡°±¾Ðí¿ÉÖ¤¡±ÏµijÌÐòºÍÏà¹ØÎĵµµÄ¼¯ºÏ¡£ - -¡°¹±Ï×Õß¡±ÊÇÖ¸½«ÊܰæÈ¨·¨±£»¤µÄ×÷Æ·Ðí¿ÉÔÚ¡°±¾Ðí¿ÉÖ¤¡±ÏµÄ×ÔÈ»ÈË»ò¡°·¨ÈËʵÌ塱¡£ - -¡°·¨ÈËʵÌ塱ÊÇÖ¸Ìá½»¹±Ï׵Ļú¹¹¼°Æä¡°¹ØÁªÊµÌ塱¡£ - -¡°¹ØÁªÊµÌ塱ÊÇÖ¸£¬¶Ô¡°±¾Ðí¿ÉÖ¤¡±ÏµÄÒ»·½¶øÑÔ£¬¿ØÖÆ¡¢ÊÜ¿ØÖÆ»òÓëÆä¹²Í¬ÊÜ¿ØÖƵĻú¹¹£¬´Ë´¦µÄ¿ØÖÆÊÇÖ¸ÓÐÊܿط½»ò¹²Í¬Êܿط½ÖÁÉÙ50%Ö±½Ó»ò¼ä½ÓµÄͶƱȨ¡¢×ʽð»òÆäËûÓмÛ֤ȯ¡£ - -¡°¹±Ïס±ÊÇÖ¸ÓÉÈÎÒ»¡°¹±Ï×Õß¡±Ðí¿ÉÔÚ¡°±¾Ðí¿ÉÖ¤¡±ÏµÄÊܰæÈ¨·¨±£»¤µÄ×÷Æ·¡£ - -1. ÊÚÓè°æÈ¨Ðí¿É - -ÿ¸ö¡°¹±Ï×Õß¡±¸ù¾Ý¡°±¾Ðí¿ÉÖ¤¡±ÊÚÓèÄúÓÀ¾ÃÐԵġ¢È«ÇòÐԵġ¢Ãâ·ÑµÄ¡¢·Ç¶ÀÕ¼µÄ¡¢²»¿É³·ÏúµÄ°æÈ¨Ðí¿É£¬Äú¿ÉÒÔ¸´ÖÆ¡¢Ê¹Óá¢Ð޸ġ¢·Ö·¢Æä¡°¹±Ïס±£¬²»ÂÛÐÞ¸ÄÓë·ñ¡£ - -2. 
ÊÚÓèרÀûÐí¿É - -ÿ¸ö¡°¹±Ï×Õß¡±¸ù¾Ý¡°±¾Ðí¿ÉÖ¤¡±ÊÚÓèÄúÓÀ¾ÃÐԵġ¢È«ÇòÐԵġ¢Ãâ·ÑµÄ¡¢·Ç¶ÀÕ¼µÄ¡¢²»¿É³·ÏúµÄ£¨¸ù¾Ý±¾Ìõ¹æ¶¨³·Ïú³ýÍ⣩רÀûÐí¿É£¬¹©ÄúÖÆÔ졢ίÍÐÖÆÔ졢ʹÓá¢ÐíŵÏúÊÛ¡¢ÏúÊÛ¡¢½ø¿ÚÆä¡°¹±Ïס±»òÒÔÆäËû·½Ê½×ªÒÆÆä¡°¹±Ïס±¡£Ç°ÊöרÀûÐí¿É½öÏÞÓÚ¡°¹±Ï×Õß¡±ÏÖÔÚ»ò½«À´ÓµÓлò¿ØÖÆµÄÆä¡°¹±Ïס±±¾Éí»òÆä¡°¹±Ïס±ÓëÐí¿É¡°¹±Ïס±Ê±µÄ¡°Èí¼þ¡±½áºÏ¶ø½«±ØÈ»»áÇÖ·¸µÄרÀûȨÀûÒªÇ󣬲»°üÀ¨½öÒòÄú»òËûÈËÐ޸ġ°¹±Ïס±»òÆäËû½áºÏ¶ø½«±ØÈ»»áÇÖ·¸µ½µÄרÀûȨÀûÒªÇó¡£ÈçÄú»òÄúµÄ¡°¹ØÁªÊµÌ塱ֱ½Ó»ò¼ä½ÓµØ£¨°üÀ¨Í¨¹ý´úÀí¡¢×¨Àû±»Ðí¿ÉÈË»òÊÜÈÃÈË£©£¬¾Í¡°Èí¼þ¡±»òÆäÖеġ°¹±Ïס±¶ÔÈκÎÈË·¢ÆðרÀûÇÖȨËßËÏ£¨°üÀ¨·´Ëß»ò½»²æËßËÏ£©»òÆäËûרÀûάȨÐж¯£¬Ö¸¿ØÆäÇÖ·¸×¨ÀûȨ£¬Ôò¡°±¾Ðí¿ÉÖ¤¡±ÊÚÓèÄú¶Ô¡°Èí¼þ¡±µÄרÀûÐí¿É×ÔÄúÌáÆðËßËÏ»ò·¢ÆðάȨÐж¯Ö®ÈÕÖÕÖ¹¡£ - -3. ÎÞÉ̱êÐí¿É - -¡°±¾Ðí¿ÉÖ¤¡±²»Ìṩ¶Ô¡°¹±Ï×Õß¡±µÄÉÌÆ·Ãû³Æ¡¢É̱ꡢ·þÎñ±êÖ¾»ò²úÆ·Ãû³ÆµÄÉ̱êÐí¿É£¬µ«ÄúΪÂú×ãµÚ4Ìõ¹æ¶¨µÄÉùÃ÷ÒåÎñ¶ø±ØÐëʹÓóýÍâ¡£ - -4. ·Ö·¢ÏÞÖÆ - -Äú¿ÉÒÔÔÚÈκÎý½éÖн«¡°Èí¼þ¡±ÒÔÔ´³ÌÐòÐÎʽ»ò¿ÉÖ´ÐÐÐÎÊ½ÖØÐ·ַ¢£¬²»ÂÛÐÞ¸ÄÓë·ñ£¬µ«Äú±ØÐëÏò½ÓÊÕÕßÌṩ¡°±¾Ðí¿ÉÖ¤¡±µÄ¸±±¾£¬²¢±£Áô¡°Èí¼þ¡±ÖеİæÈ¨¡¢É̱ꡢרÀû¼°ÃâÔðÉùÃ÷¡£ - -5. ÃâÔðÉùÃ÷ÓëÔðÈÎÏÞÖÆ - -¡°Èí¼þ¡±¼°ÆäÖеġ°¹±Ïס±ÔÚÌṩʱ²»´øÈκÎÃ÷ʾ»òĬʾµÄµ£±£¡£ÔÚÈκÎÇé¿öÏ£¬¡°¹±Ï×Õß¡±»ò°æÈ¨ËùÓÐÕß²»¶ÔÈκÎÈËÒòʹÓá°Èí¼þ¡±»òÆäÖеġ°¹±Ïס±¶øÒý·¢µÄÈκÎÖ±½Ó»ò¼ä½ÓËðʧ³Ðµ£ÔðÈΣ¬²»ÂÛÒòºÎÖÖÔ­Òòµ¼Ö»òÕß»ùÓÚºÎÖÖ·¨ÂÉÀíÂÛ,¼´Ê¹ÆäÔø±»½¨ÒéÓдËÖÖËðʧµÄ¿ÉÄÜÐÔ¡£ - -Ìõ¿î½áÊø - -ÈçºÎ½«Ä¾À¼¿íËÉÐí¿ÉÖ¤£¬µÚ1°æ£¬Ó¦Óõ½ÄúµÄÈí¼þ - -Èç¹ûÄúÏ£Íû½«Ä¾À¼¿íËÉÐí¿ÉÖ¤£¬µÚ1°æ£¬Ó¦Óõ½ÄúµÄÐÂÈí¼þ£¬ÎªÁË·½±ã½ÓÊÕÕß²éÔÄ£¬½¨ÒéÄúÍê³ÉÈçÏÂÈý²½£º - -1£¬ ÇëÄú²¹³äÈçÏÂÉùÃ÷ÖеĿհף¬°üÀ¨Èí¼þÃû¡¢Èí¼þµÄÊ״η¢±íÄê·ÝÒÔ¼°Äú×÷Ϊ°æÈ¨È˵ÄÃû×Ö£» - -2£¬ ÇëÄúÔÚÈí¼þ°üµÄÒ»¼¶Ä¿Â¼Ï´´½¨ÒÔ¡°LICENSE¡±ÎªÃûµÄÎļþ£¬½«Õû¸öÐí¿ÉÖ¤Îı¾·ÅÈë¸ÃÎļþÖУ» - -3£¬ Ç뽫ÈçÏÂÉùÃ÷Îı¾·ÅÈëÿ¸öÔ´ÎļþµÄÍ·²¿×¢ÊÍÖС£ - -Copyright (c) [2019] [name of copyright holder] -[Software Name] is licensed under the Mulan PSL v1. -You can use this software according to the terms and conditions of the Mulan PSL v1. -You may obtain a copy of Mulan PSL v1 at: - http://license.coscl.org.cn/MulanPSL -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR -PURPOSE. -See the Mulan PSL v1 for more details. - - - -Mulan Permissive Software License£¬Version 1 - -Mulan Permissive Software License£¬Version 1 (Mulan PSL v1) - -August 2019 http://license.coscl.org.cn/MulanPSL - -Your reproduction, use, modification and distribution of the Software shall be subject to Mulan PSL v1 (this License) with following terms and conditions: - -0. Definition - -Software means the program and related documents which are comprised of those Contribution and licensed under this License. - -Contributor means the Individual or Legal Entity who licenses its copyrightable work under this License. - -Legal Entity means the entity making a Contribution and all its Affiliates. - -Affiliates means entities that control, or are controlled by, or are under common control with a party to this License, ¡®control¡¯ means direct or indirect ownership of at least fifty percent (50%) of the voting power, capital or other securities of controlled or commonly controlled entity. - -Contribution means the copyrightable work licensed by a particular Contributor under this License. - -1. Grant of Copyright License - -Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable copyright license to reproduce, use, modify, or distribute its Contribution, with modification or not. - -2. 
Grant of Patent License
-
-Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable (except for revocation under this Section) patent license to make, have made, use, offer for sale, sell, import or otherwise transfer its Contribution where such patent license is only limited to the patent claims owned or controlled by such Contributor now or in future which will be necessarily infringed by its Contribution alone, or by combination of the Contribution with the Software to which the Contribution was contributed, excluding of any patent claims solely be infringed by your or others’ modification or other combinations. If you or your Affiliates directly or indirectly (including through an agent, patent licensee or assignee), institute patent litigation (including a cross claim or counterclaim in a litigation) or other patent enforcement activities against any individual or entity by alleging that the Software or any Contribution in it infringes patents, then any patent license granted to you under this License for the Software shall terminate as of the date such litigation or activity is filed or taken.
-
-3. No Trademark License
-
-No trademark license is granted to use the trade names, trademarks, service marks, or product names of Contributor, except as required to fulfill notice requirements in section 4.
-
-4. Distribution Restriction
-
-You may distribute the Software in any medium with or without modification, whether in source or executable forms, provided that you provide recipients with a copy of this License and retain copyright, patent, trademark and disclaimer statements in the Software.
-
-5. Disclaimer of Warranty and Limitation of Liability
-
-The Software and Contribution in it are provided without warranties of any kind, either express or implied. In no event shall any Contributor or copyright holder be liable to you for any damages, including, but not limited to any direct, or indirect, special or consequential damages arising from your use or inability to use the Software or the Contribution in it, no matter how it’s caused or based on which legal theory, even if advised of the possibility of such damages.
-
-End of the Terms and Conditions
-
-How to apply the Mulan Permissive Software License, Version 1 (Mulan PSL v1) to your software
-
-To apply the Mulan PSL v1 to your work, for easy identification by recipients, you are suggested to complete following three steps:
-
-1. Fill in the blanks in following statement, including insert your software name, the year of the first publication of your software, and your name identified as the copyright owner;
-2. Create a file named “LICENSE” which contains the whole context of this License in the first directory of your software package;
-3. Attach the statement to the appropriate annotated syntax at the beginning of each source file.
-
-Copyright (c) [2019] [name of copyright holder]
-[Software Name] is licensed under the Mulan PSL v1.
-You can use this software according to the terms and conditions of the Mulan PSL v1.
-You may obtain a copy of Mulan PSL v1 at:
-    http://license.coscl.org.cn/MulanPSL
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
-PURPOSE.
-See the Mulan PSL v1 for more details.
+    木兰宽松许可证, 第2版
+
+    木兰宽松许可证, 第2版
+    2020年1月 http://license.coscl.org.cn/MulanPSL2
+
+
+    您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束:
+
+    0. 定义
+
+    “软件”是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。
+
+    “贡献”是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。
+
+    “贡献者”是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。
+
+    “法人实体”是指提交贡献的机构及其“关联实体”。
+
+    “关联实体”是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。
+
+    1. 授予版权许可
+
+    每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可以复制、使用、修改、分发其“贡献”,不论修改与否。
+
+    2. 授予专利许可
+
+    每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权行动之日终止。
+
+    3. 无商标许可
+
+    “本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定的声明义务而必须使用除外。
+
+    4. 分发限制
+
+    您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。
+
+    5. 免责声明与责任限制
+
+    “软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于何种法律理论,即使其曾被建议有此种损失的可能性。
+
+    6. 语言
+    “本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文版为准。
+
+    条款结束
+
+    如何将木兰宽松许可证,第2版,应用到您的软件
+
+    如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步:
+
+    1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字;
+
+    2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中;
+
+    3, 请将如下声明文本放入每个源文件的头部注释中。
+
+    Copyright (c) [Year] [name of copyright holder]
+    [Software Name] is licensed under Mulan PSL v2.
+    You can use this software according to the terms and conditions of the Mulan PSL v2.
+    You may obtain a copy of Mulan PSL v2 at:
+        http://license.coscl.org.cn/MulanPSL2
+    THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+    See the Mulan PSL v2 for more details.
+ + + Mulan Permissive Software License,Version 2 + + Mulan Permissive Software License,Version 2 (Mulan PSL v2) + January 2020 http://license.coscl.org.cn/MulanPSL2 + + Your reproduction, use, modification and distribution of the Software shall be subject to Mulan PSL v2 (this License) with the following terms and conditions: + + 0. Definition + + Software means the program and related documents which are licensed under this License and comprise all Contribution(s). + + Contribution means the copyrightable work licensed by a particular Contributor under this License. + + Contributor means the Individual or Legal Entity who licenses its copyrightable work under this License. + + Legal Entity means the entity making a Contribution and all its Affiliates. + + Affiliates means entities that control, are controlled by, or are under common control with the acting entity under this License, ‘control’ means direct or indirect ownership of at least fifty percent (50%) of the voting power, capital or other securities of controlled or commonly controlled entity. + + 1. Grant of Copyright License + + Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable copyright license to reproduce, use, modify, or distribute its Contribution, with modification or not. + + 2. Grant of Patent License + + Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable (except for revocation under this Section) patent license to make, have made, use, offer for sale, sell, import or otherwise transfer its Contribution, where such patent license is only limited to the patent claims owned or controlled by such Contributor now or in future which will be necessarily infringed by its Contribution alone, or by combination of the Contribution with the Software to which the Contribution was contributed. The patent license shall not apply to any modification of the Contribution, and any other combination which includes the Contribution. If you or your Affiliates directly or indirectly institute patent litigation (including a cross claim or counterclaim in a litigation) or other patent enforcement activities against any individual or entity by alleging that the Software or any Contribution in it infringes patents, then any patent license granted to you under this License for the Software shall terminate as of the date such litigation or activity is filed or taken. + + 3. No Trademark License + + No trademark license is granted to use the trade names, trademarks, service marks, or product names of Contributor, except as required to fulfill notice requirements in Section 4. + + 4. Distribution Restriction + + You may distribute the Software in any medium with or without modification, whether in source or executable forms, provided that you provide recipients with a copy of this License and retain copyright, patent, trademark and disclaimer statements in the Software. + + 5. Disclaimer of Warranty and Limitation of Liability + + THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
IN NO EVENT SHALL ANY CONTRIBUTOR OR COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO MATTER HOW IT’S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + 6. Language + + THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION SHALL PREVAIL. + + END OF THE TERMS AND CONDITIONS + + How to Apply the Mulan Permissive Software License,Version 2 (Mulan PSL v2) to Your Software + + To apply the Mulan PSL v2 to your work, for easy identification by recipients, you are suggested to complete following three steps: + + i Fill in the blanks in following statement, including insert your software name, the year of the first publication of your software, and your name identified as the copyright owner; + + ii Create a file named “LICENSE†which contains the whole context of this License in the first directory of your software package; + + iii Attach the statement to the appropriate annotated syntax at the beginning of each source file. + + + Copyright (c) [Year] [name of copyright holder] + [Software Name] is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. diff --git a/license/Third_Party_Open_Source_Software_Notice.md b/license/Third_Party_Open_Source_Software_Notice.md new file mode 100644 index 0000000000..86d5d02017 --- /dev/null +++ b/license/Third_Party_Open_Source_Software_Notice.md @@ -0,0 +1,92 @@ +**OPEN SOURCE SOFTWARE NOTICE** + +Please note we provide an open source software notice along with this product and/or this product firmware (in the following just “this productâ€). The open source software licenses are granted by the respective right holders. And the open source licenses prevail all other license information with regard to the respective open source software contained in the product, including but not limited to End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country. + +**Warranty Disclaimer ** + +**THE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS.** + +**Copyright Notice and License Texts ** + +**Software:** Android 10.0.0_r2 + +**Copyright notice:** + +Copyright (c) 2005-2020, The Android Open Source Project + +**License:** Apache License V2.0 + +
Apache License
+
+Version 2.0, January 2004
+
+http://www.apache.org/licenses/
+ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1\. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2\. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + + +3\. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4\. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5\. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6\. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7\. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8\. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9\. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +THIS OFFER IS VALID FOR THREE YEARS FROM THE MOMENT WE DISTRIBUTED THE PRODUCT OR FIRMWARE. 
\ No newline at end of file -- Gitee From 5674f6087a2dbe158b6d9f2ecff869af63ab9aa9 Mon Sep 17 00:00:00 2001 From: binaryfz Date: Mon, 30 Nov 2020 14:06:18 +0800 Subject: [PATCH 3/9] add compiler-rt --- src/mrt/compiler-rt/BUILD.gn | 144 ++ src/mrt/compiler-rt/include/address.h | 66 + src/mrt/compiler-rt/include/allocator.h | 588 ++++++ .../include/allocator/alloc_callbacks.h | 112 ++ .../include/allocator/alloc_config.h | 205 ++ .../include/allocator/alloc_utils.h | 86 + src/mrt/compiler-rt/include/allocator/box.h | 200 ++ .../include/allocator/bp_allocator.h | 126 ++ .../include/allocator/bp_allocator_inlined.h | 63 + .../include/allocator/cartesian_tree.h | 484 +++++ src/mrt/compiler-rt/include/allocator/deque.h | 228 +++ .../compiler-rt/include/allocator/mem_map.h | 122 ++ .../include/allocator/page_allocator.h | 423 ++++ .../compiler-rt/include/allocator/page_map.h | 421 ++++ .../compiler-rt/include/allocator/page_pool.h | 269 +++ .../include/allocator/ros_alloc_run.h | 467 +++++ .../include/allocator/ros_allocator.h | 629 ++++++ .../include/allocator/ros_allocator_inlined.h | 490 +++++ src/mrt/compiler-rt/include/allocator/space.h | 282 +++ src/mrt/compiler-rt/include/chelper.h | 270 +++ src/mrt/compiler-rt/include/chosen.h | 119 ++ src/mrt/compiler-rt/include/cinterface.h | 215 ++ src/mrt/compiler-rt/include/collector/arena.h | 251 +++ .../compiler-rt/include/collector/collector.h | 559 ++++++ .../include/collector/collector_ms.h | 307 +++ .../include/collector/collector_naiverc.h | 237 +++ .../include/collector/collector_naiverc_ms.h | 294 +++ .../include/collector/collector_platform.h | 26 + .../include/collector/collector_rc.h | 58 + .../include/collector/collector_tracing.h | 419 ++++ .../compiler-rt/include/collector/conn_comp.h | 141 ++ .../include/collector/cp_generator.h | 589 ++++++ .../include/collector/cycle_collector.h | 149 ++ .../collector/gc_reference_processor.h | 55 + .../include/collector/mpl_thread_pool.h | 207 ++ .../include/collector/mrt_bitmap.h | 138 ++ .../compiler-rt/include/collector/native_gc.h | 149 ++ .../compiler-rt/include/collector/rc_inline.h | 329 +++ .../collector/rc_reference_processor.h | 107 + .../compiler-rt/include/collector/rp_base.h | 261 +++ .../include/collector/satb_buffer.h | 205 ++ src/mrt/compiler-rt/include/collector/stats.h | 138 ++ .../include/collector/task_queue.h | 225 +++ src/mrt/compiler-rt/include/collie.h | 240 +++ src/mrt/compiler-rt/include/cpphelper.h | 232 +++ src/mrt/compiler-rt/include/deps.h | 47 + src/mrt/compiler-rt/include/errno_utils.h | 23 + .../include/exception/eh_personality.h | 276 +++ .../include/exception/exception_handling.h | 119 ++ .../include/exception/mpl_exception.h | 136 ++ .../include/exception/mrt_exception.h | 112 ++ .../include/exception/stack_unwinder.h | 627 ++++++ .../compiler-rt/include/fast_alloc_inline.h | 150 ++ src/mrt/compiler-rt/include/file_layout.h | 36 + src/mrt/compiler-rt/include/gc_log.h | 130 ++ src/mrt/compiler-rt/include/heap_stats.h | 103 + src/mrt/compiler-rt/include/imported.h | 58 + .../compiler-rt/include/java_primitive_ops.h | 55 + src/mrt/compiler-rt/include/libs.h | 79 + src/mrt/compiler-rt/include/linker/linker.h | 91 + .../compiler-rt/include/linker/linker_cache.h | 172 ++ .../include/linker/linker_common.h | 571 ++++++ .../include/linker/linker_compiler.h | 274 +++ .../compiler-rt/include/linker/linker_debug.h | 42 + .../compiler-rt/include/linker/linker_gctib.h | 27 + .../include/linker/linker_hotfix.h | 41 + .../compiler-rt/include/linker/linker_info.h | 404 ++++ 
.../include/linker/linker_inline.h | 264 +++ .../include/linker/linker_lazy_binding.h | 111 ++ .../include/linker/linker_method_builder.h | 130 ++ .../compiler-rt/include/linker/linker_model.h | 274 +++ .../compiler-rt/include/linker/linker_utils.h | 93 + .../compiler-rt/include/loader/hash_pool.h | 108 + .../compiler-rt/include/loader/loader_utils.h | 32 + .../include/loader/object_loader.h | 102 + .../include/loader/object_locator.h | 109 + src/mrt/compiler-rt/include/metadata_inline.h | 414 ++++ src/mrt/compiler-rt/include/metadata_layout.h | 352 ++++ src/mrt/compiler-rt/include/mm_config.h | 286 +++ src/mrt/compiler-rt/include/mm_utils.h | 51 + src/mrt/compiler-rt/include/mrt_common.h | 26 + src/mrt/compiler-rt/include/muid.h | 132 ++ src/mrt/compiler-rt/include/mutator_list.h | 196 ++ src/mrt/compiler-rt/include/namemangler.h | 210 ++ src/mrt/compiler-rt/include/panic.h | 92 + src/mrt/compiler-rt/include/profile.h | 36 + src/mrt/compiler-rt/include/profile_type.h | 136 ++ src/mrt/compiler-rt/include/saferegion.h | 247 +++ src/mrt/compiler-rt/include/sizes.h | 738 +++++++ src/mrt/compiler-rt/include/version.h | 24 + src/mrt/compiler-rt/include/yieldpoint.h | 94 + .../compiler-rt/public-headers/file_adapter.h | 210 ++ .../compiler-rt/public-headers/file_loader.h | 29 + .../compiler-rt/public-headers/gc_callback.h | 44 + .../compiler-rt/public-headers/gc_reason.h | 107 + .../compiler-rt/public-headers/linker_api.h | 159 ++ .../compiler-rt/public-headers/loader_api.h | 295 +++ .../compiler-rt/public-headers/object_type.h | 88 + src/mrt/compiler-rt/public-headers/tracer.h | 38 + src/mrt/compiler-rt/src/allocator.cpp | 85 + .../src/allocator/bp_allocator.cpp | 209 ++ src/mrt/compiler-rt/src/allocator/mem_map.cpp | 224 +++ .../compiler-rt/src/allocator/page_map.cpp | 131 ++ .../src/allocator/ros_allocator.cpp | 1556 +++++++++++++++ src/mrt/compiler-rt/src/allocator/space.cpp | 292 +++ .../arch/arm64/cc_native_method_stub_arm64.S | 437 ++++ .../src/arch/arm64/dump_register_stub_arm64.S | 61 + .../compiler-rt/src/arch/arm64/fastFuncs.S | 1372 +++++++++++++ .../src/arch/arm64/i2r_stub_arm64.S | 0 .../arm64/interp_native_method_stub_arm64.S | 0 src/mrt/compiler-rt/src/arch/arm64/memset.S | 114 ++ ...prepare_args_for_exception_catcher_arm64.S | 38 + .../src/arch/arm64/proxy_stub_arm64.S | 853 ++++++++ .../src/arch/arm64/r2c_stub_arm64.S | 412 ++++ .../src/arch/arm64/signal_handler_arm64.cpp | 597 ++++++ .../arch/arm64/signal_handler_stub_arm64.S | 167 ++ .../src/arch/arm64/yieldpoint_arm64.S | 146 ++ src/mrt/compiler-rt/src/chelper.cpp | 550 +++++ src/mrt/compiler-rt/src/chosen.cpp | 26 + src/mrt/compiler-rt/src/cinterface.cpp | 1665 ++++++++++++++++ src/mrt/compiler-rt/src/collector/arena.cpp | 45 + .../compiler-rt/src/collector/collector.cpp | 211 ++ .../src/collector/collector_ms.cpp | 1762 +++++++++++++++++ .../src/collector/collector_naiverc.cpp | 1168 +++++++++++ .../src/collector/collector_naiverc_ms.cpp | 892 +++++++++ .../src/collector/collector_rc.cpp | 73 + .../src/collector/collector_tracing.cpp | 1100 ++++++++++ .../compiler-rt/src/collector/conn_comp.cpp | 267 +++ .../src/collector/cp_generator.cpp | 1228 ++++++++++++ .../src/collector/cycle_collector.cpp | 636 ++++++ .../compiler-rt/src/collector/gc_reason.cpp | 121 ++ .../src/collector/gc_reference_processor.cpp | 245 +++ .../src/collector/mpl_thread_pool.cpp | 296 +++ .../compiler-rt/src/collector/mrt_bitmap.cpp | 103 + .../compiler-rt/src/collector/native_gc.cpp | 172 ++ .../src/collector/rc_reference_processor.cpp | 1224 
++++++++++++ src/mrt/compiler-rt/src/collector/rp_base.cpp | 446 +++++ src/mrt/compiler-rt/src/collector/stats.cpp | 250 +++ src/mrt/compiler-rt/src/collie.cpp | 450 +++++ src/mrt/compiler-rt/src/errno_utils.cpp | 89 + .../src/exception/eh_personality.cpp | 516 +++++ .../src/exception/exception_handling.cpp | 187 ++ .../src/exception/mpl_exception.cpp | 220 ++ .../src/exception/mrt_exception.cpp | 615 ++++++ .../src/exception/stack_unwinder.cpp | 1106 +++++++++++ src/mrt/compiler-rt/src/file_layout.cpp | 39 + src/mrt/compiler-rt/src/gc_log.cpp | 247 +++ src/mrt/compiler-rt/src/heap_stats.cpp | 101 + src/mrt/compiler-rt/src/libs.cpp | 537 +++++ src/mrt/compiler-rt/src/libs_fast.cpp | 116 ++ src/mrt/compiler-rt/src/linker/linker.cpp | 979 +++++++++ src/mrt/compiler-rt/src/linker/linker_api.cpp | 974 +++++++++ .../compiler-rt/src/linker/linker_cache.cpp | 1289 ++++++++++++ .../compiler-rt/src/linker/linker_debug.cpp | 428 ++++ .../compiler-rt/src/linker/linker_gctib.cpp | 150 ++ .../compiler-rt/src/linker/linker_hotfix.cpp | 80 + .../compiler-rt/src/linker/linker_info.cpp | 37 + .../src/linker/linker_lazy_binding.cpp | 1536 ++++++++++++++ .../src/linker/linker_method_builder.cpp | 791 ++++++++ .../compiler-rt/src/linker/linker_model.cpp | 659 ++++++ .../compiler-rt/src/linker/linker_utils.cpp | 375 ++++ .../compiler-rt/src/loader/file_adapter.cpp | 202 ++ src/mrt/compiler-rt/src/loader/hash_pool.cpp | 297 +++ .../compiler-rt/src/loader/loader_utils.cpp | 203 ++ .../compiler-rt/src/loader/object_loader.cpp | 503 +++++ .../compiler-rt/src/loader/object_locator.cpp | 420 ++++ src/mrt/compiler-rt/src/mangle_for_jni.cpp | 137 ++ src/mrt/compiler-rt/src/mm_config.cpp | 32 + src/mrt/compiler-rt/src/mm_utils.cpp | 257 +++ src/mrt/compiler-rt/src/muid.cpp | 305 +++ src/mrt/compiler-rt/src/mutator_list.cpp | 27 + src/mrt/compiler-rt/src/namemangler.cpp | 626 ++++++ src/mrt/compiler-rt/src/panic.cpp | 38 + src/mrt/compiler-rt/src/profile.cpp | 208 ++ src/mrt/compiler-rt/src/tracer.cpp | 27 + src/mrt/compiler-rt/src/yieldpoint.cpp | 537 +++++ 176 files changed, 54639 insertions(+) create mode 100644 src/mrt/compiler-rt/BUILD.gn create mode 100644 src/mrt/compiler-rt/include/address.h create mode 100644 src/mrt/compiler-rt/include/allocator.h create mode 100644 src/mrt/compiler-rt/include/allocator/alloc_callbacks.h create mode 100644 src/mrt/compiler-rt/include/allocator/alloc_config.h create mode 100644 src/mrt/compiler-rt/include/allocator/alloc_utils.h create mode 100644 src/mrt/compiler-rt/include/allocator/box.h create mode 100644 src/mrt/compiler-rt/include/allocator/bp_allocator.h create mode 100644 src/mrt/compiler-rt/include/allocator/bp_allocator_inlined.h create mode 100644 src/mrt/compiler-rt/include/allocator/cartesian_tree.h create mode 100644 src/mrt/compiler-rt/include/allocator/deque.h create mode 100644 src/mrt/compiler-rt/include/allocator/mem_map.h create mode 100644 src/mrt/compiler-rt/include/allocator/page_allocator.h create mode 100644 src/mrt/compiler-rt/include/allocator/page_map.h create mode 100644 src/mrt/compiler-rt/include/allocator/page_pool.h create mode 100644 src/mrt/compiler-rt/include/allocator/ros_alloc_run.h create mode 100644 src/mrt/compiler-rt/include/allocator/ros_allocator.h create mode 100644 src/mrt/compiler-rt/include/allocator/ros_allocator_inlined.h create mode 100644 src/mrt/compiler-rt/include/allocator/space.h create mode 100644 src/mrt/compiler-rt/include/chelper.h create mode 100644 src/mrt/compiler-rt/include/chosen.h create mode 100644 
src/mrt/compiler-rt/include/cinterface.h create mode 100644 src/mrt/compiler-rt/include/collector/arena.h create mode 100644 src/mrt/compiler-rt/include/collector/collector.h create mode 100644 src/mrt/compiler-rt/include/collector/collector_ms.h create mode 100644 src/mrt/compiler-rt/include/collector/collector_naiverc.h create mode 100644 src/mrt/compiler-rt/include/collector/collector_naiverc_ms.h create mode 100644 src/mrt/compiler-rt/include/collector/collector_platform.h create mode 100644 src/mrt/compiler-rt/include/collector/collector_rc.h create mode 100644 src/mrt/compiler-rt/include/collector/collector_tracing.h create mode 100644 src/mrt/compiler-rt/include/collector/conn_comp.h create mode 100644 src/mrt/compiler-rt/include/collector/cp_generator.h create mode 100644 src/mrt/compiler-rt/include/collector/cycle_collector.h create mode 100644 src/mrt/compiler-rt/include/collector/gc_reference_processor.h create mode 100644 src/mrt/compiler-rt/include/collector/mpl_thread_pool.h create mode 100644 src/mrt/compiler-rt/include/collector/mrt_bitmap.h create mode 100644 src/mrt/compiler-rt/include/collector/native_gc.h create mode 100644 src/mrt/compiler-rt/include/collector/rc_inline.h create mode 100644 src/mrt/compiler-rt/include/collector/rc_reference_processor.h create mode 100644 src/mrt/compiler-rt/include/collector/rp_base.h create mode 100644 src/mrt/compiler-rt/include/collector/satb_buffer.h create mode 100644 src/mrt/compiler-rt/include/collector/stats.h create mode 100644 src/mrt/compiler-rt/include/collector/task_queue.h create mode 100644 src/mrt/compiler-rt/include/collie.h create mode 100644 src/mrt/compiler-rt/include/cpphelper.h create mode 100644 src/mrt/compiler-rt/include/deps.h create mode 100644 src/mrt/compiler-rt/include/errno_utils.h create mode 100644 src/mrt/compiler-rt/include/exception/eh_personality.h create mode 100644 src/mrt/compiler-rt/include/exception/exception_handling.h create mode 100644 src/mrt/compiler-rt/include/exception/mpl_exception.h create mode 100644 src/mrt/compiler-rt/include/exception/mrt_exception.h create mode 100644 src/mrt/compiler-rt/include/exception/stack_unwinder.h create mode 100644 src/mrt/compiler-rt/include/fast_alloc_inline.h create mode 100644 src/mrt/compiler-rt/include/file_layout.h create mode 100644 src/mrt/compiler-rt/include/gc_log.h create mode 100644 src/mrt/compiler-rt/include/heap_stats.h create mode 100644 src/mrt/compiler-rt/include/imported.h create mode 100644 src/mrt/compiler-rt/include/java_primitive_ops.h create mode 100644 src/mrt/compiler-rt/include/libs.h create mode 100644 src/mrt/compiler-rt/include/linker/linker.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_cache.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_common.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_compiler.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_debug.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_gctib.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_hotfix.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_info.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_inline.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_lazy_binding.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_method_builder.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_model.h create mode 100644 src/mrt/compiler-rt/include/linker/linker_utils.h create mode 100644 
src/mrt/compiler-rt/include/loader/hash_pool.h create mode 100644 src/mrt/compiler-rt/include/loader/loader_utils.h create mode 100644 src/mrt/compiler-rt/include/loader/object_loader.h create mode 100644 src/mrt/compiler-rt/include/loader/object_locator.h create mode 100644 src/mrt/compiler-rt/include/metadata_inline.h create mode 100644 src/mrt/compiler-rt/include/metadata_layout.h create mode 100644 src/mrt/compiler-rt/include/mm_config.h create mode 100644 src/mrt/compiler-rt/include/mm_utils.h create mode 100644 src/mrt/compiler-rt/include/mrt_common.h create mode 100644 src/mrt/compiler-rt/include/muid.h create mode 100644 src/mrt/compiler-rt/include/mutator_list.h create mode 100644 src/mrt/compiler-rt/include/namemangler.h create mode 100644 src/mrt/compiler-rt/include/panic.h create mode 100644 src/mrt/compiler-rt/include/profile.h create mode 100644 src/mrt/compiler-rt/include/profile_type.h create mode 100644 src/mrt/compiler-rt/include/saferegion.h create mode 100644 src/mrt/compiler-rt/include/sizes.h create mode 100644 src/mrt/compiler-rt/include/version.h create mode 100644 src/mrt/compiler-rt/include/yieldpoint.h create mode 100644 src/mrt/compiler-rt/public-headers/file_adapter.h create mode 100644 src/mrt/compiler-rt/public-headers/file_loader.h create mode 100644 src/mrt/compiler-rt/public-headers/gc_callback.h create mode 100644 src/mrt/compiler-rt/public-headers/gc_reason.h create mode 100644 src/mrt/compiler-rt/public-headers/linker_api.h create mode 100644 src/mrt/compiler-rt/public-headers/loader_api.h create mode 100644 src/mrt/compiler-rt/public-headers/object_type.h create mode 100644 src/mrt/compiler-rt/public-headers/tracer.h create mode 100644 src/mrt/compiler-rt/src/allocator.cpp create mode 100644 src/mrt/compiler-rt/src/allocator/bp_allocator.cpp create mode 100644 src/mrt/compiler-rt/src/allocator/mem_map.cpp create mode 100644 src/mrt/compiler-rt/src/allocator/page_map.cpp create mode 100644 src/mrt/compiler-rt/src/allocator/ros_allocator.cpp create mode 100644 src/mrt/compiler-rt/src/allocator/space.cpp create mode 100644 src/mrt/compiler-rt/src/arch/arm64/cc_native_method_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/dump_register_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/fastFuncs.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/i2r_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/interp_native_method_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/memset.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/prepare_args_for_exception_catcher_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/proxy_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/r2c_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/signal_handler_arm64.cpp create mode 100644 src/mrt/compiler-rt/src/arch/arm64/signal_handler_stub_arm64.S create mode 100644 src/mrt/compiler-rt/src/arch/arm64/yieldpoint_arm64.S create mode 100644 src/mrt/compiler-rt/src/chelper.cpp create mode 100644 src/mrt/compiler-rt/src/chosen.cpp create mode 100644 src/mrt/compiler-rt/src/cinterface.cpp create mode 100644 src/mrt/compiler-rt/src/collector/arena.cpp create mode 100644 src/mrt/compiler-rt/src/collector/collector.cpp create mode 100644 src/mrt/compiler-rt/src/collector/collector_ms.cpp create mode 100644 src/mrt/compiler-rt/src/collector/collector_naiverc.cpp create mode 100644 src/mrt/compiler-rt/src/collector/collector_naiverc_ms.cpp create mode 100644 
src/mrt/compiler-rt/src/collector/collector_rc.cpp create mode 100644 src/mrt/compiler-rt/src/collector/collector_tracing.cpp create mode 100644 src/mrt/compiler-rt/src/collector/conn_comp.cpp create mode 100644 src/mrt/compiler-rt/src/collector/cp_generator.cpp create mode 100644 src/mrt/compiler-rt/src/collector/cycle_collector.cpp create mode 100644 src/mrt/compiler-rt/src/collector/gc_reason.cpp create mode 100644 src/mrt/compiler-rt/src/collector/gc_reference_processor.cpp create mode 100644 src/mrt/compiler-rt/src/collector/mpl_thread_pool.cpp create mode 100644 src/mrt/compiler-rt/src/collector/mrt_bitmap.cpp create mode 100644 src/mrt/compiler-rt/src/collector/native_gc.cpp create mode 100644 src/mrt/compiler-rt/src/collector/rc_reference_processor.cpp create mode 100644 src/mrt/compiler-rt/src/collector/rp_base.cpp create mode 100644 src/mrt/compiler-rt/src/collector/stats.cpp create mode 100644 src/mrt/compiler-rt/src/collie.cpp create mode 100644 src/mrt/compiler-rt/src/errno_utils.cpp create mode 100644 src/mrt/compiler-rt/src/exception/eh_personality.cpp create mode 100644 src/mrt/compiler-rt/src/exception/exception_handling.cpp create mode 100644 src/mrt/compiler-rt/src/exception/mpl_exception.cpp create mode 100644 src/mrt/compiler-rt/src/exception/mrt_exception.cpp create mode 100644 src/mrt/compiler-rt/src/exception/stack_unwinder.cpp create mode 100644 src/mrt/compiler-rt/src/file_layout.cpp create mode 100644 src/mrt/compiler-rt/src/gc_log.cpp create mode 100644 src/mrt/compiler-rt/src/heap_stats.cpp create mode 100644 src/mrt/compiler-rt/src/libs.cpp create mode 100644 src/mrt/compiler-rt/src/libs_fast.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_api.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_cache.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_debug.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_gctib.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_hotfix.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_info.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_lazy_binding.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_method_builder.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_model.cpp create mode 100644 src/mrt/compiler-rt/src/linker/linker_utils.cpp create mode 100644 src/mrt/compiler-rt/src/loader/file_adapter.cpp create mode 100644 src/mrt/compiler-rt/src/loader/hash_pool.cpp create mode 100644 src/mrt/compiler-rt/src/loader/loader_utils.cpp create mode 100644 src/mrt/compiler-rt/src/loader/object_loader.cpp create mode 100644 src/mrt/compiler-rt/src/loader/object_locator.cpp create mode 100644 src/mrt/compiler-rt/src/mangle_for_jni.cpp create mode 100644 src/mrt/compiler-rt/src/mm_config.cpp create mode 100644 src/mrt/compiler-rt/src/mm_utils.cpp create mode 100644 src/mrt/compiler-rt/src/muid.cpp create mode 100644 src/mrt/compiler-rt/src/mutator_list.cpp create mode 100644 src/mrt/compiler-rt/src/namemangler.cpp create mode 100644 src/mrt/compiler-rt/src/panic.cpp create mode 100644 src/mrt/compiler-rt/src/profile.cpp create mode 100644 src/mrt/compiler-rt/src/tracer.cpp create mode 100644 src/mrt/compiler-rt/src/yieldpoint.cpp diff --git a/src/mrt/compiler-rt/BUILD.gn b/src/mrt/compiler-rt/BUILD.gn new file mode 100644 index 0000000000..0bfc72a4f4 --- /dev/null +++ b/src/mrt/compiler-rt/BUILD.gn @@ -0,0 +1,144 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All 
rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +sources_common = [ + "src/allocator.cpp", + "src/chelper.cpp", + "src/chosen.cpp", + "src/cinterface.cpp", + "src/collector/arena.cpp", + "src/collector/collector.cpp", + "src/collector/collector_rc.cpp", + "src/collector/collector_ms.cpp", + "src/collector/collector_naiverc.cpp", + "src/collector/collector_naiverc_ms.cpp", + "src/collector/collector_tracing.cpp", + "src/collector/conn_comp.cpp", + "src/collector/cp_generator.cpp", + "src/collector/cycle_collector.cpp", + "src/collector/gc_reason.cpp", + "src/collector/mpl_thread_pool.cpp", + "src/collector/mrt_bitmap.cpp", + "src/collector/native_gc.cpp", + "src/collector/rp_base.cpp", + "src/collector/rc_reference_processor.cpp", + "src/collector/gc_reference_processor.cpp", + "src/collector/stats.cpp", + "src/exception/eh_personality.cpp", + "src/exception/exception_handling.cpp", + "src/gc_log.cpp", + "src/libs.cpp", + "src/mm_config.cpp", + "src/errno_utils.cpp", + "src/mm_utils.cpp", + "src/exception/mpl_exception.cpp", + "src/exception/mrt_exception.cpp", + "src/mutator_list.cpp", + "src/namemangler.cpp", + "src/mangle_for_jni.cpp", + "src/file_layout.cpp", + "src/muid.cpp", + "src/panic.cpp", + "src/profile.cpp", + "src/heap_stats.cpp", + "src/allocator/ros_allocator.cpp", + "src/allocator/space.cpp", + "src/allocator/mem_map.cpp", + "src/allocator/bp_allocator.cpp", + "src/allocator/page_map.cpp", + "src/exception/stack_unwinder.cpp", + "src/yieldpoint.cpp", + "src/tracer.cpp", + "src/linker/linker.cpp", + "src/linker/linker_api.cpp", + "src/linker/linker_info.cpp", + "src/linker/linker_model.cpp", + "src/linker/linker_utils.cpp", + "src/linker/linker_cache.cpp", + "src/linker/linker_gctib.cpp", + "src/linker/linker_lazy_binding.cpp", + "src/linker/linker_method_builder.cpp", + "src/linker/linker_hotfix.cpp", + "src/linker/linker_debug.cpp", + "src/loader/file_adapter.cpp", + "src/loader/hash_pool.cpp", + "src/loader/loader_utils.cpp", + "src/loader/object_locator.cpp", + "src/loader/object_loader.cpp", +] + +include_common = [ + "include", + "public-headers", + "${THIRD_PARTY_ROOT}/libnativehelper/include_jni", + "${MAPLEALL_ROOT}/huawei_secure_c/include/", + "${MAPLE_MRT_ROOT}/maplert/include", + "${MAPLE_MRT_ROOT}/maplert/public-headers/", + "${MAPLE_MRT_ROOT}/libmrtbase/include/", + "${MAPLE_MRT_ROOT}/libmrtbase/include/linux", + "${MAPLE_MRT_ROOT}/dexinterface", + "${MAPLE_MRT_ROOT}/interpreter/zterp", + "${MAPLE_MRT_ROOT}", +] + +static_library("libmplcompiler-rt") { + sources = sources_common + include_dirs = include_common + + sources += [ + "src/arch/arm64/cc_native_method_stub_arm64.S", + "src/arch/arm64/fastFuncs.S", + "src/arch/arm64/i2r_stub_arm64.S", + "src/arch/arm64/interp_native_method_stub_arm64.S", + "src/arch/arm64/memset.S", + "src/arch/arm64/prepare_args_for_exception_catcher_arm64.S", + "src/arch/arm64/proxy_stub_arm64.S", + "src/arch/arm64/r2c_stub_arm64.S", + "src/arch/arm64/signal_handler_arm64.cpp", + "src/arch/arm64/signal_handler_stub_arm64.S", + "src/arch/arm64/yieldpoint_arm64.S", + ] + + cflags = [] + 
cflags_cc = [] + cflags_cc += [ + "-fvisibility=hidden", + "-pthread", + "-std=gnu++14", + "-fasynchronous-unwind-tables", + "-nostdlibinc", + ] + if (OPS_ANDROID == 1) { + sources += [ + "src/collie.cpp", + ] + + include_dirs += [ + "${ANDROID_ROOT}/system/core/libcutils/include/", + "${ANDROID_ROOT}/system/logging/liblog/include/", + "${MAPLE_ROOT}/android/bionic/libc/private", + ] + + cflags_cc -= [ + "-nostdlibinc", + "-pthread", + ] + } else { + sources += ["src/libs_fast.cpp",] + } + configs = [ "${MAPLE_MRT_ROOT}:mrt_cxx_flags" ] + + cflags_cc += [ "-DMAPLE_EH_UTEST=0" ] + asmflags = cflags_cc +} diff --git a/src/mrt/compiler-rt/include/address.h b/src/mrt/compiler-rt/include/address.h new file mode 100644 index 0000000000..b827589441 --- /dev/null +++ b/src/mrt/compiler-rt/include/address.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ADDRESS_H +#define MAPLE_RUNTIME_ADDRESS_H + +#include +#include +#include "gc_callback.h" + +// Operations for the address type and memory operations +namespace maplert { +static constexpr address_t kDummyAddress = static_cast(-1); + +template +inline T &AddrToLVal(address_t addr) { + return *reinterpret_cast(addr); +} + +template +inline std::atomic &AddrToLValAtomic(address_t addr) { + return *reinterpret_cast*>(addr); +} + +static inline address_t RefFieldToAddress(reffield_t refField) { + return static_cast(refField); +} + +static inline reffield_t AddressToRefField(address_t addr) { + return static_cast(addr); +} + +// Note: only used to load reference field of Java object +// raw load, no RC-bookkeeping +static inline address_t LoadRefField(address_t *fieldAddr) { + return static_cast(*reinterpret_cast(fieldAddr)); +} + +// dialect of the above version. Doesn't do NULL-check against obj +static inline address_t LoadRefField(address_t obj, std::size_t offset) { + return LoadRefField(reinterpret_cast(obj + offset)); +} + +// Note: only used to store reference field of Java object +// raw store, no RC-bookkeeping +static inline void StoreRefField(address_t *fieldAddr, address_t newVal) { + *reinterpret_cast(fieldAddr) = static_cast(newVal); +} + +// dialect of the above version. Doesn't do NULL-check against obj +static inline void StoreRefField(address_t obj, std::size_t offset, address_t newVal) { + StoreRefField(reinterpret_cast(obj + offset), newVal); +} +} +#endif diff --git a/src/mrt/compiler-rt/include/allocator.h b/src/mrt/compiler-rt/include/allocator.h new file mode 100644 index 0000000000..eab6594b90 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator.h @@ -0,0 +1,588 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
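The LoadRefField/StoreRefField helpers in address.h above are deliberately raw: they read or write a reference slot with no null check, no barrier, and no RC bookkeeping. A minimal usage sketch follows, assuming address.h is on the include path; the field offset is a hypothetical value for illustration, not a real Maple object layout.

#include <cstddef>
#include "address.h"

namespace maplert {
// kExampleFieldOffset is illustrative only; real offsets come from class metadata.
constexpr std::size_t kExampleFieldOffset = 16;

inline address_t ReadExampleField(address_t obj) {
  // raw load of a reference field; the caller does any RC/GC bookkeeping itself
  return LoadRefField(obj, kExampleFieldOffset);
}

inline void WriteExampleField(address_t obj, address_t newRef) {
  // raw store of a reference field; no write barrier is applied here
  StoreRefField(obj, kExampleFieldOffset, newRef);
}
}  // namespace maplert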
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ALLOCATOR_H +#define MAPLE_RUNTIME_ALLOCATOR_H + +#include +#include +#include +#include + +#include "mm_config.h" +#include "address.h" +#include "heap_stats.h" +#include "mrt_object.h" +#include "panic.h" +#include "thread_api.h" +#include "collector/mrt_bitmap.h" +#include "collector/mpl_thread_pool.h" +#include "mrt_reference_api.h" +#if LOG_ALLOC_TIMESTAT +#include "utils/time_utils.h" +#endif + +#define ALLOC_ENABLE_LOCK_CONTENTION_STATS __MRT_DEBUG_COND_FALSE + +#if ALLOC_ENABLE_LOCK_CONTENTION_STATS +#define ALLOC_MUTEX_TYPE maple::Mutex +#define ALLOC_LOCK_TYPE maple::MutexLock +#define ALLOC_CURRENT_THREAD maple::IThread::Current(), +#else +#define ALLOC_MUTEX_TYPE std::mutex +#define ALLOC_LOCK_TYPE std::lock_guard +#define ALLOC_CURRENT_THREAD +#endif + +namespace maplert { +// The allocator collects five kinds of data from the heap: +// 1. Fragmentation related: the utilisation of the heap +// 2. Per-mutator allocation data: this is non-atomic +// 3. Global allocation data: this is atomic +// 4. Allocation time statistics +// 5. Lock contention statistics +// These are written in the contract and every allocator implementation should support them. +// Additional stats can be supported by adding callbacks to the callback interface. +class FragmentationRecord { + public: + // this is allocator-specific, move into allocator implementation header + size_t GetExternalFragmentation() const { + return runFreeSlots + runOverhead + freePages; + } + size_t GetInternalFragmentation() const { + return internal; + } + size_t GetRCHeader()const { + return rcOverhead; + } + size_t GetBytesInFreePages() const { + return freePages; + } + size_t GetBytesInPages() const { + return totalPages; + } + size_t GetFreeSlots() const { + return runFreeSlots; + } + size_t GetSlots() const { + return runTotalSlots; + } + size_t GetRunCacheVolume() const { + return runCacheVolume; + } + size_t GetRunOverhead() const { + return runOverhead; + } + size_t GetRun(bool isFree, bool isLocal) const { + if (isFree) { + if (isLocal) { + return freeLocalRun; + } else { + return freeRun; + } + } else { + if (isLocal) { + return totalLocalRun; + } else { + return totalRun; + } + } + } + + void RecordInternalFrag(size_t totalBytes, size_t requestedBytes) { + internal = totalBytes - requestedBytes; + } + + void IncFreePages(size_t bytes) { + freePages += bytes; + } + void IncPages(size_t bytes) { + totalPages += bytes; + } + void IncFreeSlots(size_t bytes) { + runFreeSlots += bytes; + } + void IncSlots(size_t bytes) { + runTotalSlots += bytes; + } + void IncRunOverhead(size_t bytes) { + runOverhead += bytes; + } + void IncRunCacheVolume(size_t bytes) { + runCacheVolume += bytes; + } + void IncRun(size_t bytes, bool isFree, bool isLocal) { + if (isFree) { + freeRun += bytes; + if (isLocal) { + freeLocalRun += bytes; + } + } + totalRun += bytes; + if (isLocal) { + totalLocalRun += bytes; + } + } + + FragmentationRecord() + : internal(0), + freePages(0), + totalPages(0), + runFreeSlots(0), + runTotalSlots(0), + runOverhead(0), + rcOverhead(0), + runCacheVolume(0), + freeRun(0), + totalRun(0), + freeLocalRun(0), + totalLocalRun(0) {} + 
~FragmentationRecord() = default; + + void Reset() { + internal = 0; + freePages = 0; + totalPages = 0; + runFreeSlots = 0; + runTotalSlots = 0; + runOverhead = 0; + rcOverhead = 0; + runCacheVolume = 0; + freeRun = 0; + totalRun = 0; + freeLocalRun = 0; + totalLocalRun = 0; + } + + void Print(std::basic_ostream &os, std::string tag) { + os << "External fragment [" << tag << "]: " << GetExternalFragmentation() << "\n"; + os << "Internal fragment [" << tag << "]: " << GetInternalFragmentation() << "\n"; + os << "Free page bytes [" << tag << "]: " << GetBytesInFreePages() << "\n"; + os << "Total page bytes [" << tag << "]: " << GetBytesInPages() << "\n"; + os << "Free run bytes (incl cache) [" << tag << "]: " << GetRun(true, false) << "\n"; + os << "Total run bytes (incl cache) [" << tag << "]: " << GetRun(false, false) << "\n"; + os << "Free slot bytes (incl cache) [" << tag << "]: " << GetFreeSlots() << "\n"; + os << "Run overhead [" << tag << "]: " << GetRunOverhead() << "\n"; + // only use when not considering the gc header + os << "Total slot bytes (incl cache) [" << tag << "]: " << GetSlots() << "\n"; + os << "Free local run bytes [" << tag << "]: " << GetRun(true, true) << "\n"; + os << "Total local run bytes [" << tag << "]: " << GetRun(false, true) << "\n"; + os << "Cached run bytes [" << tag << "]: " << GetRunCacheVolume() << "\n"; + } + private: + // fragmentation fields: + size_t internal; // internal frag. caused by rc/gc header and alignment + size_t freePages; // external frag. free pages in bytes not yet allocated + size_t totalPages; + size_t runFreeSlots; // external frag. total bytes of the free slots + size_t runTotalSlots; + size_t runOverhead; // external frag. total bytes of the run header and the trailing space + // extra fields: + size_t rcOverhead; // total bytes used by RC header + size_t runCacheVolume; // bytes consumed by the cached runs + size_t freeRun; + size_t totalRun; + size_t freeLocalRun; + size_t totalLocalRun; +}; // class FragmentationRecord + +class AccUnSynchedSizeField { + public: + inline size_t LoadSize() const { + return val; + } + inline void Inc(size_t param) { + val += param; + } + inline void Dec(size_t param) { + val -= param; + } + inline void Init() { + val = 0; + } + AccUnSynchedSizeField() : val(0) {} + ~AccUnSynchedSizeField() = default; + private: + size_t val; +}; + +class AccSynchedSizeField { + public: + inline size_t LoadSize() const { + return val.load(std::memory_order_relaxed); + } + inline void Inc(size_t param) { + (void)val.fetch_add(param, std::memory_order_relaxed); + } + inline void Sub(size_t param) { + (void)val.fetch_sub(param, std::memory_order_relaxed); + } + inline void Dec(size_t param) { + (void)val.fetch_sub(param, std::memory_order_relaxed); + } + inline void Init() { + val.store(0, std::memory_order_seq_cst); + } + AccSynchedSizeField() : val(0) {} + ~AccSynchedSizeField() = default; + private: + std::atomic val; +}; + +template +class AllocAccounting { + public: + inline size_t GetNetBytes() const { + return netBytes.LoadSize(); + } + inline size_t GetNetObjs() const { + return netObjs.LoadSize(); + } + inline size_t GetNetObjBytes() const { + return netObjBytes.LoadSize(); + } + inline size_t GetNetLargeObjBytes() const { + return netLargeObjBytes.LoadSize(); + } + + AllocAccounting() { + netBytes.Init(); + netObjs.Init(); + netObjBytes.Init(); + netLargeObjBytes.Init(); + totalFreedBytes.Init(); + totalFreedObjBytes.Init(); + totalFreedObjs.Init(); + totalAllocdBytes.Init(); + totalAllocdObjs.Init(); + 
+    totalAllocdObjBytes.Init();
+  }
+  ~AllocAccounting() = default;
+  inline size_t TotalAllocdBytes() {
+    return totalAllocdBytes.LoadSize();
+  }
+  inline size_t TotalFreedBytes() {
+    return totalFreedBytes.LoadSize();
+  }
+  inline size_t TotalAllocdObjs() {
+    return totalAllocdObjs.LoadSize();
+  }
+  inline size_t TotalFreedObjs() {
+    return totalFreedObjs.LoadSize();
+  }
+  inline void AtAlloc(size_t objSize, size_t internalSize, bool isLarge) {
+    netBytes.Inc(internalSize);
+    netObjBytes.Inc(objSize);
+    netObjs.Inc(1);
+    if (UNLIKELY(isLarge)) {
+      netLargeObjBytes.Inc(objSize);
+    }
+#if ENABLE_HPROF
+    if (UNLIKELY(IsHeapStatsEnabled())) {
+      // this collects total alloc stats from a time window, instead of net stats
+      totalAllocdBytes.Inc(internalSize);
+      totalAllocdObjs.Inc(1);
+    }
+#endif
+  }
+  inline void AtFree(size_t objSize, size_t internalSize, bool isLarge) {
+    netBytes.Dec(internalSize);
+    netObjBytes.Dec(objSize);
+    netObjs.Dec(1);
+    if (UNLIKELY(isLarge)) {
+      netLargeObjBytes.Dec(objSize);
+    }
+#if ENABLE_HPROF
+    if (UNLIKELY(IsHeapStatsEnabled())) {
+      // this collects total free stats from a time window, instead of net stats
+      totalFreedBytes.Inc(internalSize);
+      totalFreedObjs.Inc(1);
+    }
+#endif
+  }
+  inline void ResetWindowTotal() {
+    totalFreedBytes.Init();
+    totalFreedObjBytes.Init();
+    totalFreedObjs.Init();
+    totalAllocdBytes.Init();
+    totalAllocdObjs.Init();
+    totalAllocdObjBytes.Init();
+  }
+
+ private:
+  SizeField netBytes;
+  SizeField netObjBytes;
+  SizeField netObjs;
+  SizeField netLargeObjBytes; // net bytes excluding overhead
+
+  // these are for total numbers in a time window, caution: there is no overflow protection
+  SizeField totalAllocdBytes; // total bytes including overhead
+  SizeField totalAllocdObjBytes; // total bytes excluding overhead
+  SizeField totalAllocdObjs; // total number of objects allocated
+  SizeField totalFreedBytes; // total bytes including overhead
+  SizeField totalFreedObjBytes; // total bytes excluding overhead
+  SizeField totalFreedObjs; // total number of objects freed
+};
+
+// the allocator uses a version that requires atomic instructions, because multiple
+// threads could do the same operation concurrently
+using SynchedAllocAccounting = AllocAccounting<AccSynchedSizeField>;
+// the mutator uses a lightweight version that does not require atomic instructions
+using UnsyncAllocAccounting = AllocAccounting<AccUnSynchedSizeField>;
+
+#if LOG_ALLOC_TIMESTAT
+// Time statistics for measured functions such as allocator obj alloc/release
+class TimeStat {
+ public:
+  uint64_t tmMin; // min time for an occurrence of the function
+  uint64_t tmMax; // max time for an occurrence of the function
+  uint64_t tmSum; // sum total time of all occurrences of the function
+  uint64_t tmCnt; // count of occurrences of the function
+
+  TimeStat() : tmMin(std::numeric_limits<uint64_t>::max()), tmMax(0), tmSum(0), tmCnt(0) {}
+
+  void Reset() {
+    tmMin = std::numeric_limits<uint64_t>::max();
+    tmMax = 0;
+    tmSum = 0;
+    tmCnt = 0;
+  }
+
+  // Update time stats with the time taken by an occurrence of the function
+  void Update(uint64_t val) {
+    tmMin = std::min(val, tmMin);
+    tmMax = std::max(val, tmMax);
+    tmSum += val;
+    ++tmCnt;
+  }
+
+  // True if there is recorded timestat data for the function
+  bool HasStat() {
+    return (tmCnt != 0);
+  }
+
+  uint64_t GetMin() {
+    return tmMin;
+  }
+  uint64_t GetMax() {
+    return tmMax;
+  }
+  uint64_t GetSum() {
+    return tmSum;
+  }
+  uint64_t GetCnt() {
+    return tmCnt;
+  }
+  uint64_t GetAvg() {
+    return tmSum / tmCnt;
+  }
+};
+
+// TypeInd: indices of the timed functions
+enum { +
kTimeAllocLocal, + kTimeAllocGlobal, + kTimeAllocLarge, + kTimeFreeLocal, + kTimeFreeGlobal, + kTimeFreeLarge, + kTimeReleaseObj, + kTimeMax +}; +#endif + +class AllocMutator { + public: + virtual void Init() = 0; + virtual void Fini() = 0; + virtual void VisitGCRoots(std::function visitor) = 0; + AllocMutator() = default; + +#if LOG_ALLOC_TIMESTAT + void StartTimer() { + timeStamp = timeutils::NanoSeconds(); + } + + void StopTimer(int typeInd) { + uint64_t stopTime = timeutils::NanoSeconds(); + timeStat[typeInd].Update(stopTime - timeStamp); + } + + // reset timestat for all tracked functions + void ResetTimers() { + for (int typeInd = 0; typeInd < kTimeMax; ++typeInd) { + timeStat[typeInd].Reset(); + } + } + + uint64_t GetTimerMin(int typeInd) { + return timeStat[typeInd].GetMin(); + } + uint64_t GetTimerMax(int typeInd) { + return timeStat[typeInd].GetMax(); + } + uint64_t GetTimerSum(int typeInd) { + return timeStat[typeInd].GetSum(); + } + uint64_t GetTimerCnt(int typeInd) { + return timeStat[typeInd].GetCnt(); + } + uint64_t GetTimerAvg(int typeInd) { + return timeStat[typeInd].GetAvg(); + } + + void SuspendFreeObjTimeStat() { + statOn = false; + } + void ResumeFreeObjTimeStat() { + statOn = true; + } + bool DoFreeObjTimeStat() { + return statOn; + } +#endif + + virtual ~AllocMutator() = default; + AllocMutator(AllocMutator const&) = delete; + void operator=(AllocMutator const &x) = delete; + inline UnsyncAllocAccounting &GetAllocAccount() { + return account; + } + + public: + // the mutator uses a light weight version that does not require atomic instructions + // this is currently not properly instrumented, use the global/atomic version instead + UnsyncAllocAccounting account; +#if LOG_ALLOC_TIMESTAT + // Note: timestat for allocator FreeObj is not collected in following cases: + // 1. objects freed by MS collector via RosAllocImpl::FreeAllIf + // 2. objects freed by non alloc-mutator threads calling the free function + // 3. objects freed by naive RC collector via ReleaseObj call are not individually timestat'd + // again since they are already timestat'd in the ReleaseObj operation. + TimeStat timeStat[kTimeMax]; + uint64_t timeStamp; + bool statOn = true; +#endif +}; // class AllocMutator + +// Globals used by fast allocation containing context information. +// Putting all the information together helps avoid virtual calls (Collector::Instance()). +// It also reduces adrp usage in the generated code, saving a few lines of code. +struct FastAllocData { + static FastAllocData data; + std::atomic isConcurrentSweeping = { false }; + bool isConcurrentMarking = false; + bool isGCOnly = false; + std::atomic allocatedInternalSize = { 0 }; + MrtBitmap *bm; +}; + +#if ALLOC_USE_FAST_PATH +#define FAST_ALLOC_ACCOUNT_ADD(s) \ + (void) FastAllocData::data.allocatedInternalSize.fetch_add((s), std::memory_order_relaxed) +#define FAST_ALLOC_ACCOUNT_SUB(s) \ + (void) FastAllocData::data.allocatedInternalSize.fetch_sub((s), std::memory_order_relaxed) +#else +#define FAST_ALLOC_ACCOUNT_ADD(s) +#define FAST_ALLOC_ACCOUNT_SUB(s) +#endif + +// Allocator abstract class +class Allocator { + public: + // For methods that visit objects via callback functions. 
+ using AddressVisitor = std::function; + using VisitorFactory = std::function; + + // returns the total number of objs allocated + inline size_t AllocatedObjs() const { + return account.GetNetObjs(); + } + + // returns the total bytes that has been occupied + // to be specific, this is the sum of internal size of all allocated objects + inline size_t AllocatedMemory() const { +#if ALLOC_USE_FAST_PATH + return FastAllocData::data.allocatedInternalSize.load(std::memory_order_relaxed); +#else + return account.GetNetBytes(); +#endif + } + + // returns the total size of the objects allocated + // the difference between requested memory and allocated memory, is that + // allocated memory sums internal size, whereas requested memory sums raw obj size (no header) + inline size_t RequestedMemory() const { + return account.GetNetObjBytes(); + } + + inline SynchedAllocAccounting &GetAllocAccount() { + return account; + } + + // callback for mutator finalisation + inline void PostMutatorFini(AllocMutator &mutator); + + // callback before allocation + __attribute__ ((always_inline)) + void PreObjAlloc(address_t objAddress, size_t objSize) const; + + // callback after allocation + // objSize is the requested object size + // internalSize is the size including overhead + __attribute__ ((always_inline)) + void PostObjAlloc(address_t objAddress, size_t objSize, size_t internalSize); + + // callback before free. returns the object size + template + __attribute__ ((always_inline)) + size_t PreObjFree(address_t objAddress) const; + + // callback after free +// internalSize is the bytes occupied by the object including overhead + template + __attribute__ ((always_inline)) + void PostObjFree(address_t objAddress, size_t objSize, size_t internalSize); + + virtual void Init(const VMHeapParam&) = 0; + + // Allocate space for a new object of the requested size + virtual address_t NewObj(size_t size) = 0; + + static void ReleaseResource(address_t obj); + + virtual ~Allocator() { + oome = nullptr; + } + Allocator(); + void NewOOMException(); + + // Allocation tracking support, set callback + void SetAllocRecordingCallback(std::function allocRecordingCallbackFunc) { + mAllocRecordingCallbackFunc = allocRecordingCallbackFunc; + } + + public: + MObject *oome; // this doesn't belong here! + SynchedAllocAccounting account; + protected: + std::atomic oomeCreated; + ALLOC_MUTEX_TYPE globalLock; + // Allocation tracking support, recorder callback + std::function mAllocRecordingCallbackFunc = nullptr; +}; +} + +#endif + diff --git a/src/mrt/compiler-rt/include/allocator/alloc_callbacks.h b/src/mrt/compiler-rt/include/allocator/alloc_callbacks.h new file mode 100644 index 0000000000..3ced4242bb --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/alloc_callbacks.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_ALLOC_CALLBACKS_H +#define MAPLE_RUNTIME_ALLOC_CALLBACKS_H + +#include +#include +#include +#include + +#include "allocator.h" +#include "chosen.h" +#include "exception/mrt_exception.h" +#include "mrt_reflection.h" +#include "mrt_object.h" +#include "cpphelper.h" +#include "chelper.h" +#include "mm_config.h" +#include "address.h" +#include "sizes.h" +#include "panic.h" +#include "collector/stats.h" +#include "jsan.h" + +namespace maplert { +inline void Allocator::PreObjAlloc(address_t objAddress __attribute__((unused)), + size_t objSize __attribute__((unused))) const {} + +inline void Allocator::PostObjAlloc(address_t objAddress, size_t objSize, size_t internalSize) { + JSAN_ADD_OBJ(objAddress, objSize); + + bool isLarge = internalSize > RosAllocImpl::kLargeObjSize; + // non-atomic: per-mutator allocation statistics (currently not instrumented) + // atomic: keep track of total bytes allocated in the heap + account.AtAlloc(objSize, internalSize, isLarge); + + if (UNLIKELY(mAllocRecordingCallbackFunc != nullptr)) { + mAllocRecordingCallbackFunc(objAddress, objSize); + } +} + +template +inline size_t Allocator::PreObjFree(address_t objAddress) const { + if (UNLIKELY(mAllocRecordingCallbackFunc != nullptr)) { + mAllocRecordingCallbackFunc(objAddress, 0); + } + + if (isFast) { + return 0; + } else { + MObject *freeObj = MObject::Cast(objAddress); + return freeObj->GetSize(); + } +} + +template +inline void Allocator::PostObjFree(address_t, size_t objSize, size_t internalSize) { +#if ALLOC_USE_FAST_PATH + static_cast(objSize); + if (!isFast) { + FAST_ALLOC_ACCOUNT_SUB(internalSize); + } +#else + static_cast(isFast); + bool isLarge = internalSize > RosAllocImpl::kLargeObjSize; + // non-atomic: per-mutator allocation statistics (currently not instrumented) + // atomic: keep track of total bytes allocated in the heap + account.AtFree(objSize, internalSize, isLarge); +#endif +} + +#if LOG_ALLOC_TIMESTAT +#define PrintTimeStat(outs, op, typeInd, mut) \ + if (mut.GetTimerCnt(typeInd) != 0) { \ + outs << " " << op << "[Min] : " << mut.GetTimerMin(typeInd) << maple::endl; \ + outs << " " << op << "[Max] : " << mut.GetTimerMax(typeInd) << maple::endl; \ + outs << " " << op << "[Avg] : " << mut.GetTimerAvg(typeInd) << \ + " out of " << mut.GetTimerCnt(typeInd) << maple::endl ; \ + } +#endif + +void Allocator::PostMutatorFini(AllocMutator &mutator) { +#if LOG_ALLOC_TIMESTAT + std::ostringstream os1; + os1 << "[Mutator Allocation Time] : " << &mutator << maple::endl; + PrintTimeStat(os1, "[local]", kTimeAllocLocal, mutator); + PrintTimeStat(os1, "[global]", kTimeAllocGlobal, mutator); + PrintTimeStat(os1, "[large]", kTimeAllocLarge, mutator); + PrintTimeStat(os1, "[release]", kTimeReleaseObj, mutator); + PrintTimeStat(os1, "[free local]", kTimeFreeLocal, mutator); + PrintTimeStat(os1, "[free global]", kTimeFreeGlobal, mutator); + PrintTimeStat(os1, "[free large]", kTimeFreeLarge, mutator); + LOG(ERROR) << os1.str().c_str() << maple::endl; +#else + static_cast(mutator); +#endif +} +} // namespace maplert + +#endif // MAPLE_RUNTIME_ALLOC_CALLBACKS_H diff --git a/src/mrt/compiler-rt/include/allocator/alloc_config.h b/src/mrt/compiler-rt/include/allocator/alloc_config.h new file mode 100644 index 0000000000..1f813bf34a --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/alloc_config.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
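PostObjAlloc and PostObjFree above keep the heap accounting up to date, and PostObjAlloc/PreObjFree also forward every event to mAllocRecordingCallbackFunc once one has been registered via SetAllocRecordingCallback. A sketch of hooking that callback for allocation tracking; the callback parameter types are inferred from the call sites above (object address plus requested size, 0 on free), and how the Allocator instance is obtained is outside this patch.

#include <cstdio>
#include "allocator.h"

// Sketch only: `alloc` stands for whichever Allocator instance the runtime exposes.
void InstallAllocTracker(maplert::Allocator &alloc) {
  alloc.SetAllocRecordingCallback([](auto obj, auto size) {
    // PostObjAlloc passes the requested object size; PreObjFree passes 0.
    std::printf("alloc-event: obj=%#llx size=%llu\n",
                static_cast<unsigned long long>(obj),
                static_cast<unsigned long long>(size));
  });
}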
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ALLOC_CONFIG_H +#define MAPLE_RUNTIME_ALLOC_CONFIG_H + +#include "sizes.h" +#include "mm_config.h" +#include "alloc_utils.h" + +// ROS allocator specific --- start --- +// Run configuration section --------------------------------------------------- +const size_t kROSAllocLocalSize = 104; +const size_t kROSAllocLargeSize = 2016; + +namespace maplert { +struct RunConfigType { + const bool isLocal; // this kind of run can be local + const uint8_t numCaches; // this kind of run at most has this many caches, deprecated + const uint8_t numPagesPerRun; // pages per run + const uint32_t size; // slot size of this kind of run +}; +class RunConfig { + public: + // this class only has static member, so its constructor and destructors are useless. + RunConfig() = delete; + RunConfig(const RunConfig&) = delete; + RunConfig(RunConfig&&) = delete; + RunConfig &operator=(const RunConfig&) = delete; + RunConfig &operator=(RunConfig&&) = delete; + ~RunConfig() = delete; + // REMEMBER TO CHANGE THIS WHEN YOU ADD/REMOVE CONFIGS + static const uint32_t kRunConfigs = 53; + + // this supports a maximum of (256 * 8 == 2048 byte) run + // we need to extend this if we want to config multiple-page run + static const uint32_t kMaxRunConfigs = 256; + // change this when add/remove configs + // this stores a config for each kind of run (represented by an index) + static const RunConfigType kCfgs[kRunConfigs]; + // this map maps a size ((size >> 3 - 1) to be precise) to a run config + // this map takes 4 * kMaxRunConfigs == 1k + static uint32_t size2idx[kMaxRunConfigs]; // all zero-initialised +}; +} // namespace maplert + +// assume(size <= (kCfgs[N_RUN_CONFIGS - 1].size << 3)) +#define ROSIMPL_RUN_IDX(size) RunConfig::size2idx[((size) >> 3) - 1] +// this is a short cut of ROSIMPL_RUN_IDX, only works under certain configs, see kCfgs def +#define ROSIMPL_FAST_RUN_IDX(size) (((size) >> 3) - 2) +#define ROSIMPL_RUN_SIZE(idx) (RunConfig::kCfgs[(idx)].size) +// return true if for a obj of this size, we use thread-local storage +#define ROSIMPL_IS_LOCAL_RUN_SIZE(size) \ + ((size) <= kROSAllocLargeSize && RunConfig::kCfgs[ROSIMPL_RUN_IDX(size)].isLocal) +// this is a short cut of ROSIMPL_IS_LOCAL_RUN_SIZE, only works under certain configs, see kCfgs def +#define ROSIMPL_FAST_IS_LOCAL_RUN_SIZE(size) ((size) <= kROSAllocLocalSize) +// an idx corresponds to a kind of run with a certain size, +// return true if we allow thread-local runs of this size +#define ROSIMPL_IS_LOCAL_RUN_IDX(idx) (RunConfig::kCfgs[(idx)].isLocal) +#define ROSIMPL_N_CACHE_RUNS(idx) (RunConfig::kCfgs[(idx)].numCaches) +#define ROSIMPL_N_PAGES_PER_RUN(idx) (RunConfig::kCfgs[(idx)].numPagesPerRun) + +const uint32_t kROSAllocLocalRuns = ROSIMPL_FAST_RUN_IDX(kROSAllocLocalSize) + 1; +const int kRosimplDefaultPagePerRun = 1; +const int kRosimplDefaultMaxCacheRun = 8; // unused + +// Heap configuration section -------------------------------------------------- +#define ROSIMPL_DEFAULT_MAX_SPACE (1ul << 29) // 512MB +#define ROSIMPL_DEFAULT_MAX_PAGES (ROSIMPL_DEFAULT_MAX_SPACE >> 
ROSALLOC_LOG_PAGE_SIZE) +const int kRosimplDefaultPageOneTime = 256; + +const bool kRosimplReleasePageAtFree = true; +const bool kRosimplReleasePageAtTrim = true; + +#define ROSIMPL_DEFAULT_HEAP_START_SIZE (1 << 23) // 8m +#define ROSIMPL_DEFAULT_HEAP_SIZE ROSIMPL_DEFAULT_MAX_SPACE +#define ROSIMPL_DEFAULT_HEAP_GROWTH_LIMIT ROSIMPL_DEFAULT_HEAP_SIZE +// we trigger grow exactly when we run out of free pages +// changing this number won't add other triggers, so don't expect this to do what ART is doing +#define ROSIMPL_DEFAULT_HEAP_MIN_FREE (0U) +#define ROSIMPL_DEFAULT_HEAP_MAX_FREE (1 << 23) // 8m +const double kRosimplDefaultHeapTargetUtilization = 0.95; // ART 0.75 +const bool kRosimplDefaultIgnoreMaxFootprint = false; + +// when true, we memset obj memory to 0 at Free() time; +// when false, we memset the allocated memory to 0 at New() time instead +// memset at New() might be useful for debugging, but lacking in performance +#define ROSIMPL_MEMSET_AT_FREE (true) + +constexpr size_t kAllocAlign = 8; +#define ROSIMPL_HEADER_ALLOC_SIZE (maplert::kHeaderSize) + +static_assert((ROSIMPL_HEADER_ALLOC_SIZE) % kAllocAlign == 0, + "obj header size must be aligned"); +static_assert((maplert::kJavaArrayContentOffset) % kAllocAlign == 0, + "java array content offset is required to be aligned by the allocator"); + +static_assert(kROSAllocLargeSize >= ROSIMPL_HEADER_ALLOC_SIZE, "large size too small"); +constexpr size_t kFastAllocMaxSize = + maplert::AllocUtilRndDown(kROSAllocLocalSize - ROSIMPL_HEADER_ALLOC_SIZE, kAllocAlign); +static_assert(kFastAllocMaxSize >= ROSIMPL_HEADER_ALLOC_SIZE, "large size too small"); +constexpr size_t kFastAllocArrayMaxSize = kFastAllocMaxSize - maplert::kJavaArrayContentOffset; + +static_assert(kFastAllocArrayMaxSize <= maplert::kMrtMaxArrayLength, "array length too big"); + +constexpr size_t kAllocArrayMaxSize = maplert::AllocUtilRndDown(std::numeric_limits::max() - + ROSIMPL_HEADER_ALLOC_SIZE - maplert::kJavaArrayContentOffset, kAllocAlign); + +#define ROSIMPL_GET_OBJ_FROM_ADDR(addr) ((addr) + ROSIMPL_HEADER_ALLOC_SIZE) +#define ROSIMPL_GET_ADDR_FROM_OBJ(objAddr) ((objAddr) - ROSIMPL_HEADER_ALLOC_SIZE) + +// define the size of the bitmap. 
+// we need to address the issue of multiple page runs +#define ROSIMPL_SLOTS_RAW (8 * sizeof(uint32_t)) +#define ROSIMPL_BITMAP_SIZE (ALLOCUTIL_PAGE_SIZE / (ROSIMPL_SLOTS_RAW * ROSIMPL_HEADER_ALLOC_SIZE)) + +// Debugging options ----------------------------------------------------------- +#ifndef ROSIMPL_ENABLE_VERIFY +#define ROSIMPL_ENABLE_VERIFY __MRT_DEBUG_COND_FALSE +#endif + +#if ROSIMPL_ENABLE_VERIFY +#define ROSIMPL_ENABLE_ASSERTS true +#define ROSIMPL_DEBUG(func) func +#else +#define ROSIMPL_DEBUG(func) (void(0)) +#endif + +#ifndef ROSIMPL_ENABLE_ASSERTS +#define ROSIMPL_ENABLE_ASSERTS __MRT_DEBUG_COND_FALSE +#endif + +#if ROSIMPL_ENABLE_ASSERTS +#define ROSIMPL_ASSERT(p, msg) __MRT_ASSERT(p, msg) +#define ROSIMPL_ASSERT_IF(cond, p, msg) if (cond) __MRT_ASSERT(p, msg) +#define ROSIMPL_DUNUSED(x) x +#else +#define ROSIMPL_DUNUSED(x) x __attribute__((unused)) +#define ROSIMPL_ASSERT(p, msg) (void(0)) +#define ROSIMPL_ASSERT_IF(cond, p, msg) (void(0)) +#endif + +// ASSERTS only working under verification mode +#if ROSIMPL_ENABLE_VERIFY && ROSIMPL_ENABLE_ASSERTS +#define ROSIMPL_VERIFY_ASSERT(p, msg) __MRT_ASSERT(p, msg) +#else +#define ROSIMPL_VERIFY_ASSERT(p, msg) (void(0)) +#endif + +#define ROSIMPL_ENABLE_DUMP __MRT_DEBUG_COND_FALSE + +#if ROSIMPL_ENABLE_DUMP +#define ROSIMPL_VERIFY_DUMP_PG_TABLE Dump() +#else +#define ROSIMPL_VERIFY_DUMP_PG_TABLE (void(0)) +#endif + +// Permanent-space allocator specific -- start --- +const size_t kBPAllocObjAlignment = 8; // align to 8 for 64-bit system +const size_t kBPAllocHeaderSize = maplert::DecoupleAllocHeader::kHeaderSize; + +const size_t kPermMaxSpaceSize = (64u << 20); // 64MB maximum +const size_t kMetaMaxSpaceSize = (64u << 20); // 64MB maximum +const size_t kDecoupleMaxSpaceSize = (64u << 20); // 64MB maximum +const size_t kZterpMaxSpaceSize = (32u << 20); // 32MB maximum +static_assert((kPermMaxSpaceSize % ALLOCUTIL_PAGE_SIZE) == 0, "invalid perm space size"); +static_assert((kMetaMaxSpaceSize % ALLOCUTIL_PAGE_SIZE) == 0, "invalid meta space size"); +static_assert((kDecoupleMaxSpaceSize % ALLOCUTIL_PAGE_SIZE) == 0, "invalid decouple space size"); +const size_t kOffHeapSpaceGap = ALLOCUTIL_PAGE_SIZE; // defensive gap +const size_t kOffHeapSpaceSize = kPermMaxSpaceSize + kOffHeapSpaceGap + + kMetaMaxSpaceSize + kOffHeapSpaceGap + + kZterpMaxSpaceSize + kOffHeapSpaceGap + + kDecoupleMaxSpaceSize + kOffHeapSpaceGap + kZterpMaxSpaceSize; + +#define BPALLOC_GET_OBJ_FROM_ADDR(addr) ((addr) + maplert::DecoupleAllocHeader::kHeaderSize) + +#ifndef BPALLOC_DEBUG +#define BPALLOC_DEBUG __MRT_DEBUG_COND_FALSE +#endif +#ifndef BPALLOC_ENABLE_ASSERTS +#define BPALLOC_ENABLE_ASSERTS BPALLOC_DEBUG +#endif + +#if BPALLOC_ENABLE_ASSERTS +#define BPALLOC_ASSERT(p, msg) __MRT_ASSERT(p, msg) +#define BPALLOC_ASSERT_IF(cond, p, msg) if (cond) __MRT_ASSERT(p, msg) +#define BPALLOC_DUNUSED(x) x +#else +#define BPALLOC_ASSERT(p, msg) (void(0)) +#define BPALLOC_ASSERT_IF(cond, p, msg) (void(0)) +#define BPALLOC_DUNUSED(x) x __attribute__((unused)) +#endif + +#endif // MAPLE_RUNTIME_ALLOC_CONFIG_H diff --git a/src/mrt/compiler-rt/include/allocator/alloc_utils.h b/src/mrt/compiler-rt/include/allocator/alloc_utils.h new file mode 100644 index 0000000000..147641ec94 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/alloc_utils.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
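As a quick sanity check on the size-class arithmetic defined in alloc_config.h above, the following illustrative static_asserts restate it; they assume the original, unstripped macro definitions and are not part of the patch.

#include "allocator/alloc_config.h"

// A 64-byte slot: (64 >> 3) - 2 == 6, and 64 <= kROSAllocLocalSize (104),
// so it is eligible for the thread-local fast path.
static_assert(ROSIMPL_FAST_RUN_IDX(64) == 6, "64-byte slots map to run index 6");
static_assert(ROSIMPL_FAST_IS_LOCAL_RUN_SIZE(64), "64 bytes stays on the local fast path");
// Anything above kROSAllocLocalSize leaves the fast local path; above
// kROSAllocLargeSize (2016) it is treated as a large object.
static_assert(!ROSIMPL_FAST_IS_LOCAL_RUN_SIZE(kROSAllocLocalSize + 8),
              "112 bytes is no longer a local-run size");

With the defaults above, the heap tops out at ROSIMPL_DEFAULT_MAX_SPACE = 1 << 29 bytes (512MB), which is 131072 pages assuming the usual 4K page size.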
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ALLOC_UTILS_H +#define MAPLE_RUNTIME_ALLOC_UTILS_H + +#include +#include "sizes.h" +#include "mm_config.h" +#include "deps.h" + +namespace maplert { +#ifndef ALLOCUTIL_PAGE_SIZE +#define ALLOCUTIL_PAGE_SIZE (static_cast(0x1000)) // 4K page +#endif + +constexpr uint32_t kAllocUtilLogPageSize = 12; +#define ALLOCUTIL_PAGE_BYTE2CNT(x) ((x) >> kAllocUtilLogPageSize) +#define ALLOCUTIL_PAGE_CNT2BYTE(x) (static_cast(x) << kAllocUtilLogPageSize) + + +#define ALLOCUTIL_PAGE_RND_DOWN(x) ((static_cast(x)) & (~(ALLOCUTIL_PAGE_SIZE - 1))) +#define ALLOCUTIL_PAGE_RND_UP(x) \ + (((static_cast(x)) + ALLOCUTIL_PAGE_SIZE - 1) & (~(ALLOCUTIL_PAGE_SIZE - 1))) + +#define ALLOCUTIL_PAGE_ADDR(x) (static_cast(x) & (~(ALLOCUTIL_PAGE_SIZE - 1))) + +#define ALLOCUTIL_MEM_UNMAP(address, size_in_bytes) \ + if (munmap(reinterpret_cast(address), size_in_bytes) != EOK) { \ + perror("munmap failed. Process terminating."); \ + MRT_Panic(); \ + } + +#define ALLOCUTIL_MEM_MADVISE(address, size_in_bytes, option) \ + if (madvise(reinterpret_cast(address), size_in_bytes, option) != EOK) { \ + perror("madvise failed. Process terminating."); \ + MRT_Panic(); \ + } + +constexpr uint32_t kAllocUtilPrefetchWrite = 1; +#define ALLOCUTIL_PREFETCH_WRITE(address) \ + __builtin_prefetch(reinterpret_cast(address), kAllocUtilPrefetchWrite) + +template +constexpr T AllocUtilRndDown(T x, size_t n) { + return (x & static_cast(-n)); +} + +template +constexpr T AllocUtilRndUp(T x, size_t n) { + return AllocUtilRndDown(x + n - 1, n); +} + +template +class AllocUtilRand { + public: + AllocUtilRand() = delete; + AllocUtilRand(IntType randStart, IntType randEnd) { + std::random_device rd; + e = std::mt19937(rd()); + dist = std::uniform_int_distribution(randStart, randEnd); + } + ~AllocUtilRand() = default; + // return random value between l and u (inclusive), please make sure l <= u + inline IntType next() { + return dist(e); + } + + private: + std::mt19937 e; + std::uniform_int_distribution dist; +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_ALLOC_UTILS_H diff --git a/src/mrt/compiler-rt/include/allocator/box.h b/src/mrt/compiler-rt/include/allocator/box.h new file mode 100644 index 0000000000..c097ed05a2 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/box.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_BOX_H +#define MAPLE_RUNTIME_BOX_H + +#include +#include + +// Port of Rust Box +namespace maplert { +// Port of std::boxed::Box from the Rust programming language. 
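Before moving on to Box: the rounding helpers defined just above are ordinary power-of-two arithmetic. A small self-contained restatement with the values spelled out (only the names differ from the real AllocUtilRndDown/AllocUtilRndUp):

#include <cstddef>

template<typename T>
constexpr T RndDown(T x, size_t n) { return x & static_cast<T>(-n); }  // n must be a power of two
template<typename T>
constexpr T RndUp(T x, size_t n) { return RndDown<T>(x + n - 1, n); }

static_assert(RndDown<size_t>(13, 8) == 8,  "13 rounds down to the previous 8-byte boundary");
static_assert(RndUp<size_t>(13, 8) == 16,   "13 rounds up to the next 8-byte boundary");
static_assert(RndUp<size_t>(0x1001, 0x1000) == 0x2000, "same formula behind ALLOCUTIL_PAGE_RND_UP (4K pages)");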
+// +// Basically equivalent to C++ std::unique_ptr, but is supposed to be used with +// a custom allocator, and is not supposed to customize the deleter. Basically, +// - If you care more about where the object is allocated, use Box. +// - If you care more about where the object is freed, use unique_ptr. +// Note that you can always convert a Box to and from Box::unique_ptr, a +// std::unique_ptr specialization, using IntoUniquePtr and FromUniquePtr. +// +// The user should allocate object using Box::New, and let the Box go out +// of scope so that it gets naturally deleted by the allocator. We can also +// convert a Box to T* using IntoRaw, and back from T* to Box using +// FromRaw, so that the object remains alive when pointed by T*, and gets +// recycled after converting back to Box and going out of scope. +// +// Requirements: +// The allocator must be stateless. The default allocator (std::allocator) is +// stateless. maplert::StdContainerAllocator is also statelss. +template> +class Box final { + public: + // Deleter that supports std::unique_ptr + class Deleter { + public: + void operator()(T *ptr) { + DestructAndDeallocate(ptr); + } + }; + + // Our std::unique_ptr alias. + using unique_ptr = std::unique_ptr; + + // Make an empty box. + Box() : rawPointer(nullptr) {} + + // Move from another box, take the responsibility to free the object. + Box(Box &&other) { // not explicit + MoveFrom(other); + }; + + // Move from another box, take the responsibility to free the object. + Box &operator=(Box &&other) { + MoveFrom(other); + return *this; + } + + // Destruct the contained object and deallocate it. + ~Box() { + Drop(); + } + + // @return true if the box is empty (initialized as empty, moved, destructed, + // or explicitly discarded). + bool IsEmpty() const { + return rawPointer == nullptr; + } + + // Same as IsEmpty(), enabling the `box == nullptr` expression. + bool operator==(std::nullptr_t) const { + return IsEmpty(); + } + + // Same as !IsEmpty(), enabling the `box != nullptr` expression. + bool operator!=(std::nullptr_t) const { + return !IsEmpty(); + } + + // Get the reference to the contained object. Similar to + // std::unique_ptr::operator*() + T &operator*() const { + return *rawPointer; + } + + // Allow the box->member syntax so that the box can be used as a pointer. + // Similar to std::unique_ptr::operator->() + T *operator->() const { + return rawPointer; + } + + // Convert into a raw pointer, and give up the responsibility to free the + // object. Similar to std::unique_ptr::release() + T *IntoRaw() { + return TakePointer(); + } + + // Get the underlying raw pointer. Similar to std::unique_ptr::get(). + // + // Use with care! It breaks the "This Box is the only pointer of the object" + // rule. + T *UnsafeGetRaw() const { + return rawPointer; + } + + // Convert into an std::unique_ptr. The unique_ptr is now responsible for + // freeing the object. + unique_ptr IntoUniquePtr() { + T *ptr = TakePointer(); + return unique_ptr(ptr); + } + + // Destruct the contained object and deallocate the memory using the + // allocator. Do nothing if this Box has already been moved or destroyed. + void Drop() noexcept { + if (rawPointer != nullptr) { + T *ptr = TakePointer(); + DestructAndDeallocate(ptr); + } + } + + // Create an empty, with a nullptr as its content. Same as the default + // constructor, but more explicit. + static Box Empty() { + return Box(); + } + + // Create a box using the constructor of a type. Similar to std::make_unique. 
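A minimal usage sketch of the Box API documented above (Widget is a hypothetical payload type; the default std::allocator specialisation is assumed):

#include <string>
#include <utility>

struct Widget {
  int id;
  std::string name;
  Widget(int i, std::string n) : id(i), name(std::move(n)) {}
};

void BoxSketch() {
  // Allocate and construct through the allocator bound to the Box type.
  maplert::Box<Widget> box = maplert::Box<Widget>::New(42, "demo");
  int id = box->id;                                 // pointer-like access
  (void)id;
  Widget *raw = box.IntoRaw();                      // give up ownership; 'box' is now empty
  auto again = maplert::Box<Widget>::FromRaw(raw);  // take ownership back; freed at scope exit
}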
+ template + static Box New(Args &&...args) { + T *ptr = AllocateAndConstruct(std::forward(args)...); + return Box(ptr); + } + + // Convert a raw pointer back into a Box, taking the reponsibility to free the + // object. The object must be allocated by the same allocator specified for + // the Box type. Similar to the constructor of std::unique_ptr + static Box FromRaw(T *ptr) { + return Box(ptr); + } + + // Convert a unique_ptr back into a Box, taking the reponsibility to free the + // object. The object must be allocated by the same allocator specified for + // the Box type. + static Box FromUniquePtr(unique_ptr uptr) { + return FromRaw(uptr.release()); + } + + private: + // Construct a box from a raw pointer. Internal use, only. + Box(T *ptr) : rawPointer(ptr) {} + + Box(const Box &other) = delete; // Cannot be copied. + Box &operator=(const Box &other) = delete; // Cannot be copy-assigned. + + // Get the rawPointer, ensuring we do not forget to clear the rawPointer field. + T *TakePointer() { + T *ptr = rawPointer; + rawPointer = nullptr; + return ptr; + } + + // Take the pointer (and the reponsibility to free the object) from another + // Box + void MoveFrom(Box &other) { + rawPointer = other.TakePointer(); + } + + // Use the allocator to allocate and construct the object. + template + static T *AllocateAndConstruct(Args &&...args) { + Allocator allocator; + T *ptr = std::allocator_traits::allocate(allocator, 1); + std::allocator_traits::construct(allocator, ptr, std::forward(args)...); + return ptr; + } + + // Use the allocator to destruct and deallocate the object. + static void DestructAndDeallocate(T *ptr) { + Allocator allocator; + std::allocator_traits::destroy(allocator, ptr); + std::allocator_traits::deallocate(allocator, ptr, 1); + } + + // The raw pointer + T *rawPointer; +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_BOX_H diff --git a/src/mrt/compiler-rt/include/allocator/bp_allocator.h b/src/mrt/compiler-rt/include/allocator/bp_allocator.h new file mode 100644 index 0000000000..8905170761 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/bp_allocator.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_BP_ALLOCATOR_H +#define MAPLE_RUNTIME_BP_ALLOCATOR_H + +#include +#include +#include +#include + +#include "alloc_config.h" +#include "allocator/mem_map.h" + +namespace maplert { +// A bump-pointer allocator for permanent space. +class BumpPointerAlloc { + public: + static const uint32_t kInitialSpaceSize; + static const size_t kExtendedSize; + + // kClassInitializedState is defined according to kFireBreak and kSpaceAnchor, + // so adjust kClassInitializedState if they are modified. + // + // kFireBreak also reserves some time for C2IBridge. We need to make sure the + // C2I bridge does not exceed this amount. + // + // In the future, it is better to arrange the entire memory layout in a single + // file in order to coordinate multiple modules that need to use the low 4GB + // memory. 
+ static constexpr size_t kFireBreak = (256u << 20); // 256MB. Leave enough space for C2I Bridge. + static constexpr address_t kSpaceAnchor = (3ul << 30); // 3GB + + BumpPointerAlloc(const string&, size_t); + virtual ~BumpPointerAlloc(); + virtual address_t Alloc(size_t size); + inline address_t AllocThrowExp(size_t size); + bool Contains(address_t obj) const; + void Dump(std::basic_ostream &os); + + protected: + MemMap *memMap; + address_t startAddr; + address_t currentAddr; + address_t endAddr; + mutex globalLock; + const string showmapName; + + template + inline address_t AllocInternal(const size_t &allocSize); + virtual void DumpUsage(std::basic_ostream&) {} + + private: + void Init(size_t growthLimit); +}; // class BumpPointerAlloc + +class MetaAllocator : public BumpPointerAlloc { + public: + MetaAllocator(const string &name, size_t maxSpaceSize) : BumpPointerAlloc(name, maxSpaceSize) {} + ~MetaAllocator() = default; + address_t Alloc(size_t size, MetaTag metaTag); + address_t Alloc(size_t) override { + BPALLOC_ASSERT(false, "must provide MetaTag in meta space"); + return 0U; + } + + protected: + void DumpUsage(std::basic_ostream &os) override; + + private: + uint32_t sizeUsed[kMetaTagNum] = { 0 }; +}; + +class DecoupleAllocator : public BumpPointerAlloc { + public: + DecoupleAllocator(const string &name, size_t maxSpaceSize); + ~DecoupleAllocator() = default; + + bool ForEachObj(function visitor); + address_t Alloc(size_t size, DecoupleTag tag); + address_t Alloc(size_t) override { + BPALLOC_ASSERT(false, "must provide DecoupleTag in decouple space"); + return 0U; + } + + static inline size_t GetAllocSize(size_t size) { + return AllocUtilRndUp(size + DecoupleAllocHeader::kHeaderSize, kBPAllocObjAlignment); + } + + protected: + void DumpUsage(std::basic_ostream &os) override; +}; + +class ZterpStaticRootAllocator : public BumpPointerAlloc { + public: + static const size_t singleObjSize = kBPAllocObjAlignment; + ZterpStaticRootAllocator(const string &name, size_t maxSpaceSize) : BumpPointerAlloc(name, maxSpaceSize) {}; + virtual ~ZterpStaticRootAllocator() = default; + inline address_t GetStartAddr() const { + return startAddr; + } + inline size_t GetObjNum() { + // because the space of bump pointer allocator only increase linearly, and the obj won't be erased, so the race + // only occurs when currentAddr is modified. + std::lock_guard lock(globalLock); + __MRT_ASSERT(currentAddr >= startAddr, "currentAddr should not be lower than startAddr!"); + size_t usedSpace = currentAddr - startAddr; + __MRT_ASSERT((usedSpace % singleObjSize) == 0, "used space should be devided by single size of root pointer!"); + return usedSpace / singleObjSize; + } + void VisitStaticRoots(const RefVisitor &visitor); +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_BPALLOCATOR_H diff --git a/src/mrt/compiler-rt/include/allocator/bp_allocator_inlined.h b/src/mrt/compiler-rt/include/allocator/bp_allocator_inlined.h new file mode 100644 index 0000000000..827bceaef4 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/bp_allocator_inlined.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_BP_ALLOCATOR_INLINED_H +#define MAPLE_RUNTIME_BP_ALLOCATOR_INLINED_H + +#include +#include + +#include "chosen.h" +#include "bp_allocator.h" + +namespace maplert { +inline address_t BumpPointerAlloc::AllocThrowExp(size_t size) { + size_t internalSize = AllocUtilRndUp(size, kBPAllocObjAlignment); + address_t allocAddr = AllocInternal(internalSize); + if (UNLIKELY(allocAddr == 0)) { + (*theAllocator).OutOfMemory(); + return 0; + } + return allocAddr; +} + +// tries to allocate an object given the size +template +inline address_t BumpPointerAlloc::AllocInternal(const size_t &allocSize) { + lock_guard guard(globalLock); + if (UNLIKELY((allocSize + currentAddr) > endAddr)) { + size_t requiredSize = ALLOCUTIL_PAGE_RND_UP((allocSize - (endAddr - currentAddr))); + size_t remainingSize = reinterpret_cast(memMap->GetMappedEndAddr()) - endAddr; + size_t extendSize = std::max(requiredSize, std::min(remainingSize, kExtendedSize)); + if (UNLIKELY(!memMap->Extend(extendSize))) { + if (throwExp) { + LOG(ERROR) << showmapName << " space out of memory, alloc size " << allocSize << ", space left " << + remainingSize << maple::endl; + return 0; + } + // allocator failure is fatal (no OOME thrown) + LOG(FATAL) << showmapName << " space out of memory, alloc size " << allocSize << ", space left " << + remainingSize << maple::endl; + __builtin_unreachable(); + return 0; + } + endAddr = reinterpret_cast(memMap->GetCurrEnd()); + } + address_t allocAddr = currentAddr; + currentAddr += allocSize; + return allocAddr; +} +} // namespace maplert + +#endif // MAPLE_RUNTIME_BPALLOCATOR_INLINED_H diff --git a/src/mrt/compiler-rt/include/allocator/cartesian_tree.h b/src/mrt/compiler-rt/include/allocator/cartesian_tree.h new file mode 100644 index 0000000000..9b5b419b39 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/cartesian_tree.h @@ -0,0 +1,484 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_CARTESIAN_TREE_H +#define MAPLE_RUNTIME_CARTESIAN_TREE_H + +#include +#include +#include +#include "deque.h" +#include "panic.h" + +#define DEBUG_CARTESIAN_TREE __MRT_DEBUG_COND_FALSE +#if DEBUG_CARTESIAN_TREE +#define CTREE_ASSERT(cond, msg) __MRT_ASSERT(cond, msg) +#define CTREE_CHECK_PARENT_AND_LCHILD(n) CheckParentAndLeftChild(n) +#define CTREE_CHECK_PARENT_AND_RCHILD(n) CheckParentAndRightChild(n) +#else +#define CTREE_ASSERT(cond, msg) (void(0)) +#define CTREE_CHECK_PARENT_AND_LCHILD(n) (void(0)) +#define CTREE_CHECK_PARENT_AND_RCHILD(n) (void(0)) +#endif + +// This is an implementation of a Cartesian tree. +// This can be used in arbitrary-sized, free-list allocation algorithm. 
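In this allocator the two keys are the start index and the page count of a free region: MergeInsert() coalesces a freed run with any adjacent free runs, and Find() carves the requested number of pages out of the leftmost region that is large enough. A minimal sketch of that usage, assuming the two template parameters are the index type and the count type (the tree must have been Init()-ed with the heap size beforehand):

void FreePageSketch(maplert::CartesianTree<size_t, size_t> &tree) {
  // Return two adjacent 4-page runs; the second call merges with the first,
  // leaving one free region covering pages [100, 108).
  bool ok = tree.MergeInsert(100, 4);
  ok = tree.MergeInsert(104, 4) && ok;

  // Carve 6 pages out of it. Find() stores the start index of the carved run
  // and keeps the 2-page remainder in the tree.
  size_t start = 0;
  if (ok && tree.Find(start, 6)) {
    // start == 100 here; pages [100, 106) are handed out, [106, 108) stays free.
  }
}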
+// The use of this tree and the algorithm is inspired by +// R. Jones, A. Hosking, E. Moss. The garbage collection handbook: +// the art of automatic memory management. Chapman and Hall/CRC, 2016. +// This implementation is all hand-written, in which process the +// author might have referenced some online tutorials, notably https://www.geeksforgeeks.org/ +// This data structure doesn't guarantee the multi-thread safety, so the external invoker should take some +// policy to avoid competition problems. +namespace maplert { +template +class CartesianTree { + public: + CartesianTree() : root(nullptr) {} + + ~CartesianTree() { + DeleteFrom(root); + } + + void Init(size_t mapSize) { + // when used to manage free page regions, we count how many nodes we need at max + size_t pageCount = ALLOCUTIL_PAGE_RND_UP(mapSize) / ALLOCUTIL_PAGE_SIZE; + size_t regionCount = (pageCount >> 1) + 1; // at most we need this many regions + // calculate how much we need for native allocation + // we might need some extra space for some temporaries, so set aside another 7 slots + size_t nativeSize = (regionCount + 7) * AllocUtilRndUp(sizeof(Node), alignof(Node)); + NativeAlloc::nal.Init(ALLOCUTIL_PAGE_RND_UP(nativeSize)); + // calculate how much we need for the deque temporary + size_t dequeSize = regionCount * sizeof(void*); + sud.Init(ALLOCUTIL_PAGE_RND_UP(dequeSize)); + traversalSud.Init(sud.GetMemMap()); + } + + inline bool Empty() { + return root == nullptr; + } + inline S Top() { + return root ? root->key2 : 0U; + } + + // insert a node to the tree, if we find connecting nodes, we merge them + // (the non-merging insertion is not allowed) + // true when insertion succeeded, false otherwise + // if [a, a + s) clashes with existing node, it fails + // if s is 0U, it always fails + bool MergeInsert(A a, S s) { + if (root == nullptr) { + root = new Node(a, s); + __MRT_ASSERT(root != nullptr, "fail to allocate a new node"); + return true; + } + + if (s == 0) { + return false; + } + + return MergeInsertInternal(a, s); + } + + // find a node with a key2 of at least s, store its key1 into a + // split/remove this node if found + // return false if nothing is found or s is 0U + bool Find(A &a, S s) { + if (root == nullptr || s == 0) { + return false; + } + + return Find(root, root, a, s); + } + + struct CartesianTreeNode { + A key1; + S key2; + CartesianTreeNode *l; + CartesianTreeNode *r; + + CartesianTreeNode(A k1, S k2) : key1(k1), key2(k2), l(nullptr), r(nullptr) {} + ~CartesianTreeNode() { + l = nullptr; + r = nullptr; + } + + static void *operator new(std::size_t sz __attribute__((unused))) { + return NativeAlloc::Allocate(); + } + + static void operator delete(void *ptr) { + NativeAlloc::Deallocate(ptr); + } + + // alias of key1: in PageManager, key1 is the first page's index of a region + inline A &Idx() { + return key1; + } + + // alias of key2: in PageManager, key2 is the page count of a region + inline S &Cnt() { + return key2; + } + }; + + class Iterator { + public: + Iterator(CartesianTree &tree) : ct(tree) { + tq.SetSud(ct.traversalSud); + if (ct.root != nullptr) { + tq.Push(ct.root); + } + } + ~Iterator() = default; + // we provide a preorder traversal method (preorder lets the largest region visited first): + inline CartesianTreeNode *Next() { + if (tq.Empty()) { + return nullptr; + } + Node *front = tq.Front(); + if (front->r != nullptr) { + tq.Push(front->r); + } + if (front->l != nullptr) { + tq.Push(front->l); + } + tq.PopFront(); + return front; + } + private: + CartesianTree &ct; + LocalDeque 
tq; + }; + + using Node = CartesianTreeNode; + using NativeAlloc = NativeAllocLite; + + private: + Node *root; + SingleUseDeque sud; + SingleUseDeque traversalSud; + + // used by destructor to resursively delete the tree + void DeleteFrom(Node *n) { + if (n == nullptr) { + return; + } + DeleteFrom(n->l); + DeleteFrom(n->r); + delete n; + n = nullptr; + } + + // the following function tries to merge new node (a, s) with n + enum MergeResult { + kSuccess = 0, // successfully merged with the node n + kMiss, // the new node (a, s) is not connected to n, cannot merge + kError // error, operation aborted + }; + MergeResult MergeAt(Node *n, A a, S s) { + A m = a + s; + + // try to merge the inserted node to the right of n + if (a == n->key1 + n->key2) { + Node *last = n; + Node *next = n->r; + // also, find a connected node to the right of the inserted node + while (next != nullptr) { + if (next->key1 == m) { + if (next->l != nullptr) { + CTREE_ASSERT(false, "merging failed case 1"); + return kError; + } + break; + } else if (next->key1 < m) { + CTREE_ASSERT(false, "merging failed case 2"); + return kError; + } else { + last = next; + next = next->l; + } + } + n->key2 += s; + if (next != nullptr) { + n->key2 += next->key2; + if (last == n) { + last->r = RemoveNode(next); + } else { + last->l = RemoveNode(next); + } + } + CTREE_CHECK_PARENT_AND_RCHILD(n); + return kSuccess; + } + + // try to merge the inserted node to the left of n + if (m == n->key1) { + Node *last = n; + Node *next = n->l; + // also, find a connected node to the left of the inserted node + while (next != nullptr) { + if (next->key1 + next->key2 == a) { + if (next->r != nullptr) { + CTREE_ASSERT(false, "merging failed case 3"); + return kError; + } + break; + } else if (next->key1 + next->key2 > a) { + CTREE_ASSERT(false, "merging failed case 4"); + return kError; + } else { + last = next; + next = next->r; + } + } + n->key1 = a; + n->key2 += s; + if (next != nullptr) { + n->key1 = next->key1; + n->key2 += next->key2; + if (last == n) { + last->l = RemoveNode(next); + } else { + last->r = RemoveNode(next); + } + } + CTREE_CHECK_PARENT_AND_LCHILD(n); + return kSuccess; + } + + return kMiss; + } + + // see the public MergeInsert() + inline bool MergeInsertInternal(A a, S s) { + // +-------------+ +--------------+ + // | parent node | n--> | current node | + // | | | | + // pn ---> Node* l; -------> | | + // | Node* r; | | | + // +-------------+ +--------------+ + // suppose current node is parent node's left child, then + // n points to the current node, + // pn points to the 'l' field in the parent node + Node *n = root; // root is current node + Node **pn = &root; // pointer to the 'root' field in this tree + // stack of pn recording how to go from root to the current node + LocalDeque pnStack(sud); // this uses another deque as container + A m = a + s; + + // this loop insert the new node (a, s) at the proper place + do { + if (n == nullptr) { + n = new Node(a, s); + __MRT_ASSERT(n != nullptr, "fail to allocate a new node"); + *pn = n; + break; + } + MergeResult res = MergeAt(n, a, s); + if (res == kSuccess) { + break; + } else if (UNLIKELY(res == kError)) { + return false; + } + // kMiss: (a, s) cannot be connected to n + if (m < n->Idx()) { + // should insert into left subtree + pnStack.Push(pn); + pn = &(n->l); + n = n->l; + } else if (a > n->Idx() + n->Cnt()) { + // should insert into right subtree + pnStack.Push(pn); + pn = &(n->r); + n = n->r; + } else { + // something clashes + CTREE_ASSERT(false, "merge insertion 
failed"); + return false; + } + } while (true); + + // this loop bubbles the inserted node up the tree to satisfy heap property + while (!pnStack.Empty()) { + pn = pnStack.Top(); + pnStack.Pop(); + n = *pn; + CTREE_ASSERT(n, "merge insertion bubbling failed case 1"); + if (m < n->Idx()) { + // (a, s) was inserted into n's left subtree, do rotate l, if needed + if (n->Cnt() < n->l->Cnt()) { + *pn = RotateLeft(n); + CTREE_CHECK_PARENT_AND_RCHILD(*pn); + } else { + break; + } + } else if (a > n->Idx() + n->Cnt()) { + // (a, s) was inserted into n's right subtree, do rotate r, if needed + if (n->Cnt() < n->r->Cnt()) { + *pn = RotateRight(n); + CTREE_CHECK_PARENT_AND_LCHILD(*pn); + } else { + break; + } + } else { + CTREE_ASSERT(false, "merge insertion bubbling failed case 2"); + return false; + } + } + + return true; + } + + // rotate the node and its left child to maintain heap property + inline Node *RotateLeft(Node *n) { + Node *tmp = n->l; + n->l = tmp->r; + tmp->r = n; + return tmp; + } + // rotate the node and its right child to maintain heap property + inline Node *RotateRight(Node *n) { + Node *tmp = n->r; + n->r = tmp->l; + tmp->l = n; + return tmp; + } + + // see the public find() + // balance could be a problem + bool Find(Node *&pn, Node *n, A &a, S s) { + if (n->key2 < s) { + return false; + } + + if (n->l != nullptr) { + // leftmost node preference + // this makes left tree significantly shorter + CTREE_CHECK_PARENT_AND_LCHILD(n); + if (Find(n->l, n->l, a, s)) { + return true; + } + } + + a = n->key1; + n->key1 += s; + n->key2 -= s; + if (n->key2) { + pn = LowerNode(n); + } else { + pn = RemoveNode(n); + } + CTREE_CHECK_PARENT_AND_LCHILD(pn); + CTREE_CHECK_PARENT_AND_RCHILD(pn); + return true; + } + + // move a node down in the tree below to maintain heap property + Node *LowerNode(Node *n) { + CTREE_ASSERT(n, "lowering node failed"); + Node *tmp = nullptr; + + if (n->l != nullptr && n->l->key2 > n->key2) { + // this makes right tree slightly taller + if (n->r != nullptr && n->r->key2 > n->l->key2) { + tmp = RotateRight(n); + tmp->l = LowerNode(tmp->l); + CTREE_CHECK_PARENT_AND_LCHILD(tmp); + return tmp; + } else { + tmp = RotateLeft(n); + tmp->r = LowerNode(tmp->r); + CTREE_CHECK_PARENT_AND_RCHILD(tmp); + return tmp; + } + } + + if (n->r && n->r->key2 > n->key2) { + tmp = RotateRight(n); + tmp->l = LowerNode(tmp->l); + CTREE_CHECK_PARENT_AND_LCHILD(tmp); + return tmp; + } + + return n; + } + + // remove a node and adjust the tree below + // inline + Node *RemoveNode(Node *n) { + CTREE_ASSERT(n, "removing node failed"); + if (n->l == nullptr && n->r == nullptr) { + delete n; + n = nullptr; + return nullptr; + } + Node *tmp = nullptr; + if (n->l == nullptr) { + tmp = RotateRight(n); + tmp->l = RemoveNode(tmp->l); + CTREE_CHECK_PARENT_AND_LCHILD(tmp); + return tmp; + } else { + if (n->r == nullptr) { + tmp = RotateLeft(n); + tmp->r = RemoveNode(tmp->r); + CTREE_CHECK_PARENT_AND_RCHILD(tmp); + return tmp; + } else { + // this makes right tree slightly taller + if (n->l->key2 < n->r->key2) { + tmp = RotateRight(n); + tmp->l = RemoveNode(tmp->l); + CTREE_CHECK_PARENT_AND_LCHILD(tmp); + return tmp; + } else { + tmp = RotateLeft(n); + tmp->r = RemoveNode(tmp->r); + CTREE_CHECK_PARENT_AND_RCHILD(tmp); + return tmp; + } + } + } + } + + inline void CheckParentAndLeftChild(const Node *n) { +#if DEBUG_CARTESIAN_TREE + if (n != nullptr) { + const Node *l = n->l; + if (l != nullptr) { + CTREE_ASSERT((n->key1 > (l->key1 + l->key2)), "left child overlapped with parent"); + CTREE_ASSERT((n->key2 
>= l->key2), "left child bigger than parent"); + } + } +#else + (void)n; +#endif + } + inline void CheckParentAndRightChild(const Node *n) { +#if DEBUG_CARTESIAN_TREE + if (n != nullptr) { + const Node *r = n->r; + if (r != nullptr) { + CTREE_ASSERT(((n->key1 + n->key2) < r->key1), "right child overlapped with parent"); + CTREE_ASSERT((n->key2 >= r->key2), "right child bigger than parent"); + } + } +#else + (void)n; +#endif + } +}; +} +#endif diff --git a/src/mrt/compiler-rt/include/allocator/deque.h b/src/mrt/compiler-rt/include/allocator/deque.h new file mode 100644 index 0000000000..a0e2f07ee9 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/deque.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ROSALLOC_DEQUE_H +#define MAPLE_RUNTIME_ROSALLOC_DEQUE_H + +#include +#include +#include "mem_map.h" +#include "panic.h" + +#define DEBUG_DEQUE __MRT_DEBUG_COND_FALSE +#if DEBUG_DEQUE +#define DEQUE_ASSERT(cond, msg) __MRT_ASSERT(cond, msg) +#else +#define DEQUE_ASSERT(cond, msg) (void(0)) +#endif + +namespace maplert { +// this deque is single-use, meaning we can use it, then after a while, +// we can discard its whole content +// under this assumption, clearing this data structure is really fast +// (each clear takes O(1) time) +// also, this assumes that the underlying memory does not need to be freed +// because we can reuse it after each clear +// +// it can be used like a queue or a stack +template +class SingleUseDeque { + public: + static constexpr size_t kValSize = sizeof(ValType); + void Init(size_t mapSize) { + static_assert(kValSize == sizeof(void*), "invalid val type"); + MemMap::Option opt = MemMap::kDefaultOptions; + opt.tag = "maple_alloc_ros_sud"; + opt.reqBase = nullptr; + opt.reqRange = false; + memMap = MemMap::MapMemory(mapSize, mapSize, opt); + beginAddr = reinterpret_cast(memMap->GetBaseAddr()); + endAddr = reinterpret_cast(memMap->GetCurrEnd()); + Clear(); + } + void Init(MemMap &other) { + // init from another sud, that is, the two suds share the same mem map + static_assert(kValSize == sizeof(void*), "invalid val type"); + memMap = &other; + beginAddr = reinterpret_cast(memMap->GetBaseAddr()); + endAddr = reinterpret_cast(memMap->GetCurrEnd()); + Clear(); + } + MemMap &GetMemMap() { + return *memMap; + } + bool Empty() const { + return topAddr < frontAddr; + } + void Push(ValType v) { + topAddr += kValSize; + DEQUE_ASSERT(topAddr < endAddr, "not enough memory"); + *reinterpret_cast(topAddr) = v; + } + ValType Top() const { + DEQUE_ASSERT(topAddr >= frontAddr, "read empty queue"); + return *reinterpret_cast(topAddr); + } + void Pop() { + DEQUE_ASSERT(topAddr >= frontAddr, "pop empty queue"); + topAddr -= kValSize; + } + ValType Front() const { + DEQUE_ASSERT(frontAddr <= topAddr, "front reach end"); + return *reinterpret_cast(frontAddr); + } + void PopFront() { + DEQUE_ASSERT(frontAddr <= topAddr, "pop front empty queue"); + frontAddr += kValSize; + } + void Clear() { + frontAddr = beginAddr; + 
topAddr = beginAddr - kValSize; + } + private: + MemMap *memMap = nullptr; + address_t beginAddr = 0; + address_t frontAddr = 0; + address_t topAddr = 0; + address_t endAddr = 0; +}; + +// this deque lives on the stack, hence local +// this is better than the above deque because it avoids visiting ram +// and it also avoids using unfreeable memory +// however, its capacity is limited +template +class LocalDeque { + public: + static_assert(sizeof(ValType) == sizeof(void*), "invalid val type"); + static constexpr int kLocalLength = ALLOCUTIL_PAGE_SIZE / sizeof(ValType); + LocalDeque() : sud(nullptr) {} + LocalDeque(SingleUseDeque &singleUseDeque) : sud(&singleUseDeque) {} + ~LocalDeque() = default; + void SetSud(SingleUseDeque &singleUseDeque) { + sud = &singleUseDeque; + } + bool Empty() const { + return (top < front) || (front == kLocalLength && sud->Empty()); + } + void Push(ValType v) { + if (LIKELY(top < kLocalLength - 1)) { + array[++top] = v; + return; + } else if (top == kLocalLength - 1) { + ++top; + sud->Clear(); + } + sud->Push(v); + } + ValType Top() const { + if (LIKELY(top < kLocalLength)) { + DEQUE_ASSERT(top >= front, "read empty queue"); + return array[top]; + } + return sud->Top(); + } + void Pop() { + if (LIKELY(top < kLocalLength)) { + DEQUE_ASSERT(top >= front, "pop empty queue"); + --top; + return; + } + DEQUE_ASSERT(top == kLocalLength, "pop error"); + sud->Pop(); + if (front < kLocalLength) { + // if front is already in sud, there is no need to dec top, since local is no longer useful + --top; + return; + } + } + ValType Front() const { + if (LIKELY(front < kLocalLength)) { + DEQUE_ASSERT(front <= top, "read empty queue front"); + return array[front]; + } + DEQUE_ASSERT(top == kLocalLength, "queue front error"); + return sud->Front(); + } + void PopFront() { + if (LIKELY(front < kLocalLength)) { + DEQUE_ASSERT(front <= top, "pop front empty queue"); + ++front; + return; + } + DEQUE_ASSERT(front == kLocalLength, "pop front error"); + sud->PopFront(); + } + private: + int front = 0; + int top = -1; + SingleUseDeque *sud; + ValType array[kLocalLength]; +}; + +// this allocator allocates for a certain-sized native data structure +// it is very lightweight but doesn't recycle pages as much as page allocator +template +class NativeAllocLite { + struct Slot { + Slot *next = nullptr; + }; + + public: + static NativeAllocLite nal; + + void Init(size_t mapSize) { + static_assert(allocSize >= sizeof(Slot), "invalid alloc size"); + static_assert(align >= alignof(Slot), "invalid align"); + static_assert(allocSize % align == 0, "size not aligned"); + MemMap::Option opt = MemMap::kDefaultOptions; + opt.tag = "maple_alloc_ros_nal"; + opt.reqBase = nullptr; + opt.reqRange = false; + memMap = MemMap::MapMemory(mapSize, mapSize, opt); + currAddr = reinterpret_cast(memMap->GetBaseAddr()); + endAddr = reinterpret_cast(memMap->GetCurrEnd()); + } + + static void *Allocate() { + void *result = nullptr; + if (UNLIKELY(nal.head == nullptr)) { + DEQUE_ASSERT(nal.currAddr + allocSize <= nal.endAddr, "not enough memory"); + result = reinterpret_cast(nal.currAddr); + nal.currAddr += allocSize; + } else { + result = reinterpret_cast(nal.head); + nal.head = nal.head->next; + } + // no zeroing + return result; + } + + static void Deallocate(void *addr) { + reinterpret_cast(addr)->next = nal.head; + nal.head = reinterpret_cast(addr); + } + + private: + Slot *head = nullptr; + address_t currAddr = 0; + address_t endAddr = 0; + MemMap *memMap = nullptr; +}; + +template +NativeAllocLite 
NativeAllocLite::nal; +} + +#endif // MAPLE_RUNTIME_ROSALLOC_DEQUE_H diff --git a/src/mrt/compiler-rt/include/allocator/mem_map.h b/src/mrt/compiler-rt/include/allocator/mem_map.h new file mode 100644 index 0000000000..1ab6d48d0a --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/mem_map.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ALLOC_MEM_MAP_H +#define MAPLE_RUNTIME_ALLOC_MEM_MAP_H + +#include +#include "alloc_utils.h" + +namespace maplert { +class MemMap { + public: + static const bool kEnableRange = true; + static const bool kEnableRandomMemStart = true; + static const int kDefaultMemFlags = MAP_PRIVATE | MAP_ANONYMOUS; + static const int kDefaultMemProt = PROT_READ | PROT_WRITE; + + struct Option { // optional args for mem map + const char *tag; // name to identify the mapped memory + void *reqBase; // a hint to mmap about start addr, not guaranteed + int flags; // mmap flags + int prot; // initial access flags + bool protAll; // applying prot to all pages in range + bool reqRange; // request mapping within the following range (guaranteed) + address_t lowestAddr; // lowest start addr (only used when reqRange == true) + address_t highestAddr; // highest end addr (only used when reqRange == true) + bool isRandom; // randomise start addr (only used when reqRange == true) + }; + // by default, it tries to map memory in low addr space, with a random start + static constexpr Option kDefaultOptions = { + "maple_unnamed", nullptr, kDefaultMemFlags, kDefaultMemProt, false, + true, HEAP_START, HEAP_END, true + }; + + // the only way to get a MemMap + static MemMap *MapMemory(size_t reqSize, size_t initSize, const Option &opt = kDefaultOptions); + static MemMap *CreateMemMapAtExactAddress(void *addr, size_t size, const Option &opt = kDefaultOptions); + + // destroy a MemMap + static void DestroyMemMap(MemMap *&memMap) noexcept { + if (memMap != nullptr) { + delete memMap; + memMap = nullptr; + } + } + + void *GetBaseAddr() const { + return memBaseAddr; + } + void *GetCurrEnd() const { + return memCurrEndAddr; + } + void *GetMappedEndAddr() const { + return memMappedEndAddr; + } + size_t GetCurrSize() const { + return memCurrSize; + } + size_t GetMappedSize() const { + return memMappedSize; + } + + inline bool IsAddrInCurrentRange(address_t addr) const { + return reinterpret_cast(addr - reinterpret_cast(memBaseAddr)) < memCurrSize; + } + + // change the access flags of the memory in given range + bool ProtectMem(address_t addr, size_t size, int prot) const; + + // grow the size of the usable memory + bool Extend(size_t reqSize); + + // madvise(DONTNEED) memory in the range [releaseBeginAddr, releaseBeginAddr + size) + bool ReleaseMem(address_t releaseBeginAddr, size_t size) const; + + // resize the memory map by releasing the pages at the end (munmap) + bool Shrink(size_t newSize); + + ~MemMap(); + MemMap(const MemMap &that) = delete; + MemMap(MemMap &&that) = delete; + MemMap &operator=(const MemMap &that) = delete; + 
MemMap &operator=(MemMap &&that) = delete; + + private: + // Map in the specified range. + // The return addr is guaranteed to be in [lowestAddr, highestAddr - reqSize) if successful. + static void *MemMapInRange(address_t lowestAddr, address_t highestAddr, + size_t reqSize, int prot, int flags, + int fd = -1, int offset = 0); + static void *MemMapInRangeInternal(address_t hint, + address_t lowestAddr, address_t highestAddr, + size_t reqSize, int prot, int flags, + int fd, int offset); + static bool ProtectMemInternal(void *addr, size_t size, int prot); + + void *memBaseAddr; // start of the mapped memory + void *memCurrEndAddr; // end of the memory **in use** + void *memMappedEndAddr; // end of the mapped memory, always >= currEndAddr + size_t memCurrSize; // size of the memory **in use** + size_t memMappedSize; // size of the mapped memory, always >= currSize + bool protOnce; + const int memProt; // memory accessibility flags + + // MemMap is created via factory method + MemMap(void *baseAddr, size_t initSize, size_t mappedSize, bool protAll, int prot); + + void UpdateCurrEndAddr(); +}; // class MemMap +} // namespace maplert +#endif // MAPLE_RUNTIME_ALLOC_MEM_MAP_H diff --git a/src/mrt/compiler-rt/include/allocator/page_allocator.h b/src/mrt/compiler-rt/include/allocator/page_allocator.h new file mode 100644 index 0000000000..a25ce7203d --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/page_allocator.h @@ -0,0 +1,423 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_PAGEALLOCATOR_H +#define MAPLE_RUNTIME_PAGEALLOCATOR_H + +#include +#include +#include +#include +#include +#include + +#include "mm_utils.h" +#include "globals.h" +#include "panic.h" +#include "deps.h" +#include "page_pool.h" +#include "mrt_api_common.h" + +#define PA_VERBOSE_LVL DEBUGY + +namespace maplert { +// when there is a need to use PageAllocator to manage +// the memory for a specific data structure, please add +// a new type +enum AllocationTag : uint32_t { + kMinAllocationTag = 0u, + // allocation type for customized data struction + kCartesianTree = kMinAllocationTag, // manage memory for CartesianTree Node + + // alllocation type for std container + kReferenceProcessor, // manage the std container in ReferenceProcessor + kROSAllocator, // for ROS + kMutatorList, // for mutator list + kClassNameStringPool, // for className string pool + kZterpAllocator, // for Zterp + kGCWorkStack, // for gc mark and write barrier + kGCWriteBarrier, // for write barrier + kClassLoaderAllocator, // for classloader + kGCTaskQueue, // for gc task queue + // more to come + kMaxAllocationTag +}; + +// constants and utility function +class AllocatorUtils { + public: + AllocatorUtils() = delete; + AllocatorUtils(const AllocatorUtils&) = delete; + AllocatorUtils(AllocatorUtils&&) = delete; + AllocatorUtils &operator=(const AllocatorUtils&) = delete; + AllocatorUtils &operator=(AllocatorUtils&&) = delete; + ~AllocatorUtils() = delete; + static constexpr uint32_t kLogAllocPageSize = 12; + static constexpr uint32_t kAllocPageSize = maple::kPageSize; + static constexpr uint32_t kLogAllocAlignment = 3; + static constexpr uint32_t kAllocAlignment = 1 << kLogAllocAlignment; + static constexpr uint32_t kMaxSlotSize = kAllocPageSize / 2; +}; + +// Allocator manages page allocation and deallocation +class PageAllocator { + static constexpr uint32_t kMaxCached = 0; + + // slots in a page are managed as a linked list + struct Slot { + Slot *next = nullptr; + }; + + // pages are linked to each other as double-linked list. 
+ // the free slot list and other infomation are also in + // the page header + class Page { + friend class PageAllocator; + + public: + // get a slot from the free slot list + inline void *Allocate() { + if (header != nullptr) { + void *result = reinterpret_cast(header); + header = header->next; + --free; + return result; + } + return nullptr; + } + + // return a slot to the free slot list + inline void Deallocate(void *slot) { + Slot *cur = reinterpret_cast(slot); + cur->next = header; + header = cur; + ++free; + } + + inline bool Available() const { + return free != 0; + } + + inline bool Empty() const { + return free == total; + } + + private: + Page *prev = nullptr; + Page *next = nullptr; + Slot *header = nullptr; + uint16_t free = 0; + uint16_t total = 0; + }; + + public: + PageAllocator() : nonFull(nullptr), cached(nullptr), totalPages(0), totalCached(0), slotSize(0), slotAlignment(0) {} + + explicit PageAllocator(uint32_t size) + : nonFull(nullptr), cached(nullptr), totalPages(0), totalCached(0), slotSize(size) { + slotAlignment = maple::AlignUp(size, AllocatorUtils::kAllocAlignment); + } + + ~PageAllocator() = default; + + void Destroy() { + DestroyList(nonFull); + DestroyList(cached); + } + + void Init(uint32_t size) { + slotSize = size; + slotAlignment = maple::AlignUp(size, AllocatorUtils::kAllocAlignment); + } + + // allocation entrypoint + void *Allocate() { + void *result = nullptr; + { + std::lock_guard guard(allocLock); + + if (nonFull == nullptr) { + if (cached != nullptr) { + Page *cur = cached; + RemoveFromList(cached, *cur); + AddToList(nonFull, *cur); + --totalCached; + } else { + Page *cur = CreatePage(); + InitPage(*cur); + nonFull = cur; + ++totalPages; + } + LOG(PA_VERBOSE_LVL) << "\ttotal pages mapped: " << totalPages << + ", total cached page: " << totalCached << ", slot_size: " << slotSize << maple::endl; + } + + result = nonFull->Allocate(); + + if (!(nonFull->Available())) { + // move from nonFull to full + Page *cur = nonFull; + RemoveFromList(nonFull, *cur); + } + } + if (result != nullptr) { + if (memset_s(result, slotSize, 0, slotSize) != EOK) { + LOG(FATAL) << "memset_s fail" << maple::endl; + } + } + return result; + } + + // deallocation entrypoint + void Deallocate(void *slot) { + Page *cur = reinterpret_cast(maple::AlignDown(reinterpret_cast(slot), + AllocatorUtils::kAllocPageSize)); + + std::lock_guard guard(allocLock); + if (!(cur->Available())) { + // move from full to nonFull + AddToList(nonFull, *cur); + } + cur->Deallocate(slot); + if (cur->Empty()) { + RemoveFromList(nonFull, *cur); + if (totalCached < kMaxCached) { + AddToList(cached, *cur); + ++totalCached; + } else { + DestroyPage(*cur); + --totalPages; + } + LOG(PA_VERBOSE_LVL) << "\ttotal pages mapped: " << totalPages << + ", total cached page: " << totalCached << ", slot_size: " << slotSize << maple::endl; + } + } + + private: + // get a page from os + static inline Page *CreatePage() { + return reinterpret_cast(PagePool::Instance().GetPage()); + } + + // return the page to os + static inline void DestroyPage(Page &page) { + if (page.free != page.total) { + LOG(FATAL) << "\t destroy page in use: total = " << page.total << ", free = " << page.free << maple::endl; + } else { + LOG(PA_VERBOSE_LVL) << "\t destroy page " << std::hex << &page << std::dec << + " total = " << page.total << ", free = " << page.free << maple::endl; + } + PagePool::Instance().ReturnPage(reinterpret_cast(&page)); + } + + // construct the data structure of a new allocated page + void InitPage(Page &page) { + 
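    // Worked example of the layout built below (assuming a 64-bit build where
    // sizeof(Page) pads to 32 bytes and 4K pages): the page header occupies
    // AlignUp(sizeof(Page), kAllocAlignment) bytes at the start of the page,
    // and the remainder is threaded into slotAlignment-sized slots, e.g.
    // (4096 - 32) / 32 = 127 slots for a 32-byte slot size.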
page.prev = nullptr; + page.next = nullptr; + constexpr uint32_t offset = maple::AlignUp(sizeof(Page), AllocatorUtils::kAllocAlignment); + page.free = (AllocatorUtils::kAllocPageSize - offset) / slotAlignment; + page.total = page.free; + if (UNLIKELY(page.free < 1)) { + LOG(FATAL) << "use the wrong allocator! slot size = " << slotAlignment << maple::endl; + } + + char *start = reinterpret_cast(&page); + char *end = start + AllocatorUtils::kAllocPageSize - 1; + char *slot = start + offset; + page.header = reinterpret_cast(slot); + Slot *prevSlot = page.header; + while (true) { + slot += slotAlignment; + char *slotEnd = slot + slotAlignment - 1; + if (slotEnd > end) { + break; + } + + Slot *cur = reinterpret_cast(slot); + prevSlot->next = cur; + prevSlot = cur; + } + + LOG(PA_VERBOSE_LVL) << "new page start = " << std::hex << start << ", end = " << end << + ", slot header = " << page.header << std::dec << + ", total slots = " << page.total << ", slot size = " << slotAlignment << + ", sizeof(Page) = " << sizeof(Page) << maple::endl; + } + + // linked-list management + inline void AddToList(Page *&list, Page &page) { + if (list != nullptr) { + list->prev = &page; + } + page.next = list; + list = &page; + } + + inline void RemoveFromList(Page *&list, Page &page) { + Page *prev = page.prev; + Page *next = page.next; + if (&page == list) { + list = next; + if (next != nullptr) { + next->prev = nullptr; + } + } else { + prev->next = next; + if (next != nullptr) { + next->prev = prev; + } + } + page.next = nullptr; + page.prev = nullptr; + } + + inline void DestroyList(Page *&list) { + Page *cur = nullptr; + while (list != nullptr) { + cur = list; + list = list->next; + DestroyPage(*cur); + } + } + + Page *nonFull; + Page *cached; + std::mutex allocLock; + uint32_t totalPages; + uint32_t totalCached; + uint32_t slotSize; + uint32_t slotAlignment; +}; + +// Utility class used for StdContainerAllocator +// It has lots of PageAllocators, each for different slot size, +// so all allocation sizes can be handled by this bridge class. +class AggregateAllocator { + public: + static constexpr uint32_t kMaxAllocators = + AllocatorUtils::kMaxSlotSize >> AllocatorUtils::kLogAllocAlignment; + + MRT_EXPORT static AggregateAllocator &Instance(AllocationTag tag); + + AggregateAllocator() { + uint32_t slotSize = AllocatorUtils::kAllocAlignment; + for (uint32_t i = 0; i < kMaxAllocators; ++i) { + allocator[i].Init(slotSize); + slotSize += AllocatorUtils::kAllocAlignment; + } + } + ~AggregateAllocator() = default; + + // choose appropriate allocation to allocate + void *Allocate(size_t size) { + uint32_t alignedSize = maple::AlignUp(static_cast(size), AllocatorUtils::kAllocAlignment); + if (alignedSize <= AllocatorUtils::kMaxSlotSize) { + uint32_t index = alignedSize >> AllocatorUtils::kLogAllocAlignment; + return allocator[index - 1].Allocate(); + } else { + return PagePool::Instance().GetPage(size); + } + } + + void Deallocate(void *p, size_t size) { + uint32_t alignedSize = maple::AlignUp(static_cast(size), AllocatorUtils::kAllocAlignment); + if (alignedSize <= AllocatorUtils::kMaxSlotSize) { + uint32_t index = alignedSize >> AllocatorUtils::kLogAllocAlignment; + allocator[index - 1].Deallocate(p); + } else { + PagePool::Instance().ReturnPage(reinterpret_cast(p), size); + } + } + + private: + PageAllocator allocator[kMaxAllocators]; +}; + +// Allocator used to take control of memory allocation for std containers. +// It uses AggregateAllocator to dispatch the memory operation to appropriate PageAllocator. 
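A minimal usage sketch for the class declared below: the allocator plugs into standard containers so that their backing memory is served from the per-tag PageAllocator pools. The alias and tag choice are only an example, and the sketch assumes the template takes the element type followed by the AllocationTag, as the rebind member and the Instance(cat) calls suggest:

#include <vector>

// Hypothetical alias: a vector whose storage comes from the kGCWorkStack pool.
using GCWorkVector =
    std::vector<void*, maplert::StdContainerAllocator<void*, maplert::kGCWorkStack>>;

void StackSketch() {
  GCWorkVector stack;
  stack.push_back(nullptr);  // allocate() is routed to AggregateAllocator::Instance(kGCWorkStack)
}                            // destruction returns the buffer to the same pool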
+template +class StdContainerAllocator { + public: + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = T*; + using const_pointer = const T*; + using reference = T&; + using const_reference = const T&; + using value_type = T; + + using propagate_on_container_copy_assignment = std::false_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + + template + struct rebind { + using other = StdContainerAllocator; + }; + + StdContainerAllocator() = default; + ~StdContainerAllocator() = default; + + template + StdContainerAllocator(const StdContainerAllocator&) {} + + StdContainerAllocator(const StdContainerAllocator&) {} + + StdContainerAllocator(StdContainerAllocator&&) {} + + StdContainerAllocator &operator = (const StdContainerAllocator&) { + return *this; + } + + StdContainerAllocator &operator = (StdContainerAllocator&&) { + return *this; + } + + pointer address(reference x) const { + return std::addressof(x); + } + + const_pointer address(const_reference x) const { + return std::addressof(x); + } + + pointer allocate(size_type n, const void *hint __attribute__((unused)) = 0) { + pointer result = static_cast(AggregateAllocator::Instance(cat).Allocate(sizeof(T) * n)); + return result; + } + + void deallocate(pointer p, size_type n) { + AggregateAllocator::Instance(cat).Deallocate(p, sizeof(T) * n); + } + + size_type max_size() const { + return static_cast(~0) / sizeof(value_type); + } + + void construct(pointer p, const_reference val) { + ::new (reinterpret_cast(p)) value_type(val); + } + + template + void construct(Up *p, Args&&... args) { + ::new (reinterpret_cast(p)) Up(std::forward(args)...); + } + + void destroy(pointer p) { + p->~value_type(); + } +}; +} // end of namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/allocator/page_map.h b/src/mrt/compiler-rt/include/allocator/page_map.h new file mode 100644 index 0000000000..06ce57e669 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/page_map.h @@ -0,0 +1,421 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_PAGE_MAP_H +#define MAPLE_RUNTIME_PAGE_MAP_H + +#include "alloc_utils.h" +#include "mem_map.h" +#include "alloc_config.h" + +// This file declares a page map and its operations for use in the allocator +namespace maplert { +// Enable this to count the total incs and decs of a page. +#define MRT_RESURRECTION_PROFILE 0 +const uint8_t kPageLabelMygoteBit = 0x80; + +// rename these? 
+enum PageLabel : uint8_t { + kPReleased = 0, + kPFree, + kPRun, + kPRunRem, + kPLargeObj, + kPLargeObjRem, + kPSweeping, + kPSwept, + // mygote pages transition from the corresponding normal pages, mygote pages are never cleared + kPMygoteRun = kPRun | kPageLabelMygoteBit, + kPMygoteRunRem = kPRunRem | kPageLabelMygoteBit, + kPMygoteLargeObj = kPLargeObj | kPageLabelMygoteBit, + kPMygoteLargeObjRem = kPLargeObjRem | kPageLabelMygoteBit, + kPMax, +}; + +// This class records the number of un-resurrected finalizable objects in each page. +class FinalizableProf { + friend class PageMap; + + public: + struct PageEntry { +#if MRT_RESURRECTION_PROFILE == 1 + std::atomic incs = { 0 }; // Number of increments + std::atomic decs = { 0 }; // Number of decrements +#endif // MRT_RESURRECTION_PROFILE + std::atomic fins = { 0 }; // Number of finalizable objects not yet resurrected + }; + + void Init(size_t maxMapSize) { + size_t maxSize = maxMapSize * sizeof(PageEntry); + maxSize = ALLOCUTIL_PAGE_RND_UP(maxSize); + MemMap::Option opt = MemMap::kDefaultOptions; + opt.tag = "maple_alloc_ros_fp"; + opt.reqBase = nullptr; + opt.reqRange = false; + memMap = MemMap::MapMemory(maxSize, maxSize, opt); + entries = static_cast(memMap->GetBaseAddr()); + if (UNLIKELY(entries == nullptr)) { + LOG(FATAL) << "finalizable prof initialisation failed" << maple::endl; + } + } + + void IncPage(size_t index) { + PageEntry &entry = GetEntry(index); +#if MRT_RESURRECTION_PROFILE == 1 + entry.incs.fetch_add(1, std::memory_order_relaxed); +#endif // MRT_RESURRECTION_PROFILE + entry.fins.fetch_add(1, std::memory_order_relaxed); + } + + void DecPage(size_t index) { + PageEntry &entry = GetEntry(index); +#if MRT_RESURRECTION_PROFILE == 1 + entry.decs.fetch_add(1, std::memory_order_relaxed); +#endif // MRT_RESURRECTION_PROFILE + entry.fins.fetch_sub(1, std::memory_order_relaxed); + } + + private: + MemMap *memMap = nullptr; + PageEntry *entries = nullptr; + + PageEntry &GetEntry(size_t index) { +#if __MRT_DEBUG + size_t maxEntries = memMap->GetCurrSize() / sizeof(PageEntry); + if (index >= maxEntries) { + LOG(FATAL) << "index overflow when reading the finalizable object record index: " << + index << " size: " << maxEntries << maple::endl; + } +#endif + return entries[index]; + } +}; + +// This is a map from index to type for all pages. +// Also has a finalizable obj recorder. 
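Two quick facts the class below builds on, spelled out as compile-time checks (page size assumed to be the 4K configured above): a page index is just the byte offset from the space base shifted by the page size, and the mygote labels are the ordinary labels with kPageLabelMygoteBit OR-ed in, so a single bit test tells the two apart.

static_assert((0x3000u >> 12) == 3u, "byte offset 0x3000 is page index 3 with 4K pages");
static_assert((3u << 12) == 0x3000u, "and the index shifts back to the page-start offset");

static_assert((maplert::kPMygoteRun & maplert::kPageLabelMygoteBit) != 0,
              "mygote run pages carry the mygote bit");
static_assert((maplert::kPMygoteRun & ~maplert::kPageLabelMygoteBit) == maplert::kPRun,
              "clearing the bit recovers the ordinary run label");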
+class PageMap { + public: + // the type of page index and page count + using IntType = size_t; + + PageMap(); + ~PageMap(); + void Init(address_t baseAddr, size_t maxSize, size_t size); + + PageMap(const PageMap &that) = delete; + PageMap(PageMap &&that) = delete; + PageMap &operator=(const PageMap &that) = delete; + PageMap &operator=(PageMap &&that) = delete; + + inline address_t GetBeginAddr() const { + return spaceBeginAddr; + } + inline address_t GetEndAddr() const { + return spaceEndAddr; + } + inline IntType GetPageIndex(address_t addr) const { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr <= spaceEndAddr), "addr out of bound"); + return ALLOCUTIL_PAGE_BYTE2CNT(addr - spaceBeginAddr); + } + inline address_t GetPageAddr(IntType idx) const { + ROSIMPL_ASSERT(idx < maxMapSize, "index out of bound"); + return spaceBeginAddr + ALLOCUTIL_PAGE_CNT2BYTE(idx); + } + inline IntType GetMapSize() const { + return pageMapSize; + } + inline PageLabel GetType(IntType idx) const { + ROSIMPL_ASSERT(idx < maxMapSize, "index out of bound"); + return map[idx]; + } + inline PageLabel GetTypeForAddr(address_t addr) const { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + return map[GetPageIndex(addr)]; + } + inline const PageLabel *GetMap() const { + return const_cast(map); + } + + inline PageLabel GetTypeAcquire(IntType idx) { + PageLabel *p = const_cast(&map[idx]); + std::atomic &atomicLabel = *reinterpret_cast*>(p); + return atomicLabel.load(std::memory_order_acquire); + } + inline void SetTypeRelease(IntType idx, PageLabel label) { + PageLabel *p = const_cast(&map[idx]); + std::atomic &atomicLabel = *reinterpret_cast*>(p); + atomicLabel.store(label, std::memory_order_release); + } + + inline bool MatchAddr(address_t addr, PageLabel pageLabel) const { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + return (map[GetPageIndex(addr)] == pageLabel); + } + inline bool ContainsAddr(address_t addr) const { + return (addr >= spaceBeginAddr && addr < spaceEndAddr); + } + + inline bool IsMygotePageAddr(address_t addr) const { + return (static_cast(map[GetPageIndex(addr)]) & kPageLabelMygoteBit) != 0; + } + + inline bool PageHasFinalizableObject(IntType index) { + ROSIMPL_ASSERT(index < maxMapSize, "index out of bound"); + return finProf.GetEntry(index).fins.load(std::memory_order_relaxed) > 0; + } + inline size_t NumOfFinalizableObjectsInPage(IntType index) { + ROSIMPL_ASSERT(index < pageMapSize, "index out of bound"); + return finProf.GetEntry(index).fins.load(std::memory_order_relaxed); + } + inline size_t NumOfFinalizableObjectsInRun(IntType index) { + ROSIMPL_ASSERT(index < pageMapSize, "index out of bound"); + ROSIMPL_ASSERT((map[index] == kPRun) || (map[index] == kPMygoteRun), "not a run page"); + size_t n = NumOfFinalizableObjectsInPage(index); + for (IntType next = index + 1; next < pageMapSize && + (map[next] == kPRunRem || map[next] == kPMygoteRunRem); ++next) { + n += NumOfFinalizableObjectsInPage(next); + } + return n; + } + + // update the map size and the end address + inline void UpdateSize(size_t size) { + IntType mapSize = ALLOCUTIL_PAGE_BYTE2CNT(size); + if (mapSize > pageMapSize) { + pageMapSize = mapSize; + spaceEndAddr = spaceBeginAddr + size; + } + } + + // set the page type of each page in range + inline void SetRange(IntType idx1, IntType idx2, PageLabel label) { + ROSIMPL_ASSERT(idx1 < idx2, "idx1 < idx2"); + ROSIMPL_ASSERT(idx2 <= maxMapSize, "index out of bound"); + for (IntType i = idx1; i < 
idx2; ++i) { + map[i] = label; + } + } + + // set the page type of each page in range, also count + // how many pages are converted free -> released or released -> (free/allocated) + inline IntType SetRangeAndCount(IntType idx1, IntType idx2, PageLabel label) { + ROSIMPL_ASSERT(idx1 < idx2, "idx1 < idx2"); + ROSIMPL_ASSERT(idx2 <= maxMapSize, "index out of bound"); + bool isToReleased = (label == kPReleased); + IntType count = 0; + for (IntType i = idx1; i < idx2; ++i) { + if (isToReleased && map[i] != kPReleased) { + ROSIMPL_ASSERT(map[i] == kPFree, "released non-free page"); + ++count; + } else if (!isToReleased && map[i] == kPReleased) { + ALLOCUTIL_PREFETCH_WRITE(GetPageAddr(i)); + ++count; + } + map[i] = label; + } + return count; + } + + // set the pageCnt pages starting from address addr to "run" type + inline IntType SetAsRunPage(address_t addr, IntType pageCnt) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + ROSIMPL_ASSERT(index < pageMapSize, "index out of bound"); + ROSIMPL_ASSERT(map[index] == kPFree || map[index] == kPReleased, "set an occupied page to run"); + IntType count = 0; + if (map[index] == kPReleased) { + ALLOCUTIL_PREFETCH_WRITE(addr); + ++count; + } + // for ConcurrentPrepareResurrection, see SetAsLargeObjPage comment +#if ROSIMPL_MEMSET_AT_FREE + map[index] = kPRun; +#else + if (map[index] == kPFree) { + *reinterpret_cast(GetPageAddr(index)) = 0; + } + SetTypeRelease(index, kPRun); +#endif + if (pageCnt > 1) { + count += SetRangeAndCount(index + 1, index + pageCnt, kPRunRem); + } + return count; + } + + // count the number of pages in a run (starting with a Run page then 0 or more RunRem pages) + inline IntType RunPageCount(address_t addr) const { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + ROSIMPL_ASSERT(map[index] == kPRun || map[index] == kPMygoteRun, "not a run page"); + IntType cnt = 1U; + for (IntType i = index + 1; i < pageMapSize; ++i, ++cnt) { + if (map[i] != kPRunRem && map[i] != kPMygoteRunRem) { + break; + } + } + return cnt; + } + + // return the beginning of a run given an address (nb multi-page run) + inline address_t GetRunStartFromAddr(address_t addr) const { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + IntType runIndex = 0; + for (IntType i = index; i > 0; --i) { + if (map[i] == kPRun || map[i] == kPMygoteRun) { + runIndex = i; + break; + } + } + // if we are here it means that the run could be at position 0 + ROSIMPL_ASSERT(map[runIndex] == kPRun || map[runIndex] == kPMygoteRun, "wrong page type"); + return GetPageAddr(runIndex); + } + + // set the pageCnt pages starting from address addr to "large obj" type + inline IntType SetAsLargeObjPage(address_t addr, IntType pageCnt) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + ROSIMPL_ASSERT(index < pageMapSize, "index out of bound"); + ROSIMPL_ASSERT(map[index] == kPFree || map[index] == kPReleased, "set an occupied page to large obj"); + IntType count = 0; + if (map[index] == kPReleased) { + ALLOCUTIL_PREFETCH_WRITE(addr); + ++count; + } + // ConcurrentPrepareResurrection bug. This synchronises with GetTypeAcquire + // during unsafe heap scan. 
This ensures that when the heap scan finds a + // large obj page, it must look like an unallocated page at first (allocated bit is 0). + // If memset is done earlier, then there might not be a problem at all, + // assuming reorders don't pass through global locks. + // Caution, this atomic op is very expensive according to flame graphs. +#if ROSIMPL_MEMSET_AT_FREE + map[index] = kPLargeObj; +#else + if (map[index] == kPFree) { + // prevent ConcurrentPrepareResurrection from getting this page + *reinterpret_cast(GetPageAddr(index)) = 0; + } + SetTypeRelease(index, kPLargeObj); +#endif + if (pageCnt > 1) { + count += SetRangeAndCount(index + 1, index + pageCnt, kPLargeObjRem); + } + return count; + } + + // count the number of pages of a large obj + // (starting with a LargeObj page then 0 or more LargeObjRem pages) + inline IntType LargeObjPageCount(address_t addr) const { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + ROSIMPL_ASSERT(map[index] == kPLargeObj || map[index] == kPMygoteLargeObj, "not a large obj page"); + IntType cnt = 1U; + for (IntType i = index + 1; i < pageMapSize; ++i, ++cnt) { + if (map[i] != kPLargeObjRem && map[i] != kPMygoteLargeObjRem) { + break; + } + } + return cnt; + } + + // clear run page (set as free or released) + inline void ClearRunPage(address_t addr, IntType pageCnt, bool isReleased) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + PageLabel label = isReleased ? kPReleased : kPFree; + for (IntType i = index; i < index + pageCnt; ++i) { + ROSIMPL_ASSERT((map[i] == kPRun || map[i] == kPRunRem), "page type was not correct"); + map[i] = label; + } + } + + // clear large obj page (set as free or released) + inline void ClearLargeObjPage(address_t addr, IntType pageCnt, bool isReleased) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + PageLabel label = isReleased ? kPReleased : kPFree; + for (IntType i = index; i < index + pageCnt; ++i) { + ROSIMPL_ASSERT((map[i] == kPLargeObj || map[i] == kPLargeObjRem), "page type was not correct"); + map[i] = label; + } + } + + // return number of pages freed after resetting labels for a large object + inline IntType ClearLargeObjPageAndCount(address_t addr, bool isReleased) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + IntType index = GetPageIndex(addr); + ROSIMPL_ASSERT(map[index] == kPLargeObj, "page addr in the middle"); + IntType cnt = 1U; + PageLabel label = isReleased ? 
kPReleased : kPFree; + map[index] = label; + for (IntType i = index + 1; i < pageMapSize; ++i, ++cnt) { + if (map[i] != kPLargeObjRem) { + break; + } + map[i] = label; + } + return cnt; + } + + // set all allocated pages as mygote pages, used by mygote before forking + inline void SetAllAsMygotePage() { + for (IntType i = 0; i < pageMapSize; ++i) { + if (map[i] == kPRun || map[i] == kPRunRem || map[i] == kPLargeObj || map[i] == kPLargeObjRem) { + map[i] = static_cast(static_cast(map[i]) | kPageLabelMygoteBit); + } + } + } + + inline void OnFinalizableObjCreated(address_t addr) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + finProf.IncPage(GetPageIndex(addr)); + } + + inline void OnFinalizableObjResurrected(address_t addr) { + ROSIMPL_ASSERT((addr >= spaceBeginAddr) && (addr < spaceEndAddr), "addr out of bound"); + finProf.DecPage(GetPageIndex(addr)); + } + + void Dump(); + void DumpFinalizableInfo(std::ostream &ost); + + private: + // defensive padding, do nothing + uint64_t padding __attribute__((unused)) = 0; + + // the page map holds some knowledge of the address space + // they are updated when the page manager is updated + address_t spaceBeginAddr; + address_t spaceEndAddr; + + // the total number of pages mapped + IntType maxMapSize; + // the current number of pages (the rest must be of "released" type) + IntType pageMapSize; + + // this mmaps some memory where the page map lives + // this should be resizable + MemMap *memMap; + // this is a map from index to type for all pages. unlike the page manager, it only changes + // in lock but it can be read out of locks to quickly decide the state of a page, hence volatile. + PageLabel *map; + + // this records the number of unresurrected finalizable objs in each page + // this doesn't have to be here, because it has no dependency on page map + FinalizableProf finProf; +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_PAGE_MAP_H diff --git a/src/mrt/compiler-rt/include/allocator/page_pool.h b/src/mrt/compiler-rt/include/allocator/page_pool.h new file mode 100644 index 0000000000..68e07131d0 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/page_pool.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_ALLOCATOR_PAGE_POOL_H +#define MAPLE_RUNTIME_ALLOCATOR_PAGE_POOL_H + +#include +#include "globals.h" +#include "mrt_api_common.h" +#include "syscall.h" + +namespace maplert { +// a page pool maintain a pool of free pages, serve page allocation and free +class PagePool { + static constexpr int kNumTotalPages = 8192; // maximum of 8192 * 4K ~ 32M + static constexpr int kLargeMemAllocateStart = kNumTotalPages / 4; + static constexpr double kCachedRatio = 1.05; // 1.05 * used_page of pages should be cached + + class Bitmap { + friend class PagePool; + public: + void UnmarkBits(int index, int num) { + int row = GetRow(index); + int line = GetLine(index); + uint32_t cur = data[line].load(std::memory_order_acquire); + uint32_t mask = GetMask(num) << static_cast(row); + while (!data[line].compare_exchange_weak(cur, cur & (~mask), std::memory_order_release, + std::memory_order_acquire)) { + } + } + + void Unmark(int index, int num) { + if (num <= kBitsInBitmapWord) { + UnmarkBits(index, num); + } else { + UnmarkLongBits(index, num); + } + } + + // get the first free index + int MarkFirstNBits(int num) { + // the bitmap only serve maximum of 32 * 4k = 128k bytes allocation + if (num > kBitsInBitmapWord) { + return MarkLongBits(num); + } + + // calculate the mask + uint32_t mask = GetMask(num); + + // get the first index + for (int i = 0; i < kLen; ++i) { + uint32_t cur = data[i].load(std::memory_order_acquire); + if ((~cur) == 0u) { + continue; + } + uint32_t bit = mask; + for (int j = 0; j < kBitsInBitmapWord - num + 1; ++j) { + if ((cur & bit) == 0u) { + if (data[i].compare_exchange_weak(cur, cur | bit, std::memory_order_release, std::memory_order_acquire)) { + return i * kBitsInBitmapWord + j; + } + } + bit = bit << 1; + } + } + return -1; + } + + protected: + static inline bool InSmallArena(int num) { + return num <= kBitsInBitmapWord; + } + + // mark the whole word at the index + inline bool TryMarkWord(int index) { + int line = GetLine(index); + uint32_t cur = data[line].load(std::memory_order_acquire); + if (cur != 0u) { + return false; + } + uint32_t mask = 0u; + mask = ~mask; + + if (data[line].compare_exchange_weak(cur, mask, + std::memory_order_release, std::memory_order_acquire)) { + return true; + } + return false; + } + + // mark maximum sucessive bits at index in a word + int TryMarkBits(int index) { + int row = GetRow(index); + int line = GetLine(index); + uint32_t cur = data[line].load(std::memory_order_acquire); + uint32_t mask = 1u; + mask = mask << static_cast(row); + if ((mask & cur) != 0) { + return 0; + } + uint32_t bit = mask; + int avail = 1; + for (int i = 1; i < (kBitsInBitmapWord - row); ++i) { + bit = bit << 1; + if ((cur & bit) != 0) { + break; + } + mask = mask | bit; + ++avail; + } + + if (data[line].compare_exchange_weak(cur, cur | mask, std::memory_order_release, std::memory_order_acquire)) { + return avail; + } + return 0; + } + + private: + inline int GetRow(int index) const { + return index % kBitsInBitmapWord; + } + + inline int GetLine(int index) const { + return index / kBitsInBitmapWord; + } + + inline int GetTotalWords(int num) const { + return (num + kBitsInBitmapWord - 1) / kBitsInBitmapWord; + } + + inline uint32_t GetMask(int num) const { + uint32_t mask = 1u; + for (int i = 1; i < num; ++i) { + mask = mask << 1; + mask = mask | 1; + } + return mask; + } + + bool MarkSuccesiveWords(int start, int len) { + if (start > kNumTotalPages - kBitsInBitmapWord) { + // exceed the last word + return false; + } + if (len == 0) { + return 
true; + } + if (TryMarkWord(start)) { + if (MarkSuccesiveWords(start + kBitsInBitmapWord, len - 1)) { + return true; + } else { + // undo current mark + UnmarkBits(start, kBitsInBitmapWord); + } + } + return false; + } + + int MarkLongBits(int num) { + int index = -1; + int len = GetTotalWords(num); + for (int i = kLargeMemAllocateStart; i < kNumTotalPages - num; i += kBitsInBitmapWord) { + if (MarkSuccesiveWords(i, len)) { + index = i; + break; + } + } + return index; + } + + void UnmarkLongBits(int index, int num) { + int len = GetTotalWords(num); + int start = index; + for (int i = 0; i < len; ++i) { + UnmarkBits(start, kBitsInBitmapWord); + start += kBitsInBitmapWord; + } + } + + static constexpr int kBitsInBitmapWord = 32; + static constexpr int kLen = kNumTotalPages / kBitsInBitmapWord; + std::atomic data[kLen] = {}; + }; + + public: + PagePool() { + size_t size = kNumTotalPages * maple::kPageSize; + uint8_t *result = MapMemory(size); + base = reinterpret_cast(result); + end = base + size; + } + + ~PagePool() { + (void)munmap(base, kNumTotalPages * maple::kPageSize); + } + + uint8_t *GetPage(size_t bytes = maple::kPageSize) { + int num = static_cast((bytes + maple::kPageSize - 1) / maple::kPageSize); + int index = bitmap.MarkFirstNBits(num); + if (index == -1) { + return MapMemory(num * maple::kPageSize); + } + + if (Bitmap::InSmallArena(num)) { + smallPageUsed += num; + } + uint8_t *ret = base + index * maple::kPageSize; + return ret; + } + + void ReturnPage(uint8_t *page, size_t bytes = maple::kPageSize) { + int num = static_cast((bytes + maple::kPageSize - 1) / maple::kPageSize); + if (page >= base && page < end) { + int index = static_cast((page - base) / maple::kPageSize); + bitmap.Unmark(index, num); + if (Bitmap::InSmallArena(num)) { + smallPageUsed -= num; + } + } else { + (void)munmap(page, num * maple::kPageSize); + } + } + + // return unused pages to os + void Trim() { + int start = static_cast(smallPageUsed.load() * kCachedRatio); + for (int i = start; i < kNumTotalPages;) { + int num = bitmap.TryMarkBits(i); + if (num == 0) { + ++i; + continue; + } + uint8_t *addr = base + i * maple::kPageSize; + (void)madvise(addr, num * maple::kPageSize, MADV_DONTNEED); + bitmap.UnmarkBits(i, num); + i = i + num; + } + } + + MRT_EXPORT static PagePool& Instance(); + + private: + uint8_t *MapMemory(size_t size) { + void *result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (result == MAP_FAILED) { + LOG(FATAL) << "allocate create page failed! Out of Memory!" << maple::endl; + } + MRT_PRCTL(result, size, "PagePool"); + return reinterpret_cast(result); + } + + uint8_t *base; // start address of the mapped pages + uint8_t *end; // end address of the mapped pages + std::atomic smallPageUsed = { 0 }; + Bitmap bitmap; // record the state of the mapped memory: mapped or unmapped +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_ALLOCATOR_PAGE_POOL_H diff --git a/src/mrt/compiler-rt/include/allocator/ros_alloc_run.h b/src/mrt/compiler-rt/include/allocator/ros_alloc_run.h new file mode 100644 index 0000000000..6aed137b55 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/ros_alloc_run.h @@ -0,0 +1,467 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ROS_ALLOC_RUN_H +#define MAPLE_RUNTIME_ROS_ALLOC_RUN_H + +#include "alloc_config.h" +#include "sizes.h" + +namespace maplert { +#ifdef USE_32BIT_REF +using SlotPtrType = uint32_t; +#else +using SlotPtrType = address_t; +#endif // USE_32BIT_REF + +class Slot { + public: + address_t GetNext() const { + // possible extension + return static_cast(mNext); + } + void SetNext(address_t next) { + // possible truncation + mNext = static_cast(next); + } + + private: + // This pointer occupies the first few bytes in a slot of a run; + // it's important that this pointer is always set (as nullptr when it is the last node) + // because we also use this as a flag to tell if it is allocated by us. + // See IsAllocatedByAllocator. + SlotPtrType mNext; +}; + +class FreeList { + public: + inline address_t GetHead() const { + return static_cast(mHead); + } + inline address_t GetTail() const { + return static_cast(mTail); + } + + inline void SetHead(address_t addr) { + mHead = static_cast(addr); + } + inline void SetTail(address_t addr) { + mTail = static_cast(addr); + } + + // insert a slot into the list, i.e., free a slot + inline void Insert(address_t addr) { + ROSIMPL_ASSERT(addr != 0, "cannot insert null slot into the list"); + + // when we don't memset at free, we can enable double free check (checks 'allocated' bit) +#if !ROSIMPL_MEMSET_AT_FREE + CheckDoubleFree(ROSIMPL_GET_OBJ_FROM_ADDR(addr)); +#if CONFIG_JSAN || RC_HOT_OBJECT_DATA_COLLECT + // in jsan the slot is extra-padded, need to manually clear 'allocated' bit + ClearAllocatedBit(ROSIMPL_GET_OBJ_FROM_ADDR(addr)); +#endif +#endif + + Slot *slot = reinterpret_cast(addr); + // this normally also clears 'allocated' bit, unless we do jsan + slot->SetNext(mHead); + SetHead(addr); + + if (UNLIKELY(mTail == 0)) { + SetTail(addr); + } + } + + // fetch a slot from the list, i.e., alloc a slot + inline address_t Fetch() { + if (UNLIKELY(mHead == 0)) { + return 0; + } + + address_t addr = static_cast(mHead); + Slot *slot = reinterpret_cast(addr); + SetHead(slot->GetNext()); + + if (UNLIKELY(mHead == 0)) { + SetTail(0); + } + return addr; + } + + // add the other list to the beginning of this one, and clear the other list + // the other list is not allowed to be empty + inline void Prepend(FreeList &other) { + ROSIMPL_ASSERT(other.GetHead() != 0, "prepending an empty list"); + Slot *tail = reinterpret_cast(other.GetTail()); + tail->SetNext(GetHead()); + SetHead(other.GetHead()); + + if (mTail == 0) { + SetTail(other.GetTail()); + } + + other.SetHead(0); + other.SetTail(0); + } + + void Init(address_t baseAddr, size_t slotSize, size_t slotCount); + + private: + SlotPtrType mHead = 0; + SlotPtrType mTail = 0; +}; + +// Used when enumerating objects. Only call the visitor on objects that match this criterion. 
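+// Illustrative sketch only (hypothetical caller; the visitor parameter is
+// assumed here to receive the object address): a collector-side enumeration
+// passes the filter together with a visitor and a hint, e.g. through
+// RunSlots::ForEachObj declared further below, where `hint` bounds how many
+// matching objects are visited:
+//   run.ForEachObj([](address_t obj) { /* examine obj */ },
+//                  OnlyVisit::kVisitFinalizable, numFinalizable);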
+enum class OnlyVisit : uint32_t { + kVisitAll, // Visit all objects + kVisitFinalizable, // Only visit objects with finalizable bit + kVisitLast +}; + +class SweepContext; + +#ifdef USE_32BIT_REF +using RunListPtrType = uint32_t; +#else +using RunListPtrType = address_t; +#endif // USE_32BIT_REF + +class RunSlots { + public: + static constexpr uint8_t kIsLocalMask = 0x1; + static constexpr uint8_t kIsInListMask = 0x10; + static constexpr uint8_t kHasInitMask = 0x80; + + // run data structure size is a very vital part in heap fragmentation + // we currently aim for a 64-byte run header in production mode + // offset +0 + uint8_t magic; + uint8_t mIdx; + uint8_t flags; + uint8_t padding __attribute__ ((unused)); + // offset +4 + uint32_t nFree; // this number can tell us whether a run is full quickly + // offset +8 + RunListPtrType mNext; // +4 + RunListPtrType mPrev; // +4 + FreeList freeList; // +8 + // offset +24 + // +40; run header size 64 bytes, slots begin at offset +64 + // put a static check/warning to see if it's actually 40 + ALLOC_MUTEX_TYPE lock; + static size_t maxSlots[RunConfig::kRunConfigs]; + + static inline constexpr size_t GetHeaderSize() { + return sizeof(RunSlots); + } + + static inline constexpr size_t GetContentOffset() { + return AllocUtilRndUp(GetHeaderSize(), kAllocAlign); + } + + inline size_t GetRunSize() const { + return ROSIMPL_RUN_SIZE(mIdx); + } + inline size_t GetMaxSlots() const { + return maxSlots[mIdx]; + }; + + inline RunSlots *GetNext() { + return reinterpret_cast(static_cast(mNext)); + } + inline RunSlots *GetPrev() { + return reinterpret_cast(static_cast(mPrev)); + } + inline void SetNext(const RunSlots *run) { + mNext = static_cast(reinterpret_cast(run)); + } + inline void SetPrev(const RunSlots *run) { + mPrev = static_cast(reinterpret_cast(run)); + } + inline float Value() const { + // the "value" of a run depends on its current utilization + return 1 - static_cast(nFree) / GetMaxSlots(); + } + + inline void SetLocalFlag(bool val) { + flags = val ? (flags | kIsLocalMask) : (flags & ~kIsLocalMask); + } + + inline void SetInList(bool val) { + flags = val ? (flags | kIsInListMask) : (flags & ~kIsInListMask); + } + + inline void SetInit() { + flags |= kHasInitMask; + } + + inline void SetInitRelease() { + (void)reinterpret_cast*>(&flags)->fetch_or(kHasInitMask, std::memory_order_release); + } + + inline bool IsLocal() const { + return (flags & kIsLocalMask) != 0; + } + + inline bool IsInList() const { + return (flags & kIsInListMask) != 0; + } + + inline bool HasInit() const { + return (flags & kHasInitMask) != 0; + } + + inline bool HasInitAcquire() { + return (reinterpret_cast*>(&flags)->load(std::memory_order_acquire) & kHasInitMask) != 0; + } + + explicit RunSlots(uint32_t idx); + ~RunSlots() = default; + void Init(bool setInitRelease); + + inline bool IsEmpty() const { + return (nFree == GetMaxSlots()); + } + inline bool IsFull() const { + return (nFree == 0); + } + + inline address_t GetBaseAddress() const { + return reinterpret_cast(this) + GetContentOffset(); + } + + inline address_t AllocSlot() { + address_t addr = reinterpret_cast(freeList.Fetch()); + if (LIKELY(addr != 0)) { + --nFree; + } + return addr; + } + inline void FreeSlot(address_t addr) { + freeList.Insert(addr); + ++nFree; + } + + // sweep this run page if it is not swept. 
+ // return true if swept run to empty (need free) + bool Sweep(RosAllocImpl &allocator); + bool DoSweep(RosAllocImpl &allocator, SweepContext &context, size_t pageIndex); + + // return true if the given address represents an allocated slot + bool IsLiveObjAddr(address_t objAddr) const; + // NOTE: when `onlyVisit` is present, `hint` is the maximum number of objects + // in the run that matches the criterion. This method will return immediately + // after `hint` slots are visited. + void ForEachObj(std::function visitor, + OnlyVisit onlyVisit = OnlyVisit::kVisitAll, + size_t hint = numeric_limits::max()); +}; + +// this implements a partially sorted list, according to a node's "Value", in descending order +// this supports these operations: +// Insert: insert into one of the three parts of the list, according to the "Value" +// Erase: erase a node from the list +// Fetch: fetch a node from the middle part of the list, which is considered the ideal choice +// +// this list is sorted based on the assumption: each node's value only slowly decreases, not incs +// +// with this assumption, we never need to do any log(n) or whatever time-consuming operation +template +class ThreeTierList { + public: + static constexpr float kFirstTierBar = 0.6; + static constexpr float kSecondTierBar = 0.3; + static constexpr bool kPreferFirstTier = false; + + ThreeTierList() + : firstTierHead(nullptr), + secondTierHead(nullptr), + secondTierTail(nullptr), + thirdTierHead(nullptr) {} + + ~ThreeTierList() { + Release(); + } + + void Release() { + firstTierHead = nullptr; + secondTierHead = nullptr; + secondTierTail = nullptr; + thirdTierHead = nullptr; + } + + // insert a node + void Insert(NodeType &n) { + ROSIMPL_ASSERT(!n.IsInList(), "inserting node already in list"); + if (secondTierHead == nullptr) { + ROSIMPL_ASSERT(firstTierHead == nullptr, "list must be empty, no first tier head"); + ROSIMPL_ASSERT(thirdTierHead == nullptr, "list must be empty, no third tier head"); + ROSIMPL_ASSERT(secondTierTail == nullptr, "list must be empty, no second tier tail"); + n.SetNext(nullptr); + n.SetPrev(nullptr); + secondTierHead = &n; + secondTierTail = &n; + n.SetInList(true); + return; + } + if (n.Value() > kFirstTierBar) { + ROSIMPL_ASSERT(n.Value() <= 1, "invalid node value"); + InsertFirst(n); + } else if (n.Value() > kSecondTierBar) { + InsertSecond(n); + } else { + ROSIMPL_ASSERT(n.Value() >= 0, "negative node value"); + InsertThird(n); + } + n.SetInList(true); + } + + // fetch a node, ideally from the second tier + NodeType *Fetch() { + if (secondTierHead == nullptr) { + ROSIMPL_ASSERT(firstTierHead == nullptr, "list must be empty, no first tier head"); + ROSIMPL_ASSERT(thirdTierHead == nullptr, "list must be empty, no third tier head"); + ROSIMPL_ASSERT(secondTierTail == nullptr, "list must be empty, no second tier tail"); + return nullptr; + } + + NodeType *n = secondTierHead; + EraseSecond(); + if (n->GetNext() != nullptr) { + n->GetNext()->SetPrev(n->GetPrev()); + } + if (n->GetPrev() != nullptr) { + n->GetPrev()->SetNext(n->GetNext()); + } + n->SetNext(nullptr); + n->SetPrev(nullptr); + n->SetInList(false); + return n; + } + + // erase a particular node from the list + void Erase(NodeType &n) { + ROSIMPL_ASSERT(n.IsInList(), "erasing node not in list"); + + if (&n == firstTierHead) { + firstTierHead = firstTierHead->GetNext(); + if (firstTierHead != nullptr && firstTierHead == secondTierHead) { + firstTierHead = nullptr; + } + } else if (&n == secondTierHead) { + EraseSecond(); + } else if (&n == 
secondTierTail) { + ROSIMPL_ASSERT(secondTierTail != secondTierHead, "must not be second tier head"); + secondTierTail = secondTierTail->GetPrev(); + } else if (&n == thirdTierHead) { + thirdTierHead = thirdTierHead->GetNext(); + } + if (n.GetNext() != nullptr) { + n.GetNext()->SetPrev(n.GetPrev()); + } + if (n.GetPrev() != nullptr) { + n.GetPrev()->SetNext(n.GetNext()); + } + n.SetNext(nullptr); + n.SetPrev(nullptr); + n.SetInList(false); + } + + private: + NodeType *firstTierHead; + NodeType *secondTierHead; + NodeType *secondTierTail; + NodeType *thirdTierHead; + + inline void InsertFirst(NodeType &n) { + // insert into first tier + if (firstTierHead != nullptr) { + n.SetNext(firstTierHead); + n.SetPrev(nullptr); + firstTierHead->SetPrev(&n); + firstTierHead = &n; + } else { + n.SetNext(secondTierHead); + n.SetPrev(nullptr); + secondTierHead->SetPrev(&n); + firstTierHead = &n; + } + } + + inline void InsertSecond(NodeType &n) { + // insert into second tier + n.SetNext(secondTierHead); + NodeType *prev = secondTierHead->GetPrev(); + n.SetPrev(prev); + if (prev != nullptr) { + prev->SetNext(&n); + } + secondTierHead->SetPrev(&n); + secondTierHead = &n; + } + + inline void InsertThird(NodeType &n) { + // insert into third tier + if (thirdTierHead != nullptr) { + n.SetNext(thirdTierHead); + n.SetPrev(thirdTierHead->GetPrev()); + thirdTierHead->GetPrev()->SetNext(&n); + thirdTierHead->SetPrev(&n); + thirdTierHead = &n; + } else { + n.SetNext(nullptr); + n.SetPrev(secondTierTail); + secondTierTail->SetNext(&n); + thirdTierHead = &n; + } + } + + // remove the first node in the second tier + void EraseSecond() { + if (secondTierHead != secondTierTail) { + secondTierHead = secondTierHead->GetNext(); + return; + } + // last one in the second tier + NodeType *newHead = nullptr; + if (kPreferFirstTier == true) { + // move a node from first tier to second tier + newHead = secondTierHead->GetPrev(); + if (newHead == firstTierHead && firstTierHead != nullptr) { + firstTierHead = nullptr; + } else if (newHead == nullptr) { + newHead = secondTierHead->GetNext(); + if (newHead == thirdTierHead && thirdTierHead != nullptr) { + thirdTierHead = thirdTierHead->GetNext(); + } + } + } else { + // move a node from third tier to second tier + newHead = secondTierHead->GetNext(); + if (newHead == thirdTierHead && thirdTierHead != nullptr) { + thirdTierHead = thirdTierHead->GetNext(); + } else if (newHead == nullptr) { + newHead = secondTierHead->GetPrev(); + if (newHead == firstTierHead && firstTierHead != nullptr) { + firstTierHead = nullptr; + } + } + } + secondTierHead = newHead; + secondTierTail = newHead; + } +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_ROSALLOC_RUN_H diff --git a/src/mrt/compiler-rt/include/allocator/ros_allocator.h b/src/mrt/compiler-rt/include/allocator/ros_allocator.h new file mode 100644 index 0000000000..88911f0293 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/ros_allocator.h @@ -0,0 +1,629 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_ROS_ALLOCATOR_H +#define MAPLE_RUNTIME_ROS_ALLOCATOR_H + +#include +#include +#include +#include +#include +#include +#include + +#include "allocator.h" +#include "base/systrace.h" +#include "space.h" +#include "exception/mrt_exception.h" +#include "mrt_reflection.h" +#include "mrt_object.h" +#include "collector/mrt_bitmap.h" +#include "ros_alloc_run.h" +#include "page_allocator.h" +#include "thread_offsets.h" + +#if __MRT_DEBUG +#ifndef DEBUG_CONCURRENT_SWEEP +#define DEBUG_CONCURRENT_SWEEP (1) +#endif +#endif + +extern "C" { +// implemented in asm, only works for size >= 8, only allow zeroing +void ROSAllocZero(void *addr, size_t size); +} + +#define ROSALLOC_MEMSET_S(addr, dstsize, c, size) \ + do { \ + static_assert(c == 0, "we only allow zeroing"); \ + ROSIMPL_ASSERT(addr, "null destination"); \ + ROSIMPL_ASSERT(dstsize >= size, "destination size too small"); \ + ROSIMPL_ASSERT(size >= 8, "we only allow size ge 8"); \ + ROSIMPL_ASSERT(addr % 4 == 0, "addr must be aligned to at least 4"); \ + ROSIMPL_ASSERT(size % 4 == 0, "size must be aligned to at least 4"); \ + ROSAllocZero(reinterpret_cast(addr), size); \ + } while (0) + +namespace maplert { +static inline void TagGCFreeObject(address_t objAddr) { +#if MRT_DEBUG_DOUBLE_FREE + static constexpr uint32_t kGCFreeHeaderTag = 0xd4678763; + RefCountLVal(objAddr) &= ~(kWeakRcBits | kResurrectWeakRcBits | kRCBits); // trigger inc/dec from 0 + *(reinterpret_cast(objAddr + kLockWordOffset)) = kGCFreeHeaderTag; // tag special value on lock word +#ifdef DISABLE_RC_DUPLICATE + static constexpr int32_t kGCFreeContentTag = 0xff; + size_t objByteSize = reinterpret_cast(objAddr)->GetSize(); + if (memset_s(reinterpret_cast(objAddr), objByteSize, kGCFreeContentTag, objByteSize) != EOK) { + LOG(FATAL) << "failed to memset at address: " << objAddr << " size: " << objByteSize << maple::endl; + } +#endif +#else + (void)objAddr; +#endif +} + +// sweep timeout. 
+constexpr auto kSweepTimeout = std::chrono::seconds(3); +class RosAllocImpl; + +class SweepContext { + friend class RunSlots; + friend class RosAllocImpl; + + class ScopedAliveClosure { + public: + ScopedAliveClosure(SweepContext &context) : sweepContext(context) { + alive = sweepContext.TryIncRC(); + } + + ~ScopedAliveClosure() { + if (alive) { + sweepContext.DecRC(); + } + } + + bool Alive() const { + return alive; + } + + private: + SweepContext &sweepContext; + bool alive; + }; + + public: + // only called in STW + void Init(const Allocator &allocator, const size_t endPageIndex, const PageMap &snapshot) { + highestPageIndex = endPageIndex; + oldAllocatedBytes = allocator.AllocatedMemory(); + // alloc page map to store the snapshot of page labels + pageMap = reinterpret_cast(PagePool::Instance().GetPage(TotalBytesOfPageMap())); + refCount.store(1, std::memory_order_relaxed); + if (memcpy_s(pageMap, TotalBytesOfPageMap(), snapshot.GetMap(), TotalBytesOfPageMap()) != EOK) { + LOG(FATAL) << "memcpy error for page_map snapshot"; + } + emptyRuns.store(0, std::memory_order_relaxed); + nonFullRuns.store(0, std::memory_order_relaxed); + scanedRuns.store(0, std::memory_order_relaxed); + sweptRuns.store(0, std::memory_order_relaxed); + releasedObjects.store(0, std::memory_order_relaxed); + releasedLargeObjects.store(0, std::memory_order_relaxed); + releasedBytes.store(0, std::memory_order_relaxed); + } + + // called after concurrent sweep, may run concurrently with java thread + void Release() { + DecRC(); + std::deque().swap(deadNeighbours); + } + + inline void AddDeadNeighbours(std::vector &candidates) { + lock_guard guard(contextMutex); + for (address_t obj : candidates) { + deadNeighbours.push_back(obj); + } + } + + // Try to set sweeping state for the given page, + // when another thread is sweeping the page, wait until it finished. + // return true if set to sweeping success, otherwise false. + // when success, old_state will be the original page type. + bool SetSweeping(size_t pageIndex, PageLabel &oldState) { + ScopedAliveClosure sac(*this); + // check page index bound. + if (UNLIKELY(pageIndex >= highestPageIndex || !sac.Alive())) { + // page_index out of bound, we treat it as swept. + oldState = kPSwept; + return false; + } + + // get atomic state object by page index. + auto &atomicState = AtomicPageState(pageIndex); + + // load the old state. + oldState = atomicState.load(std::memory_order_acquire); + + do { + if (oldState == kPSweeping) { + // another thread is sweeping the page, + // wait until it finished. + if (UNLIKELY(!WaitUntilSwept(atomicState))) { + LOG(FATAL) << "WaitUntilSwept() timeout!!! page_index: " << pageIndex << maple::endl; + } + return false; + } + if (oldState != kPRun && oldState != kPLargeObj) { + // all other states except kPRun and kPLargeObj are treat as swept. + oldState = kPSwept; + return false; + } + // try to change state from kPRun/kPLargeObj to kPSweeping, + // loop to try again if change state failed. + } while (!atomicState.compare_exchange_weak(oldState, kPSweeping, + std::memory_order_release, std::memory_order_acquire)); + return true; + } + + // atomic mark page to swept state. + inline void SetSwept(size_t pageIndex) { + ScopedAliveClosure sac(*this); + if (LIKELY(sac.Alive() && pageIndex < highestPageIndex)) { + AtomicPageState(pageIndex).store(kPSwept, std::memory_order_release); + contextCondVar.notify_all(); + } + } + + protected: + // input + size_t highestPageIndex; + size_t oldAllocatedBytes; + + // page map snapshot. 
contains a pointer to an array of PageLabel + PageLabel *pageMap; + std::atomic refCount; + + std::mutex contextMutex; + std::condition_variable contextCondVar; + + // output + std::deque deadNeighbours; + std::atomic emptyRuns; + std::atomic nonFullRuns; + std::atomic scanedRuns; + std::atomic sweptRuns; + std::atomic releasedObjects; + std::atomic releasedLargeObjects; + std::atomic releasedBytes; + + private: + inline size_t TotalBytesOfPageMap() const { + return highestPageIndex * sizeof(PageLabel); + } + + inline void ReleaseMap() { + PagePool::Instance().ReturnPage(reinterpret_cast(pageMap), TotalBytesOfPageMap()); + pageMap = nullptr; + } + + inline bool TryIncRC() noexcept { + size_t old = refCount.load(std::memory_order_acquire); + do { + if (old == 0) { + return false; + } + } while (!refCount.compare_exchange_weak(old, old + 1, std::memory_order_release, std::memory_order_acquire)); + return true; + } + + inline void DecRC() noexcept { + size_t old = refCount.load(std::memory_order_acquire); + while (!refCount.compare_exchange_weak(old, old - 1, std::memory_order_release, std::memory_order_acquire)) {}; + if (old == 1) { + ReleaseMap(); + } + } + + inline std::atomic &AtomicPageState(size_t pageIndex) const { + return reinterpret_cast&>(*(pageMap + pageIndex)); + } + + // return false if timeout. + inline bool WaitUntilSwept(const std::atomic &state) { + std::unique_lock lock(contextMutex); + return contextCondVar.wait_for(lock, kSweepTimeout, [&state] { + return state.load(std::memory_order_acquire) == kPSwept; + }); + } +}; + +// allocation can .. in order to succeed +enum AllocEagerness { + kEagerLevelSoft = 0, // clear soft referents (unused, should be lv2?) + kEagerLevelCoalesce, // coalesce free pages (default) + kEagerLevelExtend, // trigger gc, revoke cache, extend + kEagerLevelOOM, // trigger oom gc, revoke local/cache + kEagerLevelMax, +}; + +// thread-local data structure +template +class ROSAllocMutator : public AllocMutator { + public: + static inline ROSAllocMutator *Get() { + return reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotAllocMutator)); + } + + ROSAllocMutator() : freeListSizes{}, freeListHeads{}, localRuns{} {} + ~ROSAllocMutator() = default; + void Init() override; + void Fini() override; + void VisitGCRoots(std::function) override {}; + + static constexpr uint64_t kLocalAllocActivenessThreshold = 64; + static int gcIndex; + bool throwingOOME = false; + bool useLocal = false; + int mutatorGCIndex = 0; + uint64_t allocActiveness = 0; + uint16_t freeListSizes[kLocalRuns]; + uint32_t freeListHeads[kLocalRuns]; // a backup storage for free list heads + FreeList freeLists[kLocalRuns]; + RunSlots *localRuns[kLocalRuns]; + inline bool UseLocal(int idx); + + inline void DisableLocalBeforeSweep(int idx) { + if (freeLists[idx].GetHead() != 0) { + ROSIMPL_ASSERT(freeListHeads[idx] == 0, "backup already in use"); + std::swap(*reinterpret_cast(&(freeLists[idx])), freeListHeads[idx]); + } + } + + inline void EnableLocalAfterSweep(int idx) { + if (freeListHeads[idx] != 0) { + ROSIMPL_ASSERT(freeLists[idx].GetHead() == 0, "free list already in use"); + std::swap(freeListHeads[idx], *reinterpret_cast(&(freeLists[idx]))); + } + } + + // thread local allocation + inline address_t Alloc(int idx) { + address_t addr = reinterpret_cast(freeLists[idx].Fetch()); + if (LIKELY(addr != 0)) { + --freeListSizes[idx]; + } + return addr; + } + + // thread local free + inline void Free(int idx, address_t internalAddr) { + FAST_ALLOC_ACCOUNT_ADD(localRuns[idx]->GetRunSize()); + 
freeLists[idx].Insert(internalAddr); + ++freeListSizes[idx]; + } + + // localise a free list for this mutator for thread-local operations + inline void Localise(int idx, FreeList &freeList, uint32_t &freeListSize) { + this->freeLists[idx].Prepend(freeList); + this->freeListSizes[idx] += freeListSize; + freeListSize = 0; + } + + // localise the free list *currently left* in the run + // also making this run *owned* by this mutator, that is, other mutator can't alloc from it + inline void Localise(RunSlots &run) { + FAST_ALLOC_ACCOUNT_ADD(run.nFree * run.GetRunSize()); + Localise(run.mIdx, run.freeList, run.nFree); + + // this will make the run owned by the mutator + // alternatively, we can only localise part of the run, i.e., the current list of slots + // the difference is very subtle: theoretically localising whole runs yields + // more allocation speed; localising lists yields more memory efficiency + run.SetLocalFlag(true); + localRuns[run.mIdx] = &run; + } + + // release the ownership of the slots in the list + inline void Globalise(int idx, FreeList &freeList, uint32_t &freeListSize) { + freeList.Prepend(this->freeLists[idx]); + freeListSize += this->freeListSizes[idx]; + this->freeListSizes[idx] = 0; + } + + // release the ownership of the run + inline void Globalise(RunSlots &run) { + if (freeListSizes[run.mIdx] != 0) { + FAST_ALLOC_ACCOUNT_SUB(freeListSizes[run.mIdx] * run.GetRunSize()); + Globalise(run.mIdx, run.freeList, run.nFree); + } + + run.SetLocalFlag(false); + localRuns[run.mIdx] = nullptr; + } + + // get a local address that belongs to this mutator, this is used to compute the run address + inline address_t GetLocalAddress(int idx) const { + return reinterpret_cast(localRuns[idx]); + } + + inline void ResetRuns() { + for (size_t i = 0; i < kLocalRuns; i++) { + freeListSizes[i] = 0; + freeListHeads[i] = 0; + freeLists[i].SetHead(0); + freeLists[i].SetTail(0); + localRuns[i] = nullptr; + } + } +}; + +// initial index is -1, so mutator's index (0) will be different +template +int ROSAllocMutator::gcIndex = -1; + +template class ROSAllocMutator; +template class ROSAllocMutator; + +class FreeTask; +class ForEachTask; + +class RosAllocImpl : public Allocator { + public: + static const size_t kLargeObjSize = kROSAllocLargeSize; + static const int kNumberROSRuns = RunConfig::kRunConfigs; + static uint8_t kRunMagic; + + RosAllocImpl(); + ~RosAllocImpl(); + address_t NewObj(size_t size) override; +#if ALLOC_USE_FAST_PATH + // this is the thread-local allocation path. 
+ // assuming: + // not in jsan mode + // in 32-bit mode + // fast run config + // not supported: + // HPROF/large obj accounting/alloc record/timer + // debug code + // HOT_OBJECT_DATA + // TRACE + __attribute__ ((always_inline)) + address_t FastNewObj(size_t size) { + size_t internalSize = size + ROSIMPL_HEADER_ALLOC_SIZE; + uint32_t idx = ROSIMPL_FAST_RUN_IDX(internalSize); + RosBasedMutator &mutator = *RosBasedMutator::Get(); + address_t internalAddr = mutator.Alloc(idx); + { + if (LIKELY(internalAddr)) { + address_t addr = ROSIMPL_GET_OBJ_FROM_ADDR(internalAddr); +#if (!ROSIMPL_MEMSET_AT_FREE) + // memset call is not inlined + ROSALLOC_MEMSET_S(addr, size, 0, size); +#endif + return addr; + } + } + internalAddr = AllocInternal(internalSize); + if (UNLIKELY(internalAddr == 0)) { + return 0; + } + return ROSIMPL_GET_OBJ_FROM_ADDR(internalAddr); + } +#endif + void ForEachMutator(std::function visitor); + void FreeObj(address_t objAddr); + bool ParallelFreeAllIf(MplThreadPool &threadPool, const function &shouldFree); + void ForEachObjUnsafe(const function &visitor, OnlyVisit onlyVisit); + bool ForEachObj(const function &visitor, bool debug = false); + bool ParallelForEachObj(MplThreadPool &threadPool, VisitorFactory visitorFactory, + OnlyVisit onlyVisit = OnlyVisit::kVisitAll); + + bool ForPartialRunsObj(function visitor, + const function &stepFunc, bool debug); + + // AccurateIsValidObjAddr(Concurrent) is used to identify heap objs when using + // conservative stack scan, where the input addr can be a random number + // for fast checks (precise stack scan), we use FastIsValidObjAddr + bool AccurateIsValidObjAddr(address_t addr); + bool AccurateIsValidObjAddrConcurrent(address_t addr); + address_t HeapLowerBound() const; + address_t HeapUpperBound() const; + // visite GC roots in allocator, for example OOM object + void VisitGCRoots(const RefVisitor &visitor); + void Init(const VMHeapParam&) override; + + // releases the empty range of pages at the end of the heap. + // returns true if any pages were released + // It grabs the global lock + bool ReleaseFreePages(bool aggressive = false); + void OutOfMemory(bool isJNI = false); + // This function computes the capacity by (end addr - begin addr) + // This doesn't reflect the actual physical memory usage + size_t GetCurrentSpaceCapacity() const { + return allocSpace.GetSize(); + } + // returns the estimate value of current available bytes in the space. 
this is an + // approximate because it does not account for the space occupied by run headers + size_t GetCurrentFreeBytes() const { + // get the live bytes in the heap + size_t allocatedSize = AllocatedMemory(); + // the current free bytes is equal to (current heap size - allocated size) + size_t heapSize = GetCurrentSpaceCapacity(); + ROSIMPL_ASSERT(heapSize >= allocatedSize, "heap stats error"); + return (heapSize - allocatedSize); + } + // This function sums the size of the non-released pages in the heap + // Only non-released pages are backed by actual physical memory + size_t GetActualSize() const { + return allocSpace.GetNonReleasedPageSize(); + } + + void GetInstances(const MClass *klass, bool includeAssignable, size_t maxCount, vector &instances); + + void ClassInstanceNum(map &objNameCntMp); + + // returns the current available page count within the reserved virtual memory + size_t GetAvailPages() { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + return allocSpace.GetAvailPageCount(); + } + + // returns the approximate number of pages being used within the current + // reserved memory range. + size_t GetUsedPages() { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + return ALLOCUTIL_PAGE_BYTE2CNT(allocSpace.GetAllocatedPageSize()); + } + + void PrintPageFragment(std::basic_ostream &os, std::string tag); + + size_t GetMaxCapacity() const { + return allocSpace.GetMaxCapacity(); + } + + // HasAddress/FastIsValidObjAddr is a range-based check, used to quickly identify heap objs, + // assuming non-heap objs never fall into the same address range + // for a more accurate check, use AccurateIsValidObjAddr(Concurrent) + inline bool HasAddress(address_t addr) const { + return HeapStats::IsHeapAddr(addr); + } + inline bool FastIsValidObjAddr(address_t addr) { + return ((addr & (kAllocAlign - 1)) == 0) && allocSpace.HasAddress(addr); + } + + // Parepare data for concurrent sweep and create SweepContext. + // call this when the world stopped. + void PrepareConcurrentSweep(); + + // Concurrent sweep heap pages. + // if thread_pool not null, do it parallel. + // DO Not trim heap when this function is running. + void ConcurrentSweep(MplThreadPool *threadPool); + +#if ALLOC_ENABLE_LOCK_CONTENTION_STATS + // because some runs are deleted, we need to record the + // contention stats of the page locks in the deleted runs + static uint32_t pageLockContentionRec; + static uint64_t pageLockWaitTimeRec; +#endif + inline void DumpContention(std::basic_ostream &os) const { + // no longer supported + (void)os; + } + + void OnFinalizableObjCreated(address_t addr); + void OnFinalizableObjResurrected(address_t addr); + void Debug_DumpFinalizableInfo(ostream& ost); + void OnPreFork(); + + friend RosBasedMutator; + friend FreeTask; + friend ForEachTask; + friend RunSlots; + private: + PageMap pageMap; + // The instance space associated with the allocator + Space allocSpace; + ALLOC_MUTEX_TYPE runLocks[kNumberROSRuns]; + ALLOC_MUTEX_TYPE globalMutatorLocks[kNumberROSRuns]; + ThreeTierList nonFullRuns[kNumberROSRuns]; + unordered_set, + std::equal_to, + StdContainerAllocator> allocatorMutators; + static ROSAllocMutator globalMutator; + + // context data for concurrent sweep. 
+ SweepContext sweepContext; + // this indicates if we have performed fork on mygote + bool hasForked = false; + + inline bool IsMygotePageAlloc(address_t addr) { + return hasForked && addr != 0 && pageMap.IsMygotePageAddr(addr); + } + + inline bool IsConcurrentSweepRunning() const { + return FastAllocData::data.isConcurrentSweeping.load(std::memory_order_relaxed); + } + + inline void SetConcurrentSweepRunning(bool running) { + FastAllocData::data.isConcurrentSweeping.store(running, std::memory_order_relaxed); + } + + inline bool IsConcurrentMarkRunning() const { + return FastAllocData::data.isConcurrentMarking; + } + + // inline functions that are used within the allocator (not exposed) + static inline void CheckRunMagic(RunSlots &run); + static inline uint32_t GetRunIdx(size_t size); + static inline uint32_t GetPagesPerRun(int index); + static inline size_t GetRunSize(int index); + inline address_t GetSpaceEnd() const; + inline size_t GetEndPageIndex() const; + inline address_t AllocRun(int idx, int eagerness); + inline RunSlots *FetchRunFromNonFulls(int idx); + inline RunSlots *FetchOrAllocRun(int idx, int eagerness); + enum LocalAllocResult { + kLocalAllocSucceeded = 0, + kLocalAllocFailed, + kLocalAllocDismissed + }; + template + __attribute__ ((always_inline)) + std::pair + AllocFromLocalRun(ROSAllocMutator &mutator, int idx, int eagerness); + inline address_t AllocFromGlobalRun(RosBasedMutator &mutator, int idx, int eagerness); + inline address_t AllocFromRun(RosBasedMutator &mutator, size_t &internalSize, int eagerness); + inline void RevokeLocalRun(RosBasedMutator &mutator, RunSlots &run); + inline size_t FreeFromRun(RosBasedMutator &mutator, RunSlots &run, address_t internalAddr); + inline bool UpdateGlobalsAfterFree(RunSlots &run, bool wasFull); + inline void SweepSlot(RunSlots &run, address_t slotAddr); + inline void SweepLocalRun(RunSlots &run, std::function shouldFree); + inline void SweepRun(RunSlots &run, std::function shouldFree); + // tries to allocate an object given the size + __attribute__ ((always_inline)) + address_t AllocInternal(size_t &allocSize); + // returns the number of bytes freed including overhead. + inline size_t FreeInternal(address_t objAddr); + inline void AddAllocMutator(RosBasedMutator &mutator); + inline void RemoveAllocMutator(RosBasedMutator &mutator); + inline void ForEachObjInRunUnsafe(RunSlots &run, + std::function visitor, + OnlyVisit onlyVisit = OnlyVisit::kVisitAll, + size_t numHint = numeric_limits::max()) const; + inline bool AccurateIsValidObjAddrUnsafe(address_t addr); + + address_t AllocPagesInternal(size_t reqSize, size_t &actualSize, int forceLevel); + address_t AllocLargeObject(size_t &allocSize, int forceLevel); + void SweepLargeObj(address_t objAddr); + void FreeLargeObj(address_t objAddr, size_t &internalSize, bool delayFree = false); + bool SweepPage(size_t, size_t&); + void SweepPages(size_t, size_t); + void RecordFragment(); + + // Handle the case when we fail to allocate. 
force level will pass the context of the + // situation, this will allow to change the aggressiveness of the memory allocation + void HandleAllocFailure(size_t allocSize, int &forceLevel); + void DumpStackBeforeOOM(size_t allocSize); + void GetMemoryInfoBeforeOOM(size_t allocSize, size_t newLargestChunk); + void FreeRun(RunSlots &run, bool delayFree = false); + + void ForEachObjInRun(RunSlots &runSlots, + std::function visitor, + OnlyVisit onlyVisit = OnlyVisit::kVisitAll, + size_t numHint = numeric_limits::max()) const; + void RevokeLocalRuns(RosBasedMutator &mutator); +}; // class RosAllocImpl +} // namespace maplert +#endif // MAPLE_RUNTIME_ROSALLOCATOR_H diff --git a/src/mrt/compiler-rt/include/allocator/ros_allocator_inlined.h b/src/mrt/compiler-rt/include/allocator/ros_allocator_inlined.h new file mode 100644 index 0000000000..35ee5382e0 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/ros_allocator_inlined.h @@ -0,0 +1,490 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ROS_ALLOCATOR_INLINED_H +#define MAPLE_RUNTIME_ROS_ALLOCATOR_INLINED_H + +#include "ros_allocator.h" + +namespace maplert { +inline void RosAllocImpl::CheckRunMagic(RunSlots &ROSIMPL_DUNUSED(run)) { + ROSIMPL_ASSERT(run.magic == kRunMagic, "invalid run"); +} + +inline uint32_t RosAllocImpl::GetRunIdx(size_t size) { + ROSIMPL_ASSERT(size <= kLargeObjSize, "large size doesn't have an idx"); + return ROSIMPL_RUN_IDX(size); +} + +inline uint32_t RosAllocImpl::GetPagesPerRun(int index) { + ROSIMPL_ASSERT(index >= 0 && index < kNumberROSRuns, "index out of bound"); + return ROSIMPL_N_PAGES_PER_RUN(index); +} + +inline size_t RosAllocImpl::GetRunSize(int index) { + ROSIMPL_ASSERT(index >= 0 && index < kNumberROSRuns, "index out of bound"); + return ROSIMPL_RUN_SIZE(index); +} + +inline address_t RosAllocImpl::GetSpaceEnd() const { + return reinterpret_cast(allocSpace.GetEnd()); +} + +inline size_t RosAllocImpl::GetEndPageIndex() const { + return pageMap.GetPageIndex(GetSpaceEnd()); +} + +inline address_t RosAllocImpl::AllocRun(int idx, int eagerness) { + size_t pageCount = GetPagesPerRun(idx); + size_t size = ALLOCUTIL_PAGE_CNT2BYTE(pageCount); + size_t internalSize = 0; + address_t pageAddr = AllocPagesInternal(size, internalSize, eagerness); + if (UNLIKELY(pageAddr == 0)) { + return 0U; + } + ROSIMPL_ASSERT(internalSize == size, "page alloc for run error"); + size_t converted = pageMap.SetAsRunPage(pageAddr, pageCount); + allocSpace.RecordReleasedToNonReleased(converted); + return pageAddr; +} + +inline RunSlots *RosAllocImpl::FetchRunFromNonFulls(int idx) { + return nonFullRuns[idx].Fetch(); +} + +inline RunSlots *RosAllocImpl::FetchOrAllocRun(int idx, int eagerness) { + RunSlots *run = nullptr; + { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD runLocks[idx]); + run = FetchRunFromNonFulls(idx); + } + ROSIMPL_ASSERT(!IsMygotePageAlloc(reinterpret_cast(run)), "do not allocate in mygote page"); + if (UNLIKELY(run == nullptr)) { + address_t runAddr = 0; + { + 
ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + runAddr = AllocRun(idx, eagerness); + } + if (LIKELY(runAddr != 0)) { + ROSIMPL_ASSERT(!reinterpret_cast(runAddr)->HasInit(), "dirty page for run"); + // we new run out of lock, in order to prevent page faults holding the lock for too long + // a problem is that unsafe heap scan (e.g., concurrent gc) will scan unintialised runs + // we use the init bit in each run to tell whether the run is ready for scan + run = new (reinterpret_cast(runAddr)) RunSlots(idx); + run->Init(IsConcurrentMarkRunning()); + } + } + return run; +} + +template +__attribute__ ((always_inline)) +std::pair RosAllocImpl::AllocFromLocalRun( + ROSAllocMutator &mutator, int idx, int eagerness) { + ROSIMPL_ASSERT(static_cast(idx) < kLocalRuns, "idx >= number of local runs"); + address_t localAddress = mutator.GetLocalAddress(idx); + ROSIMPL_ASSERT(!IsMygotePageAlloc(localAddress), "do not allocate in mygote page"); + // sweep run before we do anything + if (UNLIKELY(IsConcurrentSweepRunning())) { + if (LIKELY(localAddress)) { + RunSlots &run = *(reinterpret_cast(localAddress)); + bool ROSIMPL_DUNUSED(needRemove) = run.Sweep(*this); + ROSIMPL_ASSERT(needRemove == false, "mutator run swept to empty"); + } + } + + // this put a toll on regular allocation, remove when not using concurrent sweep + mutator.EnableLocalAfterSweep(idx); + + // try to allocate from the mutator's local run + address_t addr = 0; + if (LIKELY(localAddress)) { + // try lockless path first + addr = mutator.Alloc(idx); + if (LIKELY(addr)) { + return std::make_pair(kLocalAllocSucceeded, addr); + } else { + // this is the locked path, means mutator's local list is used up, need more + RunSlots &run = *reinterpret_cast(localAddress); + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD run.lock); + if (UNLIKELY(run.IsFull())) { + mutator.Globalise(run); + } else { + mutator.Localise(run); + addr = mutator.Alloc(idx); + ROSIMPL_ASSERT(addr != 0, "unable to alloc from local list"); + return std::make_pair(kLocalAllocSucceeded, addr); + } + } + } + + // if we have used up the local run (we could dismiss local alloc, unimplemented) + RunSlots *localRun = nullptr; + localRun = FetchOrAllocRun(idx, eagerness); + if ((UNLIKELY(localRun == nullptr))) { + return std::make_pair(kLocalAllocFailed, 0); + } else { + if (UNLIKELY(IsConcurrentSweepRunning())) { + // doesn't matter if it's swept to empty + static_cast(localRun->Sweep(*this)); + } + ALLOC_LOCK_TYPE pageLock(ALLOC_CURRENT_THREAD localRun->lock); + // we can potentially localise multiple runs + mutator.Localise(*localRun); + } + + addr = mutator.Alloc(idx); + ROSIMPL_ASSERT(addr, "mutator unable to alloc"); + return std::make_pair(kLocalAllocSucceeded, addr); +} + +inline address_t RosAllocImpl::AllocFromGlobalRun(RosBasedMutator &mutator, int idx, int eagerness) { + static_cast(mutator); + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalMutatorLocks[idx]); + auto allocRes = AllocFromLocalRun(globalMutator, idx, eagerness); + return allocRes.second; +} + +template +inline bool ROSAllocMutator::UseLocal(int idx) { + if (UNLIKELY(!ROSIMPL_IS_LOCAL_RUN_IDX(idx))) { + return false; + } + if (LIKELY(useLocal)) { + return true; + } + if (UNLIKELY(mutatorGCIndex != gcIndex)) { + mutatorGCIndex = gcIndex; + allocActiveness = 0; + } + if (UNLIKELY(++allocActiveness > kLocalAllocActivenessThreshold)) { + useLocal = true; + return true; + } + return false; +} + +// return internal addr +inline address_t RosAllocImpl::AllocFromRun(RosBasedMutator &mutator, size_t 
&internalSize, int eagerness) { + int idx = static_cast(GetRunIdx(internalSize)); + internalSize = GetRunSize(idx); + // if the mutator already has a local run, or it is eligible for one, we use local alloc + if (LIKELY(ROSIMPL_IS_LOCAL_RUN_IDX(idx) && + (mutator.GetLocalAddress(idx) != 0 || mutator.UseLocal(idx)))) { + auto allocRes = AllocFromLocalRun(mutator, idx, eagerness); + if (LIKELY(allocRes.first != kLocalAllocDismissed)) { + return allocRes.second; + } + } + size_t internalAddr = AllocFromGlobalRun(mutator, idx, eagerness); + return internalAddr; +} + +inline void RosAllocImpl::RevokeLocalRun(RosBasedMutator &mutator, RunSlots &run) { + int idx = run.mIdx; + bool needRemove = false; + { + ALLOC_LOCK_TYPE pageLock(ALLOC_CURRENT_THREAD run.lock); + mutator.Globalise(run); + if (run.IsEmpty() && !Collector::Instance().IsConcurrentMarkRunning()) { + // concurrent marking relies on run data structure, don't free + needRemove = true; + } else { + if (LIKELY(!run.IsFull())) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD runLocks[idx]); + nonFullRuns[idx].Insert(run); + } + } + } + if (needRemove) { + FreeRun(run); + } +} + +// return internal size +inline size_t RosAllocImpl::FreeFromRun(RosBasedMutator &mutator, RunSlots &run, address_t internalAddr) { + if (UNLIKELY(IsConcurrentSweepRunning())) { + bool ROSIMPL_DUNUSED(needRemove) = run.Sweep(*this); + ROSIMPL_ASSERT(!needRemove, "need remove before free"); + } + + size_t internalSize = run.GetRunSize(); + ROSIMPL_ASSERT(!run.IsEmpty(), "free from an empty run"); + + // free to local list if possible, i.e., this obj resides in this mutator's local run + int idx = static_cast(GetRunIdx(internalSize)); + if (LIKELY(ROSIMPL_IS_LOCAL_RUN_IDX(idx) && mutator.useLocal)) { + address_t localAddress = mutator.GetLocalAddress(idx); + if (&run == reinterpret_cast(localAddress)) { + mutator.EnableLocalAfterSweep(idx); + mutator.Free(idx, internalAddr); + return internalSize; + } + } + + // must free to global run + bool needRemove = false; + { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD run.lock); + bool wasFull = run.IsFull(); + run.FreeSlot(internalAddr); + needRemove = UpdateGlobalsAfterFree(run, wasFull); + } + if (UNLIKELY(needRemove)) { + ROSIMPL_ASSERT(run.IsEmpty(), "free a non-empty run"); + FreeRun(run); + } + return internalSize; +} + +inline bool RosAllocImpl::UpdateGlobalsAfterFree(RunSlots &run, bool wasFull) { + // local runs must be globalised before they are passed in here + if (LIKELY(run.IsLocal())) { + return false; + } + int idx = run.mIdx; + bool needRemove = false; + if (UNLIKELY(run.IsEmpty())) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD runLocks[idx]); + bool wasInNonFullRuns = false; + if (run.IsInList()) { + wasInNonFullRuns = true; + nonFullRuns[idx].Erase(run); + } + if (wasInNonFullRuns || wasFull) { + // this cond makes sure it's not fetched by a mutator in AllocFrom(Local/Global)Run + needRemove = true; + } + } else if (wasFull && !run.IsFull()) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD runLocks[idx]); + ROSIMPL_ASSERT(!run.IsInList(), "full run in nfrs"); + nonFullRuns[idx].Insert(run); + } + return needRemove; +} + +// must be called in critical section, either in STW or holding lock +inline void RosAllocImpl::SweepSlot(RunSlots &run, address_t slotAddr) { + address_t objAddr = ROSIMPL_GET_OBJ_FROM_ADDR(slotAddr); + maplert::Allocator::ReleaseResource(objAddr); +#if ALLOC_USE_FAST_PATH + size_t objSize = PreObjFree(objAddr); +#else + size_t objSize = PreObjFree(objAddr); +#endif + size_t slotSize = 
run.GetRunSize(); +#if ROSIMPL_MEMSET_AT_FREE + CheckDoubleFree(objAddr); + // ensure header is cleared in 8 bytes, this is needed by dec from 0 check in backup tracing + ROSALLOC_MEMSET_S(slotAddr, slotSize, 0, slotSize); +#else + TagGCFreeObject(objAddr); +#endif + run.FreeSlot(slotAddr); +#if ALLOC_USE_FAST_PATH + PostObjFree(objAddr, objSize, slotSize); +#else + PostObjFree(objAddr, objSize, slotSize); +#endif +} + +// Only invoked at STW +// Iterate all slots in run, if it should be freed, perform free operation without lock +// +// If JSAN is on, don't invoke this method, this is handled in MarkSweepCollector::Sweep +inline void RosAllocImpl::SweepLocalRun(RunSlots &run, std::function shouldFree) { + if (run.IsEmpty()) { + return; + } + size_t slotSize = run.GetRunSize(); + size_t slotCount = run.GetMaxSlots(); + address_t slotAddr = run.GetBaseAddress(); + size_t releasedBytes = 0; + + for (size_t idx = 0; idx < slotCount; ++idx) { + address_t objAddr = ROSIMPL_GET_OBJ_FROM_ADDR(slotAddr); + if (IsAllocatedByAllocator(objAddr) && shouldFree(objAddr)) { + SweepSlot(run, slotAddr); + releasedBytes += slotSize; + } + slotAddr += slotSize; + } + FAST_ALLOC_ACCOUNT_SUB(releasedBytes); +} + +// Only invoked at STW +// Itreate all slots in run, if it should be freed, perform free operation without lock +// +// If JSAN is on, don't invoke this method, this is handled in MarkSweepCollector::Sweep +inline void RosAllocImpl::SweepRun(RunSlots &run, std::function shouldFree) { + if (run.IsEmpty()) { + // this is most likely cache, but sometimes free runs can be left in nfrs + return; + } + + size_t slotSize = run.GetRunSize(); + size_t slotCount = run.GetMaxSlots(); + address_t slotAddr = run.GetBaseAddress(); + size_t releasedBytes = 0; + bool wasFull = run.IsFull(); + bool found = false; + for (size_t i = 0; i < slotCount; ++i) { + address_t objAddr = ROSIMPL_GET_OBJ_FROM_ADDR(slotAddr); + if (IsAllocatedByAllocator(objAddr) && shouldFree(objAddr)) { + SweepSlot(run, slotAddr); + releasedBytes += slotSize; + found = true; + } + slotAddr += slotSize; + } + FAST_ALLOC_ACCOUNT_SUB(releasedBytes); + if (found) { + if (UpdateGlobalsAfterFree(run, wasFull)) { + FreeRun(run); + } + } +} + +__attribute__ ((always_inline)) address_t RosAllocImpl::AllocInternal(size_t &allocSize) { + RosBasedMutator &mutator = static_cast(TLAllocMutator()); + bool allocSuccess = false; + int eagerness = kEagerLevelCoalesce; + address_t internalAddr = 0U; + do { + if (UNLIKELY(allocSize > kLargeObjSize)) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + internalAddr = AllocLargeObject(allocSize, eagerness); + } else { + internalAddr = AllocFromRun(mutator, allocSize, eagerness); + } + allocSuccess = (internalAddr != 0U); + if (UNLIKELY(!allocSuccess)) { + if (eagerness >= kEagerLevelMax) { + size_t largestChunkSize = 0; + { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + largestChunkSize = allocSpace.GetLargestChunkSize(); + } + DumpStackBeforeOOM(allocSize); + GetMemoryInfoBeforeOOM(allocSize, largestChunkSize); + break; + } + HandleAllocFailure(allocSize, eagerness); +#if (!ROSIMPL_MEMSET_AT_FREE) + } else { + size_t sizeMinusHeader = allocSize - ROSIMPL_HEADER_ALLOC_SIZE; + address_t objAddr = ROSIMPL_GET_OBJ_FROM_ADDR(internalAddr); + ROSALLOC_MEMSET_S(objAddr, sizeMinusHeader, 0, sizeMinusHeader); +#endif + } + } while (!allocSuccess); + return internalAddr; +} + +inline size_t RosAllocImpl::FreeInternal(address_t objAddr) { + ROSIMPL_ASSERT(pageMap.ContainsAddr(objAddr), "free objAddr out 
of range"); + + size_t internalSize = 0U; + address_t pageAddr = 0U; + PageLabel pageType = pageMap.GetTypeForAddr(objAddr); + RunSlots *run = nullptr; + if (LIKELY(pageType == kPRun)) { + pageAddr = ALLOCUTIL_PAGE_ADDR(objAddr); + run = reinterpret_cast(pageAddr); + } else if(LIKELY(pageType == kPLargeObj)) { + if (UNLIKELY(IsConcurrentSweepRunning())) { + // ensure large object page is swept before free. + SweepLargeObj(objAddr); + } + FreeLargeObj(objAddr, internalSize); + return internalSize; + } else if (pageType == kPRunRem) { + pageAddr = pageMap.GetRunStartFromAddr(objAddr); + run = reinterpret_cast(pageAddr); + } else if (LIKELY(pageType == kPMygoteRun || pageType == kPMygoteRunRem || pageType == kPMygoteLargeObj)) { + // never free anything in mygote pages + return 0U; + } else { + LOG(ERROR) << "invalid object address inside FreeInternal. Address: " << + objAddr << " type: " << static_cast(pageType) << maple::endl; + return 0U; + } + + ROSIMPL_ASSERT(run != nullptr, "run cannot be null"); + ROSIMPL_DEBUG(CheckRunMagic(*run)); + + address_t memAddr = ROSIMPL_GET_ADDR_FROM_OBJ(objAddr); + CheckDoubleFree(objAddr); +#if ROSIMPL_MEMSET_AT_FREE + internalSize = run->GetRunSize(); + ROSALLOC_MEMSET_S(memAddr, internalSize, 0, internalSize); +#endif + RosBasedMutator &mutator = reinterpret_cast(TLAllocMutator()); + return FreeFromRun(mutator, *run, memAddr); +} + +inline void RosAllocImpl::AddAllocMutator(RosBasedMutator &mutator) { + { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + (void)allocatorMutators.insert(&mutator); + } +} + +inline void RosAllocImpl::RemoveAllocMutator(RosBasedMutator &mutator) { + RevokeLocalRuns(mutator); + { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + (void)allocatorMutators.erase(&mutator); + PostMutatorFini(mutator); + } +} + +inline void RosAllocImpl::ForEachObjInRunUnsafe( + RunSlots &run, std::function visitor, OnlyVisit onlyVisit, size_t numHint) const { + if (!run.HasInitAcquire() || run.IsEmpty()) { + return; + } + run.ForEachObj(visitor, onlyVisit, numHint); +} + +inline bool RosAllocImpl::AccurateIsValidObjAddrUnsafe(address_t addr) { + RunSlots *run = nullptr; + address_t pageAddr = ALLOCUTIL_PAGE_ADDR(addr); + PageLabel pageType = pageMap.GetTypeForAddr(addr); + if (LIKELY(pageType == kPRun || pageType == kPMygoteRun)) { + run = reinterpret_cast(pageAddr); + } else if (pageType == kPRunRem || pageType == kPMygoteRunRem) { + pageAddr = pageMap.GetRunStartFromAddr(addr); + run = reinterpret_cast(pageAddr); + } else if (pageType == kPLargeObj || pageType == kPMygoteLargeObj) { + if (pageAddr == ROSIMPL_GET_ADDR_FROM_OBJ(addr)) { + return IsAllocatedByAllocator(addr); + } + return false; + } else { + return false; + } + + if (!run->HasInitAcquire()) { + return false; + } + // run obj + ROSIMPL_DEBUG(CheckRunMagic(*run)); + if (LIKELY(run != nullptr)) { + return run->IsLiveObjAddr(addr); + } + return false; +} +} // namespace maplert +#endif // MAPLE_RUNTIME_ROSALLOCATOR_INLINED_H \ No newline at end of file diff --git a/src/mrt/compiler-rt/include/allocator/space.h b/src/mrt/compiler-rt/include/allocator/space.h new file mode 100644 index 0000000000..5ae26bc9c8 --- /dev/null +++ b/src/mrt/compiler-rt/include/allocator/space.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_SPACE_H +#define MAPLE_RUNTIME_SPACE_H + +#include +#include +#include +#include +#include "page_map.h" +#include "address.h" +#include "panic.h" +#include "utils/time_utils.h" +#include "alloc_config.h" +#include "mem_map.h" +#include "cartesian_tree.h" + +namespace maplert { +class Space; +class SpacePageManager { + public: + SpacePageManager(Space &allocSpaceVal, uintptr_t begin) : allocSpace(allocSpaceVal), baseAddress(begin) {} + ~SpacePageManager() = default; + void SetBaseAddress(uintptr_t baseAddr) { + baseAddress = baseAddr; + } + + // given size in bytes we return the size normalized to pages (4k) + uint32_t CalcChunkSize(size_t regionSize) const; + + // given index of a region, calculate the page address + uintptr_t GetRegAddrFromIdx(uint32_t idx) const; + + // given the first page and the size of memory region in bytes, we add + // it to the set of free chunks + void AddRegion(uintptr_t regionAddr, size_t regionSize); + // add contiguous pages of count page_count starting with first_page + void AddPages(uintptr_t firstPage, uint32_t pageCount); + + // release all free pages back to the system + void ReleaseAllFreePages(size_t freeBytes, const MemMap &memMap, bool isAggressive); + + // Given a request size, we split the first chunk and return the + // address to the first page + uintptr_t GetChunk(size_t reqSize); + + uint32_t CalcPageIdx(uintptr_t pageAddr) const; + + size_t GetLargestChunkSize(); + private: + friend Space; + using TreeType = CartesianTree; + Space &allocSpace; + uintptr_t baseAddress; + TreeType pageCTree; +}; // end of SpacePageManager + +class Space { + public: + // These parameters can be set via static methods Space::SetXXX(). + // Currently they are called before Space::Init(). They take + // the values from the configuration of the virtual machine. + // Default configuration (please check the corresponding .prop files to confirm): + // kReleasePageAtFree/kReleasePageAtTrim: set in alloc_config.h + // heap start size: 8m + // heap size: 512m + // heap growth limit: 384m + // heap min free: 2m + // heap max free: 8m + // heap target utilization: 0.75 + // ignore max footprint: see runtime.cc + // + // they are not constant.. + // whether to release pages immediately after we free them + static bool kReleasePageAtFree; + // whether to release free pages all together at a separate phase we call "trim" + static bool kReleasePageAtTrim; + // the initial size of the heap + static size_t kHeapStartSize; + // the maximum heap size + static size_t kHeapSize; + // the default heap size + // In ART, kHeapGrowthLimit is used as the heap size for all processes, + // UNLESS the manifest file set the largeHeap property to true, + // in which case the heap size is kHeapSize. 
+ // In our Maple, we ignore this and use the kHeapSize directly (for now), + // see RosAllocImpl::Init() + static size_t kHeapGrowthLimit; + // the minimum size of free pages, unused + static size_t kHeapMinFree; + // the maximum size of free pages, used to trigger trim + static size_t kHeapMaxFree; + // the following is unused + // target utilization rate of the heap + static float kHeapTargetUtilization; + // unused + static bool kIgnoreMaxFootprint; + // 1s interval for trim, to prevent impeding performance + static const uint64_t kMinTrimInterval = 1000; // ms + // non-aggressive trim max time + static const uint64_t kMaxTrimTime = 10000; // us + + // this controls how many free pages we don't release (return to the os) + static inline size_t TargetFreePageSize() { + return Space::kHeapMaxFree >> 1; + } + + static inline void SetHeapStartSize(size_t heapStartSize) { + Space::kHeapStartSize = heapStartSize; + } + static inline void SetHeapSize(size_t heapSize) { + Space::kHeapSize = heapSize; + } + static inline void SetHeapGrowthLimit(size_t heapGrowthLimit) { + Space::kHeapGrowthLimit = heapGrowthLimit; + } + static inline void SetHeapMinFree(size_t heapMinFree) { + Space::kHeapMinFree = heapMinFree; + } + static inline void SetHeapMaxFree(size_t heapMaxFree) { + Space::kHeapMaxFree = heapMaxFree; + } + static inline void SetHeapTargetUtilization(float heapTargetUtilization) { + Space::kHeapTargetUtilization = heapTargetUtilization; + } + static inline void SetIgnoreMaxFootprint(bool ignoreMaxFootprint) { + Space::kIgnoreMaxFootprint = ignoreMaxFootprint; + } + + public: + string name; // full name of the space + string tag; // tag to identify the memory region in smaps + bool isManaged; // whether the space is under the management of RC/GC + // maximum memory allowed for the space + size_t maxCapacity; + // mem map is a wrapper of system calls like mmap(), madvise(), mprotect() + MemMap *memMap; + // page map labels each page with a type, e.g., run type, large obj type + PageMap &pageMap; + // The beginning of the storage for fast access. + uint8_t *begin; + // current end of the space. + atomic end; + // we do out-of-lock reads to the following fields for gc trigger heuristics + // as long as this fields are aligned, we don't have to worry about atomicity problems + // total size of pages allocated + size_t allocatedPageSize; + // total size of pages fetched from the kernel + // 'release' is the action of returning physical memory pages back to the kernel + // if a page is 'non-released', it means we still occupy the corresponding physical memory + size_t nonReleasedPageSize; + + friend class SpacePageManager; + // the page manager manages all of the free pages (including released pages) + SpacePageManager pageManager; + + // records the last time we trimed + uint64_t lastTrimTime; + inline bool IsTrimAllowedAtTheMoment() { + uint64_t currTime = timeutils::MilliSeconds(); + if (currTime - lastTrimTime < kMinTrimInterval) { + return false; + } else { + return true; + } + } + + public: + static const int kPagesOneTime = kRosimplDefaultPageOneTime; + Space(const string &nameVal, const string &tagVal, bool managed, PageMap &pageMapVal); + virtual ~Space(); + Space(const Space &that) = delete; + Space(Space &&that) = delete; + Space &operator=(const Space &that) = delete; + Space &operator=(Space &&that) = delete; + + void Init(uint8_t *reqStart = nullptr); + + // Name of the space. May vary, for example before/after the Zygote fork. 
+ const char *GetName() const { + return name.c_str(); + } + + inline uint8_t *GetEnd() const { + return end.load(std::memory_order_acquire); + } + inline uint8_t *GetEndRelaxed() const { + return end.load(std::memory_order_relaxed); + } + + size_t GetMaxCapacity() const { + return maxCapacity; + } + + inline uint8_t *GetBegin() const { + return begin; + } + + size_t GetSize() const { + return static_cast(GetEnd() - GetBegin()); + } + size_t GetSizeRelaxed() const { + return static_cast(GetEndRelaxed() - GetBegin()); + } + + // Get the size of the allocated pages + inline size_t GetAllocatedPageSize() const { + return allocatedPageSize; + } + // Get the size of the non-released pages + inline size_t GetNonReleasedPageSize() const { + return nonReleasedPageSize; + } + // Get the size of the free pages + inline size_t GetFreePageSize() const { + return nonReleasedPageSize - allocatedPageSize; + } + + inline size_t HeapMaxFreeByUtilization() const { + // maximum is twice the ideal + // cap this for the same reason + return std::min(TargetFreePageSizeByUtilization() << 1, kHeapMaxFree); + } + + size_t TargetFreePageSizeByUtilization() const; + + void SetEnd(uint8_t *endVal) { + // This should be visible by other thread + // 1. expand happens under global_lock, new_expand space obj + // must be visible after new "end_" visible to other thread. + // 2. heapCurSize update before end_, and end_ store with release order + // + // when other thread see newly expand space obj, it must see new heapCurSize + if (isManaged) { + HeapStats::OnHeapExtended(static_cast(endVal - GetBegin())); + } + end.store(endVal, memory_order_release); + } + + void SetBegin(uint8_t *beginVal) { + begin = beginVal; + if (isManaged) { + HeapStats::OnHeapCreated(reinterpret_cast(beginVal)); + } + // assume begin only set once in Space, otherwise need update heapCurSize + } + + inline void RecordReleasedToNonReleased(size_t pageCount) { + nonReleasedPageSize += ALLOCUTIL_PAGE_CNT2BYTE(pageCount); + } + + // returns the number of pages free within the reserved memry space + size_t GetAvailPageCount() const; + + uintptr_t GetChunk(size_t reqSize); + size_t GetLargestChunkSize(); + + inline bool HasAddress(address_t addr) { + const uint8_t *objPtr = reinterpret_cast(addr); + return objPtr >= GetBegin() && objPtr < GetEnd(); + } + + void Extend(size_t deltaSize); + address_t Alloc(size_t reqSize, bool allowExtension); + size_t ReleaseFreePages(bool aggressive = false); + void FreeRegion(address_t addr, size_t pgCnt); +}; // class Space +} // namespace maplert + +#endif // MAPLE_RUNTIME_SPACE_H diff --git a/src/mrt/compiler-rt/include/chelper.h b/src/mrt/compiler-rt/include/chelper.h new file mode 100644 index 0000000000..abe738f350 --- /dev/null +++ b/src/mrt/compiler-rt/include/chelper.h @@ -0,0 +1,270 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_CHELPER_H +#define MAPLE_RUNTIME_CHELPER_H + +/** + * The purpose of this file is to help C programs to correctly create managed + * objects and access its fields. + * + * Some functions serve the MapleJava project, only. They are for the + * convenience of the C programmers. + * + * ========== + * HOW TO USE + * ========== + * + * When compiling .mpl files, add the -gen-c-macro-def option to mplcg: + * + * mplcg -gen-c-macro-def foo.mpl + * + * That will generate foo.macros.def alongside foo.s. If you have multiple + * .macros.def files, merge them with `cat foo.macros.def bar.macros.def | sort + * | uniq > unified.macros.def` so that duplicated entries are removed. + * + * In C files (.c), add the following in the beginning: + * + * #include "chelper.h" + * + * #ifndef __UNIFIED_MACROS_DEF__ + * #define __UNIFIED_MACROS_DEF__ + * #include "unified.macros.def" + * #endif + * + * This will create symbol definitions for class-related information, such as + * class instance sizes, and the offset of each class field. This will enable + * all the macros in this header. + * + * The __UNIFIED_MACROS_DEF__ is useful when we have the habit of including one + * .c file into another, and both need to use macros in this header. + * + * Create Java scalar objects using MRT_NEWOBJ(java_class_name), for example: + * + * address_t obj_addr = MRT_NEWOBJ(Ljava/lang/Object;); + * MRT_IncRef(obj_addr); + * + * or if you defined your own `jobject` type: + * + * jobject obj = (jobject)MRT_NEWOBJ(Ljava/lang/Object;); + * MRT_IncRef((address_t)obj_addr); + * + * Don't worry about GCTIB, itable, vtable or the klass field. MRT_NEWOBJ + * already filled those fields for you. In the future, when we change our + * naming convention, we change this header. + * + * Creating Java arrays using MRT_NEW_*_ARRAY(len), for example: + * + * address_t jint_array = MRT_NEW_JINT_ARRAY(100); + * MRT_IncRef(jint_array); + * + * address_t jobj_array = MRT_NEW_JOBJECT_ARRAY(100); + * MRT_IncRef(jobj_array); + * + * MRT_NEW_*_ARRAY handles the length field and the elemSize (component size) + * field, too. + * + * String has a custom layout, and cannot be allocated in the generic way. See + * `dex2mpl/projects/helloworld/HelloWorld.jni.c` for more information. + * + * Load and store object fields using the MRT_LOAD_*(object, offset) and the + * MRT_STORE_*(object, offset, value) macros: + * + * jint v = MRT_LOAD_JINT(foo, MRT_FIELD_OFFSET(Lcom_2Fexample_2FSomeObject_3B, someIntField)) + * MRT_STORE_JINT(foo, MRT_FIELD_OFFSET(Lcom_2Fexample_2FSomeObject_3B, someIntField), newval) + * + * The MRT_FIELD_OFFSET(className, fieldName) returns the offset of that field. + * Note that className must be the class in which fieldName is defined. + * Subclasses may define fields of the same name if the field is private, so our + * macros make a compromise by not inheriting fields into subclasses. + * (Interestingly, according to the Java Language Specification, subclasses + * really do not "inherit" fields from its parents.) + * + * When linking, link with `mapleall/runtime/src/buildaarch64/libmplrt.a`. + */ +#include "collector/collector.h" +#include "namemangler.h" +#include "mm_config.h" +#include "cinterface.h" +#include "mrt_fields_api.h" // Load and store operations for fields +#include "panic.h" +#include "jsan.h" +#include "mrt_compiler_api.h" +#include "mrt_reflection_api.h" +// Some Macro magics. +// +// Force expansion before pasting. 
+#define __MRT_MAGIC_PASTE(x, y) __MRT_MAGIC_PASTE2(x, y) +#define __MRT_MAGIC_PASTE2(x, y) x##y + +// Defer expansion +#define __MRT_MAGIC_EMPTY() +#define __MRT_MAGIC_DEFER(x) x __MRT_MAGIC_EMPTY() +#define __MRT_MAGIC_DEFER_IGNORE(x) __MRT_MAGIC_EMPTY() + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// Query instance size, field offset and field size. +// +// className is an identifier. These macros automatically paste identifiers. +#define MRT_INSTANCE_SIZE(className) __MRT_MAGIC_PASTE(__MRT_instance_size__, className) +#define MRT_FIELD_OFFSET(className, fieldName) \ + __MRT_MAGIC_PASTE(__MRT_MAGIC_PASTE(__MRT_MAGIC_PASTE(__MRT_field_offset__, className), __), fieldName) +#define MRT_FIELD_SIZE(className, fieldName) \ + __MRT_MAGIC_PASTE(__MRT_MAGIC_PASTE(__MRT_MAGIC_PASTE(__MRT_field_size__, className), __), fieldName) + +#define MRT_GCTIB(className) __MRT_MAGIC_PASTE(GCTIB_PREFIX, className) +#define MRT_ITABLE(className) __MRT_MAGIC_PASTE(ITAB_PREFIX, className) +#define MRT_VTABLE(className) __MRT_MAGIC_PASTE(VTAB_PREFIX, className) +#define MRT_CLASSINFO(className) __MRT_MAGIC_PASTE(CLASSINFO_PREFIX, className) +#define MRT_PRIMITIVECLASSINFO(className) __MRT_MAGIC_PASTE(PRIMITIVECLASSINFO_PREFIX, className) +#define MRT_FIELDS(className) __MRT_MAGIC_PASTE(FIELDINFO_PREFIX, className) +#define MRT_METHODS(className) __MRT_MAGIC_PASTE(METHODINFO_PREFIX, className) + +#define MRT_GETKLASS(className) MRT_GetClassByClassLoader(nullptr, TO_STR(className)) + +// Expand the definitions from the .macros.def files generated by +// `mplcg --gen-c-macro-def` +// +// The following two are not really a vararg macros. That's for preventing +// expanding className and offset too early. For example, when fieldName is +// "errno" or "stdout", it will be a problem. +#define MRT_FIELD_OFFSET_DEF(className, fieldName) __MRT_field_offset__##className##__##fieldName +#define MRT_FIELD_SIZE_DEF(className, fieldName) __MRT_field_size__##className##__##fieldName + +#define __MRT_CLASS(className, size, superclass, ...) \ + static const size_t MRT_INSTANCE_SIZE(className##__VA_ARGS__) = size; \ + extern void *MRT_CLASSINFO(className##__VA_ARGS__); + +#define __MRT_CLASS_FIELD(className, fieldName, offset, size, ...) \ + static const size_t MRT_FIELD_OFFSET_DEF(className##__VA_ARGS__, fieldName##__VA_ARGS__) = offset; \ + static const size_t MRT_FIELD_SIZE_DEF(className##__VA_ARGS__, fieldName##__VA_ARGS__) = size; + +// +// Creating an object in Java involves four steps: +// +// 0. Allocating the memory for the object. +// 1. Do proper initialization for the language-agnostic runtime +// - e.g. To implement GC, this will assign the GCTIB to the object header. +// 2. Do proper initialization for the language runtime +// - e.g. To implement Java, this will assign the fields not visible to the +// Java langauge, including the pointers to I-table, V-table, the Class +// object, and the monitor word. +// 3. Call the user-defined object initializer +// - e.g. For Java, it is the `` method, of the proper signature if +// overloaded. +// +// The following macros perform one or more of the above steps. 
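+// (Aside, before the allocation helpers below: an illustrative sketch of the
+// .macros.def expansion described above. The concrete class name, field name
+// and numeric values are hypothetical and only show how the pieces fit.)
+//
+// A .macros.def entry generated by `mplcg --gen-c-macro-def` of the form
+//
+//   __MRT_CLASS(Lcom_2Fexample_2FSomeObject_3B, 16, Ljava_2Flang_2FObject_3B)
+//   __MRT_CLASS_FIELD(Lcom_2Fexample_2FSomeObject_3B, someIntField, 12, 4)
+//
+// would expand (via __MRT_CLASS / __MRT_CLASS_FIELD above) roughly into
+//
+//   static const size_t __MRT_instance_size__Lcom_2Fexample_2FSomeObject_3B = 16;
+//   static const size_t __MRT_field_offset__Lcom_2Fexample_2FSomeObject_3B__someIntField = 12;
+//   static const size_t __MRT_field_size__Lcom_2Fexample_2FSomeObject_3B__someIntField = 4;
+//
+// which is what MRT_INSTANCE_SIZE() and MRT_FIELD_OFFSET() paste back together
+// in the usage examples near the top of this header.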
+address_t __MRT_chelper_newobj_0(size_t size); + +address_t __MRT_chelper_newobj_flexible_0(size_t elemSize, size_t len); +address_t MRT_ChelperNewobjFlexible(size_t elemSize, size_t len, address_t klass, bool isJNI = false); + +#define MRT_NEWOBJ_0(className) \ + __MRT_chelper_newobj_0(MRT_INSTANCE_SIZE(className)) +// Must use MRT_GETKLASS(className) instead of MRT_CLASSINFO(className) here due to +// dynamic class loading +#define MRT_NEWOBJ_1(className) \ + MRT_ReflectAllocObject(MRT_GETKLASS(className)) + +#define MRT_NEWOBJ_FLEXIBLE_0(elemSize, len, cls) \ + __MRT_chelper_newobj_flexible_0(elemSize, len) + +#define MRT_NEWOBJ_FLEXIBLE_1(elemSize, len, cls) \ + MRT_ChelperNewobjFlexible(elemSize, len, reinterpret_cast(cls)) + +#define MRT_NEWOBJ_FLEXIBLE_JNI_1(elemSize, len, cls, jni) \ + MRT_ChelperNewobjFlexible(elemSize, len, reinterpret_cast(cls), jni) + +// CONFIG ME: Set the following macro to the desired level. +// +// Setting to 2 will automatically assign itab, vtab and klass. Since these +// assignments are idempotent, it does not affect the correctness if () +// also initializes those fields. +#define __MRT_DESIRED_INIT_LEVEL 1 + +#define __THE_MRT_NEWOBJ __MRT_MAGIC_PASTE(MRT_NEWOBJ_, __MRT_DESIRED_INIT_LEVEL) +#define __THE_MRT_NEWOBJ_FLEXIBLE __MRT_MAGIC_PASTE(MRT_NEWOBJ_FLEXIBLE_, __MRT_DESIRED_INIT_LEVEL) +#define THE_MRT_NEWOBJ_FLEXIBLE_JNI __MRT_MAGIC_PASTE(MRT_NEWOBJ_FLEXIBLE_JNI_, __MRT_DESIRED_INIT_LEVEL) + +// This is the intended way to allocate heap objects in C +#define MRT_NEWOBJ(className) __THE_MRT_NEWOBJ(className) + +#define MRT_NEW_JOBJECT_ARRAY(len, klass) __THE_MRT_NEWOBJ_FLEXIBLE(sizeof(reffield_t), len, klass) + +#define MRT_NEW_PRIMITIVE_ARRAY(elemSize, len, klass) __THE_MRT_NEWOBJ_FLEXIBLE(elemSize, len, klass) + +#define MRT_NEW_PRIMITIVE_ARRAY_JNI(elemSize, len, klass, jni) THE_MRT_NEWOBJ_FLEXIBLE_JNI(elemSize, len, klass, jni) + +// --- Write barriers --- +// Object *var = value; ----> MRT_WRITE_REF_VAR(var, value) +// not used now, Undefined +#define MRT_WRITE_REF_VAR(var, value) \ + MRT_WriteRefVar(reinterpret_cast(&(var)), reinterpret_cast(value)) + +// obj->field = value; ----> MRT_WRITE_REF_FIELD(obj, field, value) +#define MRT_WRITE_REF_FIELD(obj, field, value) \ + MRT_WriteRefField(reinterpret_cast(obj), \ + reinterpret_cast(&(obj->field)), reinterpret_cast(value)) + +// release(obj); ----> MRT_RELEASE_REF_VAR(obj) +#define MRT_RELEASE_REF_VAR(obj) MRT_ReleaseRefVar(reinterpret_cast(obj)) + +// RC operations used in MRT +#define ENABLE_RC_FASTPATH +#ifdef ENABLE_RC_FASTPATH +#define RC_LOCAL_INC_REF(obj) (void)(MRT_IncRefNaiveRCFast(reinterpret_cast(obj))) +#define RC_RUNTIME_INC_REF(obj) (void)(MRT_IncRefNaiveRCFast(reinterpret_cast(obj))) +#define RC_LOCAL_DEC_REF(obj) (void)MRT_DecRefNaiveRCFast(reinterpret_cast(obj)) +#define RC_RUNTIME_DEC_REF(obj) (void)MRT_DecRefNaiveRCFast(reinterpret_cast(obj)) +#else +#define RC_LOCAL_INC_REF(obj) MRT_IncRef(reinterpret_cast(obj)) +#define RC_RUNTIME_INC_REF(obj) MRT_IncRef(reinterpret_cast(obj)) +#define RC_LOCAL_DEC_REF(obj) MRT_DecRef(reinterpret_cast(obj)) +#define RC_RUNTIME_DEC_REF(obj) MRT_DecRef(reinterpret_cast(obj)) +#endif + +constexpr std::size_t kMrtKlassOffset = 0; + +// Have to add these, too, in order not to depend on the C++ header sizes.h +// GREP-ARRAYLAYOUT: Grep me to see if all these places agree. 
+void MRT_SetJavaClass(address_t objAddr, address_t klass); +void MRT_SetObjectPermanent(address_t objAddr); +void MRT_SetJavaArrayClass(address_t objAddr, address_t klass); +#if ALLOC_USE_FAST_PATH +void MRT_SetFastAlloc(ClassMetadata*); +#else +#define MRT_SetFastAlloc(x) +#endif +void MRT_CheckRefCount(address_t obj, uint32_t index); +void MRT_ReflectThrowNegtiveArraySizeException(); +void MRT_SetThreadPriority(pid_t tid, int32_t priority); + +inline void MRT_MemoryBarrier(void) { +#if defined(__aarch64__) || defined(__arm__) + __asm__ ("dmb ish":::); +#else +#error "Unsupported architecture" +#endif +} + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif diff --git a/src/mrt/compiler-rt/include/chosen.h b/src/mrt/compiler-rt/include/chosen.h new file mode 100644 index 0000000000..953406cef7 --- /dev/null +++ b/src/mrt/compiler-rt/include/chosen.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_CHOSEN_H +#define MAPLE_RUNTIME_CHOSEN_H + +#include "mm_config.h" +#include "deps.h" +// This module chooses the desired allocator and collector instances according +// to the GC strategy configured using preprocessor macros. +// The chosen strategy class inherits from the implementation, not the other way around. +// This avoids virtual dispatch, and is what's done in MMTk. +#if MRT_ALLOCATOR == MRT_ALLOCATOR_ROS +namespace maplert { +class RosAllocImpl; +template +class ROSAllocMutator; +// this number equals kROSAllocLocalRuns; because of forward declaration, we need to explicitly +// write it here; get rid of forward declaration in the future +using RosBasedMutator = ROSAllocMutator<12>; +using TheAllocator = RosAllocImpl; +using TheAllocMutator = RosBasedMutator; +} +#else +#error "Invalid MRT_ALLOCATOR" +#endif + +// We don't provide an empty collect/mutator. 
So when MRT_COLLECTOR_NONE is defined, +// using NaiveRC as the default ones, but disable its effect in runtime +#if ((MRT_COLLECTOR == MRT_COLLECTOR_NONE) || (MRT_COLLECTOR == MRT_COLLECTOR_NAIVERC)) +namespace maplert { +class NaiveRCCollector; +class NaiveRCMutator; +using TheCollector = NaiveRCCollector; +using TheMutator = NaiveRCMutator; +} +#elif MRT_COLLECTOR == MRT_COLLECTOR_MS +namespace maplert { +class MarkSweepCollector; +class MarkSweepMutator; +using TheCollector = MarkSweepCollector; +using TheMutator = MarkSweepMutator; +} +#else +#error "Invalid MRT_COLLECTOR" +#endif + +#include "tls_store.h" +namespace maplert { +class BumpPointerAlloc; +class DecoupleAllocator; +class MetaAllocator; +class ZterpStaticRootAllocator; +extern ImmortalWrapper permAllocator; +extern ImmortalWrapper zterpMetaAllocator; +extern ImmortalWrapper metaAllocator; +extern ImmortalWrapper decoupleAllocator; +extern ImmortalWrapper zterpStaticRootAllocator; +extern ImmortalWrapper theAllocator; + +class Mutator; + +static inline Mutator &TLMutator(void) noexcept { + // tl_the_mutator is added to thread context in GCInitThreadLocal() + return *reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotMutator)); +} + +static inline Mutator *TLMutatorPtr(void) { + if (maple::tls::HasTLS()) { + // tl_the_mutator is added to thread context in GCInitThreadLocal() + return reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotMutator)); + } + // CreateTLS() not called on current thread. + return nullptr; +} + +static inline TheAllocMutator &TLAllocMutator(void) { + // tl_alloc_mutator is added to thread context in GCInitThreadLocal() + return *reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotAllocMutator)); +} + +static inline TheAllocMutator *TLAllocMutatorPtr(void) { + if (maple::tls::HasTLS()) { + // tl_alloc_mutator is added to thread context in GCInitThreadLocal() + return reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotAllocMutator)); + } + // CreateTLS() not called on current thread. + return nullptr; +} +} // namespace maplert + +// we forward declared the classes above, then include the headers these classes depend on, +// so that the code (especially inline functions) in these headers can at least enjoy the benefits +// of knowing what the chosen collector/allocator/mutator are, as well as the mutator getters. +// +// if any of these headers uses the above information, it needs to include chosen.h to be +// self-contained. but we won't do that because usually chosen.h comes at the very beginning of a +// compilation unit, and it's intended to be used in place of collector/allocator headers. +#include "allocator/bp_allocator.h" +#if MRT_ALLOCATOR == MRT_ALLOCATOR_ROS +#include "allocator/ros_allocator.h" +#endif +#include "collector/collector_naiverc.h" +#if ((MRT_COLLECTOR == MRT_COLLECTOR_NONE) || (MRT_COLLECTOR == MRT_COLLECTOR_NAIVERC)) +#elif MRT_COLLECTOR == MRT_COLLECTOR_MS +#include "collector/collector_ms.h" +#endif +#endif // MAPLE_RUNTIME_CHOSEN_H diff --git a/src/mrt/compiler-rt/include/cinterface.h b/src/mrt/compiler-rt/include/cinterface.h new file mode 100644 index 0000000000..e58f740df7 --- /dev/null +++ b/src/mrt/compiler-rt/include/cinterface.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_CINTERFACE_H +#define MAPLE_RUNTIME_CINTERFACE_H + +#include +#include +#include +#include +#include +#include +#include +#include "gc_roots.h" +#include "address.h" +#include "mrt_reference_api.h" +#include "heap_stats.h" + +// These functions form the interface for the .s code generated from mplcg. +// A criteria for inclusion is that if the function is callable from generated .s, +// and is front-end independent (i.e. not specific to dex2mpl), it should be here. +// chelper.h shall help C programmers, including the developers +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// Most functions are simple wrappers of methods of global or thread-local +// objects defined in chosen.h/cpp. Read cinterface.cpp for more information. +// +// Gets the polling page address. +void *MRT_GetPollingPage(); + +// returns the approximate total number of live objects +size_t MRT_TotalHeapObj(); + +// executes heap trim +void MRT_Trim(bool aggressive); + +// type of permanent space allocation +enum MetaTag : uint16_t { + kClassMetaData = 0, + kFieldMetaData, + kMethodMetaData, + kITabMetaData, + kNativeStringData, + kMetaTagNum +}; + +enum DecoupleTag : uint16_t { + kITab = 0, + kITabAggregate, + kVTab, + kVTabArray, + kTagMax +}; +static const std::string kDecoupleTagNames[kTagMax] = { + "itab", "itab aggregate", + "vtab", "vtab array", +}; +address_t MRT_AllocFromPerm(size_t size); +address_t MRT_AllocFromMeta(size_t size, MetaTag metaTag); +address_t MRT_AllocFromDecouple(size_t size, DecoupleTag tag); + +void MRT_FreeObj(address_t obj); +void MRT_ResetHeapStats(); + +bool MRT_IsPermJavaObj(address_t obj); + +// GC related interface +void MRT_PrintRCStats(); +void MRT_ResetRCStats(); + +size_t MRT_GetNativeAllocBytes(); +void MRT_SetNativeAllocBytes(size_t size); + +void MRT_ClassInstanceNum(std::map &objNameCntMp); + +// collector specific JNI interface +// Unsafe +bool MRT_UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, + address_t expectedValue, address_t newValue); +address_t MRT_UnsafeGetObjectVolatile(address_t obj, ssize_t offset); +address_t MRT_UnsafeGetObject(address_t obj, ssize_t offset); +void MRT_UnsafePutObject(address_t obj, ssize_t offset, address_t newValue); +void MRT_UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue); +void MRT_UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue); + +// barriers for runtime code. 
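+// Illustrative sketch only: `obj`, `fieldOffset` and `newValue` below are
+// hypothetical names. Runtime code is expected to access reference-typed
+// fields through these barriers rather than raw loads/stores, roughly:
+//
+//   address_t *fieldAddr = reinterpret_cast<address_t*>(obj + fieldOffset);
+//   address_t old = MRT_LoadRefField(obj, fieldAddr);   // read barrier
+//   MRT_WriteRefField(obj, fieldAddr, newValue);        // write barrier
+//
+// The MRT_WRITE_REF_FIELD macro in chelper.h wraps MRT_WriteRefField for C callers.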
+address_t MRT_LoadRefField(address_t obj, address_t *fieldAddr); +address_t MRT_LoadVolatileField(address_t obj, address_t *fieldAddr); +void MRT_WriteRefField(address_t obj, address_t *field, address_t value); +void MRT_WriteVolatileField(address_t obj, address_t *objAddr, address_t value); + +address_t MRT_LoadRefFieldCommon(address_t obj, address_t *fieldAddr); // only used in naiverc + +// write barrier +void MRT_WriteRefFieldNoRC(address_t obj, address_t *field, address_t value); +void MRT_WriteRefFieldNoDec(address_t obj, address_t *field, address_t value); +void MRT_WriteRefFieldNoInc(address_t obj, address_t *field, address_t value); + +void MRT_WriteVolatileFieldNoInc(address_t obj, address_t *objAddr, address_t value); +void MRT_WriteVolatileFieldNoDec(address_t obj, address_t *objAddr, address_t value); +void MRT_WriteVolatileFieldNoRC(address_t obj, address_t *objAddr, address_t value); + +// RC weak field processing +// static not supported +void MRT_WriteWeakField(address_t obj, address_t *field, address_t value, bool isVolatile); +address_t MRT_LoadWeakField(address_t obj, address_t *field, bool isVolatile); +address_t MRT_LoadWeakFieldCommon(address_t obj, address_t *field); + +// For referent processing +void MRT_WriteReferentField(address_t obj, address_t *fieldAddr, address_t value, bool isResurrectWeak); +// Load Referent can only happen in soft/weak/weak global, weak_global has special handling +// Used in java mutator thread +address_t MRT_LoadReferentField(address_t obj, address_t *fieldAddr); + +// write barrier for local reference variable update. +void MRT_WriteRefVar(address_t *var, address_t value); // not used now, undefined +void MRT_WriteRefVarNoInc(address_t *var, address_t value); // not used, undefined + +// Call this function when we are going to renew an dead object, +// for example: reuse string from pool in NewStringUtfFromPool(). +// when concurrent mark is running, this function will mark the object to +// prevent it from being swept by GC. +void MRT_PreRenewObject(address_t obj); + +void MRT_SetTracingObject(address_t obj); + +// release local reference variable, only naive rc need this. +void MRT_ReleaseRefVar(address_t obj); + +bool MRT_IsValidObjectAddress(address_t obj); + +// Trigger GC. Callable from mutator threads. This function may or may not +// block, depending on the specific reason. If it blocks, it will return when +// it has finished GC. +// +// reason: The specific reason for performing GC. +void MRT_TriggerGC(maplert::GCReason reason); + +bool MRT_IsNaiveRCCollector(); + +// Returns true if current thread is GC thread. +bool MRT_IsGcThread(); + +bool MRT_IsGcRunning(); + +bool MRT_FastIsValidObjAddr(address_t obj); + +#if LOG_ALLOC_TIMESTAT +void MRT_PrintAllocTimers(); +void MRT_ResetAllocTimers(); +#endif +void MRT_DebugShowCurrentMutators(); +size_t MRT_AllocSize(); +size_t MRT_AllocCount(); +size_t MRT_FreeSize(); +size_t MRT_FreeCount(); + +void MRT_DumpHeap(const std::string &tag); + +// dump RC and GC information into stream os +void MRT_DumpRCAndGCPerformanceInfo(std::ostream &os); + +void MRT_DumpRCAndGCPerformanceInfo_Stderr(); + +void MRT_VisitAllocatedObjects(maple::rootObjectFunc f); + +// Cycle pattern interface +void MRT_DumpDynamicCyclePatterns(std::ostream &os, size_t limit); +// Send a job that will be executed by the reference processor thread when it +// wakes up. Intended to perform cycle pattern saving/loading/writing signature etc. 
+// Implemented in reference-processor.cpp +void MRT_SendCyclePatternJob(std::function job); +void MRT_SetPeriodicSaveCpJob(std::function job); +void MRT_SetPeriodicLearnCpJob(std::function job); +void MRT_SendSaveCpJob(); +bool MRT_IsCyclePatternUpdated(); + +// Send a background gc job to reference processor when process state change to jank imperceptible. +void MRT_SendBackgroundGcJob(bool force); + + +void MRT_UpdateProcessState(ProcessState processState, bool isSystemServer); + +// Set a callback function which is called after GC finished, but before +// starting the world. Useful for performing cleaning up after a GC. +void MRT_DumpStaticField(std::ostream &os); +void MRT_WaitGCStopped(); +#if RC_TRACE_OBJECT +void TraceRefRC(address_t obj, uint32_t rc, const char *msg); +#endif +void MRT_SetAllocRecordingCallback(std::function callback); + +// traverse 'objects' allocated in the perm space +void MRT_VisitDecoupleObjects(maple::rootObjectFunc f); + +#ifdef __cplusplus +} // namespace maplert +} // extern "C" +#endif // __cplusplus + +#endif // MAPLE_RUNTIME_CINTERFACE_H diff --git a/src/mrt/compiler-rt/include/collector/arena.h b/src/mrt/compiler-rt/include/collector/arena.h new file mode 100644 index 0000000000..3b1ac01921 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/arena.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_ARENA_H +#define MAPLE_RUNTIME_ARENA_H + +#include "address.h" +#include "base/logging.h" +#include "jni.h" +#include "panic.h" +#include "allocator/page_pool.h" + +namespace maplert { +static constexpr uint32_t kLocalVarHeaderSlotCount = 1; +static constexpr uint32_t kLocalVarBankSlotCount = maple::kPageSize / sizeof(address_t) - kLocalVarHeaderSlotCount; + +// Use dynamic allocated MemoryBank to save address_t content. Basic operation is save snapshot +// pop to snapshot/iterate Object Handle Arena or iterate to snapshot. +// HandleArena use bump-the-pointer allocation to serve object handle allocation. It should have +// stack-style allocation and deallocation, which a set of successive allocated handles are bulk +// freed at a specific code point. 
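+// A minimal usage sketch (illustrative only): ObjHandle and ScopedHandles,
+// declared further below, are the intended wrappers around this arena, and
+// NewSomeObject() is a hypothetical allocation call.
+//
+//   {
+//     ScopedHandles scope;                      // records the current arena top
+//     ObjHandle<MObject> obj(NewSomeObject());  // handle slot is bump-allocated from this arena
+//     ...                                       // the referenced object stays visible via VisitGCRoots()
+//   }                                           // leaving the scope bulk-frees back to the snapshot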
+class HandleArena { + struct MemoryBank { + MemoryBank *prev = nullptr; + address_t *BeginSlotAddr() { + return (reinterpret_cast(this)) + kLocalVarHeaderSlotCount; + } + address_t *EndSlotAddr() { + return (reinterpret_cast(this)) + kLocalVarHeaderSlotCount + kLocalVarBankSlotCount; + } + }; + + public: + HandleArena() : tail(nullptr), top(nullptr) {} + ~HandleArena() { + if (tail != nullptr) { + PopBanks(nullptr, nullptr); + } + } + HandleArena &operator=(const HandleArena &other) { + this->tail = other.tail; + this->top = other.top; + return *this; + } + void VisitGCRoots(const RefVisitor &visitor); + void VisitTopSnapShot(const RefVisitor &visitor, const HandleArena &snapShot) noexcept { + VisitTopSnapShot(visitor, snapShot.tail, snapShot.top); + } + + // bump-the-pointer allocation + address_t *AllocateSlot() { + if (tail == nullptr || top == tail->EndSlotAddr()) { + GetNewBank(); + } + address_t *result = top; + ++top; + return result; + } + + // utility function for bulk free + void PopBanks(const HandleArena &snapshot) noexcept { + PopBanks(snapshot.tail, snapshot.top); + } + + inline void Clear() noexcept { + tail = nullptr; + top = nullptr; + } + private: + MemoryBank *tail; // current allocatable bank + address_t *top; // current allocatable pointer + inline void GetNewBank() { + static_assert(sizeof(MemoryBank*) == sizeof(address_t), "not equal size"); + MemoryBank *mb = reinterpret_cast(PagePool::Instance().GetPage()); + if (UNLIKELY(mb == nullptr)) { + LOG(FATAL) << "Failed to allocate a new bank" << maple::endl; + } + mb->prev = tail; + tail = mb; + top = tail->BeginSlotAddr(); + } + + void PopBanks(MemoryBank *toTail, address_t *toTop) noexcept { + while (tail != toTail && tail != nullptr) { + MemoryBank *prev = tail->prev; + PagePool::Instance().ReturnPage(reinterpret_cast(tail)); + tail = prev; + } + top = toTop; + } + + void VisitTopSnapShot(const RefVisitor &visitor, MemoryBank *toBank, address_t *toTop) { + MemoryBank *curBank = tail; + address_t *curTop = top; + if (curBank == nullptr) { + return; + } + while (curBank != toBank) { + address_t *start = curBank->BeginSlotAddr(); + address_t *end = curTop; + for (address_t *iter = start; iter < end; ++iter) { + visitor(*iter); + } + curBank = curBank->prev; + if (curBank != nullptr) { + curTop = curBank->EndSlotAddr(); + } else { + __MRT_ASSERT(toBank == nullptr && toTop == nullptr, "unexpected"); + curTop = nullptr; + } + } + // curBank is endBank, visit from curTop/end to endTop + if (curTop != toTop) { + for (address_t *iter = toTop; iter < curTop; ++iter) { + visitor(*iter); + } + } + } +}; + +class MObject; +class MArray; +class MString; +// Indirect holder for Java heap object in runtime, points to a slot in HandleArena +// It can be passed to callee, but cannot returned directly. 
+// Hierarchy: +// HandleBase +// ObjHandle +// ObjHandle +// ObjHandle +class HandleBase { + public: + virtual ~HandleBase() { + handle = nullptr; + } + inline address_t AsRaw() const { + return *handle; + } + inline jobject AsJObj() const { + return reinterpret_cast(AsRaw()); + } + inline jobjectArray AsJObjArray() const { + return reinterpret_cast(AsRaw()); + } + inline MObject *AsObject() const { + return reinterpret_cast(AsRaw()); + } + inline MArray *AsArray() const { + return reinterpret_cast(AsRaw()); + } + inline bool operator==(const HandleBase &ref) const { + return AsRaw() == ref.AsRaw(); + } + inline bool operator==(const address_t otherObj) const { + return AsRaw() == otherObj; + } + inline address_t Return() { + address_t old = AsRaw(); + *handle = 0; + return old; + } + inline jobject ReturnJObj() { + return reinterpret_cast(Return()); + } + inline MObject *ReturnObj() { + return reinterpret_cast(Return()); + } + protected: + inline void Release() noexcept { + if (handle != nullptr) { + *handle = 0; + } + } + void Push(address_t ref); + address_t *handle = nullptr; // indirect pointer(reference) of raw heap object pointer +}; + +// RC collector is supported in maple, rc count should be maintained in the Handle. +// Some Handle do produce rc count, for example return value from another function, +// it should use the following pattern: +// ObjectHandle newObj(NewObject(...)) +// +// But some handle do not produce rc count, for example the parameter arguments, it +// should use the following pattern: +// ObjectHandle argment(arg0) +template +class ObjHandle : public HandleBase { + using pointer = typename std::add_pointer::type; + using lref = typename std::add_lvalue_reference::type; + public: + explicit ObjHandle(address_t ref) { + Push(ref); + } + explicit ObjHandle(jobject ref) { + Push(reinterpret_cast(ref)); + } + explicit ObjHandle(MObject *object) { + Push(reinterpret_cast(object)); + } + explicit ObjHandle(MString *str) { + Push(reinterpret_cast(str)); + } + explicit ObjHandle(MArray *array) { + Push(reinterpret_cast(array)); + } + ~ObjHandle() { + if (!needRC) { + Release(); + } + } + inline pointer operator->() { + return reinterpret_cast(AsRaw()); + } + inline pointer operator()() { + return reinterpret_cast(AsRaw()); + } + inline lref operator*() { + return reinterpret_cast(AsRaw()); + } +}; + +// Used to snapshot the arena usage. +// It should be used before Handle allocation (means ObjectHandle declaration), +// the current top of the arena is recorded, and the memory above the recorded +// top is returned upon the destruction of ScopedHandles. +class ScopedHandles { + public: + ScopedHandles(); + ~ScopedHandles(); + + ScopedHandles(const ScopedHandles&) = delete; + ScopedHandles(ScopedHandles&&) = delete; + ScopedHandles &operator=(const ScopedHandles&) = delete; + ScopedHandles &operator=(ScopedHandles&&) = delete; + private: + HandleArena snapshot; +}; +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/collector/collector.h b/src/mrt/compiler-rt/include/collector/collector.h new file mode 100644 index 0000000000..696f3f6622 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector.h @@ -0,0 +1,559 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_COLLECTOR_H +#define MAPLE_RUNTIME_COLLECTOR_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "collector/stats.h" +#include "collector/satb_buffer.h" +#include "mm_utils.h" +#include "mm_config.h" +#include "address.h" +#include "gc_reason.h" +#include "mpl_thread_pool.h" +#include "gc_callback.h" +#include "arena.h" +#include "exception/stack_unwinder.h" +#include "syscall.h" + +constexpr uint32_t kMutatorDebugTrue = 0x12345678; // Used to debug STW mutator damage +constexpr uint32_t kMutatorDebugFalse = 0x88665544; +constexpr uint32_t kMutatorDebugMagic1 = 0x11223344; +constexpr uint32_t kMutatorDebugMagic2 = 0x22334455; +constexpr uint32_t kMutatorDebugMagic3 = 0x33445566; +constexpr uint32_t kMutatorDebugMagic4 = 0x44556677; + +constexpr int32_t kLargArraySize = 63; + +namespace maplert { +// The GC-related information after user data in the GCTIB. +struct GCTibGCInfo { + // GC header prototype + uint32_t headerProto; + // Number of bitmap words. + uint32_t nBitmapWords; + + // An array of bitmap words. Length is `nBitmapWords`. + uint64_t bitmapWords[]; +}; + +extern "C" struct GCTibGCInfo MCC_GCTIB___EmptyObject; +extern "C" struct GCTibGCInfo MCC_GCTIB___ArrayOfObject; +extern "C" struct GCTibGCInfo MCC_GCTIB___ArrayOfPrimitive; + +enum CollectorType { + kNoneCollector, // No Collector + kNaiveRC, // Naive RC + kMarkSweep, // Mark Sweep tracing GC + kNaiveRCMarkSweep // Naive RC backup tracing mark sweep collector +}; + +// Record gcIsGCOnly symbol address in echo RC compiled maple so +// If not found, no action is need. +// 1. At load time, check collector type and update +// 2. At fork time/collector create time, update all according to new collector type +class RegisteredCollectorTypeCheckAddrs { + public: + static RegisteredCollectorTypeCheckAddrs &Instance() { + if (UNLIKELY(instance == nullptr)) { + LOG(FATAL) << "Create RegisteredCollectorTypeCheckAddrs instance failed" << maple::endl; + } + return *instance; + } + RegisteredCollectorTypeCheckAddrs() = default; + ~RegisteredCollectorTypeCheckAddrs() = delete; + void Register(uint64_t *addr); + void PostCollectorCreate(); + private: + std::vector addrs; + std::mutex registerLock; + static RegisteredCollectorTypeCheckAddrs *instance; + static constexpr uint32_t kRegistedMagicNumber = 0x4d634734; +}; + +// Central garbage identification algorithm. +class Collector { + public: + Collector(); + virtual ~Collector() = default; + + // Initializer and finalizer. Default: no-op + // + // They are called from the MRT_GCInitGlobal and MRT_GCFiniGlobal, + // respectively. We can assume that they can only be called after global + // variables are constructed (i.e. after the constructor of theCollector + // and theAllocator). + virtual void Init(); + virtual void Fini() {} + static void SwitchToGCOnFork(); + static void Create(bool gcOnly); + static inline Collector &Instance() noexcept { + return *instance; + } + static inline Collector *InstancePtr() noexcept { + return instance; + } + const std::string &GetName(); + + // Initialize after fork (in child process). 
+ virtual void InitAfterFork() { + if (type != kNaiveRCMarkSweep && !isSystem) { + processState = kProcessStateJankPerceptible; + startupPhase.store(true, std::memory_order_release); + } + stats::gcStats->OnCollectorInit(); + } + + // GC related + // Start/Stop GC thread(s). + virtual void StartThread(bool isZygoteProcess __attribute__((unused))) {} + virtual void StopThread() {} + virtual void JoinThread() {} + + // This pure virtual function implements the triggering of GC. + // + // reason: Reason for GC. + // unsafe: Trigger from unsafe context, e.g., holding a lock, in the middle of an alloc. + // In order to prevent deadlocks, unsafe trigger will not automatically + // enter safe region, instead the triggering mutator will enter safe region + // later naturally (e.g., from a yieldpoint). + // + // Return value: + // 0: This invocation did not trigger GC. + // 1: Other threads have triggered GC, and we waited for the GC to finish. + // 2: This invocation triggered GC, and we waited until GC finished. + virtual void InvokeGC(GCReason reason, bool unsafe = false) = 0; + + // Wait GC finished. + virtual void WaitGCStopped() {} + virtual bool IsGcRunning() const = 0; + virtual bool IsGcTriggered() const = 0; + virtual bool IsConcurrentMarkRunning() const = 0; + virtual bool IsGarbage(address_t obj) = 0; + virtual void StackScanBarrierInMutator() = 0; + virtual reffield_t RefFieldLoadBarrier(const address_t obj, const reffield_t &field) = 0; + + // Perform a clean-up GC after the application has finished. + // This is useful for detecting memory cells that should have been reclaimed but are not. + // + // This function must only be invoked after all mutator threads are + // terminated, but before the global collector is finalized (i.e. Call + // MRT_DebugCleanup() before calling MRT_GCFiniGlobal()). + virtual void DebugCleanup() {} + + // This function is used to deal with neighbours during sweeping. + // For gc collector, it's a noop. + // For rc collector, it needs to collect dead neighbours for further operation. 
+ virtual void HandleNeighboursForSweep(address_t, std::vector&) {} + + // Utils + virtual void SetIsSystem(bool system) { + isSystem.store(system, std::memory_order_release); + } + void SetIsZygote(bool zygote) { + isZygote.store(zygote, std::memory_order_release); + } + bool IsZygote() { + return isZygote.load(std::memory_order_relaxed); + } + CollectorType Type() noexcept { + return type; + } + virtual void DumpHeap(const std::string &tag __attribute__((unused))) {} + virtual void DumpFinalizeGarbage() {} + virtual void DumpGarbage() {} + virtual void DumpCleaner() {} + virtual void DumpWeakSoft() {} + virtual void PostNewObject(address_t obj __attribute__((unused))) {} + + // Common object API + virtual void ObjectArrayCopy(address_t src, address_t dst, int32_t srcIndex, int32_t dstIndex, int32_t count, + bool check) = 0; + virtual void PostObjectClone(address_t src, address_t dst) = 0; + // Unsafe + virtual bool UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, + address_t expectedValue, address_t newValue) = 0; + virtual address_t UnsafeGetObjectVolatile(address_t obj, ssize_t offset) = 0; + virtual address_t UnsafeGetObject(address_t obj, ssize_t offset) = 0; + virtual void UnsafePutObject(address_t obj, ssize_t offset, address_t newValue) = 0; + virtual void UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue) = 0; + virtual void UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue) = 0; + + // Status info + void UpdateProcessState(ProcessState processState, bool isSystemServer); + + // Returns true if we currently not care about long mutator pause. + inline bool InJankImperceptibleProcessState() const { + return processState == kProcessStateJankImperceptible; + } + inline bool InStartupPhase() { + return startupPhase.load(std::memory_order_acquire); + } + + inline void EndStartupPhase() { + startupPhase.store(false, std::memory_order_release); + } + + protected: + CollectorType type = kNoneCollector; + bool IsSystem() { + return isSystem.load(std::memory_order_acquire); + } + + private: + // Indicates whether we care about pause time. + ProcessState processState; + // set true after app fork and set false after startup background GC inovke GC + std::atomic startupPhase = { false }; + + // Set true when gc (backup tracing) is running. + std::atomic isSystem = { true }; + std::atomic isZygote = { true }; + + // instance + static Collector *instance; +}; + +struct UnwindContext; + +// Per-thread Collector context for each mutator (application thread). +// +// Strictly speaking, memory allocation operations (NewObj, NewObjFlexible, ...) +// also belongs to the mutator class, because collector and allocator are +// usually tightly-coupled. In MapleJava, currently at least, both RC and MS +// use free-list allocator, so we consider the allocator as a shared resource. +// +// If we ever decide to introduce bump-pointer allocators, we will need to +// refine the interface further. +class Mutator { + friend class MutatorList; + public: + enum StackScanState : int { + kNeedScan, + kInScan, + kFinishScan, + }; + + // Called when a thread starts and finishes, respectively. + virtual void Init() { + concurrentMarking = Collector::Instance().IsConcurrentMarkRunning(); + } + virtual void Fini() {} + virtual ~Mutator() { + tid *= -1; + currentCompiledMethod = nullptr; + if (satbNode != nullptr) { + SatbBuffer::Instance().RetireNode(satbNode); + satbNode = nullptr; + } + } + + // Mutator is active if it is added to MutatorList. 
+ __attribute__ ((always_inline)) + inline bool IsActive() const { + if (active == kMutatorDebugTrue) { + return true; + } else if (active == kMutatorDebugFalse) { + return false; + } else { + DumpRaw(); + LOG(FATAL) << "active crash " << std::hex << active << std::dec << maple::endl; + return false; + } + } + + // Sets 'in saferegion' state of this mutator. + __attribute__ ((always_inline)) + inline void SetInSaferegion(bool b) { + inSaferegion = b ? kMutatorDebugTrue : kMutatorDebugFalse; + } + + // Gets 'in saferegion' state of this mutator. + // Returns true if this mutator is in saferegion, otherwise false. + __attribute__ ((always_inline)) + inline bool InSaferegion() const { + if (inSaferegion == kMutatorDebugTrue) { + return true; + } else if (inSaferegion == kMutatorDebugFalse) { + return false; + } else { + DumpRaw(); + LOG(FATAL) << "inSaferegion crash " << std::hex << inSaferegion << std::dec << maple::endl; + return false; + } + } + + // Force this mutator enter saferegion, internal use only. + __attribute__ ((always_inline)) + inline void DoEnterSaferegion(); + + // Force this mutator leave saferegion, internal use only. + __attribute__ ((always_inline)) + inline void DoLeaveSaferegion(); + + // Let this mutator enter saferegion. + __attribute__ ((always_inline)) + inline bool EnterSaferegion(bool rememberLastJavaFrame); + + // Let this mutator leave saferegion. + __attribute__ ((always_inline)) + inline bool LeaveSaferegion(); + + // Save the start stack pointer. + __attribute__ ((always_inline)) + inline void SaveStackBegin(void *begin) { + stackBegin = reinterpret_cast(begin); + InitTid(); + } + + // Save the end stack pointer. + __attribute__ ((always_inline)) + inline void SaveStackEnd(void *end) { + stackEnd = reinterpret_cast(end); + } + + // Clear stack begin/end. + void ClearStackInfo() { + stackBegin = 0; + stackEnd = 0; + GetLastJavaContext().frame.Reset(); + } + + // Init after fork. + void InitAfterFork() { + // tid changed after fork, + // so we re-initialize it. + InitTid(); + } + + void VisitJavaStackRoots(const AddressVisitor &func) { + MapleStack::VisitJavaStackRoots(GetLastJavaContext(), func, tid); + } + + void VisitNativeStackRoots(const RefVisitor &func) { + arena.VisitGCRoots(func); + } + + ATTR_NO_SANITIZE_ADDRESS + inline void VisitStackSlotsContent(const AddressVisitor &func) { + if (UNLIKELY(stackBegin == 0)) { + return; + } + // we scan stack by 32bit now, this is a workaround for stack allocated objects. + // we should restore 64bit stack scan if scalar replacement implemented in future + for (address_t slot = stackEnd; slot < stackBegin; slot += sizeof(reffield_t)) { + func(static_cast(*reinterpret_cast(slot))); + } + } + + void DebugShow() const; + + // Called on the return value of java.lang.ref.Reference.get() before + // returning to the caller, and also when decoding a weak global reference in + // JNI. + // + // Concurrent collectors need to handle this carefully to synchronize between + // Reference.get() and the concurrent marking threads. Particularly, if + // referent is not null, it shall not be prematurely reclaimed while the + // mutator is still using it. + // + // This mechanism cannot handle the clearing of Reference instances. + // According to the Java API, if an object is no longer softly/weakly + // reachable, all soft/weak references to that object must be ATOMICALLY + // cleared. 
In other words, mutators must not see some soft/weak references
+  // to that object cleared while other soft/weak references to that object not
+  // cleared. Moreover, weak global references are cleared at a different time
+  // than java.lang.ref.WeakReference. Specifically, they are not cleared until the
+  // referent is finalized. We should refine the API and handle the two cases
+  // differently if we want to support concurrent reference cleaning.
+  //
+  // referent: The referent of the Reference object
+  virtual void WeakRefGetBarrier(address_t referent __attribute__((unused))) {
+    // By default, this barrier does nothing
+  }
+
+  inline HandleArena &GetHandleArena() noexcept {
+    return arena;
+  }
+
+  inline StackScanState GetScanState() {
+    return scanState;
+  }
+
+  inline void SetScanState(StackScanState state) {
+    scanState.store(state);
+  }
+
+  inline bool TrySetScanState(bool wait) {
+    while (true) {
+      StackScanState state = scanState.load();
+      if (state == kFinishScan) {
+        return false;
+      }
+
+      if (state == kInScan) {
+        if (wait) {
+          int *stateAddr = reinterpret_cast<int*>(&scanState);
+          if (UNLIKELY(maple::futex(stateAddr, FUTEX_WAIT, static_cast<int>(state), nullptr, nullptr, 0) != 0)) {
+            LOG(ERROR) << "futex wait failed, " << errno << maple::endl;
+          }
+        } else {
+          return false;
+        }
+      }
+
+      if (state == kNeedScan && scanState.compare_exchange_weak(state, kInScan)) {
+        return true;
+      }
+    }
+  }
+
+  inline void FinishStackScan(bool notify) {
+    scanState.store(kFinishScan);
+    if (notify) {
+      int *stateAddr = reinterpret_cast<int*>(&scanState);
+      (void)maple::futex(stateAddr, FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0);
+    }
+  }
+
+  __attribute__ ((always_inline))
+  inline void StackScanBarrier();
+
+  uint32_t GetTid() const {
+    return tid;
+  }
+
+  uintptr_t GetStackBegin() const {
+    return stackBegin;
+  }
+
+  // get stack scanning range size in bytes.
+  size_t GetStackSize() const {
+    // we assume the stack grows downward.
+ if (UNLIKELY(stackBegin == 0)) { + return 0; + } + return static_cast(stackBegin - stackEnd); + } + + void SetCurrentCompiledMethod(void *func) { + currentCompiledMethod = func; + } + void *GetCurrentCompiledMethod() { + return currentCompiledMethod; + } + + InitialUnwindContext &GetInitialUnwindContext() { + return initialUnwindContext; + } + + // should rename to GetUnwindContext + UnwindContext &GetLastJavaContext() { + return GetInitialUnwindContext().GetContext(); + } + + void DumpRaw() const { + LOG(ERROR) << "addr : " << this << " tid : " << tid << + std::hex << + " state : " << inSaferegion << + " active : " << active << + " magic3_ : " << magc3 << + " magic2_ : " << magc2 << + " magic1_ : " << magc1 << + std::dec << maple::endl; + } + void CopyStateOnFork(Mutator &orig); + + template + void SatbWriteBarrier(address_t obj) { + if (concurrentMarking) { + if (kPreserveSelf) { + PushIntoSatbBuffer(obj); + } else { + PushChildrenToSatbBuffer(obj); + } + } + } + + void SatbWriteBarrier(address_t obj, const reffield_t &field); + + void PushIntoSatbBuffer(address_t obj) { + if (LIKELY(IS_HEAP_ADDR(obj))) { + if (SatbBuffer::Instance().ShouldEnqueue(obj)) { + SatbBuffer::Instance().EnsureGoodNode(satbNode); + satbNode->Push(obj); + } + } + } + + void PushChildrenToSatbBuffer(address_t obj); + + const SatbBuffer::Node *GetSatbBufferNode() const { + return satbNode; + } + + void ResetSatbBufferNode() { + satbNode = nullptr; + } + + void SetConcurrentMarking(bool status) { + concurrentMarking = status; + } + private: + void InitTid(); + + // mutator become active if it is added to MutatorList. + uint32_t active = kMutatorDebugFalse; + + // in saferegion, mutator will not access any managed objects. + uint32_t inSaferegion = kMutatorDebugFalse; + + // the begin stack pointer for stack scanning. + address_t stackBegin = 0; + uint32_t magc3 = kMutatorDebugMagic3; + // the last stack pointer for stack scanning. + address_t stackEnd = 0; + // thread id + uint32_t magc2 = kMutatorDebugMagic2; + uint32_t tid = kMutatorDebugMagic4; + uint32_t magc1 = kMutatorDebugMagic1; + + // arena is used to save local object pointer in native function + // for precise stack scan. + HandleArena arena; + + // indicate whether the stack of current mutator is scanned + std::atomic scanState = { kFinishScan }; + + // initial context for unwinding java call stack + InitialUnwindContext initialUnwindContext; + void *currentCompiledMethod = nullptr; + + // satb buffer + bool concurrentMarking = false; + SatbBuffer::Node *satbNode = nullptr; +}; +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/collector/collector_ms.h b/src/mrt/compiler-rt/include/collector/collector_ms.h new file mode 100644 index 0000000000..549a55ef61 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector_ms.h @@ -0,0 +1,307 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_COLLECTOR_MS_H +#define MAPLE_RUNTIME_COLLECTOR_MS_H + +#include + +#include "address.h" +#include "allocator.h" +#include "collector_tracing.h" + +namespace maplert { +class MarkTask; +class ConcurrentMarkTask; + +// max size of work stack for a single mark task. +static constexpr size_t kMaxMarkTaskSize = 128; + +#if BT_CLEANUP_PROFILE +class BTCleanupStats{ + public: + static size_t rootSetSize; + static size_t totalRemain; + static size_t reachableRemain; + static size_t unreachableRemain; +}; +#endif + +class MarkSweepCollector : public TracingCollector { + friend MarkTask; + friend ConcurrentMarkTask; + public: + MarkSweepCollector() : TracingCollector() { + type = kMarkSweep; +#if CONFIG_JSAN + enableConcurrent = false; + isInitializedConcurrent = true; +#endif + lastTriggerTime = timeutils::NanoSeconds(); + } + + virtual ~MarkSweepCollector() = default; + + inline bool IsGcTriggered() const override { + return isGcTriggered.load(std::memory_order_relaxed); + } + + // always enable concurrent GC unless it is OOM. + inline bool IsConcurrent(GCReason reason) { + if (UNLIKELY(!isInitializedConcurrent)) { + InitConcurrentMarkSweep(); + } + return enableConcurrent && (reasonCfgs[reason].isConcurrent); + } + + // return true if concurrent mark is running. + inline bool IsConcurrentMarkRunning() const override { + return concurrentMarkRunning; // need move to collector-ms + } + + inline void SetConcurrentMarkRunning(bool state) { + // let allocator know +#if ALLOC_USE_FAST_PATH + FastAllocData::data.isConcurrentMarking = state; +#endif + concurrentMarkRunning = state; + } + + // Handle newly allocated objects during concurrent marking. + inline void PostNewObject(address_t obj) override { // move to concurrent marking ms + // When concurrent mark is running, we need to set the newly + // allocated object as marked to prevent it be swept by GC. + if (UNLIKELY(IsConcurrentMarkRunning())) { + (void)MarkObject(obj); + newObjDuringMarking.fetch_add(1, std::memory_order_relaxed); + } + } + + // Handle to-be-free objects during concurrent marking. + // return true if concurrent mark is running. + virtual bool PreFreeObject(address_t) { + return false; + } + + virtual bool IsGarbageBeforeResurrection(address_t addr) { + return !markBitmap.IsObjectMarked(addr); + } + + // barrier for stack scan in mutator + void StackScanBarrierInMutator() override; + + // pre/post hook + virtual void PreMSHook(); + virtual void PreSweepHook(); + virtual void PostMSHook(uint64_t gcIndex); + + void DebugCleanup() override {}; + void Fini() override; + + virtual reffield_t RefFieldLoadBarrier(const address_t obj, const reffield_t &field) override { + const address_t fieldAddr = reinterpret_cast(&field); + const MClass *cls = reinterpret_cast(obj)->GetClass(); + const char *clsName = (cls != 0) ? cls->GetName() : ""; + const address_t offset = fieldAddr - obj; + LOG(ERROR) << "Bad ref field! " << + " cls:" << clsName << + std::hex << + " obj:" << obj << + " off:" << offset << + " val:" << field << + std::dec << + maple::endl; + AbortWithHeader(obj); + return 0; + } + + protected: + // statistic values. + std::atomic newlyMarked = { 0 }; + std::atomic newObjDuringMarking = { 0 }; // new objects during concurrent marking. + std::atomic freedObjDuringMarking = { 0 }; // freed objects during concurrent marking. + std::atomic renewObjDuringMarking = { 0 }; // renew objects during concurrent marking. + + // Copy object child refs into a vector. 
+  template<typename RefContainer>
+  void CopyChildRefs(reffield_t obj, RefContainer &refs, bool collectRefs = false) {
+    address_t referentAddr = 0;
+    if (UNLIKELY(IsObjReference(obj))) {
+      // referent field address for Reference object.
+      referentAddr = obj + WellKnown::kReferenceReferentOffset;
+      // Move the decision of clearing referent to concurrent mark phase.
+      // Referent of soft references can be cleared or not depending on the implementation
+      // of gc.
+      // Whether or not to mark the referent is decided by the soft reference policy.
+      // If the referent needs to be kept alive, reset the skipping field indicator
+      // "referent_addr". Otherwise, the skipping field indicator is kept intact.
+      MClass *klass = reinterpret_cast<MObject*>(static_cast<address_t>(obj))->GetClass();
+      uint32_t classFlag = klass->GetFlag();
+      if ((classFlag & modifier::kClassSoftReference) &&
+          !ReferenceProcessor::Instance().ShouldClearReferent(gcReason)) {
+        referentAddr = 0;
+      }
+    }
+
+    // copy child refs into refs vector.
+    auto refFunc = [this, obj, &refs, referentAddr, collectRefs](reffield_t &field, uint64_t kind) {
+      // skip referent field.
+      const address_t fieldAddr = reinterpret_cast<address_t>(&field);
+      if (UNLIKELY((kind == kUnownedRefBits))) {
+        return;
+      }
+      if (Type() == kNaiveRCMarkSweep && fieldAddr == referentAddr) {
+        // RC mode skip referent
+        return;
+      }
+
+      // ensure that we loaded a valid reference, not a lock flag
+      // set by functions like LoadRefVolatile().
+      reffield_t ref = field;
+      if (UNLIKELY((ref & LOAD_INC_RC_MASK) != 0)) {
+        ref = RefFieldLoadBarrier(obj, field);
+      }
+      if ((fieldAddr == referentAddr) && IS_HEAP_ADDR(ref)) {
+        __MRT_ASSERT(Type() != kNaiveRCMarkSweep, "GCOnly allowed");
+        address_t referent = RefFieldToAddress(ref);
+        if (referent != 0 && ReferenceGetPendingNext(obj) == 0 && IsGarbage(referent)) {
+          if (collectRefs) {
+            finalizerFindReferences.push_back(obj);
+          } else {
+            GCReferenceProcessor::Instance().DiscoverReference(obj);
+          }
+        }
+        return;
+      }
+
+      // skip non-heap ref fields.
+      if (LIKELY(IS_HEAP_ADDR(ref))) {
+        refs.push_back(ref);
+      }
+    };
+    // If weak field copying needs to be skipped, the field is updated as part of GC processing.
+    DoForEachRefField(obj, refFunc);
+  }
+
+  virtual bool CheckAndPrepareSweep(address_t addr) {
+    return (IsGarbage(addr) && !IsMygoteObj(addr));
+  }
+
+  void AddMarkTask(RootSet &rs);
+  void AddConcurrentMarkTask(RootSet &rs);
+  void ParallelMark(WorkStack &workStack, bool followReferent = false);
+  // concurrent marking.
+  void ConcurrentMark(WorkStack &workStack, bool parallel, bool scanRoot);
+  // prepare for concurrent mark, called at the end of stop-the-world-1.
+  void ConcurrentMarkPrepare();
+
+  virtual void ConcurrentMarkPreparePhase(WorkStack &workStack, WorkStack &inaccurateRoots);
+  virtual void RunFullCollection(uint64_t gcIndex) override;
+  virtual void InitReferenceWorkSet() {}
+  virtual void ResetReferenceWorkSet() {}
+  virtual void ParallelDoReference(uint32_t) {}
+  virtual void ConcurrentDoReference(uint32_t) {}
+  virtual void ReferenceRefinement(uint32_t) {}
+
+  // Unsynchronized reference counting manipulation.
+  // Only usable during stop-the-world.
+  // If it is not guaranteed that objAddr is in the heap, use the check version.
+ virtual void DecReferentUnsyncCheck(address_t, bool) {} + virtual void DecReferentSyncCheck(address_t, bool) {} + private: + bool enableConcurrent = true; + bool isInitializedConcurrent = false; + + // resurrect candidate (unmarked finalizables) collected during cm + WorkStack resurrectCandidates; + WorkStack finalizerFindReferences; + + // the flag to indicate whether concurrent mark is running. + // no need atomic here because this flag is changed when the world is stopped. + bool concurrentMarkRunning = false; + + inline void InitConcurrentMarkSweep() { + enableConcurrent = + !(MRT_ENVCONF(MRT_IS_NON_CONCURRENT_GC, MRT_IS_NON_CONCURRENT_GC_DEFAULT) || VLOG_IS_ON(nonconcurrentgc)); + isInitializedConcurrent = true; + } + + void RunMarkAndSweep(uint64_t gcIndex = 0); + + // common phases. + void ResurrectionPhase(bool isConcurrent); + void PrepareTracing(); + void FinishTracing(); + void DumpAfterGC(); + void ScanStackAndMark(Mutator &mutator); + void DoWeakGRT(); + void DoResurrection(WorkStack &workStack); + void ResurrectionCleanup(); + + /// parallel phases. + void ParallelMarkAndSweep(); + void ParallelMarkPhase(); + void ParallelScanMark(RootSet *rootSets, bool processWeak, bool rootString); + void ParallelResurrection(WorkStack &workStack); + void ParallelSweepPhase(); + + // concurrent phases. + void ConcurrentMarkAndSweep(); + void ConcurrentMSPreSTW1(); + void ConcurrentMSPostSTW2(); + void ConcurrentMarkPhase(WorkStack &&workStack, WorkStack &&inaccurateRoots); + // concurrent re-marking. + void ConcurrentReMark(bool parallel); + // cleanup for concurrent mark, called in the beginning of stop-the-world-2. + void ConcurrentMarkCleanupPhase(); + void ConcurrentMarkCleanup(); + void ConcurrentPrepareResurrection(); + void ConcurrentSweepPreparePhase(); + void ConcurrentSweepPhase(); + void ConcurrentStackScan(); + void ConcurrentStaticRootsScan(bool parallel); + void ConcurrentMarkFinalizer(); + void ConcurrentAddFinalizerToRP(); + + // pre-write barrier for concurrent marking. + // return true if success saved the object to mod-buf. + bool ConcurrentMarkPreWriteBarrier(address_t obj); + + inline WorkStack NewWorkStack() { + constexpr size_t kWorkStackInitCapacity = 65536UL; + WorkStack workStack; + workStack.reserve(kWorkStackInitCapacity); + return workStack; + } + void ScanSingleStaticRoot(address_t *rootAddr, TracingCollector::WorkStack &workStack); + void EnqueueNeighbors(address_t objAddr, WorkStack &workStack); + + // enqueue neighbors for a java.lang.Reference + void EnqueueNeighborsForRef(address_t objAddr, WorkStack &workStack); + + static inline void Enqueue(address_t objAddr, WorkStack &workStack) { + workStack.push_back(objAddr); + } +}; + +class MarkSweepMutator : public TracingMutator { + public: + MarkSweepMutator(Collector &collector) : TracingMutator(collector) { + __MRT_ASSERT(collector.Type() == kMarkSweep, "collector must be MarkSweepCollector"); + } + ~MarkSweepMutator() = default; +}; +} + +#endif diff --git a/src/mrt/compiler-rt/include/collector/collector_naiverc.h b/src/mrt/compiler-rt/include/collector/collector_naiverc.h new file mode 100644 index 0000000000..40b51a2ef9 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector_naiverc.h @@ -0,0 +1,237 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_COLLECTOR_NAIVERC_H +#define MAPLE_RUNTIME_COLLECTOR_NAIVERC_H + +#include +#include +#include +#include "pthread.h" +#include "collector_rc.h" +#include "collector_naiverc_ms.h" +#include "cycle_collector.h" +#include "rc_inline.h" + +namespace maplert { +class NaiveRCCollector : public RCCollector { + public: + NaiveRCCollector() : RCCollector() { + type = kNaiveRC; + } + ~NaiveRCCollector() = default; + void Init() override; + void InitAfterFork() override; + void StartThread(bool isZygote) override; + void StopThread() override; + void JoinThread() override; + void Fini() override; + void DebugCleanup() override { + ms.DebugCleanup(); + } + + void ObjectArrayCopy(address_t src, address_t dst, int32_t srcIndex, int32_t dstIndex, int32_t count, + bool check = false) override; + void PostObjectClone(address_t src, address_t dst) override; + bool UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, address_t expectedValue, address_t newValue) override; + address_t UnsafeGetObjectVolatile(address_t obj, ssize_t offset) override; + address_t UnsafeGetObject(address_t obj, ssize_t offset) override; + void UnsafePutObject(address_t obj, ssize_t offset, address_t newValue) override; + void UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue) override; + void UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue) override; + + void DumpHeap(const std::string &tag) override { + return ms.DumpHeap(tag); + } + + void WaitGCStopped() override { + ms.WaitGCStopped(); + } + + bool IsGcRunning() const override { + return ms.IsGcRunning(); + } + + bool IsGcTriggered() const override { + return ms.IsGcTriggered(); + } + + bool IsConcurrentMarkRunning() const override { + return ms.IsConcurrentMarkRunning(); + } + + reffield_t RefFieldLoadBarrier(const address_t obj, const reffield_t &field) override { + return ms.RefFieldLoadBarrier(obj, field); + } + + void SetIsSystem(bool system) override { + Collector::SetIsSystem(system); + ms.SetIsSystem(system); + } + + void HandleNeighboursForSweep(address_t obj, std::vector &deads) override; + // 1. Initialize rc to 1 + // 2. Handle newly allocated objects during concurrent marking. + void PostNewObject(address_t obj) override { + // rc initialized with value 1. + RefCountLVal(obj) = 1 + kWeakRCOneBit; + ms.PostNewObject(obj); + } + + // Handle to-be-free objects during concurrent marking. + // return true if concurrent mark is running. + inline bool PreFreeObject(address_t obj) { // need remove adn direclty invoke this method in RC Free Obj + return ms.PreFreeObject(obj); + } + + // Return true if the object is a garbage to be swept. + bool IsGarbage(address_t obj) override { + return ms.IsGarbage(obj); + } + + // barrier to help gc thread to do stack scan + void StackScanBarrierInMutator() override { + // need move to collector.h and doesn't need muattor& parameter because its always current mutator. 
+ ms.StackScanBarrierInMutator(); + } + + void InvokeGC(GCReason reason, bool unsafe = false) override; + + void SetHasWeakRelease(bool val) { + hasWeakRelease.store(val, std::memory_order_release); + } + bool HasWeakRelease() { + return hasWeakRelease.load(std::memory_order_acquire); + } + private: + NaiveRCMarkSweepCollector ms; // Private back-up tracer + std::atomic hasWeakRelease = { false }; // only used in zygote fork +}; + +class NaiveRCMutator : public RCMutator { + public: + NaiveRCMutator(Collector &baseCollector) + : collector(static_cast(baseCollector)), + releaseQueue(nullptr), + cycleDepth(0), + weakReleaseDepth(0) { + __MRT_ASSERT(baseCollector.Type() == kNaiveRC, "collector must be NaiveRCCollector"); + } + + virtual ~NaiveRCMutator() = default; + + void Init() override { + RCMutator::Init(); + if (releaseQueue == nullptr) { + releaseQueue = new (std::nothrow) std::deque(); + if (UNLIKELY(releaseQueue == nullptr)) { + LOG(FATAL) << "new deque failed" << maple::endl; + } + } + } + + void Fini() override { + RCMutator::Fini(); + if (releaseQueue != nullptr) { + delete releaseQueue; + releaseQueue = nullptr; + } + } + + void IncRef(address_t obj); + void DecRef(address_t obj); + void DecChildrenRef(address_t obj); + void ReleaseObj(address_t obj) override; + void WeakReleaseObj(address_t obj); + + address_t LoadIncRef(address_t *fieldAddr); + + address_t LoadRefVolatile(address_t *fieldAddr, bool loadReferent = false); + void WriteRefFieldVolatile(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent = false, bool isResurrectWeak = false); + void WriteRefFieldVolatileNoInc(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent = false, bool isResurrectWeak = false); + void WriteRefFieldVolatileNoDec(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent = false, bool isResurrectWeak = false); + void WriteRefFieldVolatileNoRC(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent = false); + + address_t LoadIncRefCommon(address_t *fieldAddr); + + // For Reference and JNI weak glboal + void IncWeak(address_t obj); + void IncResurrectWeak(address_t obj); + void DecWeak(address_t obj); + + // write barrier for local reference variable update. + void WriteRefVar(address_t *var, address_t value); + void WriteRefVarNoInc(address_t *var, address_t value); + + // writer barrier for object reference field update. + void WriteRefField(address_t obj, address_t *field, address_t value); + void WriteRefFieldNoDec(address_t obj, address_t *field, address_t value); + void WriteRefFieldNoInc(address_t obj, address_t *field, address_t value); + void WriteRefFieldNoRC(address_t obj, address_t *field, address_t value); + + // write/load for weak field + void WriteWeakField(address_t obj, address_t *field, address_t value, bool isVolatile); + address_t LoadWeakField(address_t *fieldAddr, bool isVolatile); + address_t LoadWeakRefCommon(address_t *field); + + // release local reference variable. + void ReleaseRefVar(address_t obj); + + void WeakRefGetBarrier(address_t referent) override { + // When concurrent marking is enabled, remember the referent and + // prevent it from being reclaimed in this GC cycle. 
+ if (LIKELY(IS_HEAP_OBJ(referent))) { + SatbWriteBarrier(referent); + } + } + inline uint32_t CycleDepth() const { + return cycleDepth; + } + + inline void IncCycleDepth() { + ++cycleDepth; + } + + inline void DecCycleDepth() { + --cycleDepth; + } + + bool EnterWeakRelease(); + void ExitWeakRelease(); + + // GC releated + inline bool PreFreeObject(address_t obj) { + if (UNLIKELY(collector.IsConcurrentMarkRunning())) { + SetReleasedBit(obj); + return true; + } + return false; + } + private: + NaiveRCCollector &collector; + std::deque *releaseQueue; // queue for recurisive object release + uint32_t cycleDepth; // cycle pattern match max depth, if overflow skip cycle release + uint32_t weakReleaseDepth; // weak relase max depth, if overflow skip weak release +}; + +static inline NaiveRCMutator &NRCMutator(void) noexcept { + // tl_the_mutator is added to thread context in GCInitThreadLocal() + return *reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotMutator)); +} +} // namespace maplert +#endif // MAPLE_RUNTIME_COLLECTOR_NAIVERC_H diff --git a/src/mrt/compiler-rt/include/collector/collector_naiverc_ms.h b/src/mrt/compiler-rt/include/collector/collector_naiverc_ms.h new file mode 100644 index 0000000000..9cffc9231c --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector_naiverc_ms.h @@ -0,0 +1,294 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_COLLECTOR_NAIVERC_MS_H +#define MAPLE_RUNTIME_COLLECTOR_NAIVERC_MS_H + +#include "collector_ms.h" +#include "rc_inline.h" + +namespace maplert { +struct StatRC { + uint32_t staticFields; + uint32_t weakGlobals; + uint32_t externals; + uint32_t strings; + uint32_t references; + uint32_t allocators; + uint32_t classloaders; + uint32_t stacks; + uint32_t heaps; + uint32_t weakHeaps; + uint32_t mygote; + uint32_t mygoteWeak; + uint32_t soft; + uint32_t weak; + uint32_t phantom; // phantom contains cleaner + + uint32_t weakCount; + uint32_t resurrectWeakCount; + uint32_t weakTotal; + uint32_t accurate; +}; + +class NaiveRCMarkSweepCollector : public MarkSweepCollector { + public: + NaiveRCMarkSweepCollector(); + virtual ~NaiveRCMarkSweepCollector() {} + + // pre/post hook + virtual void PreMSHook() override; + virtual void PreSweepHook() override; + virtual void PostMSHook(uint64_t gcIndex) override; + + // dump + void DumpCycleLeak(MplThreadPool &threadPool); + void DumpCycleProfile(); + + virtual void DebugCleanup() override; + inline void SetRCVerify(bool verify) { + rcVerification = verify; + } + + // Return true if the object is a garbage to be swept. + inline bool IsGarbage(address_t objAddr) override { + bool result = TracingCollector::IsGarbage(objAddr); + // object is garbage if it has released bit or not marked. + return (result || HasReleasedBit(objAddr)); // move to collector-naivercms + } + + bool IsGarbageBeforeResurrection(address_t addr) override { + return !markBitmap.IsObjectMarked(addr) || HasReleasedBit(addr); + } + + // Handle to-be-free objects during concurrent marking. 
+ // return true if concurrent mark is running. + inline bool PreFreeObject(address_t obj) override { + // when concurrent mark is running, we do not directly free the object, + // instead we set a released flag on it, so that it can be swept by GC. + if (UNLIKELY(IsConcurrentMarkRunning())) { + SetReleasedBit(obj); + freedObjDuringMarking.fetch_add(1, std::memory_order_relaxed); + return true; + } + // let caller directly free the object + // if concurrent mark not running. + return false; + } + + reffield_t RefFieldLoadBarrier(const address_t obj, const reffield_t &field) override { + const address_t fieldAddr = reinterpret_cast(&field); + constexpr size_t kMaxLoadRefSpinCount = 1000000; + auto &atomicRef = AddrToLValAtomic(fieldAddr); + for (size_t spinCount = 0; ; ++spinCount) { + (void)sched_yield(); + reffield_t ref = atomicRef.load(std::memory_order_acquire); + if ((ref & LOAD_INC_RC_MASK) == 0) { + return ref; + } + if (UNLIKELY(spinCount > kMaxLoadRefSpinCount)) { + const MClass *cls = reinterpret_cast(obj)->GetClass(); + const char *clsName = (cls != nullptr) ? cls->GetName() : ""; + const address_t offset = fieldAddr - obj; + LOG(ERROR) << "Bad ref field! " << " cls:" << clsName << std::hex << " obj:" << obj << + " off:" << offset << " val:" << ref << std::dec << maple::endl; + HandleRCError(obj); + } + } + return 0; + } + + protected: + static constexpr uint32_t kInitialReferenceWorkSetSize = 100; + + address_t LoadStaticRoot(address_t *rootAddr) override; + + void InitReferenceWorkSet() override { + deadSoftWeaks.reserve(kInitialReferenceWorkSetSize); + deadPhantoms.reserve(kInitialReferenceWorkSetSize); + clearReferentSoftWeaks.reserve(kInitialReferenceWorkSetSize); + clearReferentPhantoms.reserve(kInitialReferenceWorkSetSize); + } + + void ResetReferenceWorkSet() override { + deadSoftWeaks.clear(); + deadPhantoms.clear(); + clearReferentSoftWeaks.clear(); + clearReferentPhantoms.clear(); + } + + bool CheckAndPrepareSweep(address_t addr) override { + if (IsGarbage(addr) && !IsMygoteObj(addr)) { + DecNeighborsAtomic(addr); + return true; + } else { + if (UNLIKELY(IsRCCollectable(addr))) { + if (!doConservativeStackScan) { + // under precise stack scan, rc collectable objects should be garbage + LOG(FATAL) << "Potential Leak " << std::hex << addr << " " << RCHeader(addr) << " " << GCHeader(addr) << + " " << reinterpret_cast(addr)->GetClass()->GetName() << + std::dec << std::endl; + } + } + return false; + } + } + + void DecReferentUnsyncCheck(address_t objAddr, bool isResurrect) override { + if (!FastIsHeapObject(objAddr)) { + LOG2FILE(kLogTypeMix) << "(unsync) DEC object out of heap: 0x" << + std::hex << objAddr << ", ignored." 
<< std::endl; + return; + } + + // this is used for weak global and only dec garbage referent, so there is no need to check dec result + if (isResurrect) { + uint32_t oldHeader __MRT_UNUSED = UpdateRC<0, 0, -1>(objAddr); +#if __MRT_DEBUG + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetResurrectWeakRCFromRCHeader(oldHeader) != 0), + "Dec resurrect weak from 0"); +#endif + } else { + uint32_t oldHeader __MRT_UNUSED = UpdateRC<0, -1, 0>(objAddr); +#if __MRT_DEBUG + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetWeakRCFromRCHeader(oldHeader) != 0), "Dec weak from 0"); +#endif + } + +#if RC_TRACE_OBJECT + TraceRefRC(objAddr, RefCount(objAddr), "After DecReferentUnsyncCheck"); +#endif + } + + void DecReferentSyncCheck(address_t objAddr, bool isResurrect) override { + if (!FastIsHeapObject(objAddr)) { + LOG2FILE(kLogTypeMix) << "(unsync) DEC object out of heap: 0x" << + std::hex << objAddr << ", ignored." << std::endl; + return; + } + + // Need to check Dec result, because of concurrent mark: + // If referent is live (referenced by other live object) in concurrent mark stage, it may + // be dec by mutator, makes the referent rc collected. Then this dec will be the last dec. + // If referent is garbage, later sweep will free object + if (isResurrect) { + uint32_t oldHeader = AtomicUpdateRC<0, 0, -1>(objAddr); +#if __MRT_DEBUG + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetResurrectWeakRCFromRCHeader(oldHeader) != 0), + "Dec resurrect weak from 0"); +#endif + if (!IsGarbage(objAddr)) { + if (CanReleaseObj<0, 0, -1>(oldHeader) == kReleaseObject) { + RCReferenceProcessor::Instance().AddAsyncReleaseObj(objAddr, false);; + } + } + } else { + uint32_t oldHeader = AtomicUpdateRC<0, -1, 0>(objAddr); +#if __MRT_DEBUG + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetWeakRCFromRCHeader(oldHeader) != 0), "Dec weak from 0"); +#endif + if (!IsGarbage(objAddr)) { + if (CanReleaseObj<0, -1, 0>(oldHeader) == kReleaseObject) { + RCReferenceProcessor::Instance().AddAsyncReleaseObj(objAddr, false); + } + } + } + +#if RC_TRACE_OBJECT + TraceRefRC(objAddr, RefCount(objAddr), "After DecReferentSyncCheck"); +#endif + } + void ParallelDoReference(uint32_t flags) override; + void ConcurrentDoReference(uint32_t flags) override; + void ReferenceRefinement(uint32_t flags) override; + + private: + // DFX + void CheckLeakAndCycle(); + void DebugVerifyRC(); + + void DetectLeak(); + void PrintLeakRootAndRetainCount(set &garbages); + void PrintMultiNodeCycleCount(vector> &components); + void InsertSetForEachRefField(set &reachingSet, set &garbages); + void PrintLeakRoots(set &reachingSet, vector &component, size_t componentIdx); + StatRC NewStatRC(RCHashMap referentRoots[], uint32_t rpTypeNum, address_t obj); + void StatReferentRootRC(RCHashMap referentRoots[], uint32_t rpTypeNum); + void StatHeapRC(); + void PrintRCWrongDetails(address_t obj, const StatRC &statRC, const string &errMsg, uint32_t rc, uint32_t weakRC); + void PrintRCVerifyResult(std::map &weakRCDistribution, uint32_t potentialEarlyRelease, + uint32_t potentialLeak, uint32_t wrongWeakRCObjs); + inline uint32_t GetRCFromMap(address_t obj, RCHashMap &map) { + uint32_t rc = 0; + auto it = map.find(obj); + if (it != map.end()) { + rc = it->second; + } + return rc; + } + + void VerifyRC(); + void ClearRootsMap(); + virtual void PostInitTracing() override; + virtual void PostEndTracing() override; + virtual void PostParallelScanMark(bool processWeak) override; + virtual void PostParallelScanRoots() override; + virtual void ConcurrentMarkPreparePhase(WorkStack &workStack, WorkStack 
&inaccurateRoots) override; + virtual void PostParallelAddTask(bool processWeak) override; + void CollectReferentRoot(RootSet &rs, RCHashMap &map); + + void DecNeighborsAtomic(uintptr_t objAddr); + + inline void UpdateRCMap(RCHashMap &rcMap, address_t obj) { + auto it = rcMap.find(obj); + if (it != rcMap.end()) { + it->second = it->second + 1; + } else { + rcMap.insert({ obj, 1 }); + } + } + + inline void CollectRootRC(RootSet &rs, RCHashMap &rcMap) { + for (auto it = rs.begin(); it != rs.end(); ++it) { + UpdateRCMap(rcMap, *it); + } + } + + // Always reate cycle pattern from backup tracing, for unit testing + bool alwaysCreateCyclePattern; + + // The time (milli seconds) when cycle pattern learning was last performed. + uint64_t lastCyclePatternLearnMS; + // used for verifying RC + RCHashMap staticFieldRoots; + RCHashMap externalRoots; + RCHashMap weakGlobalRoots; + RCHashMap stringRoots; + RCHashMap referenceRoots; + RCHashMap allocatorRoots; + RCHashMap classloaderRoots; + RCHashMap stackRoots; + RCHashMap heapObjs; + RCHashMap heapWeakObjs; + RCHashMap mygoteWeakObjs; + RCHashMap mygoteObjs; + bool rcVerification = false; + + std::vector deadSoftWeaks; // need be in collector-naivercms + std::vector deadPhantoms; + std::vector clearReferentSoftWeaks; + std::vector clearReferentPhantoms; +}; +} +#endif diff --git a/src/mrt/compiler-rt/include/collector/collector_platform.h b/src/mrt/compiler-rt/include/collector/collector_platform.h new file mode 100644 index 0000000000..b668769af5 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector_platform.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_COLLECTOR_PLATFROM_H +#define MAPLE_RUNTIME_COLLECTOR_PLATFROM_H +namespace maplert { +class CollectorPlatform { + public: + CollectorPlatform(); + virtual ~CollectorPlatform() = default; + private: + virtual void InitAfterFork() {} +}; +} +#endif \ No newline at end of file diff --git a/src/mrt/compiler-rt/include/collector/collector_rc.h b/src/mrt/compiler-rt/include/collector/collector_rc.h new file mode 100644 index 0000000000..7c3428e496 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector_rc.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_RCCOLLECTOR_H +#define MAPLE_RUNTIME_RCCOLLECTOR_H + +#include "collector.h" + +namespace maplert { +class RCCollector : public Collector { + public: + static void PrintStats(); + static void ResetStats(); + RCCollector() : Collector() {} + virtual ~RCCollector() = default; +#if RC_PROFILE + static std::atomic numNativeInc; + static std::atomic numNativeDec; + static std::atomic numLoadIncRef; + static std::atomic numWriteRefVar; + static std::atomic numWriteRefField; + static std::atomic numReleaseRefVar; + static std::atomic numIncRef; + static std::atomic numDecRef; + static std::atomic numIncNull; + static std::atomic numDecNull; +#endif +}; + +class RCMutator : public Mutator { + public: + RCMutator() = default; + virtual ~RCMutator() = default; + void Fini() override; + virtual void ReleaseObj(address_t obj) = 0; // Recurisvely dec and free objects +}; + +#if RC_HOT_OBJECT_DATA_COLLECT +void StatsFreeObject(address_t obj); +void DumpHotObj(); +#else +#define StatsFreeObject(obj) +#define DumpHotObj() +#endif +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/collector/collector_tracing.h b/src/mrt/compiler-rt/include/collector/collector_tracing.h new file mode 100644 index 0000000000..cd8e29bebe --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/collector_tracing.h @@ -0,0 +1,419 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_COLLECTOR_TRACING_H +#define MAPLE_RUNTIME_COLLECTOR_TRACING_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "address.h" +#include "collector.h" +#include "gc_roots.h" +#include "sizes.h" +#include "heap_stats.h" +#include "mm_config.h" +#include "rc_reference_processor.h" +#include "gc_reference_processor.h" +#include "mpl_thread_pool.h" +#include "mrt_bitmap.h" +#include "task_queue.h" +#include "deps.h" + +// set 1 to enable concurrent mark test. +#define MRT_TEST_CONCURRENT_MARK __MRT_DEBUG_COND_FALSE + +namespace maplert { +// prefetch distance for mark. +#define MARK_PREFETCH_DISTANCE 16 // this macro is used for check when pre-compiling. +static constexpr int kMarkPrefetchDistance = 16; // when it is changed, remember to change MARK_PREFETCH_DISTANCE. 
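The prefetch distance defined above is presumably what the mark loop uses to hide cache-miss latency: while draining the mark work stack, the collector can issue a software prefetch for the entry it will pop kMarkPrefetchDistance iterations later, so the object header is likely in cache by the time it is visited. The following is a minimal, self-contained sketch of that pattern, not code from this patch; MarkLoopSketch and VisitObjectFields are illustrative placeholders.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using address_t = uintptr_t;

    // Stand-in for the real per-object work: mark the object and push its unmarked children.
    static void VisitObjectFields(address_t, std::vector<address_t>&) {}

    static void MarkLoopSketch(std::vector<address_t> &workStack) {
      constexpr size_t kPrefetchDistance = 16;  // keep in sync with kMarkPrefetchDistance
      while (!workStack.empty()) {
        const size_t size = workStack.size();
        if (size > kPrefetchDistance) {
          // Prefetch the object that will be popped kPrefetchDistance iterations from now.
          __builtin_prefetch(reinterpret_cast<const void*>(workStack[size - 1 - kPrefetchDistance]));
        }
        address_t obj = workStack.back();
        workStack.pop_back();
        VisitObjectFields(obj, workStack);  // may push more objects onto workStack
      }
    }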
+static constexpr int kSystemServerConcurrentThreadCount = 2; + +#if RC_TRACE_OBJECT +extern "C" { + void TraceRefRC(address_t obj, uint32_t rc, const char *msg); +} +#endif + +struct RegisteredRoots { + size_t length; // number of roots + address_t **roots; // roots array + + // constructor + RegisteredRoots(size_t len, address_t **rootList) : length(len), roots(rootList) {} +}; + +static inline bool IsUnmarkedResurrectable(address_t objAddr) { + return Collector::Instance().IsGarbage(objAddr) && IsObjResurrectable(objAddr) && !IsMygoteObj(objAddr); +} + +class GCRegisteredRoots { + public: + static GCRegisteredRoots &Instance() { + return *instance; + } + GCRegisteredRoots() { + totalRootsCount = 0; + } + ~GCRegisteredRoots() = delete; + void Register(address_t **gcRootsList, size_t len); + void Visit(RefVisitor &visitor); + bool GetRootsLocked(size_t index, address_t **&list, size_t &len); + private: + // A list of registered GC roots, includeing: + // 1. static roots + // 2. registered single runtimes roots (runtime\interpreter...) + // Each registered roots is a bucket of roots address list + std::vector roots; + size_t totalRootsCount; + std::mutex staticRootsLock; + static ImmortalWrapper instance; +}; + +class TracingCollector : public Collector { + public: + class GCTask : public ScheduleTaskBase { + public: + GCTask() : ScheduleTaskBase(ScheduleTaskType::kInvalidScheduleType), gcReason(kInvalidGCReason) {} + + GCTask(ScheduleTaskType type) : ScheduleTaskBase(type), gcReason(kInvalidGCReason) { + __MRT_ASSERT(type != ScheduleTaskType::kScheduleTaskTypeInvokeGC, "invalid gc task!"); + } + + GCTask(ScheduleTaskType type, GCReason reason) + : ScheduleTaskBase(type), gcReason(reason) { + __MRT_ASSERT(gcReason > kInvalidGCReason && gcReason < kGCReasonMax, "invalid reason"); + } + + GCTask(const GCTask &task) = default; + virtual ~GCTask() = default; + GCTask &operator=(const GCTask&) = default; + + // For a task, we give it a priority based on schedule type and gc reason. + // Termination and timeout events get highest prio, and override lower-prio tasks. + // Each gc invocation task gets its prio relative to its reason. + // This prio is used by the async task queue. 
+ static const uint32_t kPrioTerminate = 0; + static const uint32_t kPrioTimeout = 1; + static const uint32_t kPrioInvokeGC = 2; + static inline GCTask FromPrio(uint32_t prio) { + if (prio == kPrioTerminate) { + return GCTask(kScheduleTaskTypeTerminate); + } else if (prio == kPrioTimeout) { + return GCTask(kScheduleTaskTypeTimeout); + } else if (prio - kPrioInvokeGC < kGCReasonMax) { + return GCTask(kScheduleTaskTypeInvokeGC, static_cast(prio - kPrioInvokeGC)); + } + __MRT_ASSERT(false, "invalid prio"); + } + static inline GCTask DoNothing() { + return GCTask(); + } + static_assert(kPrioInvokeGC + static_cast(kGCReasonMax) <= + std::numeric_limits::digits, "task queue reached max capacity"); + inline uint32_t GetPrio() const { + if (taskType == kScheduleTaskTypeTerminate) { + return kPrioTerminate; + } else if (taskType == kScheduleTaskTypeTimeout) { + return kPrioTimeout; + } else if (taskType == kScheduleTaskTypeInvokeGC) { + return kPrioInvokeGC + static_cast(gcReason); + } + __MRT_ASSERT(false, "invalid task"); + } + inline bool IsNothing() const { + return (taskType == kInvalidScheduleType && gcReason == kInvalidGCReason); + } + inline bool IsOverriding() const { + // on timeout, the force gc removes all other gcs; + // on termination, all gcs get removed + return (taskType != ScheduleTaskType::kScheduleTaskTypeInvokeGC); + } + + inline GCReason GetGCReason() const { + return gcReason; + } + + inline void SetGCReason(GCReason reason) { + gcReason = reason; + } + + virtual std::string ToString() const override { + std::stringstream ss; + ss << ScheduleTaskBase::ToString() << " reason=" << gcReason; + return ss.str(); + } + + bool NeedFilter() const override { + return true; + } + + bool Execute(void *owner) override; + + private: + GCReason gcReason; + }; + + TracingCollector() : Collector() {} + virtual ~TracingCollector() = default; + virtual void InitTracing(); + virtual void EndTracing(); + + // Types, so that we don't confuse root sets and working stack. + // The policy is: we simply `push_back` into root set, + // but we use Enqueue to add into work stack. + using RootSet = vector>; + using WorkStack = vector>; + using RCHashMap = unordered_map; + + void Init() override; + void InitAfterFork() override; + void Fini() override; + void StartThread(bool isZygote) override; + void StopThread() override; + void JoinThread() override; + void WaitGCStopped() override; + void OnUnsuccessfulInvoke(GCReason reason); + void InvokeGC(GCReason reason, bool unsafe = false) override; + + // new interface added for leak detection + void MaybeAddRoot(address_t obj); + + void ScanStackRoots(Mutator &mutator, RootSet &rootSet); + + void ScanAllStacks(RootSet &rootSet); + + // parallel scan all roots. + void ParallelScanRoots(RootSet &rootSet, bool processWeak, bool rootString); + + // fast scan roots, inaccurately scan stacks (check heap boundry only). + void FastScanRoots(RootSet &rootSet, RootSet &inaccurateRoots, bool processWeak, bool rootString); + + // filter out inaccurate roots from inaccurateRoots, move accurate ones to rootSet. + // this function should be called after FastScanRoots(). 
+ void PrepareRootSet(RootSet &rootSet, RootSet &&inaccurateRoots); + + void DumpRoots(std::ofstream &ofs); + void DumpFinalizeGarbage() override; + void DumpGarbage() override; + void DumpCleaner() override; + void DumpWeakSoft() override; + void DumpHeap(const std::string &tag) override; + + void ObjectArrayCopy(address_t src, address_t dst, int32_t srcIndex, int32_t dstIndex, int32_t count, + bool check = false) override; + void PostObjectClone(address_t src, address_t dst) override; + bool UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, address_t expectedValue, address_t newValue) override; + address_t UnsafeGetObjectVolatile(address_t obj, ssize_t offset) override; + address_t UnsafeGetObject(address_t obj, ssize_t offset) override; + void UnsafePutObject(address_t obj, ssize_t offset, address_t newValue) override; + void UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue) override; + void UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue) override; + + inline bool IsGcRunning() const override { + return gcRunning.load(std::memory_order_acquire); + } + + // Return true if the object is a garbage to be swept. + inline bool IsGarbage(address_t objAddr) override { + bool result = !markBitmap.IsObjectMarked(objAddr); + if (useFinalBitmap) { + result = result && !finalBitmap.IsObjectMarked(objAddr); + } + return result; + } + + protected: + // bitmap for marking + MrtBitmap markBitmap; + + // bitmap for concurrent mark of dead finalizers + // it represent the object graph of dead finalizers + MrtBitmap finalBitmap; + // indicator whether use finalizer bitmap + bool useFinalBitmap = false; + + // reason for current GC. + GCReason gcReason = kGCReasonUser; + std::atomic gcRunning = { false }; + + GCFinishCallbackFunc GCFinishCallBack = DefaultGCFinishCallback; + + bool doConservativeStackScan = false; + + std::unordered_set snapshotMutators; + std::mutex snapshotMutex; + // barrier to wait for mutator finish concurrent stack scan + std::condition_variable concurrentPhaseBarrier; + // number of mutators whose stack needs to be scanned + std::atomic numMutatorToBeScan = { 0 }; + + // the collector thread handle. + pthread_t gcThread; + std::atomic gcTid; + std::atomic gcThreadRunning = { false }; + + // protect condition_variable gcFinishedCondVar's status. + std::mutex gcFinishedCondMutex; + // notified when GC finished, requires gcFinishedCondMutex + std::condition_variable gcFinishedCondVar; + + // whether GC is triggered. + // NOTE: When GC finishes, it clears both isGcTriggered and gc_running_. + // gc_running_ can be probed asynchronously + // isGcTriggered must be written by gc thread only + std::atomic isGcTriggered = { false }; + + // gc request index. + // increment each time RequestGCAndWait() is called, no matter whether enqueued successfully or not. + std::atomic curGcIndex = { 0 }; + // finishedGcIndex records the currently finished gcIndex + // may be read by mutator but only be written by gc thread sequentially + std::atomic finishedGcIndex = { 0 }; + // record last gc action's triggering time + uint64_t lastTriggerTime = 0; + + std::atomic staticRootsTaskIndex = { 0 }; + std::atomic stackTaskIndex = { 0 }; + + inline void ResetBitmap() { + markBitmap.ResetBitmap(); + finalBitmap.ResetBitmap(); + useFinalBitmap = false; + } + // Return true if and only if the object was marked before this marking. 
+ inline bool MarkObject(address_t objAddr) { + return markBitmap.MarkObject(objAddr); + } + inline bool MarkObjectForFinalizer(address_t objAddr) { + return finalBitmap.MarkObject(objAddr); + } + + // Return true if and only if the object is marked as live. + inline bool IsObjectMarked(address_t objAddr) const { + bool result = markBitmap.IsObjectMarked(objAddr); + if (result) { + return result; + } + if (useFinalBitmap) { + return finalBitmap.IsObjectMarked(objAddr); + } + return false; + } + + virtual address_t LoadStaticRoot(address_t *rootAddr) { + LinkerRef ref(rootAddr); + if (ref.IsIndex()) { + return 0; + } + return LoadRefField(rootAddr); + } + + // the collector thread entry routine. + static void *CollectorThreadEntry(void *arg); + + // Perform full garbage collection. + virtual void RunFullCollection(uint64_t) { + LOG(FATAL) << "Should call function in concrete child class!" << maple::endl; + } + + // Notify the GC thread to start GC, and wait. + // Called by mutator. + // reason: The reason for this GC. + void RequestGCUnsafe(GCReason reason); + void RequestGCAndWait(GCReason reason); + + // Notify that GC has finished. + // Must be called by gc thread only + void NotifyGCFinished(uint64_t gcIndex); + void WaitGCStoppedLite(); + int32_t GetThreadCount(bool isConcurrent); + + void SetGcRunning(bool running) { + gcRunning.store(running, std::memory_order_release); + } + + void ScanStaticFieldRoots(RootSet &rootSet); + void ScanExternalRoots(RootSet &rootSet, bool processWeak); + void ScanLocalRefRoots(RootSet &rootSet); + void ScanGlobalRefRoots(RootSet &rootSet); + void ScanThreadExceptionRoots(RootSet &rootSet); + void ScanWeakGlobalRoots(RootSet &rootSet); + void ScanStringRoots(RootSet &rootSet); + void ScanAllocatorRoots(RootSet &rootSet); + void ScanReferenceRoots(RootSet &rootSet) const; + void ScanClassLoaderRoots(RootSet &rootSet); + void ScanZterpStaticRoots(RootSet &rootSet); + + void VisitStaticFieldRoots(const maple::rootObjectFunc &func); + void VisitAllocatorRoots(const RefVisitor &func) const; + void VisitStringRoots(const RefVisitor &func) const; + void VisitReferenceRoots(const maple::rootObjectFunc &func) const; + void MaybeAddRoot(address_t data, RootSet &rootSet, bool useFastCheck = false); + + // null-implemented here, override in NaiveRCMarkSweepCollector + virtual void PostInitTracing() {} + virtual void PostEndTracing() {} + virtual void PostParallelAddTask(bool) {} + virtual void PostParallelScanMark(bool) {} + virtual void PostParallelScanRoots() {} + + inline void SetGCReason(GCReason reason) { + gcReason = reason; + } + + inline void AddRoot(address_t obj, RootSet &rootSet) { + if (!IS_HEAP_OBJ(obj)) { + MRT_BuiltinAbortSaferegister(obj, nullptr); + } + rootSet.push_back(obj); + } + + inline bool InHeapBoundry(address_t objAddr) const { + return IS_HEAP_ADDR(objAddr); + } + + inline bool FastIsHeapObject(address_t objAddr) const { + return IS_HEAP_OBJ(objAddr); + } + + MplThreadPool *GetThreadPool() const { + return workerThreadPool; + } + + void RunTaskLoop(); + + private: + // the thread pool for parallel tracing. 
+ MplThreadPool *workerThreadPool = nullptr; + int32_t concurrentThreadCount = 1; // first process is zygote + int32_t parallelThreadCount = 2; + TaskQueue taskQueue; + static void DefaultGCFinishCallback(); +}; + +class TracingMutator : public Mutator { + public: + TracingMutator(Collector &collector __attribute__((unused))) {} + virtual ~TracingMutator() = default; +}; +} + +#endif diff --git a/src/mrt/compiler-rt/include/collector/conn_comp.h b/src/mrt/compiler-rt/include/collector/conn_comp.h new file mode 100644 index 0000000000..f5e4d793da --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/conn_comp.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_CONN_COMP_H +#define MAPLE_RUNTIME_CONN_COMP_H + +#include +#include +#include +#include +#include +#include +#include +#include "address.h" + +// Tarjan's strongly connected component algorithm. +// This is a modified algorithm that is not recursive. Different from the recursive version +// https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm, +// this algorithm uses the ENTER and EXIT action to decide whether to go +// deeper into a node's descendents, or backing up finding connected components. +// This is used to find strongly connected components in the object graph for finding cyclic garbages. 
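A compact illustration of the ENTER/EXIT work-list formulation described above may help: instead of recursing, every node is pushed twice, once to be entered (numbered and expanded) and once to be exited (lowlink propagation and component extraction). The sketch below is not the class defined in this patch; FindSccSketch, NeighborFn and the local types are illustrative placeholders, and it corresponds to the mode that explores every node reachable from the roots (rootsOnly == false).

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <unordered_map>
    #include <vector>

    // Node identifiers stand in for object addresses (address_t in the runtime).
    using Node = uintptr_t;
    using NeighborFn = std::function<std::vector<Node>(Node)>;

    std::vector<std::vector<Node>> FindSccSketch(const std::vector<Node> &roots, const NeighborFn &neighbors) {
      enum Action { kEnter, kExit };
      struct Info { size_t seq; size_t low; bool onStack; };
      struct WorkItem { Action action; Node node; Node parent; bool hasParent; };

      std::vector<std::vector<Node>> sccs;
      std::unordered_map<Node, Info> info;   // per-node sequence number and lowlink
      std::vector<Node> candidates;          // Tarjan's candidate stack
      std::vector<WorkItem> work;            // explicit work list instead of recursion
      size_t nextSeq = 0;

      for (Node root : roots) {
        work.push_back({ kEnter, root, 0, false });
      }
      while (!work.empty()) {
        WorkItem item = work.back();
        work.pop_back();
        if (item.action == kEnter) {
          auto found = info.find(item.node);
          if (found != info.end()) {
            // Non-tree edge: only edges to nodes still on the candidate stack matter.
            if (item.hasParent && found->second.onStack) {
              Info &p = info[item.parent];
              p.low = std::min(p.low, found->second.seq);
            }
            continue;
          }
          info[item.node] = { nextSeq, nextSeq, true };
          ++nextSeq;
          candidates.push_back(item.node);
          // The EXIT action runs only after every child pushed below has been processed.
          work.push_back({ kExit, item.node, item.parent, item.hasParent });
          for (Node child : neighbors(item.node)) {
            work.push_back({ kEnter, child, item.node, true });
          }
        } else {  // kExit: children are done, so propagate lowlink and maybe pop a component.
          Info &self = info[item.node];
          if (item.hasParent) {
            Info &p = info[item.parent];
            p.low = std::min(p.low, self.low);
          }
          if (self.low == self.seq) {
            // item.node is the root of a strongly connected component.
            std::vector<Node> scc;
            Node top;
            do {
              top = candidates.back();
              candidates.pop_back();
              info[top].onStack = false;
              scc.push_back(top);
            } while (top != item.node);
            sccs.push_back(std::move(scc));
          }
        }
      }
      return sccs;
    }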
+namespace maplert { +class TracingCollector; +class CyclePatternGenerator; + +struct NodeInfo { + uint32_t seqNum; + uint32_t lowLink; +}; + +NodeInfo inline NewNode() { + NodeInfo node; + node.seqNum = 0; + node.lowLink = 0; + return node; +} + +void inline InitNode(NodeInfo &node, uint32_t &curSeqNum) { + node.seqNum = curSeqNum; + node.lowLink = curSeqNum; + ++curSeqNum; +} + +class ConnectedComponentFinder { + enum Action { + kEnter, + kExit, + }; + + using Node = address_t; + using WorkItem = std::pair; + using SeqNum = size_t; + using NodeNeighborFinder = std::function(Node)>; + + std::vector roots; + NodeNeighborFinder nnf; + bool rootsOnly; + + std::unordered_set rootsSet; + + std::vector workList; + std::vector candidateStack; + std::unordered_set candidateSet; + + struct NodeInfoInClass { + SeqNum seqNum; + SeqNum lowLink; + }; + + std::unordered_map nodeToInfo; // Nodeinfo for each visited node + + std::vector> results; + + SeqNum nextSeqNum; + + bool IsRoot(Node node) { + return rootsSet.find(node) != rootsSet.end(); + } + + bool IsVisited(Node node) { + return nodeToInfo.find(node) != nodeToInfo.end(); + } + + void InitializeNodeInfo(Node node); + + bool IsCandidate(Node node) { + return candidateSet.find(node) != candidateSet.end(); + } + + void PushCandidate(Node node); + Node PopCandidate(); + + void ProcessActionEnter(std::vector &workingList, std::unordered_map &nodeInfoMap, + uint32_t &curSeqNum, address_t node); + void ProcessActionExit(std::unordered_map &nodeInfoMap, address_t node, + CyclePatternGenerator &cpg); + + void ProcessEnterAction(Node node); + void ProcessExitAction(Node node); + + public: + // paraRoots is the list of roots, i.e. known nodes. paraNnf is a function that + // returns the neighbors of an object. + ConnectedComponentFinder(const std::vector ¶Roots, const NodeNeighborFinder ¶Nnf) + : roots(paraRoots), nnf(paraNnf), rootsOnly(true), nextSeqNum(0ULL) {} + ~ConnectedComponentFinder() = default; + + // True if limit the search to the root set; false if we search among all + // nodes reachable from roots as determined by nnf. + void SetRootsOnly(bool paraRootsOnly) { + rootsOnly = paraRootsOnly; + } + + // Run the connected component algorithm + void Run(); + + // Return the results. + const std::vector> &GetResults() const { + return results; + } + + // improve speed and invoked only in backup tracing + // after marking and before sweep + void RunInBackupTrace(TracingCollector &collector, CyclePatternGenerator &cpg); + ConnectedComponentFinder(const NodeNeighborFinder ¶Nnf) + : nnf(paraNnf), rootsOnly(true), nextSeqNum(0ULL) {} +}; +} // namespace + +#endif // MAPLE_RUNTIME_CONN_COMP_H diff --git a/src/mrt/compiler-rt/include/collector/cp_generator.h b/src/mrt/compiler-rt/include/collector/cp_generator.h new file mode 100644 index 0000000000..eae12b13c1 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/cp_generator.h @@ -0,0 +1,589 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_CP_GENERATOR_H +#define MAPLE_RUNTIME_CP_GENERATOR_H + +#include +#include +#include +#include +#include +#include +#include +#include "mm_config.h" +#include "sizes.h" +#include "mrt_object.h" +#include "cycle_collector.h" + +namespace maplert { +#define CHECK_INFO_FALSE(p, logType, node, msg) \ + do { \ + if (UNLIKELY(p)) { \ + LOG2FILE(logType) << msg << node << std::endl; \ + return false; \ + } \ + } while (0) +using address_t = uintptr_t; + +// garbage subset choose description +// 1. choose 1 run from every kCyclePatternGarbageRunRatio +// 2. choose 1 run from every kCyclePatternGarbageCountRatio garbage object +// 3. max count is kCyclePatternMaxGarbageCount +const int kCyclePatternGarbageCountRatio = 4; +const int kCyclePatternGarbageRunRatio = 4; +const int kCyclePatternMaxGarbageCount = 10000; +// just test for reduce the pattern install time +const int kCyclePatternDupInitialThreshold = 3; +const int kLeastOutputThreshold = 3; +const double kMaxOutputPercent = 0.8; + +// record a cycle pattern, content mapping to pattern in following format +// class: Landroid_2Fcontent_2Fres_2FHwResourcesImpl_3B +// Header: ROOT, 1 +// Cycle: 1, 1, 1 +// Node: 0, 88, Landroid_2Fcontent_2Fres_2FResourcesImpl_3B, 1 +// Edge: 1, 0, 104 +// +class CyclePattern { + public: + uint32_t count; + // getter/setter + inline void IncCount() { + ++count; + } + inline int32_t NumNodes() const { + return headerInfo.nNodes; + } + inline int32_t NumEdges() const { + return headerInfo.nEdges; + } + + bool constructFromBinary(CyclePatternInfo &cyclePatternInfo, MClass *cls) { + headerInfo = cyclePatternInfo; + CHECK_INFO_FALSE(headerInfo.nNodes > kCyclepPatternMaxNodeNum || headerInfo.nNodes < 0, kLogtypeCycle, + headerInfo.nNodes, "cycle_check: not a valid header, invalide nodes "); + CHECK_INFO_FALSE(headerInfo.nEdges > kCyclepPatternMaxEdgeNum || headerInfo.nEdges < 1, kLogtypeCycle, + headerInfo.nEdges, "cycle_check: not a valid header, invalide edge number "); + CHECK_INFO_FALSE(headerInfo.expectRc < 1, kLogtypeCycle, headerInfo.expectRc, + "cycle_check: not a valid header, invalide expect rc "); + ++(headerInfo.nNodes); + // construct node 0 + CyclePatternNodeInfo node0; + node0.expectRc = cyclePatternInfo.expectRc; + node0.expectType = cls; + nodes[0] = node0; + CyclePatternNodeInfo *curNodeInfo = GetCyclePatternNodeInfo(cyclePatternInfo); + for (int32_t i = 1; i < headerInfo.nNodes; ++i) { + CHECK_INFO_FALSE(curNodeInfo->loadOffset < static_cast(kCyclepObjHeaderLength), kLogtypeCycle, + curNodeInfo->loadOffset, "cycle_check: not a valid node info, invalide loadOffset "); + CHECK_INFO_FALSE(curNodeInfo->loadIndex < 0, kLogtypeCycle, + curNodeInfo->loadIndex, "cycle_check: not a valid node info, invalide loadIndex "); + CHECK_INFO_FALSE(curNodeInfo->expectRc < 1, kLogtypeCycle, + curNodeInfo->expectRc, "cycle_check: not a valid node info, invalide expect_tc "); + nodes[i] = *curNodeInfo; + curNodeInfo = reinterpret_cast( + (reinterpret_cast(curNodeInfo)) + sizeof(CyclePatternNodeInfo)); + } + + CyclePatternEdgeInfo *curEdgeInfo = GetCyclePatternEdgeInfo(cyclePatternInfo); + for (int32_t i = 0; i < headerInfo.nEdges; ++i) { + CHECK_INFO_FALSE(curEdgeInfo->loadOffset < static_cast(kCyclepObjHeaderLength), kLogtypeCycle, + curEdgeInfo->loadOffset, "cycle_check: not a valid edge info, invalide loadOffset "); + CHECK_INFO_FALSE(curEdgeInfo->srcNodeIndex < 0, kLogtypeCycle, curEdgeInfo->srcNodeIndex, + "cycle_check: not a valid edge info, invalide srcNodeIndex "); + 
CHECK_INFO_FALSE(curEdgeInfo->destNodeIndex < 0, kLogtypeCycle, curEdgeInfo->destNodeIndex, + "cycle_check: not a valid edge info, invalide destNodeIndex "); + edges[i] = *curEdgeInfo; + curEdgeInfo = reinterpret_cast( + (reinterpret_cast(curEdgeInfo)) + sizeof(CyclePatternEdgeInfo)); + } + return true; + } + + // inline utilities + inline const CyclePatternNodeInfo *GetNodeInfo(int32_t index) { + if (index >= NumNodes()) { + LOG(ERROR) << "cycle_check: index out of range"; + return nullptr; + } + return &(nodes[index]); + } + + inline const CyclePatternEdgeInfo *GetEdgeInfo(int32_t index) { + if (index >= NumEdges()) { + LOG(ERROR) << "cycle_check: edge index out of range"; + return nullptr; + } + return &(edges[index]); + } + + inline bool NeedMerge() const { + // simplify now + if (LIKELY(!kMergeAllPatterns)) { + return count >= kLeastOutputThreshold; + } else { + return true; + } + } + inline void SetNeedMerge() { + mergeCandiate = true; + } + + // inline utilities + inline address_t node(int32_t index) const { + if (index >= NumNodes()) { + return 0; + } + return objAddrs[index]; + } + + bool AddNode(address_t addr, MClass *cls, int8_t loadIndex, int16_t loadOffset, int32_t rc) { + if (headerInfo.nNodes == kCyclepPatternMaxNodeNum) { + return false; + } + + if (rc > static_cast(kMaxRcInCyclePattern)) { + return false; + } + + if (headerInfo.nNodes == 0) { + headerInfo.expectRc = static_cast(rc); + } + nodes[headerInfo.nNodes].expectType = cls; + nodes[headerInfo.nNodes].loadOffset = loadOffset; + nodes[headerInfo.nNodes].loadIndex = loadIndex; + nodes[headerInfo.nNodes].expectRc = static_cast(rc); + objAddrs[headerInfo.nNodes] = addr; + ++(headerInfo.nNodes); + return true; + } + + inline bool AddEdge(int8_t srcIndex, int8_t destIndex, int16_t loadOffset) { + if (headerInfo.nEdges == kCyclepPatternMaxEdgeNum) { + return false; + } + edges[headerInfo.nEdges].srcNodeIndex = srcIndex; + edges[headerInfo.nEdges].destNodeIndex = destIndex; + edges[headerInfo.nEdges].loadOffset = loadOffset; + ++(headerInfo.nEdges); + return true; + } + + inline size_t EmitByteSize() const { + size_t size = sizeof(CyclePatternInfo); + size += sizeof(CyclePatternNodeInfo) * (NumNodes() - 1); + size += sizeof(CyclePatternEdgeInfo) * NumEdges(); + // align pattern with 8 bytes DWORD size + size_t rem = size % kDWordBytes; + if (rem != 0) { + size += (kDWordBytes - rem); + } + return size; + } + + inline int32_t IndexofObj(address_t obj) const { + for (int32_t i = 0; i < NumNodes(); ++i) { + if (objAddrs[i] == obj) { + return i; + } + } + return -1; + } + + inline int32_t IndexofClass(void *cls) const { + for (int32_t i = 0; i < NumNodes(); ++i) { + if (nodes[i].expectType == cls) { + return i; + } + } + return -1; + } + + bool operator==(const CyclePattern &other) const { + if (NumNodes() != other.NumNodes() || NumEdges() != other.NumEdges()) { + return false; + } + for (int32_t i = 0; i < NumNodes(); ++i) { + if (nodes[i].expectType != other.nodes[i].expectType || + nodes[i].loadOffset != other.nodes[i].loadOffset || + nodes[i].loadIndex != other.nodes[i].loadIndex || + nodes[i].expectRc != other.nodes[i].expectRc) { + return false; + } + } + for (int32_t i = 0; i < NumEdges(); ++i) { + if (edges[i].srcNodeIndex != other.edges[i].srcNodeIndex || + edges[i].destNodeIndex != other.edges[i].destNodeIndex || + edges[i].loadOffset != other.edges[i].loadOffset) { + return false; + } + } + return true; + } + + void ReindexNodesAndEdges(const CyclePattern &master, int8_t indexMap[], int32_t numTotalEdges, + const 
CyclePatternEdgeInfo edgesAll[], bool edgeUsed[]); + + // constructor + CyclePattern() : objAddrs {} { + count = 1; + headerInfo.nNodes = 0; + headerInfo.nEdges = 0; + headerInfo.expectRc = 0; + headerInfo.hasNextFlag = 0; + mergeCandiate = false; + } + CyclePattern(CyclePattern &master, int32_t startIndex); + CyclePattern &operator=(const CyclePattern&) = delete; + ~CyclePattern() = default; + + // verify and filter + // Cycle pattern is valid if internal each obj's rc >= internal edge count + bool IsValid(bool allowExternalReference) const { + int8_t internalRc[kCyclepPatternMaxNodeNum]; + int8_t internalref[kCyclepPatternMaxNodeNum]; // how many times ref other objects in cycle + for (int32_t i = 0; i < kCyclepPatternMaxNodeNum; ++i) { + internalref[i] = 0; + } + internalRc[0] = 0; + for (int32_t i = 1; i < NumNodes(); ++i) { + internalRc[i] = 1; + int8_t srcIndex = nodes[i].loadIndex; + ++internalref[srcIndex]; + } + + // accumulate rc in edges + for (int32_t i = 0; i < NumEdges(); ++i) { + int8_t destIndex = edges[i].destNodeIndex; + int8_t srcIndex = edges[i].srcNodeIndex; + ++internalRc[destIndex]; + ++internalref[srcIndex]; + } + + for (int32_t i = 0; i < NumNodes(); ++i) { + if (internalref[i] == 0) { + // not refed in cycle + return false; + } else if (internalRc[i] > (int8_t)(kMaxRcInCyclePattern)) { + return false; + } else if (internalRc[i] == nodes[i].expectRc) { + continue; + } else if (internalRc[i] > nodes[i].expectRc) { + return false; + } else if (allowExternalReference) { + continue; + } else { + return false; + } + } + return true; + } + + // adjust cycle pattern to remove external RC + void AdjustForOuptut() { + int8_t internalRc[kCyclepPatternMaxNodeNum]; + internalRc[0] = 0; + for (int32_t i = 1; i < NumNodes(); ++i) { + internalRc[i] = 1; + } + + // accumulate rc in edges + for (int32_t i = 0; i < NumEdges(); ++i) { + int8_t destIndex = edges[i].destNodeIndex; + ++internalRc[destIndex]; + } + + headerInfo.expectRc = internalRc[0]; + for (int32_t i = 0; i < NumNodes(); ++i) { + nodes[i].expectRc = internalRc[i]; + } + } + + // workflow + // Merge cycle to runtime + void Merge(); + // Emit cycle to binary to buffer + void Emit(char *buffer, size_t limit); + bool IsSamePattern(int8_t index, CyclePatternInfo &cyclePatternInfo, size_t &origGctibByteSize) const; + bool AppendPattern(MClass *cls, int8_t index, size_t origGctibByteSize, GCTibGCInfo *origGctibInfo, + const CyclePatternInfo *lastCyclePattern); + // Append current from index + bool FindAndAppendPattern(int8_t index); + // Find edges in current cycle + bool FindEdge(int8_t srcIndex, int8_t destIndex, int16_t loadOffset) const; + // log cycle into stream + void Print(ostream &os); + + private: + CyclePatternInfo headerInfo; + // might add with large count for analysis + address_t objAddrs[kCyclepPatternMaxNodeNum]; + CyclePatternNodeInfo nodes[kCyclepPatternMaxNodeNum]; + CyclePatternEdgeInfo edges[kCyclepPatternMaxEdgeNum]; + bool mergeCandiate; +}; + +// CycleGarbage and CyclePattern +// CycleGarbage is collected from garbage can hold any cycles founded at runtime. +// CyclePattern is CycleGarbage suitable added as dynamic cycle pattern at runtime. +// +// CyclePattern has more limitation than CycleGarbage +// 1. limited nodes +// 2. limited edges +// 3. limited rcs +// 4. limited offset +// +// Entire flow in self learning +// 1. check if big data cache string is empty: last collected big data is cleared or not. 
+// 1.1 if not empty, construct cycle pattern, goto 6 +// 1.2 if empty, construct cycle garbage, goto 2 +// 2. Collect CycleGarbage in garbage objects +// 2. Filter invalid CycleGarbage (adjust rc, find out incorrect cycle) +// 3. Add CycleGarbage into candidate set (compare and insert, write) +// 4. Select suitable CycleGarbage and covert them into CyclePattern, goto 7 +// 5. Write information to big data +// 5.1 suitable for merge at run time and newly merged +// 5.2 not suitable for merge +// 6. Create Cycle pattern from garbage objects +// 7. Cycle pattern emit +class GarbageNode { + reffield_t addr; + int32_t index; + uint32_t internalRc; + MClass *type; + bool visited; + public: + vector> references; + + GarbageNode(reffield_t paraAddr, int32_t paraIndex, MClass *paraType) + : addr(paraAddr), index(paraIndex), type(paraType) { + internalRc = 0; + visited = false; + } + ~GarbageNode() { + type = nullptr; + } + void addChild(GarbageNode &child, int32_t offset) { + references.push_back(make_pair(&child, offset)); + ++child.internalRc; + } + uint32_t GetInternalRc() const { + return internalRc; + } + int32_t GetIndex() const { + return index; + } + reffield_t GetAddr() const { + return addr; + } + MClass *GetType() const { + return type; + } + bool IsVisited() const { + return visited; + } + void SetVisited() { + visited = true; + } + const vector> &GetReferences() { + return references; + } + + bool operator==(const GarbageNode &other) const; +}; + +class CycleGarbage { + public: + CycleGarbage() { + adjusted = false; + valid = true; + count = 1; + hashValue = 0; + totalEdges = 0; + hasDuplicateType = false; + } + ~CycleGarbage() { + for (auto it : nodesVec) { + delete it; + } + } + GarbageNode *AddNode(reffield_t addr, MClass *type) { + GarbageNode *node = new (std::nothrow) GarbageNode(addr, static_cast(nodesVec.size()), type); + if (node == nullptr) { + LOG(FATAL) << "new GarbageNode failed" << maple::endl; + } + nodesVec.push_back(node); + nodesMap[addr] = node; + return node; + } + bool Construct(std::vector &sccNodeAddrs); + bool operator==(const CycleGarbage &other) const; + uint64_t Hash() const { + return hashValue; + } + void IncCount() { + ++count; + } + uint32_t Count() const { + return count; + } + + // Cycle pattern Merge + bool ToCyclePattern(CyclePattern &cyclePattern); + + // utilites + void AppendToString(ostringstream &oss); // content to string, for big data + void Print(std::ostream &os); // print into cyclelog or other file + + private: + unordered_map nodesMap; + vector nodesVec; + bool adjusted; + bool valid; + bool hasDuplicateType; + uint32_t count; + uint32_t totalEdges; + uint64_t hashValue; + + bool CheckAndUpdate(); + void ComputeHash(); + void ConstructCycle(std::vector &sccNodeAddrs, address_t rootAddr); +}; + +class CyclePatternGenerator { + public: + std::vector resultCycles; + CyclePatternGenerator() = default; + + ~CyclePatternGenerator() { + for (CycleGarbage *cycle : resultCycles) { + delete cycle; + } + } + + void CollectCycleGarbage(vector &sccNodes); + vector &Cycles() { + return resultCycles; + } +}; + +const int kPatternStaleThredhold = 50; +const int kMaxBigdataUploadSize = 4096; +const int kMaxBigDataCacheSize = kMaxBigdataUploadSize * 20; // means the max cache is 20 * 4k +const int kMaxBigDataUploadStringEndSize = 3; // reserved for ; and endl +class PatternAgeFlags { + public: + uint8_t dupThreshold; + bool preDefined; +}; +class ClassCycleManager { + public: + static inline bool HasDynamicLoadPattern(MClass *cls) { + if 
(dynamicLoadedCycleClasses.find(cls) != dynamicLoadedCycleClasses.end()) { + return true; + } + return false; + } + static inline void AddDynamicLoadPattern(MClass *cls, bool preDfined) { + // call this Add, the dynamicLoadedCycleClasses[second] must be zero, so as to Has method + PatternAgeFlags flag; + flag.dupThreshold = kCyclePatternDupInitialThreshold; + flag.preDefined = preDfined; + dynamicLoadedCycleClasses[cls] = flag; + } + + static inline void DeleteDynamicLoadPattern(MClass *cls) { + auto ret = dynamicLoadedCycleClasses.erase(cls); + if (ret == 0) { + LOG(FATAL) << "ClassCycleManager::DeleteDynamicLoadPattern delete zero element" << maple::endl; + } + } + + static inline bool DynamicPatternTryMerge(MClass *cls) { + if (dynamicLoadedCycleClasses.find(cls) == dynamicLoadedCycleClasses.end()) { + return true; + } + PatternAgeFlags &flag = dynamicLoadedCycleClasses[cls]; + if (flag.dupThreshold == 0) { + flag.dupThreshold = kCyclePatternDupInitialThreshold; + return true; + } + --flag.dupThreshold; + return false; + } + + static inline void findDuplicate(MClass *cls) { + PatternAgeFlags &flag = dynamicLoadedCycleClasses[cls]; + flag.dupThreshold = (kCyclePatternDupInitialThreshold << 1); + } + + static inline unordered_map &GetDynamicLoadClass() { + return dynamicLoadedCycleClasses; + } + + // Trim all dead patterns in dynamic laoded class + static void RemoveDeadPatterns(); + + // Dump all cycles in loaded_cycle_classes_ + static void DumpDynamicCyclePatterns(ostream &os, size_t limit, bool dupThreshold); + // merge cycle patterns into runtime + static void MergeCycles(vector &cycles); + static bool CheckValidPattern(MClass *cls, char *buffer); + + static inline string &GetPatternsCache() { + return patternsCache; + } + + static bool GetRCThreshold(uint32_t &rcMax, uint32_t &rcMin, char *cyclepatternBinary); + + static inline void WritePatternToCache(CyclePattern &pattern) { + uint32_t cacheSize = static_cast(patternsCache.size()); + if (cacheSize > kMaxBigdataUploadSize) { + return; + } + + ostringstream oss; + pattern.Print(oss); + const string content = oss.str(); + if (content.size() + cacheSize > kMaxBigdataUploadSize) { + return; + } + patternsCache.append(content); + } + + static inline void SetCpUpdatedFlag() { + cpUpdated = true; + } + + static inline bool IsCyclePatternUpdated() { + return cpUpdated; + } + + private: + // mapping and record object has dynmamic loaded cycle pattern + // + static unordered_map dynamicLoadedCycleClasses; + // for bigdata, volume is 4096 + static string patternsCache; + // mutex between updating and dump + static mutex cycleMutex; + // recode the patterns of system has been changed or not + // to reduce the IO for saving patterns + static bool cpUpdated; + + void MergeCyclePattern(CyclePattern &cyclePattern); +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_CP_GENERATOR_H diff --git a/src/mrt/compiler-rt/include/collector/cycle_collector.h b/src/mrt/compiler-rt/include/collector/cycle_collector.h new file mode 100644 index 0000000000..feba9c5594 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/cycle_collector.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_CYCLECOLLECTOR_H +#define MAPLE_RUNTIME_CYCLECOLLECTOR_H + +#include +#include +#include +#include +#include "collector.h" +#include "sizes.h" + +namespace maplert { +// Explicitly trigger cycle collection +const int kCyclepPatternMaxNodeNum = 12; +const int kCyclepPatternMaxEdgeNum = 12; +const int kCyclepMaxNum = 8; +// from kCycleRCMaxMask to kCycleRCMaxShift +const int kMaxRcInCyclePattern = 7; + +const int kCyclepPatternProfileCheckThreshold = 2000; +const int kCyclepPatternProfileCleanThreshold = 2; +const int kCyclepPatternProfileCancelThreshold = 6; +const int kCyclepPatternProfileMatchCountShift = 32; +const int kCyclepMaxOffset = 32767; + +// the first (possible) non-shadow reference field offset +#ifdef USE_32BIT_REF +const size_t kCyclepObjHeaderLength = (sizeof(reffield_t) + sizeof(uint32_t)); // shadow+monitor +#else +const size_t kCyclepObjHeaderLength = (sizeof(reffield_t) + sizeof(address_t)); // shadow+monitor+padding +#endif // USE_32BIT_REF + +// enabled when ifndef ANDORID +// if merge_all_cps is true, then +// 1. fetch all garbages when DumpCycleLeak +// 2. merge all patterns when Appending Patterns +// 3. record the cost time of TryFreeCycleAtMutator +extern const bool kMergeAllPatterns; + +struct CyclePatternNodeInfo { + void *expectType = nullptr; // meta, 8 bytes + int16_t loadOffset = 0; // load field offset + int8_t loadIndex = 0; // load object form index on stack + int8_t expectRc = 0; // rc for this field + int32_t flags = 0; // 4 bytes padding and flags +}; + +struct CyclePatternEdgeInfo { + int8_t srcNodeIndex = 0; + int8_t destNodeIndex = 0; + int16_t loadOffset = 0; + int32_t flags = 0; // 4 bytes padding and flags +}; + +struct CyclePatternInfo { + int8_t nNodes = 0; + int8_t nEdges = 0; + int8_t expectRc = 0; + int8_t hasNextFlag = 0; + int8_t invalidated = kCycleValid; // is this pattern valid, invalidate at match time + int8_t duplicateCount = 0; // how many time duplicated in self study + int16_t matchCount = 0; // positive means ok, negative mean abandond time + uint64_t matchProfiling = 0; +}; + +static inline CyclePatternInfo *GetCyclePatternInfo(GCTibGCInfo &gctibInfo) { + CyclePatternInfo *res = reinterpret_cast( + (reinterpret_cast(&gctibInfo)) + ((gctibInfo.nBitmapWords + 1) * kDWordBytes)); + return res; +} + +static inline size_t GetCyclePatternSize(const CyclePatternInfo &info) { + size_t size = sizeof(CyclePatternInfo); + size += sizeof(CyclePatternNodeInfo) * info.nNodes; + size += sizeof(CyclePatternEdgeInfo) * info.nEdges; + // align pattern with 8 bytes + size_t rem = reinterpret_cast(size) % kDWordBytes; + if (rem != 0) { + size += (kDWordBytes - rem); + } + return size; +} + +static inline CyclePatternInfo *GetNextCyclePattern(CyclePatternInfo &info) { + if (info.hasNextFlag == 0) { + return nullptr; + } + char *res = reinterpret_cast(&info); + res += sizeof(CyclePatternInfo); + res += sizeof(CyclePatternNodeInfo) * info.nNodes; + res += sizeof(CyclePatternEdgeInfo) * info.nEdges; + // align pattern with 8 bytes + size_t rem = reinterpret_cast(res) % kDWordBytes; + if (rem != 0) { + res += (kDWordBytes - rem); + } + return reinterpret_cast(res); +} + +static 
inline CyclePatternNodeInfo *GetCyclePatternNodeInfo(CyclePatternInfo &info) { + return reinterpret_cast((reinterpret_cast(&info)) + sizeof(CyclePatternInfo)); +} + +static inline CyclePatternEdgeInfo *GetCyclePatternEdgeInfo(CyclePatternInfo &info) { + return reinterpret_cast( + (reinterpret_cast(&info)) + sizeof(CyclePatternInfo) + sizeof(CyclePatternNodeInfo) * info.nNodes); +} + +string MRT_CyclePatternValidityStr(int validStatus); + +class CycleCollector { + public: + CycleCollector() = default; + ~CycleCollector() = default; + + static bool TryFreeCycleAtMutator(address_t obj, uint32_t rootDelta, bool isRefProcess); + + private: + static bool MatchNodes(address_t stack[], CyclePatternNodeInfo &infos, int32_t nNodes, bool &hasFinal, + bool isRefProcess, uint32_t expectRC[]); + static bool MatchEdges(const address_t stack[], CyclePatternEdgeInfo &infos, int32_t nNodes, int32_t nEdges); + static bool CheckStackColorGray(const address_t stack[], int32_t nNodes, bool weakCollectedSet[], + const uint32_t expectRC[]); + static void ReleaseCycleObjects(const address_t stack[], + CyclePatternNodeInfo &nInfos, int32_t nNodes, + CyclePatternEdgeInfo &eInfos, int32_t nEdges, + const bool weakCollectedSet[]); + static void FinalizeCycleObjects(const address_t stack[], int32_t nNodes, const bool weakCollectedSet[]); + static bool CheckAndReleaseCycle(address_t obj, uint32_t rootDelta, bool isRefProcess, + CyclePatternInfo &cyclePatternInfo, CyclePatternInfo *prevPattern); +}; + +string GetSoNameFromCls(const MClass *elementClass); +} + +#endif diff --git a/src/mrt/compiler-rt/include/collector/gc_reference_processor.h b/src/mrt/compiler-rt/include/collector/gc_reference_processor.h new file mode 100644 index 0000000000..d27f0c4a6e --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/gc_reference_processor.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_COMPILER_RT_GC_REFERENCE_PROCESSOR_H +#define MAPLE_COMPILER_RT_GC_REFERENCE_PROCESSOR_H + +#include +#include "address.h" +#include "sizes.h" +#include "rp_base.h" + +namespace maplert { +struct GCRefContext { + std::mutex discoverLock; + address_t discoverRefs; // Discover list + address_t enqueueRefs; // enqueue list + uint32_t discoverCount; + uint32_t enqueueCount; +}; + +class GCReferenceProcessor : public ReferenceProcessor { + public: + static inline GCReferenceProcessor &Instance() { + return static_cast(ReferenceProcessor::Instance()); + } + GCReferenceProcessor(); + ~GCReferenceProcessor() = default; + void DiscoverReference(address_t reference); + void ProcessDiscoveredReference(uint32_t flags); + void VisitGCRoots(RefVisitor &visitor) override; + void ConcurrentProcessDisovered(); + void InitEnqueueAtFork(uint32_t type, address_t refs); + protected: + bool ShouldStartIteration() override; + void LogRefProcessorBegin() override; + void LogRefProcessorEnd() override; + private: + void PreAddFinalizables(address_t obj[], uint32_t count, bool needLock) override; + void PostAddFinalizables(address_t obj[], uint32_t count, bool needLock) override; + void EnqeueReferences() override; + GCRefContext refContext[kRPPhantomRef + 1]; // weak/soft/phantom has index 0,1,2 +}; +} +#endif // MAPLE_COMPILER_RT_REFERENCE_PROCESSOR_H diff --git a/src/mrt/compiler-rt/include/collector/mpl_thread_pool.h b/src/mrt/compiler-rt/include/collector/mpl_thread_pool.h new file mode 100644 index 0000000000..c8c8dc73e2 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/mpl_thread_pool.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_COMPILER_RT_THREAD_POOL_H +#define MRT_COMPILER_RT_THREAD_POOL_H + +#include +#include +#include +#include +#include +#include +#include + +// thread pool implementation +namespace maplert { +class MplTask { + public: + MplTask() = default; + virtual ~MplTask() = default; + virtual void Execute(size_t threadId) = 0; +}; + + +class MplLambdaTask : public MplTask { + public: + explicit MplLambdaTask(const std::function &function) : func(function) {} + ~MplLambdaTask() = default; + void Execute(size_t threadId) override { + func(threadId); + } + private: + std::function func; +}; + +class MplThreadPool; + +class MplPoolThread { + public: + // use for profiling + std::vector *schedCores; + + MplPoolThread(MplThreadPool *threadPool, const char *threadName, size_t threadId, size_t stackSize); + ~MplPoolThread(); + + void SetPriority(int32_t prior); + + // get pthread of thread + pthread_t GetThread() const { + return pthread; + } + + // get thread id of thread + pid_t GetTid() const { + return tid; + } + + static void *WorkerFunc(void *param); + + private: + size_t id; + pthread_t pthread; + pid_t tid; + std::string name; + MplThreadPool *pool; +}; + + +// manual +// new (SetMaxActiveThreadNum(optional) addTask startPool waitFinish)^. 
Exit delete +// if need to change MaxActiveThreadNum, should waitFinish or stop pool at first +class MplThreadPool { + public: + // Constructor for thread pool, 1) Create threads, 2) wait all thread created & sleep + // name is the thread pool name. thread name = Pool_$(poolname)_ThreadId_$(threadId) + // maxThreadNum is the max thread number in pool. + // prior is the priority of threads in pool. + MplThreadPool(const char *name, int32_t maxThreadNum, int32_t prior); + + // Destructor for thread pool, 1) close pool 2) wait thread in pool to exit, 3) release resources of class + ~MplThreadPool(); + + // Set priority of each thread in pool. + void SetPriority(int32_t prior); + + // Set max active thread number of pool, redundant thread hangup in sleep condition var. + // notify more waitting thread get to work when pool is running. + // Range [1 - maxThreadNum]. + void SetMaxActiveThreadNum(int32_t num); + + // Get max active thread number of pool. + int32_t GetMaxActiveThreadNum() const { + return maxActiveThreadNum; + } + + // Get max thread number of pool, defalut = maxThreadNum. + int32_t GetMaxThreadNum() const { + return maxThreadNum; + } + + // Add new task to task queue , task should inherit from MplTask. + void AddTask(MplTask *task); + + // Add task to thread , func indicate Lambda statement. + void AddTask(std::function func); + + // Start thread pool, notify all sleep threads to get to work + void Start(); + + // Wait all task in task queue finished, if pool stopped, only wait until current excuting task finish + // after all task finished, stop pool + // addToExecute indicate whether the caller thread excute task + void WaitFinish(bool addToExecute, std::vector *schedCores = nullptr); + + // used in none-parallel concurrent mark + void DrainTaskQueue(); + + // Notify & Wait all thread waitting for task to sleep + void Stop(); + + // Notify all thread in pool to exit , notify all waitFinish thread to return,nonblock ^. + void Exit(); + + // Remove all task in task queue + void ClearAllTask(); + + // Get task count in queue + size_t GetTaskNumber() { + std::unique_lock taskLock(taskMutex); + return taskQueue.size(); + } + + // Get all MplPoolThread in pool + const std::vector &GetThreads() const { + return threads; + } + + private: + // thread default stack size 512 KB. + static const size_t kDefaultStackSize = (512 * 1024); + int32_t priority; + + std::string name; + // pool stop or running state + std::atomic running; + // is pool exit + std::atomic exit; + // all task put in task queue + std::queue taskQueue; + + // active thread 0 ..... 
maxActiveThreadNum .....maxThreadNum + // max thread number in pool + int32_t maxThreadNum; + + // max active thread number, redundant thread hang up in threadSleepingCondVar + int32_t maxActiveThreadNum; + + // current active thread, when equals to zero, no thread running, all thread slept + int32_t currActiveThreadNum; + + // current waitting thread, when equals to currActiveThreadNum + // no thread excuting, all task finished + int32_t currWaittingThreadNum; + + // single lock + std::mutex taskMutex; + + // hangup when no task available + std::condition_variable taskEmptyCondVar; + + // hangup when to much active thread or pool stopped + std::condition_variable threadSleepingCondVar; + + // hangup when there is thread excuting + std::condition_variable allWorkDoneCondVar; + + // hangup when there is thread active + std::condition_variable allThreadStopped; + + // use for profiling + std::vector threads; + + // is pool running or stopped + bool IsRunning() const { + return running.load(std::memory_order_relaxed); + } + + bool IsExited() const { + return exit.load(std::memory_order_relaxed); + } + + friend class MplPoolThread; +}; +} // namespace maplert + +#endif // MRT_COMPILER_RT_THREAD_POOL_H diff --git a/src/mrt/compiler-rt/include/collector/mrt_bitmap.h b/src/mrt/compiler-rt/include/collector/mrt_bitmap.h new file mode 100644 index 0000000000..48a0fabb15 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/mrt_bitmap.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MRT_BITMAP_H +#define MAPLE_RUNTIME_MRT_BITMAP_H + +#include +#include +#include "mm_utils.h" // address_t + +// set 1 to enable bitmap debug functions +#define MRT_DEBUG_BITMAP 0 +namespace maplert { +// kLogObjAlignment == 4 indicates 2^4(16) bytes per bit in bitmap +static constexpr size_t kLogObjAlignment = 4; +static constexpr size_t kLogBitsPerByte = 3; // 8 bit per byte +static constexpr size_t kLogBytesPerWord = 2; // 4 byte per word +static constexpr size_t kLogBitsPerWord = kLogBitsPerByte + kLogBytesPerWord; + +static constexpr size_t kBitsPerWord = sizeof(uint32_t) * 8; // 8 bits per byte. 
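Given the constants above (one mark bit per 16-byte heap granule, 32 bits per bitmap word), each uint32_t word of the bitmap covers 32 * 16 = 512 bytes of heap, and higher bits within a word encode lower addresses. A minimal standalone sketch of that index/mask arithmetic with a worked example (illustration only, using sketch-local names; the real logic is MrtBitmap::GetLocation in the class that follows):

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kSketchLogObjAlignment = 4;  // one bit covers a 16-byte granule
constexpr size_t kSketchLogBitsPerWord = 5;   // 32 bits per uint32_t word
constexpr size_t kSketchBitsPerWord = 32;

// Map a byte offset from the heap start to (bitmap word index, bit mask in that word).
void SketchGetLocation(uintptr_t offset, uint32_t &wordIndex, uint32_t &bitMask) {
  wordIndex = static_cast<uint32_t>((offset >> kSketchLogBitsPerWord) >> kSketchLogObjAlignment);
  // higher bits in each word encode lower addresses
  uint32_t bitIndex = static_cast<uint32_t>(
      kSketchBitsPerWord - 1 - ((offset >> kSketchLogObjAlignment) & (kSketchBitsPerWord - 1)));
  bitMask = static_cast<uint32_t>(1) << bitIndex;
}

int main() {
  uint32_t wordIndex = 0;
  uint32_t bitMask = 0;
  // An object 0x250 (592) bytes past the heap start: 592 / 512 = word 1,
  // (592 / 16) % 32 = 5, so it maps to bit 31 - 5 = 26 of that word.
  SketchGetLocation(0x250, wordIndex, bitMask);
  assert(wordIndex == 1);
  assert(bitMask == (1u << 26));
  return 0;
}

MarkObject then applies the mask with fetch_or and returns whether the bit was already set, so two threads racing to mark the same object cannot both observe it as previously unmarked.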
+ + +class MrtBitmap { + public: + MrtBitmap() : spaceStart(0), spaceEnd(0), curEnd(0), isInitialized(false), bitmapBegin(nullptr), bitmapSize(0) {} + ~MrtBitmap(); + // for initialization + bool Initialized() const { + return isInitialized; + } + + void Initialize(); + void ResetCurEnd(); + ALWAYS_INLINE void ResetBitmap() { + if (bitmapBegin != nullptr) { +#ifdef __ANDROID__ + int result = madvise(bitmapBegin, bitmapSize, MADV_DONTNEED); + if (result != 0) { + LOG(WARNING) << "ResetBitmap madvise failed"; + size_t curBitmapSize = GetBitmapSizeByHeap(curEnd - spaceStart); + (void)memset_s(reinterpret_cast(bitmapBegin), curBitmapSize, 0, curBitmapSize); + } +#else + // use memset on qemu + size_t curBitmapSize = GetBitmapSizeByHeap(curEnd - spaceStart); + (void)memset_s(reinterpret_cast(bitmapBegin), curBitmapSize, 0, curBitmapSize); +#endif + } + } + + // return true if object already marked, false if unmarked + ALWAYS_INLINE bool MarkObject(address_t addr) { + uint32_t lineIndex; + uint32_t bitMask; + GetLocation(addr, lineIndex, bitMask); + std::atomic *line = bitmapBegin + lineIndex; + + uint32_t old = line->fetch_or(bitMask, std::memory_order_relaxed); + return ((old & bitMask) != 0); + } + + ALWAYS_INLINE bool IsObjectMarked(address_t addr) const { + uint32_t lineIndex; + uint32_t bitMask; + GetLocation(addr, lineIndex, bitMask); + std::atomic *line = bitmapBegin + lineIndex; + uint32_t word = line->load(std::memory_order_relaxed); + return (word & bitMask) != 0; + } + +#if MRT_DEBUG_BITMAP + // copy from another bitmap, for debug purpose. + void CopyBitmap(const BitMap &bitmap); + + // size in bytes, just for debug purpose. + inline size_t Size() const { + return bitmapSize; + } + + // data as an uint32_t array, for debug purpose. + inline const uint32_t *Data() const { + return reinterpret_cast(bitmapBegin); + } +#endif + + private: + ALWAYS_INLINE bool AddrInRange(address_t addr) const { + return (addr >= spaceStart) && (addr <= spaceEnd); + } + + ALWAYS_INLINE void CheckAddr(address_t addr) const { + if (UNLIKELY(!AddrInRange(addr))) { + LOG(FATAL) << "invalid object address." << " addr:" << std::hex << addr << " spaceStart:" << spaceStart << + " curEnd:" << curEnd << " spaceEnd:" << spaceEnd << std::dec << maple::endl; + } + } + + // round value up to alignValue + uint32_t AlignRight(uint32_t value, uint32_t alignValue) const noexcept; + size_t GetBitmapSizeByHeap(size_t heapBytes); + + // get index in the bitmap array and bit mask in the word. + ALWAYS_INLINE void GetLocation(address_t addr, uint32_t &lineIndex, uint32_t &bitMask) const { +#if MRT_DEBUG_BITMAP + CheckAddr(addr); +#endif + uint32_t offset = static_cast(addr - spaceStart); + + // lower bitmap word in the bitmapWords array encodes lower address + lineIndex = (offset >> kLogBitsPerWord) >> kLogObjAlignment; + + // higher bits in each word encodes lower address + unsigned int bitIndex = kBitsPerWord - 1 - ((offset >> kLogObjAlignment) & (kBitsPerWord - 1)); + bitMask = static_cast(1) << bitIndex; + } + + // the start and end of memory space covered by the bitmap. 
+ address_t spaceStart; + address_t spaceEnd; + address_t curEnd; + bool isInitialized; + std::atomic *bitmapBegin; + size_t bitmapSize; +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_MRT_BITMAP_H diff --git a/src/mrt/compiler-rt/include/collector/native_gc.h b/src/mrt/compiler-rt/include/collector/native_gc.h new file mode 100644 index 0000000000..e3c028470d --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/native_gc.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_NATIVEGC_H +#define MAPLE_RUNTIME_NATIVEGC_H + +#include +#include +#include + +#include "globals.h" +#include "mm_config.h" +#include "gc_log.h" + +// Define Native Trigger GC model and data structure. +namespace maplert { +const uint32_t kDefaultNativeEpochSeconds = 15; +const uint32_t kEpochSecondRatio = 1000; // set ratio to 1000 +// Record Native GC Epoch to determin if trigger GC or RP +class NativeEpochStats { + public: + NativeEpochStats() { + Init(); + } + ~NativeEpochStats() = default; + // Used in PostForChild + void SetEpochInterval(uint32_t seconds) { + Init(seconds); + } + + void CheckNativeGC(); + + static inline NativeEpochStats &Instance() { + return instance; + } + + bool isEnabled() const { + return epochInterval > 0; + } + + void Init(uint32_t epochSeconds = kDefaultNativeEpochSeconds); + private: + static constexpr float kEpochGCEagerDeltaRatio = 1.4; + static constexpr float kEpochGCDeltaRatio = 1.2; + static constexpr float kEpochRPDeltaRatio = 1.1; + __attribute__((visibility("default"))) static NativeEpochStats instance; + // only RP thread will check and update these values, no need atomic + size_t epochMin; + size_t epochMax; + size_t epochTotal; + + size_t curGCWatermark; + size_t curRPWatermark; + + uint32_t epochInterval; + uint32_t curEpochIndex; + + bool RPTriggered; + bool logNativeInfo; +}; + +class NativeGCStats { + public: + typedef std::ratio<11, 20> NativeDiscountRatio; // native bytes discount ratio when compared with java bytes + static constexpr float kSkipNativeGcFactor = 0.0; + static constexpr float kTriggerNativeGcFactor = 1.0; + static constexpr float kWaitNativeGcFactor = 4.0; + static constexpr size_t kHugeNativeBytes = 1 * maple::GB; + static constexpr size_t kOldDiscountFactor = 65536; + static constexpr size_t kNativeWatermarkExtra = 2 * maple::MB; + static constexpr size_t kNativeWatermarkFactor = 8; + static constexpr size_t kCheckInterval = 32; + static constexpr size_t kCheckBytesThreshold = 300000; + static constexpr double kFrontgroundGrowthRate = 3.0; + static constexpr double kBackgroundGrowthRate = 1.0; + + size_t GetNativeBytes(); + + static inline NativeGCStats &Instance() { + return instance; + } + + void SetIsEpochBasedTrigger(bool isEpochBased) { + isEpochBasedTrigger.store(isEpochBased, std::memory_order_relaxed); + NativeEpochStats::Instance().Init(isEpochBased ? 
kDefaultNativeEpochSeconds : 0); + } + + void NotifyNativeAllocation() { + if (isEpochBasedTrigger.load(std::memory_order_relaxed)) { + return; + } + nativeObjectNotified.fetch_add(kCheckInterval, std::memory_order_relaxed); + CheckForGC(); + } + + void RegisterNativeAllocation(size_t bytes) { + if (isEpochBasedTrigger.load(std::memory_order_relaxed)) { + return; + } + nativeBytesRegistered.fetch_add(bytes, std::memory_order_relaxed); + size_t objectsNotified = nativeObjectNotified.fetch_add(1, std::memory_order_relaxed); + if (UNLIKELY((objectsNotified % kCheckInterval) + 1 == kCheckInterval || bytes > kCheckBytesThreshold)) { + CheckForGC(); + } + } + + void RegisterNativeFree(size_t bytes) { + if (isEpochBasedTrigger.load(std::memory_order_relaxed)) { + return; + } + size_t allocated = nativeBytesRegistered.load(std::memory_order_relaxed); + size_t newFreedBytes; + do { + newFreedBytes = std::min(allocated, bytes); + } while (!nativeBytesRegistered.compare_exchange_weak(allocated, allocated - newFreedBytes)); + } + + // should be called after GC finished. + void OnGcFinished() { + // update native bytes allocated. + const size_t nativeBytes = GetNativeBytes(); + oldNativeBytesAllocated.store(nativeBytes, std::memory_order_relaxed); + LOG2FILE(kLogtypeGc) << "Native bytes updated to " << nativeBytes << '\n'; + } + + private: + __attribute__((visibility("default"))) static NativeGCStats instance; + std::atomic isEpochBasedTrigger = { false }; + std::atomic nativeBytesRegistered = { 0 }; + std::atomic nativeObjectNotified = { 0 }; + std::atomic oldNativeBytesAllocated = { 0 }; + + void CheckForGC(); + float NativeGcFactor(size_t currentNativeBytes); +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_NATIVEGC_H diff --git a/src/mrt/compiler-rt/include/collector/rc_inline.h b/src/mrt/compiler-rt/include/collector/rc_inline.h new file mode 100644 index 0000000000..f838f380e6 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/rc_inline.h @@ -0,0 +1,329 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_RC_INLINE_H +#define MAPLE_RUNTIME_RC_INLINE_H + +#include "sizes.h" + +namespace maplert { +static void inline HandleRCError(address_t obj) { + AbortWithHeader(obj); +} + +template +static inline uint32_t GetNewRCHeaderStrong(uint32_t header) { + static_assert(strongRCDelta == 1 || strongRCDelta == -1 || strongRCDelta == 0, "Invalid strong rc delta"); + if (strongRCDelta == 0) { + return header; + } + uint32_t newHeader = static_cast((static_cast(header)) + strongRCDelta); + if (updateColor) { + uint32_t color = (strongRCDelta > 0) ? 
kRCCycleColorBlack : kRCCycleColorBrown; + newHeader = (newHeader & ~kRCCycleColorMask) | color; + } + return newHeader; +} + +template +static inline uint32_t GetNewRCHeaderWeak(uint32_t header) { + static_assert(weakRCDelta == 1 || weakRCDelta == -1 || weakRCDelta == 0, "Invalid weak rc delta"); + if (weakRCDelta == 0) { + return header; + } + uint32_t weakRC = GetWeakRCFromRCHeader(header); + if (weakRC == kMaxWeakRC) { + return header; + } + + weakRC = static_cast((static_cast(weakRC)) + weakRCDelta); + uint32_t newHeader = header & ~kWeakRcBits; + newHeader |= ((weakRC << kWeakRcBitsShift) & kWeakRcBits); + return newHeader; +} + +template +static inline uint32_t GetNewRCHeaderResurrectWeak(uint32_t header) { + static_assert(resurrectWeakRCDelta == 1 || resurrectWeakRCDelta == -1 || resurrectWeakRCDelta == 0, + "Invalid weak rc delta"); + if (resurrectWeakRCDelta == 0) { + return header; + } + uint32_t resurrectWeakRC = GetResurrectWeakRCFromRCHeader(header); + if (resurrectWeakRC == kMaxResurrectWeakRC) { + return header; + } + + resurrectWeakRC = + static_cast((static_cast(resurrectWeakRC)) + resurrectWeakRCDelta); + uint32_t newHeader = header & ~kResurrectWeakRcBits; + newHeader |= ((resurrectWeakRC << KResurrectWeakRcBitsShift) & kResurrectWeakRcBits); + return newHeader; +} + +// Update reference count non-atomically: +// 1. During backup tracing STW time +// 2. Or for non-escaped objects +// No color update needed, because no racing with cycle pattern in above scenario +template +static inline uint32_t UpdateRC(address_t objAddr) { + uint32_t oldHeader = RCHeader(objAddr); + StatsRCOperationCount(objAddr); + if (UNLIKELY(SkipRC(oldHeader))) { + return oldHeader; + } + uint32_t newHeader = GetNewRCHeaderStrong(oldHeader); + newHeader = GetNewRCHeaderWeak(newHeader); + newHeader = GetNewRCHeaderResurrectWeak(newHeader); + RefCountLVal(objAddr) = newHeader; + return oldHeader; +} + +// Update reference count atomically: +// 1. Mutator update RC +// 2. Backup tracing: parallel/concurrent sweep +template +static inline uint32_t AtomicUpdateRC(address_t objAddr) { + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + StatsRCOperationCount(objAddr); + if (UNLIKELY(SkipRC(oldHeader))) { + return oldHeader; + } + newHeader = GetNewRCHeaderStrong(oldHeader); + newHeader = GetNewRCHeaderWeak(newHeader); + newHeader = GetNewRCHeaderResurrectWeak(newHeader); + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return oldHeader; +} + +// Check what to do next after Dec +// 1. Release object if: +// 1.1 weak collected bit is set, and all rc is zero +// 1.2 weak collected bit is not set and only weak rc is 1, strong rc and resurrect weak rc is 0 +// 2. 
Collected object weak rc if: +// 2.1 weak collected bit is not set and strong rc and resurrect rc is 0 +static constexpr uint32_t kReleaseObject = 0x0; +static constexpr uint32_t kCollectedWeak = 0x1; +static constexpr uint32_t kNotRelease = 0x2; +template +static inline uint32_t CanReleaseObj(uint32_t oldHeader) { + static_assert(strongRCDelta + weakRCDelta + resurrectWeakRCDelta == -1, "only one with -1"); + static_assert(strongRCDelta * weakRCDelta * resurrectWeakRCDelta == 0, "only one with -1"); + uint32_t oldRC = oldHeader & (kWeakCollectedBit | kWeakRcBits | kResurrectWeakRcBits | kRCBits); + if (strongRCDelta != 0) { + // old header is strong rc = 1, resurrect weak rc = 0, weak rc = 0 and collected bit set + if ((oldRC == (1 + kWeakRCOneBit)) || (oldRC == (1 + kWeakCollectedBit))) { + return kReleaseObject; + } else if ((oldHeader & (kWeakCollectedBit | kResurrectWeakRcBits | kRCBits)) == 1) { + return kCollectedWeak; + } + } else if (weakRCDelta != 0) { + if ((oldRC == (kWeakRCOneBit + kWeakRCOneBit)) || (oldRC == (kWeakRCOneBit + kWeakCollectedBit))) { + return kReleaseObject; + } + } else { + // Dec resurrect weak rc + if (oldRC == (kResurrectWeakOneBit + kWeakRCOneBit) || oldRC == (kResurrectWeakOneBit + kWeakCollectedBit)) { + return kReleaseObject; + } else if ((oldHeader & (kWeakCollectedBit | kResurrectWeakRcBits | kRCBits)) == kResurrectWeakOneBit) { + return kCollectedWeak; + } + } + return kNotRelease; +} + +template +static inline uint32_t AtomicDecRCAndCheckRelease(address_t objAddr, uint32_t &releaseState) { + static_assert(strongRCDelta == -1 || strongRCDelta == 0, "Invalid dec rc delta"); + static_assert(weakRCDelta == -1 || weakRCDelta == 0, "Invalid dec rc delta"); + static_assert(resurrectWeakRCDelta == -1 || resurrectWeakRCDelta == 0, "Invalid dec rc delta"); + static_assert(strongRCDelta == -1 || weakRCDelta == -1 || resurrectWeakRCDelta == -1, "must have one -1"); + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + StatsRCOperationCount(objAddr); + releaseState = kNotRelease; + if (UNLIKELY(SkipRC(oldHeader))) { + return oldHeader; + } + newHeader = GetNewRCHeaderStrong(oldHeader); + newHeader = GetNewRCHeaderWeak(newHeader); + newHeader = GetNewRCHeaderResurrectWeak(newHeader); + // update release state + // release: weak rc is 1, resurrect and strong rc and weak collected bit not set + // weak rc is 0, resurrect and strong rc and weak collected bit set + if (IsWeakCollectedFromRCHeader(newHeader)) { + if (GetTotalRCFromRCHeader(newHeader) == 0) { + releaseState = kReleaseObject; + } + } else { + if (GetTotalRCFromRCHeader(newHeader) == kWeakRCOneBit) { + releaseState = kReleaseObject; + } else if (checkWeakCollected && (newHeader & (kRCBits | kResurrectWeakRcBits)) == 0) { + releaseState = kCollectedWeak; + newHeader = newHeader | kWeakCollectedBit; + } + } + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return oldHeader; +} + +template +static inline bool IsInvalidDec(uint32_t oldHeader) { + static_assert(strongRCDelta + weakRCDelta + resurrectWeakRCDelta == -1, "only one with -1"); + static_assert(strongRCDelta * weakRCDelta * resurrectWeakRCDelta == 0, "only one with -1"); + if (IsRCOverflow(oldHeader)) { + return false; + } + if (strongRCDelta != 0) { + return (oldHeader & kRCBits) == 0; + } else if (weakRCDelta != 0) { + return (oldHeader & kWeakRcBits) == 0; + } + return (oldHeader & kResurrectWeakRcBits) == 0; +} + +// Update color 
atomically: +// 1. Cycle pattern match +static inline uint32_t AtomicUpdateColor(address_t objAddr, uint32_t color) { + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + StatsRCOperationCount(objAddr); + if (UNLIKELY(SkipRC(oldHeader))) { + return oldHeader; + } + newHeader = (oldHeader & ~kRCCycleColorMask) | color; + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return oldHeader; +} + +static inline bool TryAtomicIncStrongRC(address_t objAddr) { + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + StatsRCOperationCount(objAddr); + if (UNLIKELY(SkipRC(oldHeader))) { + return true; + } + if (UNLIKELY(GetRCFromRCHeader(oldHeader) == 0)) { + return false; + } + newHeader = GetNewRCHeaderStrong<1, true>(oldHeader); + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return true; +} + +// RC Java Reference and Weak Global processing +// Operatoins +// 1. Collect: +// 1.1. weak rc == strong rc is equal, set weak collected bit +// 1.2. weak rc == (strong rc - matched cycle rc), set weak collected bit +// 1.3. If weak collected bit is set, perform clearAndEnqueue/clear WGRT +// 2. Process finalizable +// 2.1 add finalizable queue +// 2.2 clear weak collected bit: object is resurrected +// 3. Get +// If weak collected bit is on, Reference.get and Weak Global.get return null +// +// delta is used in cycle collect for referent +// 1. when process reference: check cycle with rc (rc-weakrc) +// 2. if match, invoke AtomicCheckWeakCollectable(obj, rc-weakrc) +// no overflow/skipRC check! should be handled by caller! +template +static inline bool AtomicCheckWeakCollectable(address_t objAddr, uint32_t delta) { + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + if (IsWeakCollectedFromRCHeader(oldHeader)) { + return false; + } + uint32_t strongRC = GetRCFromRCHeader(oldHeader); + if (strongRC != delta) { + return false; + } + if (checkResurrectWeak) { + if (oldHeader & kResurrectWeakRcBits) { + return false; + } + } + newHeader = oldHeader | kWeakCollectedBit; + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return true; +} + +// invoked in reference processor thread or muator thread +// 1. reference process clear kWeakCollectedBit for finalizable +// 2. mutator set obj as referent again and need clear corrsponding bit +// no overflow/skipRC check! should be handled by caller! +static inline void AtomicClearWeakCollectable(address_t objAddr) { + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + do { + if (!(oldHeader & kWeakCollectedBit)) { + return; + } + newHeader = oldHeader & (~kWeakCollectedBit); + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return; +} + +// Load and inc Referent, load can from +// 1. Weak Global.get +// 2. SoftReference.get +// 3. 
WeakReference.get +// +// If object 1) has no weak rc 2) weak collected bit set return 0 +// Otherwise, try inc weak rc +template +static inline address_t AtomicIncLoadWeak(address_t objAddr) { + std::atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + do { + if (oldHeader & kWeakCollectedBit) { + return 0; + } + StatsRCOperationCount(objAddr); + if (UNLIKELY(SkipRC(oldHeader))) { + return objAddr; + } + if (incWeak) { + // weak collected bit not set, weak rc is 1 means no other weak reference + if (GetWeakRCFromRCHeader(oldHeader) == 1) { + return 0; + } + newHeader = GetNewRCHeaderWeak<1>(oldHeader); + } else { + newHeader = GetNewRCHeaderStrong<1, true>(oldHeader); + } + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return objAddr; +} +} // namespace maplert + +#endif // MAPLE_RUNTIME_RC_INLINE_H diff --git a/src/mrt/compiler-rt/include/collector/rc_reference_processor.h b/src/mrt/compiler-rt/include/collector/rc_reference_processor.h new file mode 100644 index 0000000000..82e34661d4 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/rc_reference_processor.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_COMPILER_RT_RC_REFERENCE_PROCESSOR_H +#define MAPLE_COMPILER_RT_RC_REFERENCE_PROCESSOR_H + +#include +#include +#include "mm_config.h" +#include "address.h" +#include "sizes.h" +#include "mrt_reference_api.h" +#include "mrt_well_known.h" +#include "rp_base.h" + +namespace maplert { +class RCReferenceProcessor : public ReferenceProcessor { + public: + static inline RCReferenceProcessor &Instance() { + return static_cast(ReferenceProcessor::Instance()); + } + RCReferenceProcessor(); + ~RCReferenceProcessor() = default; + void Init() override; + void Fini() override; + void VisitFinalizers(RefVisitor &visitor) override; + + // async release queue + void AddAsyncReleaseObj(address_t obj, bool isMutator); + void ClearAsyncReleaseObjs(); + void ProcessAsyncReleaseObjs(); + void VisitAsyncReleaseObjs(const RefVisitor &vistor); + // stats + inline void CountRecent(uint32_t type) { + ++(recentCount[type]); + } + inline uint32_t RecentCount(uint32_t type) { + return recentCount[type]; + } + inline void AddAgedProcessedCount() { + ++(numProcessedAgedRefs[curProcessingRef]); + } + bool CheckAndSetReferenceFlags(); + bool CheckAndUpdateAgedReference(uint32_t type); // check if perfrom aged reference process after young + bool CheckAndAddFinalizable(address_t obj); + void TransferFinalizblesOnFork(ManagedList &toFinalizables); + address_t TransferEnquenenableReferenceOnFork(uint32_t type); + protected: + bool ShouldStartIteration() override; + void LogRefProcessorBegin() override; + void LogRefProcessorEnd() override; + private: + void PreIteration() override; + void PostIteration() override; + void PreExitDoFinalize() override; + void PostProcessFinalizables() override; + void PostFinalizable(address_t obj) override; + void 
PreAddFinalizables(address_t obj[], uint32_t count, bool needLock) override; + void PostAddFinalizables(address_t obj[], uint32_t count, bool needLock) override; + bool SpecializedAddFinalizable(address_t obj) override; + void EnqeueReferences() override; + // RP check and start + inline bool HasReferenceFlag(uint32_t type) { + return referenceFlags & RPMask(type); + } + + // finalization + ManagedList pendingFinalizables; // pending list for RC, workaround for too fast finalize + ManagedList pendingFinalizablesPrev; // Prev to record next list for processing + // release queue + std::mutex releaseQueueLock; + ManagedDeque asyncReleaseQueue; // release queue for async release + ManagedDeque workingAsyncReleaseQueue; // working async release queue + // reference processing flags + uint32_t referenceFlags; // which references to process + uint32_t agedReferenceFlags; // which aged references to process + uint32_t numProcessedAgedRefs[kRPTypeNum]; // aged reference processed from begining + uint32_t numLastProcessedAgedRefs[kRPTypeNum]; + volatile uint32_t recentCount[kRPTypeNum]; // recent reference count added to list + uint32_t agedReferenceCount[kRPTypeNum]; // how many iterations aged is skipped + uint32_t hungryCount; // how many iteration not process RP + uint32_t agedHungryCount; // how many iteration not process aged RP +}; + + +// add a (newly created) reference object for later processing +void AddNewReference(address_t obj, uint32_t classFlag); +void MrtVisitReferenceRoots(AddressVisitor visitor, uint32_t flags); + +// used by gc thread for visit and process references in non-parallel or parallel mode. +void MRT_GCVisitReferenceRoots(RefVisitor &visitor, uint32_t flags); +void MRT_ParallelVisitReferenceRoots(MplThreadPool &threadPool, RefVisitor &visitor, uint32_t flags); + +uint32_t MrtRpEpochIntervalMs(); +} +#endif // MAPLE_COMPILER_RT_REFERENCE_PROCESSOR_H diff --git a/src/mrt/compiler-rt/include/collector/rp_base.h b/src/mrt/compiler-rt/include/collector/rp_base.h new file mode 100644 index 0000000000..3728ff890f --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/rp_base.h @@ -0,0 +1,261 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_COMPILER_RT_RP_BASE_H +#define MAPLE_COMPILER_RT_RP_BASE_H + +#include +#include +#include + +#include "mm_config.h" +#include "address.h" +#include "sizes.h" +#include "allocator/page_allocator.h" + +namespace maplert { +constexpr float kPercentageDivend = 100.0f; + +// flags for RP visitor and type +enum RPType : uint32_t { + kRPWeakRef = 0, // weak reference + kRPSoftRef, // soft reference + kRPPhantomRef, // phantom and cleaner + kRPFinalizer, // finalizers + kRPWeakGRT, // weak global reference + kRPReleaseQueue, // release queue for RC + kRPTypeNum +}; +constexpr uint32_t kRPTypeNumCommon = 4; // previous 4 type is shared by GC and RC +constexpr uint32_t kRPAllFlags = ((1U << kRPTypeNum) - 1); + +static inline uint32_t RPMask(uint32_t type) { + __MRT_ASSERT(type >= kRPWeakRef && type <= kRPReleaseQueue, "invalid type"); + return 1U << type; +} + +using LockType = std::mutex; +using LockGuard = std::lock_guard; + +template +using ManagedForwardList = std::forward_list>; + +template +using ManagedDeque = std::deque>; + +template +using ManagedList = std::list>; + +// raw load access, no RC book-keeping +static inline address_t ReferenceGetReferent(address_t reference) { + return LoadRefField(reference, WellKnown::kReferenceReferentOffset); +} + +static inline address_t ReferenceGetQueue(address_t reference) { + return LoadRefField(reference, WellKnown::kReferenceQueueOffset); +} + +static inline address_t ReferenceGetPendingNext(address_t reference) { + return LoadRefField(reference, WellKnown::kReferencePendingnextOffset); +} + +static inline void ReferenceSetPendingNext(address_t reference, address_t next) { + StoreRefField(reference, WellKnown::kReferencePendingnextOffset, next); +} + +// raw store 0 access, no RC book-keeping +static inline void ReferenceClearReferent(address_t reference) { + StoreRefField(reference, WellKnown::kReferenceReferentOffset, 0); +} + +// there are some policy to recycle SoftReference +// 1. NeverClearPolicy is used for GCtype: Force. +// 2. ClearAllPolicy is used for GCtype: OOM. +// 3. other GCtype and RC will use the CurrentHeapPolicy. 
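+// (with CurrentHeapPolicy, roughly one query in every maxInterval clears
+// soft references; a maxInterval of 0 degrades to clearing on every query)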
+// CurrentHeapPolicy is the avaliable memory of current processor +class SoftReferencePolicy { + public: + virtual bool ShouldClearSoftRef() = 0; + virtual ~SoftReferencePolicy() = default; + virtual void Init() {} +}; + +class NeverClearPolicy : public SoftReferencePolicy { + public: + bool ShouldClearSoftRef() override { + return false; + } +}; + +class ClearAllPolicy : public SoftReferencePolicy { + public: + bool ShouldClearSoftRef() override { + return true; + } +}; + +class CurrentHeapPolicy : public SoftReferencePolicy { + public: + CurrentHeapPolicy() : counter(1), maxInterval(0) {} + ~CurrentHeapPolicy() = default; + void Init() override; + uint32_t MaxInterval() const { + return maxInterval; + } + bool ShouldClearSoftRef() override { + if (maxInterval == 0) { + return true; + } + uint32_t cnt = counter.fetch_add(1, std::memory_order_relaxed); + return (cnt % maxInterval) == 0; + } + private: + std::atomic counter; + uint32_t maxInterval; +}; + +class ReferenceProcessor { + public: + ReferenceProcessor(); + virtual ~ReferenceProcessor() = default; + + // GC Roots or util iterators + virtual void VisitFinalizers(RefVisitor &visitor); // finalizable objs visotr, not template for rvalue + virtual void VisitGCRoots(RefVisitor &visitor); + bool ShouldClearReferent(GCReason reason); + void InitSoftRefPolicy(); + + // utils + static inline ReferenceProcessor &Instance() { + return *instance; + } + bool IsCurrentRPThread() const; + void SetIterationWaitTimeMs(uint32_t waitTime) { + iterationWaitTime = waitTime; + } + static MethodMeta *EnqueueMethod() { + return enqueueMethod; + } + + // notify & wait for RP processing, invoked after GC/Naive GC heurstic, start RP iteration + void Notify(bool processAll); + void NotifyBackgroundGC(bool force); + + // notify & wait for RP start + void WaitStarted(); + + static void Create(CollectorType type); + static void SwitchToGCOnFork(); + virtual void Run(); // now virtual, tobe devirtualized + virtual void Init(); + virtual void Fini() {}; + virtual void Stop(bool finalize); + virtual void WaitStop(); + virtual void PreSwitchCollector(); + + void RunFinalization(); + void AddFinalizable(address_t obj, bool needLock); + void AddFinalizables(address_t objs[], uint32_t count, bool needLock); + void LogFinalizeInfo(); + + static void Enqueue(address_t reference); + + void CountProcessed() { // tobe in private + ++numProcessedRefs[curProcessingRef]; + } + void SetProcessingType(uint32_t type) { // tobe private + curProcessingRef = type; + } + + static uint32_t GetRPTypeByClassFlag(uint32_t classFlag) { + uint32_t type = kRPWeakRef; + if (classFlag & (modifier::kClassCleaner | modifier::kClassPhantomReference)) { + type = kRPPhantomRef; + } else if (classFlag & modifier::kClassWeakReference) { + type = kRPWeakRef; + } else if (classFlag & modifier::kClassSoftReference) { + type = kRPSoftRef; + } else { + __MRT_ASSERT(false, "not expected reference"); + } + return type; + } + protected: + virtual bool ShouldStartIteration() = 0; + void NotifyStrated(); + void Wait(uint32_t timeoutMilliSeconds); + void ProcessFinalizables(); + void ProcessFinalizablesList(ManagedList &list); + // run process + virtual void PreIteration(); // pre reference processing iteration + virtual void PostIteration() {} + virtual void PreExitDoFinalize(); + virtual void DoChores(); + + std::mutex wakeLock; + std::condition_variable wakeCondition; // notify RP processing continue + + std::mutex startedLock; // notify RP thread is started + std::condition_variable startedCondition; + 
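+  // Startup handshake: WaitStarted() blocks on startedCondition until the
+  // RP thread reports itself started via NotifyStrated(); the flags below
+  // record the thread's lifecycle.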
volatile bool RPStarted; + + volatile pthread_t threadHandle; // thread handle to RP thread + volatile bool RPRunning; // Initially false and set true after RP thread start, set false when stop + volatile bool doFinalizeOnStop; // Should perfrom run finalization when stop RP thread + + uint32_t iterationWaitTime; + std::atomic processAllRefs; + + // finalization + std::mutex finalizersLock; // lock list swap/add + std::mutex finalizerProcessingLock; // finalizer processing lock, for runFinalization sync + std::mutex runFinalizationLock; // runFinalization lock, for multiple run Finalization sync + ManagedList finalizables; // candiate objects to perform finalize method + ManagedList workingFinalizables; // finlaize working queue + ManagedList runFinalizations; // finalizerables processed by runFinalization + // stats + virtual void LogRefProcessorBegin(); + virtual void LogRefProcessorEnd(); + uint32_t curProcessingRef; + uint32_t numProcessedRefs[kRPTypeNum]; // reference processed from begining used in RPLog + uint32_t numLastProcessedRefs[kRPTypeNum]; + // background gc + std::atomic_bool hasBackgroundGC; // back ground gc, tobe moved to task based GC thread + uint64_t timeRefProcessorBegin; + uint64_t timeRefProcessUsed; + uint64_t timeCurrentRefProcessBegin; + private: + void RunBackgroundGC(); + bool forceBackgroundGC; // force background GC can not be canceled, used in start up + uint64_t catchBGGcJobTime; + // finalize + virtual void PostProcessFinalizables() {} // inovke after finish process finalizables in RP thread + virtual void PostFinalizable(address_t) {} // inovke after process signle finalizables + virtual void PreAddFinalizables(address_t obj[], uint32_t count, bool needLock) = 0; + virtual void PostAddFinalizables(address_t obj[], uint32_t count, bool needLock) = 0; + virtual bool SpecializedAddFinalizable(address_t) { + return false; + } + // reference + virtual void EnqeueReferences() = 0; + // fields + static ReferenceProcessor *instance; + static MethodMeta *enqueueMethod; + // Policy used in gc + NeverClearPolicy forceGcSoftPolicy; + ClearAllPolicy oomGcSoftPolicy; + CurrentHeapPolicy heapGcSoftPolicy; +}; +} +#endif // MAPLE_COMPILER_RT_REFERENCE_PROCESSOR_H diff --git a/src/mrt/compiler-rt/include/collector/satb_buffer.h b/src/mrt/compiler-rt/include/collector/satb_buffer.h new file mode 100644 index 0000000000..7ecd156496 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/satb_buffer.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_SATB_BUFFER_H +#define MAPLE_RUNTIME_SATB_BUFFER_H + +#include "allocator/page_pool.h" +#include "collector/mrt_bitmap.h" + +namespace maplert { +// snapshot at the beginning buffer +// mainly used to buffer modified field of mutator write +class SatbBuffer { + static constexpr size_t kInitialPages = 64; // 64 pages of initial satb buffer + static constexpr size_t kNodeSize = 512 - 8; // size of SatbBuffer node, which can store 63 entries + public: + static SatbBuffer &Instance(); + + class Node { + friend class SatbBuffer; + public: + Node() { + top = reinterpret_cast(this + 1); + next = nullptr; + } + ~Node() = default; + bool IsEmpty() const { + return reinterpret_cast(top) == (reinterpret_cast(this) + sizeof(Node)); + } + bool IsFull() const { + static_assert((sizeof(Node) % sizeof(address_t*)) == 0, "Satb node must be aligned"); + return reinterpret_cast(top) == (reinterpret_cast(this) + kNodeSize); + } + void Push(address_t obj) { + *top = obj; + top++; + } + template + void GetObjects(T &stack) const { + address_t *start = reinterpret_cast(const_cast(this + 1)); + __MRT_ASSERT(top <= reinterpret_cast(reinterpret_cast(this) + kNodeSize), "invalid node"); + (void)std::copy(start, top, std::back_inserter(stack)); + } + private: + address_t *top; + Node *next; + }; + + struct Page { + Page *next; + }; + + // there is no need to use LL/SC to avoid ABA problem, becasue Nodes are all unique. + template + class LockFreeList { + friend class SatbBuffer; + public: + LockFreeList() : head(nullptr) {} + ~LockFreeList() = default; + + void Reset() { + head = nullptr; + } + + void Push(T *n) { + T *old = head.load(std::memory_order_relaxed); + do { + n->next = old; + } while (!head.compare_exchange_weak(old, n, std::memory_order_release, std::memory_order_relaxed)); + } + + T *Pop() { + T *old = head.load(std::memory_order_relaxed); + do { + if (old == nullptr) { + return nullptr; + } + } while (!head.compare_exchange_weak(old, old->next, std::memory_order_release, std::memory_order_relaxed)); + old->next = nullptr; + return old; + } + + T *PopAll() { + T *old = head.load(std::memory_order_relaxed); + while (!head.compare_exchange_weak(old, nullptr, std::memory_order_release, std::memory_order_relaxed)) {}; + return old; + } + private: + std::atomic head; + }; + + void EnsureGoodNode(Node *&node) { + if (node == nullptr) { + node = freeNodes.Pop(); + } else if (node->IsFull()) { + // means current node is full + retiredNodes.Push(node); + node = freeNodes.Pop(); + } else { + // not null & have slots + return; + } + if (node == nullptr) { + // there is no free nodes in the freeNodes list + Page *page = GetPages(maple::kPageSize); + Node *list = ConstructFreeNodeList(page, maple::kPageSize); + node = list; + Node *cur = list->next; + node->next = nullptr; + while (cur != nullptr) { + Node *next = cur->next; + freeNodes.Push(cur); + cur = next; + } + } + } + bool ShouldEnqueue(address_t obj) { + if (markMap->IsObjectMarked(obj)) { + return false; + } + return !enqueueMap.MarkObject(obj); + } + // must not have thread racing + void Init(MrtBitmap *map) { + size_t initalBytes = kInitialPages * maple::kPageSize; + Page *page = GetPages(initalBytes); + Node *list = ConstructFreeNodeList(page, initalBytes); + freeNodes.head = list; + retiredNodes.head = nullptr; + enqueueMap.Initialize(); + enqueueMap.ResetCurEnd(); + markMap = map; + } + void RetireNode(Node *node) { + retiredNodes.Push(node); + } + // must not have thread racing + void Reset() { + Page *list = arena.head; + 
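+    // Pages were pushed LIFO: the single-page blocks grabbed later by
+    // EnsureGoodNode() sit at the front and are returned one page at a time;
+    // the tail entry is the initial kInitialPages block from Init() and is
+    // returned with its full size.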
while (list->next != nullptr) { + Page *next = list->next; + PagePool::Instance().ReturnPage(reinterpret_cast(list), maple::kPageSize); + list = next; + } + PagePool::Instance().ReturnPage(reinterpret_cast(list), kInitialPages * maple::kPageSize); + arena.head = nullptr; + freeNodes.head = nullptr; + retiredNodes.head = nullptr; + enqueueMap.ResetBitmap(); + markMap = nullptr; + } + template + void GetRetiredObjects(T &stack) { + Node *head = retiredNodes.PopAll(); + while (head != nullptr) { + head->GetObjects(stack); + head = head->next; + } + } + private: + Page *GetPages(size_t bytes) { + Page *page = reinterpret_cast(PagePool::Instance().GetPage(bytes)); + page->next = nullptr; + arena.Push(page); + return page; + } + Node *ConstructFreeNodeList(const Page *page, size_t bytes) const { + address_t start = reinterpret_cast(page) + sizeof(Page); + address_t end = reinterpret_cast(page) + bytes; + Node *cur = nullptr; + Node *head = nullptr; + while (start <= (end - kNodeSize)) { + Node *node = new (reinterpret_cast(start)) Node(); + if (cur == nullptr) { + cur = node; + head = node; + } else { + cur->next = node; + cur = node; + } + start += kNodeSize; + } + return head; + } + + LockFreeList arena; // arena of allocatable area, first area is 64 * 4k = 256k, the rest is 4k + LockFreeList freeNodes; // free nodes, mutator will acquire nodes from this list to record old value writes + LockFreeList retiredNodes; // has been filled by mutator, ready for scan + MrtBitmap enqueueMap; // enqueue bitmap to indicate whether the obj has been enqueued to SatbBuffer + MrtBitmap *markMap; // mark bitmap to filter marked objects from being enqueued to SatbBuffer +}; +} // namespace maplert + +#endif // MAPLE_RUNTIME_SATB_BUFFER_H diff --git a/src/mrt/compiler-rt/include/collector/stats.h b/src/mrt/compiler-rt/include/collector/stats.h new file mode 100644 index 0000000000..8e4b03df2a --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/stats.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_STATS_H +#define MAPLE_RUNTIME_STATS_H + +#include +#include +#include +#include +#include + +#include "mm_config.h" +#include "gc_reason.h" +#include "panic.h" +#include "deps.h" + +namespace maplert { +namespace stats { +// Time unit is nanoseconds. +struct SingleGCRecord { + GCReason reason; + bool isConcurrentMark; + bool async; + uint64_t stw1Time; // First stop-the-world time, or the whole STW if marking is not concurrent. + uint64_t stw2Time; // Second stop-the-world time, or 0 if marking is not concurrent. 
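+  // Example: a concurrent-mark cycle with a 2 ms initial pause and a 1 ms
+  // remark pause yields MaxSTWTime() == 2 ms and TotalSTWTime() == 3 ms;
+  // when marking is not concurrent both reduce to stw1Time.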
+ uint64_t totalGcTime; // Including both marking and sweeping + size_t objectsCollected; + size_t bytesCollected; + size_t bytesSurvived; + size_t newHeapSize; + + inline uint64_t MaxSTWTime() const { + if (isConcurrentMark) { + return std::max(stw1Time, stw2Time); + } else { + return stw1Time; + } + } + + inline uint64_t TotalSTWTime() const { + if (isConcurrentMark) { + return stw1Time + stw2Time; + } else { + return stw1Time; + } + } +}; + +// Public-visible statistics generated by triggered GCs. +// Time unit is nanoseconds. +class GCStats { + public: + // GC record handling. + // Call this at the beginning of GC to initialize the curRec field. + void BeginGCRecord(); + void CommitGCRecord(); + SingleGCRecord &CurrentGCRecord() { + __MRT_ASSERT(curRec != nullptr, "curRec is nullptr. Call BeginGCRecord() first!"); + return *curRec; + } + + // Writers. Collect statistics from different place of the runtime. + void OnCollectorInit(); + void OnAllocAnomaly(); + void OnFreeObject(size_t size) const; + void OnRCAnomaly(); + + // Readers. + uint64_t MaxSTWNanos() const; + size_t NumGCTriggered() const; + size_t AverageMemoryLeak() const; + // returns the bytes claimed by the tracing GC + size_t TotalMemoryLeak() const; + double MemoryUtilization() const; + size_t NumAllocAnomalies() const; + size_t NumRCAnomalies() const; + void ResetMaxSTWNanos(); + void ResetNumGCTriggered(); + void ResetMemoryLeak(); + void ResetMemoryUtility(); + void ResetNumAllocAnomalies(); + void ResetNumRCAnomalies(); + + size_t CurAllocBytes() const; + size_t CurAllocatorCapacity() const; + size_t CurSpaceCapacity() const; + size_t CurGCThreshold() const; + void InitialGCThreshold(const bool isSystem); + void InitialGCProcessName(); + + GCStats(); + ~GCStats() = default; + + private: + std::atomic numGcTriggered; + // Total bytes collected since last reset + // this field is incremented inside OnGCFinished; so we do not need to increment + // on every object release. + size_t totalBytesCollected; + // The number of gc times since last reset + size_t recentGcCount; + // The max bytes collected by gc since last reset + size_t maxBytesCollected; + std::atomic maxStopTheWorldTime; + // The number of allocation requests that failed due to OOM. Reset on every getMemAlloc + std::atomic numAllocAnomalies; + std::atomic numRcAnomalies; + std::atomic currentGcThreshold; + + // Current GC record. Need to be written by many different classes, so we put it here. + std::unique_ptr curRec; + float waterLevelLow; + float waterLevel; + std::string processName; + void UpdateStatistics(const std::unique_ptr &rec); + void Dump(const std::unique_ptr &rec); + // Add the GC record. + // GC thread should use Begin/Current/CommitGCRecord instead. + void OnGCFinished(const std::unique_ptr &rec); +}; + +extern ImmortalWrapper gcStats; +} // namespace stats +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/collector/task_queue.h b/src/mrt/compiler-rt/include/collector/task_queue.h new file mode 100644 index 0000000000..ce5648aa85 --- /dev/null +++ b/src/mrt/compiler-rt/include/collector/task_queue.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLE_RUNTIME_TASK_QUEUE_H +#define MAPLE_RUNTIME_TASK_QUEUE_H + +#include "allocator/page_allocator.h" + +// task queue implementation +namespace maplert { +class ScheduleTaskBase { + public: + enum ScheduleTaskType : int { + kInvalidScheduleType = -1, + kScheduleTaskTypeInvokeGC = 0, // invoke gc task + kScheduleTaskTypeTimeout = 1, // timeout task + kScheduleTaskTypeTerminate = 2, // terminate task + }; + static constexpr uint64_t kIndexExit = 0; + static constexpr uint64_t kSyncIndexStartValue = 1; + static constexpr uint64_t kAsyncIndex = ULONG_MAX; + + ScheduleTaskBase(const ScheduleTaskBase &task) = default; + ScheduleTaskBase(ScheduleTaskType type) : taskType(type), syncIndex(kAsyncIndex) {} + virtual ~ScheduleTaskBase() = default; + ScheduleTaskBase &operator=(const ScheduleTaskBase&) = default; + + ScheduleTaskType GetType() const { + return taskType; + } + + void SetType(ScheduleTaskType type) { + taskType = type; + } + + uint64_t GetSyncIndex() const { + return syncIndex; + } + + void SetSyncIndex(uint64_t index) { + syncIndex = index; + } + + virtual bool NeedFilter() const { + return false; + } + + virtual bool Execute(void *owner) = 0; + + virtual std::string ToString() const { + std::stringstream ss; + ss << ScheduleTaskTypeToString(taskType) << " index=" << syncIndex; + return ss.str(); + } + + protected: + ScheduleTaskType taskType; + uint64_t syncIndex; + + private: + const char *ScheduleTaskTypeToString(ScheduleTaskType type) const { + switch (type) { + case ScheduleTaskType::kScheduleTaskTypeInvokeGC: + return "InvokeGC"; + case ScheduleTaskType::kScheduleTaskTypeTimeout: + return "Timeout"; + case ScheduleTaskType::kScheduleTaskTypeTerminate: + return "Terminate"; + default: + return "Wrong Type"; + } + } +}; + +template +class LocklessTaskQueue { + public: + // this queue manages a list of deduplicated tasks of *a fixed number of* kinds + // each task has a priority directly associated with the kind + // smaller kind (that is, smaller integer) has a higher priority + // high priority task's enqueue might erase all lower-priority tasks + // the intuition is that, (when managing concurrent gc tasks, which are merely + // to lower memory level from the background), maybe we only need the strongest + // one in the queue + // this queue is lockless + void Push(const T &task) { + bool overriding = task.IsOverriding(); + uint32_t taskMask = (1U << task.GetPrio()); + uint32_t oldWord = tqWord.load(std::memory_order_relaxed); + uint32_t newWord = 0; + do { + if (overriding) { + newWord = taskMask | ((taskMask - 1) & oldWord); + } else { + newWord = taskMask | oldWord; + } + } while (!tqWord.compare_exchange_weak(oldWord, newWord, std::memory_order_relaxed)); + } + T Pop() { + uint32_t oldWord = tqWord.load(std::memory_order_relaxed); + uint32_t newWord = 0; + uint32_t dequeued = oldWord; + do { + newWord = oldWord & (oldWord - 1); + dequeued = oldWord; + } while (!tqWord.compare_exchange_weak(oldWord, newWord, std::memory_order_relaxed)); + if (oldWord == 0) { + return T::DoNothing(); + } + // count the number of trailing zeros + return T::FromPrio(__builtin_ctz(dequeued)); + } + void Clear() { + tqWord.store(0, 
std::memory_order_relaxed); + } + private: + std::atomic tqWord = {}; +}; + +template +class TaskQueue { + static_assert(std::is_base_of::value, "T is not a subclass of maplert::ScheduleTaskBase"); + + public: + using TaskFilter = std::function; + using TaskQueueType = std::list>; + + void Init() { + queueSyncIndex = ScheduleTaskBase::kSyncIndexStartValue; + } + + void DeInit() { + std::lock_guard lock(taskQueueLock); + taskQueue.Clear(); + syncTaskQueue.clear(); + LOG(INFO) << "[GC] DeInit task Q done" << maple::endl; + } + + template + uint64_t Enqueue(T &task, TaskFilter &filter) { + if (!sync) { + EnqueueAsync(task); + return ScheduleTaskBase::kAsyncIndex; + } + std::unique_lock lock(taskQueueLock); + TaskQueueType &queue = syncTaskQueue; + + if (!queue.empty() && task.NeedFilter()) { + for (auto iter = queue.rbegin(); iter != queue.rend(); ++iter) { + if (filter(*iter, task)) { + return (*iter).GetSyncIndex(); + } + } + } + task.SetSyncIndex(++queueSyncIndex); + queue.push_back(task); + taskQueueCondVar.notify_all(); + return task.GetSyncIndex(); + } + + void EnqueueAsync(const T &task) { + taskQueue.Push(task); + taskQueueCondVar.notify_all(); + } + + T Dequeue() { + std::cv_status cvResult = std::cv_status::no_timeout; + std::chrono::nanoseconds waitTime(kDefaultTheadTimeoutNs); + while (true) { + std::unique_lock qLock(taskQueueLock); + // check sync queue firstly + if (!syncTaskQueue.empty()) { + T curTask(syncTaskQueue.front()); + syncTaskQueue.pop_front(); + return curTask; + } + + if (cvResult == std::cv_status::timeout) { + taskQueue.Push(T(ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeTimeout)); + } + + T task = taskQueue.Pop(); + if (!task.IsNothing()) { + return task; + } + cvResult = taskQueueCondVar.wait_for(qLock, waitTime); + } + } + + void LoopDrainTaskQueue(void *owner) { + while (true) { + T task = Dequeue(); + if (!task.Execute(owner)) { + DeInit(); + break; + } + } + } + + private: +#if LOG_ALLOC_TIMESTAT + static constexpr uint64_t kDefaultTheadTimeoutNs = 300000L * 1000 * 1000; // default 300s +#else + static constexpr uint64_t kDefaultTheadTimeoutNs = 1000L * 1000 * 1000; // default 1s +#endif + std::mutex taskQueueLock; + uint64_t queueSyncIndex; + TaskQueueType syncTaskQueue; + LocklessTaskQueue taskQueue; + std::condition_variable taskQueueCondVar; +}; +} +#endif diff --git a/src/mrt/compiler-rt/include/collie.h b/src/mrt/compiler-rt/include/collie.h new file mode 100644 index 0000000000..00e8c03799 --- /dev/null +++ b/src/mrt/compiler-rt/include/collie.h @@ -0,0 +1,240 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef COLLIE_H +#define COLLIE_H + +#ifdef __ANDROID__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mrt_api_common.h" +#include "panic.h" +#include "mutator_list.h" + +namespace maplert { +constexpr int kMplWaitCheckInterval = 5; +const int kMaxDelayCount = 3; +constexpr unsigned int kTimerRingCheckInterval = 1; +const int kMplCollieCallbackHistoryMax = 5; +const int kMplCollieCallbackTimewinMax = 60; +const int kInvalidId = -1; + +constexpr int kMplWaitHeavyGcTimeout = 90; // other threads wait for gc finish +constexpr int kMplWaitGcTimeout = 60; // normal gc timeout +constexpr int kMplFinalizerTimeout = 30; +constexpr int kThreadDumpTimeout = 40; +constexpr int kMplCollieMaxRecordVal = 10; + +#define MPLCOLLIE_FLAG_ABORT (1 << 0) +#define MPLCOLLIE_FLAG_PROMOTE_PRIORITY (1 << 7) +#define MPLCOLLIE_FLAG_IN_STW (1 << 8) +#define MPLCOLLIE_FLAG_FOR_STW (1 << 9) +#define MPLCOLLIE_FLAG_IS_STW(flag) (((flag) & MPLCOLLIE_FLAG_IN_STW) || ((flag) & MPLCOLLIE_FLAG_FOR_STW)) + +enum CollieType { + kGCCollie = 0, // wait for GC finish + kProcessFinalizeCollie, // reference processor trigger + kSTWCollie, // wait for stop the world finish + kThreadDumpCollie, + kCollieTypeMax +}; + +struct CollieNode { + CollieType type; + bool isUsed; + std::string name; + time_t startTime; + int timeout; + int promoteTimes; + pid_t tid; + void *arg; + uint32_t flag; + void (*callback)(void *args); + CollieNode *next = nullptr; + void Reset() { + next = nullptr; + } +}; + +class CollieList { + public: + CollieList() : head(nullptr) {} + + void Init() { + head = nullptr; + } + + void Insert(CollieNode &target) { + if (head == nullptr) { + head = ⌖ + target.Reset(); + return; + } + CollieNode *prev = nullptr; + CollieNode *cur = head; + while (cur != nullptr) { + if (target.timeout >= cur->timeout) { + prev = cur; + cur = cur->next; + continue; + } + if (prev == nullptr) { + head = ⌖ + head->next = cur; + } else { + prev->next = ⌖ + target.next = cur; + } + return; + } + prev->next = ⌖ + target.Reset(); + } + + void Remove(CollieNode &target) { + __MRT_ASSERT(head != nullptr, "remove a node from a empty list"); + CollieNode *prev = nullptr; + CollieNode *cur = head; + while (cur != nullptr) { + if (cur != &target) { + prev = cur; + cur = cur->next; + continue; + } + if (prev == nullptr) { + head = cur->next; + target.Reset(); + } else { + prev->next = cur->next; + target.Reset(); + } + return; + } + __MRT_ASSERT(cur != nullptr, "collie list can not find a target node"); + } + + CollieNode *GetHead() const { + return head; + } + + bool IsEmpty() { + return head == nullptr; + } + private: + CollieNode *head; +}; + +struct CollieTimerRing { + CollieList cl; + int timer; +}; + +class MplCollie { + public: + void Init(); + void Fini(); + + MRT_EXPORT int Start(CollieType type, int flag, pid_t tid = maple::GetTid(), + void (*func)(void*) = nullptr, void *arg = nullptr); + MRT_EXPORT void End(int type); + + void StartSTW() { + Start(kSTWCollie, MPLCOLLIE_FLAG_ABORT | MPLCOLLIE_FLAG_FOR_STW); + } + + void EndSTW() { + End(kSTWCollie); + } + + void JoinThread(); + MRT_EXPORT void SetSTWPanic(bool enable); + MRT_EXPORT bool GetSTWPanic(void); + + MRT_EXPORT void FatalPanic(std::string &s, int tid); + void FatalPanicStopTheWorld(std::string &msg); + + private: + int CallbackShouldLimit(int flag); + void CheckTimerRing(CollieNode callbackList[], CollieTimerRing &r, int &count); + static void *CollieThreadEntry(void *arg); + void 
*CollieThreadHandle(); + void CollieTrySleep(void); + void CollieTryWake(void); + void FatalPanicLocked(std::string &msg); + void InitImp(); + void ResetNode(CollieNode &node, int seq); + void RunCallback(CollieNode &cb); + void TimerRingTimeout(); + void ForceEnd(); + pthread_t collieThread; + + int targetTid = kInvalidId; + + int stwID = kInvalidId; + bool runnable = false; // false means the collie thread should stop. + std::mutex initMutex; + std::mutex listMutex; + std::mutex panicMutex; + // used for timing. + std::mutex timerMutex; + std::condition_variable timerCond; + // call back count + unsigned int nrCallback = 0; + + // call back time stamp + time_t timeCallback = 0; + + // to store the time + CollieTimerRing collieTimerRingSTW; + CollieTimerRing collieTimerRingNonSTW; + + // save all the monitor info + CollieNode collieNodesArray[kCollieTypeMax]; + + struct TimerRingSleepControl { + bool threadInSleep; + std::atomic delayCount; + std::unique_ptr sleepMutexPtr; + std::unique_ptr sleepCond; + } trCtl; +}; +#ifdef __ANDROID__ +extern MRT_EXPORT MplCollie mplCollie; +#endif + +class MplCollieScope { + public: + MplCollieScope(CollieType type, int flag, pid_t tid = maple::GetTid(), void (*func)(void*) = nullptr, + void *arg = nullptr) { + mplCollieType = mplCollie.Start(type, flag, tid, func, arg); + } + ~MplCollieScope() { + mplCollie.End(mplCollieType); + } + private: + int mplCollieType = kInvalidId; +}; +} +#endif // __ANDROID__ +#endif // COLLIE_H diff --git a/src/mrt/compiler-rt/include/cpphelper.h b/src/mrt/compiler-rt/include/cpphelper.h new file mode 100644 index 0000000000..136bb943bb --- /dev/null +++ b/src/mrt/compiler-rt/include/cpphelper.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_CPP_HELPER_H +#define MAPLE_RUNTIME_CPP_HELPER_H + +#include +#include "thread_api.h" +#include "mm_utils.h" +#include "cinterface.h" +#include "saferegion.h" + +// helper utilities for runtime C++ code. +namespace maplert { +// Scoped guard for saferegion. +class ScopedEnterSaferegion { + public: + __attribute__ ((always_inline)) + ScopedEnterSaferegion() { + Mutator *mutator = CurrentMutatorPtr(); + stateChanged = (mutator != nullptr) ? mutator->EnterSaferegion(true) : false; + } + + __attribute__ ((always_inline)) + ~ScopedEnterSaferegion() { + if (LIKELY(stateChanged)) { + Mutator *mutator = CurrentMutatorPtr(); // state changed, mutator must be not null + (void)mutator->LeaveSaferegion(); + } + } + private: + bool stateChanged; +}; + +// Scoped guard for access java objects from native code. 
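+// Construction leaves the saferegion so the calling thread may touch managed
+// objects; destruction re-enters it if the state actually changed. The
+// JNIEnv / IThread overloads ignore their argument and delegate to the
+// default constructor.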
+class ScopedObjectAccess { + public: + __attribute__ ((always_inline)) + ScopedObjectAccess() + : mutator(CurrentMutator()), + stateChanged(mutator.LeaveSaferegion()) {} + + __attribute__ ((always_inline)) + explicit ScopedObjectAccess(JNIEnv *env __attribute__((unused))) : ScopedObjectAccess() {} + + __attribute__ ((always_inline)) + explicit ScopedObjectAccess(maple::IThread &self __attribute__((unused))) : ScopedObjectAccess() {} + + __attribute__ ((always_inline)) + ~ScopedObjectAccess() { + if (LIKELY(stateChanged)) { + (void)mutator.EnterSaferegion(false); + } + } + private: + Mutator &mutator; + bool stateChanged; +}; + +// Scoped enable GC for current thread. +class ScopedEnableGC { + public: + ScopedEnableGC() { + MRT_GCInitThreadLocal(false); + } + + __attribute__ ((always_inline)) + ~ScopedEnableGC() { + MRT_GCFiniThreadLocal(); + } +}; + +extern "C" void MRT_SetCurrentCompiledMethod(void *func); +extern "C" void *MRT_GetCurrentCompiledMethod(); + +// return value is in d0 if return type is float/double. +// Since R2CForwardStubXX is assembly code, we have to wrap them with partially +// specialized class template. Note C++ does not support partially specializing +// template method/function. Thus we place these template functions as static methods +// in some template class. +#if defined(__arm__) +extern "C" jlong R2CForwardStubLong(...); +extern "C" void R2CForwardStubVoid(...); + +// note arm compiler always passes return value via r0/r1 for functions with variadic argument list. +// refer to r2c_stub_arm.S to see how this is handled. +extern "C" jfloat R2CForwardStubFloat(...); +extern "C" jdouble R2CForwardStubDouble(...); +#else +extern "C" jlong R2CForwardStubLong(void *func, ...); +extern "C" void R2CForwardStubVoid(void *func, ...); +extern "C" jfloat R2CForwardStubFloat(void *func, ...); +extern "C" jdouble R2CForwardStubDouble(void *func, ...); +#endif + +extern "C" jlong R2CBoxedStubLong(void *func, jvalue *argJvalue, size_t stackSize, size_t dregSize); +extern "C" void R2CBoxedStubVoid(void *func, jvalue *argJvalue, size_t stackSize, size_t dregSize); +extern "C" jfloat R2CBoxedStubFloat(void *func, jvalue *argJvalue, size_t stackSize, size_t dregSize); +extern "C" jdouble R2CBoxedStubDouble(void *func, jvalue *argJvalue, size_t stackSize, size_t dregSize); + +// generic stubs for calling java code from maple runtime. +// setup a stub frame to call java method represented by func +template +class RuntimeStub { + public: + template + static Ret FastCallCompiledMethod(Func &&func, Args&&... args) { +#if defined(__arm__) + MRT_SetCurrentCompiledMethod(reinterpret_cast(func)); + Ret result = reinterpret_cast(R2CForwardStubLong(std::forward(args)...)); +#else + Ret result = (Ret) R2CForwardStubLong(reinterpret_cast(func), std::forward(args)...); +#endif + return result; + } + + template + static jvalue FastCallCompiledMethodJ(Func &&func, Args&&... 
args) { + jvalue result; +#if defined(__arm__) + MRT_SetCurrentCompiledMethod(reinterpret_cast(func)); + result.j = R2CForwardStubLong(std::forward(args)...); +#else + result.j = R2CForwardStubLong(reinterpret_cast(func), std::forward(args)...); +#endif + return result; + } + + template + static Ret SlowCallCompiledMethod(Func func, jvalue *argJvalue, size_t stackSize, size_t dregSize) { + Ret result = (Ret)R2CBoxedStubLong(reinterpret_cast(func), argJvalue, stackSize, dregSize); + return result; + } + + template + static jvalue SlowCallCompiledMethodJ(Func func, jvalue *argJvalue, size_t stackSize, size_t dregSize) { + jvalue result; + result.j = R2CBoxedStubLong(reinterpret_cast(func), argJvalue, stackSize, dregSize); + return result; + } +}; + +// specialized RuntimeStub for returning void +template<> +class RuntimeStub { + public: + template + static void FastCallCompiledMethod(Func &&func, Args&&... args) { +#if defined(__arm__) + MRT_SetCurrentCompiledMethod(reinterpret_cast(func)); + R2CForwardStubVoid(std::forward(args)...); +#else + R2CForwardStubVoid(reinterpret_cast(func), std::forward(args)...); +#endif + return; + } + + template + static void SlowCallCompiledMethod(Func func, jvalue *argJvalue, size_t stackSize, size_t dregSize) { + R2CBoxedStubVoid(reinterpret_cast(func), argJvalue, stackSize, dregSize); + return; + } +}; + +template<> +class RuntimeStub { + public: + template + static jfloat FastCallCompiledMethod(Func &&func, Args&&... args) { +#if defined(__arm__) + MRT_SetCurrentCompiledMethod(reinterpret_cast(func)); + jfloat result = R2CForwardStubFloat(std::forward(args)...); +#else + jfloat result = R2CForwardStubFloat(reinterpret_cast(func), std::forward(args)...); +#endif + return result; + } + + template + static jfloat SlowCallCompiledMethod(Func func, jvalue *argJvalue, size_t stackSize, size_t dregSize) { + jfloat result = R2CBoxedStubFloat(reinterpret_cast(func), argJvalue, stackSize, dregSize); + return result; + } +}; + +template<> +class RuntimeStub { + public: + template + static jdouble FastCallCompiledMethod(Func &&func, Args&&... args) { +#if defined(__arm__) + MRT_SetCurrentCompiledMethod(reinterpret_cast(func)); + jdouble result = R2CForwardStubDouble(std::forward(args)...); +#else + jdouble result = R2CForwardStubDouble(reinterpret_cast(func), std::forward(args)...); +#endif + return result; + } + + template + static jdouble SlowCallCompiledMethod(Func func, jvalue *argJvalue, size_t stackSize, size_t dregSize) { + jdouble result = R2CBoxedStubDouble(reinterpret_cast(func), argJvalue, stackSize, dregSize); + return result; + } +}; + +inline jobject MRT_JNI_AddLocalReference(JNIEnv *env, jobject objAddr) { + if (env == nullptr) { + // it must be from static-binding native method, + // which it's in libcore-static-binding-jni/etc/static-binding-list.txt list. + // we needn't do encode, as native stub needn't do decode. + return objAddr; + } + + maple::IThread *self = maple::IThread::Current(); + return reinterpret_cast( + (self == nullptr) ? nullptr : self->JniAddObj2LocalRefTbl(env, (reinterpret_cast(objAddr)))); +} +} // namespace maplert + +#endif // MAPLE_RUNTIME_CPP_HELPER_H diff --git a/src/mrt/compiler-rt/include/deps.h b/src/mrt/compiler-rt/include/deps.h new file mode 100644 index 0000000000..535cff7fe7 --- /dev/null +++ b/src/mrt/compiler-rt/include/deps.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_DEFS_H +#define MAPLE_RUNTIME_DEFS_H + +#include + +namespace maplert { +// utility class to avoid un-ordered static global destruction +template +class ImmortalWrapper { + public: + using pointer = typename std::add_pointer::type; + using lref = typename std::add_lvalue_reference::type; + + template + ImmortalWrapper(Args && ...args) { + new(buffer) T(std::forward(args)...); + } + ImmortalWrapper(const ImmortalWrapper&) = delete; + ImmortalWrapper &operator=(const ImmortalWrapper&) = delete; + ~ImmortalWrapper() = default; + inline pointer operator->() { + return reinterpret_cast(buffer); + } + + inline lref operator*() { + return reinterpret_cast(buffer); + } + + private: + alignas(T) unsigned char buffer[sizeof(T)]; +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_DEFS_H diff --git a/src/mrt/compiler-rt/include/errno_utils.h b/src/mrt/compiler-rt/include/errno_utils.h new file mode 100644 index 0000000000..9d2842fb82 --- /dev/null +++ b/src/mrt/compiler-rt/include/errno_utils.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_RUNTIME_ERRNO_UTILS_H +#define MPL_RUNTIME_ERRNO_UTILS_H + +#include + +namespace maplert { +std::string ErrnoToString(int errNum); +} // namespace maplert +#endif // MPL_RUNTIME_ERRNO_UTILS_H diff --git a/src/mrt/compiler-rt/include/exception/eh_personality.h b/src/mrt/compiler-rt/include/exception/eh_personality.h new file mode 100644 index 0000000000..d12324325b --- /dev/null +++ b/src/mrt/compiler-rt/include/exception/eh_personality.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef _MPL_EH_PERSONALITY_H +#define _MPL_EH_PERSONALITY_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define EHLOG(level) LOG(level) << " " + +using MrtClass = void*; + +namespace maplert { +const unsigned int kAbnormalFrameTag = 0x55555555; +const unsigned int kNoExceptionTag = 0xFFFFFFFF; + +typedef enum { + kNoException = 0, + kNoFrame = 1, + kNormal = 2, +} EHTableType; +extern "C" { + +/* This section defines global identifiers and their values that are associated + * with interfaces contained in libgcc_s. These definitions are organized into + * groups that correspond to system headers. This convention is used as a + * convenience for the reader, and does not imply the existence of these headers, + * or their content. + * These definitions are intended to supplement those provided in the referenced + * underlying specifications. + * This specification uses ISO/IEC 9899 C Language as the reference programming + * language, and data definitions are specified in ISO C format. The C language + * is used here as a convenient notation. Using a C language description of these + * data objects does not preclude their use by other programming languages. + * http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/libgcc-s-ddefs.html + */ +typedef enum { + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8, +} UnwindReasonCode; + +typedef enum { + kSearchPhase = 1, + kCleanupPhase = 2, +} UnwindAction; + +using _Unwind_Word = uintptr_t; +using _Unwind_Ptr = uintptr_t; +using _Unwind_Exception_Class = uintptr_t; +struct _Unwind_Context; +struct _Unwind_Exception; + +typedef void (*_Unwind_Exception_Cleanup_Fn)(UnwindReasonCode, struct _Unwind_Exception*); + +#if defined(__aarch64__) +struct _Unwind_Exception { + _Unwind_Exception_Class exception_class; + _Unwind_Exception_Cleanup_Fn exception_cleanup; + _Unwind_Word private_1; + _Unwind_Word private_2; +} __attribute__((__aligned__)); +#elif defined(__arm__) +struct _Unwind_Control_Block; +#define _Unwind_Exception _Unwind_Control_Block +#endif + +typedef UnwindReasonCode (*UnwindTraceFn)(const _Unwind_Context*, void*); + +void _Unwind_SetGR(struct _Unwind_Context*, int, _Unwind_Word); + +_Unwind_Word _Unwind_GetIP(struct _Unwind_Context*); +void _Unwind_SetIP(struct _Unwind_Context*, _Unwind_Word); +void _Unwind_Resume(struct _Unwind_Exception*); +UnwindReasonCode _Unwind_Backtrace(UnwindTraceFn, void*); + +#if defined(__arm__) +typedef enum { + _UVRSC_CORE = 0, // INTEGER REGISTER + _UVRSC_VFP = 1, // vfp + _UVRSC_WMMXD = 3, // Intel WMMX data register + _UVRSC_WMMXC = 4, // Intel WMMX control register +} _Unwind_VRS_RegClass; + +typedef enum { + _UVRSD_UINT32 = 0, + _UVRSD_VFPX = 1, + _UVRSD_UINT64 = 3, + _UVRSD_FLOAT = 4, + _UVRSD_DOUBLE = 5 +} _Unwind_VRS_DataRepresentation; + +typedef enum { + _UVRSR_OK = 0, + _UVRSR_NOT_IMPLEMENTED = 1, + _UVRSR_FAILED = 2 +} _Unwind_VRS_Result; + +typedef enum { + _US_VIRTUAL_UNWIND_FRAME = 0, + _US_UNWIND_FRAME_STARTING = 1, + _US_UNWIND_FRAME_RESUME = 2, + _US_ACTION_MASK = 3, + _US_FORCE_UNWIND = 8, + _US_END_OF_STACK = 16 +} _Unwind_State; + +_Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, + uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep); + +_Unwind_VRS_Result 
_Unwind_VRS_Set(_Unwind_Context *context, _Unwind_VRS_RegClass regclass, + uint32_t regno, _Unwind_VRS_DataRepresentation representation, void *valuep); + +typedef uintptr_t _Unwind_EHT_Header; +typedef uintptr_t _uw; + +const unsigned int kExceptionClassNameLength = 8; +const unsigned int kBarrierCachePatternSize = 5; +const unsigned int kCleanupCachePatternSize = 4; + +// UCB: +struct _Unwind_Control_Block { + char exception_class[kExceptionClassNameLength]; + void (*exception_cleanup)(UnwindReasonCode, _Unwind_Control_Block*); + // Unwinder cache, private fields for the unwinder's use + struct { + _uw reserved1; // Forced unwind stop fn, 0 if not forced + _uw reserved2; // Personality routine address + _uw reserved3; // Saved callsite address + _uw reserved4; // Forced unwind stop arg + _uw reserved5; + } + unwinder_cache; + // Propagation barrier cache (valid after phase 1): + struct { + _uw sp; + _uw bitpattern[kBarrierCachePatternSize]; + } + barrier_cache; + // Cleanup cache (preserved over cleanup): + struct { + _uw bitpattern[kCleanupCachePatternSize]; + } + cleanup_cache; + // Pr cache (for pr's benefit): + struct { + _uw fnstart; // function start address + _Unwind_EHT_Header *ehtp; // pointer to EHT entry header word + _uw additional; // additional data + _uw reserved1; + } + pr_cache; + long long int :0; // Force alignment to 8-byte boundary +}; +#endif +} + +struct ScanResults { + uintptr_t tTypeIndex; // > 0 catch handler, < 0 exception spec handler, == 0 a cleanup + uintptr_t landingPad; // null -> nothing found, else something found + bool caughtByJava; // set if exception is caught by some java frame + UnwindReasonCode unwindReason; // One of _URC_FATAL_PHASE1_ERROR, + // _URC_FATAL_PHASE2_ERROR, + // _URC_CONTINUE_UNWIND, + // _URC_HANDLER_FOUND + ScanResults() { + tTypeIndex = 0; + landingPad = 0; + caughtByJava = false; + unwindReason = _URC_FATAL_PHASE1_ERROR; + } +}; + +class EHTable { + public: + ScanResults results; + bool CanCatch(const MrtClass catchType, const void *exObj); + uintptr_t ReadSData4(const uint8_t **p) const; + uintptr_t ReadULEB128(const uint8_t **data) const; + uintptr_t ReadTypeData(const uint8_t **data, bool use32Ref = false); + MrtClass GetMplTypeInfo(uintptr_t tTypeIndex, const uint8_t *classInfo); + void ScanExceptionTable(UnwindAction actions, bool nativeException, const _Unwind_Exception &unwindException); + + void SetScanResultsValue(const uintptr_t tTypeIndex, const uintptr_t landingPad, + const UnwindReasonCode unwindReason, const bool caughtByJava); + + void ParseEHTableHeadInfo(const uint8_t *start); + + EHTable(const uint32_t *funcStart, const uint32_t *funcEnd, const uint32_t *pc) { + funcEndPoint = funcEnd; + funcStartPoint = funcStart; + currentPC = pc; + if ((*funcEndPoint) == kNoExceptionTag) { + type = kNoException; + uintptr_t cleanupOffset = *(funcEndPoint + 1); + uintptr_t cleanupLabel = 0; + if (cleanupOffset != 0) { + cleanupLabel = reinterpret_cast(funcStartPoint) + cleanupOffset; + } + // Found a cleanup + SetScanResultsValue(0, cleanupLabel, _URC_HANDLER_FOUND, false); + } else if ((*funcEndPoint) == kAbnormalFrameTag) { + type = kNoFrame; + SetScanResultsValue(0, 0, _URC_HANDLER_FOUND, false); + } else { + type = kNormal; + uintptr_t lsdaOffset = *funcEndPoint; + ehTableStart = reinterpret_cast(reinterpret_cast(funcStartPoint) + lsdaOffset); + curPtr = ehTableStart; + ParseEHTableHeadInfo(curPtr); + } + } + + private: + EHTableType type; + const uint8_t *curPtr = nullptr; + const uint8_t *ehTableStart = nullptr; + const 
uint32_t *funcStartPoint = nullptr; + const uint32_t *funcEndPoint = nullptr; + const uint32_t *currentPC = nullptr; + const uint8_t *classInfoPoint = nullptr; + const uint8_t *callSiteTableStart = nullptr; + const uint8_t *callSiteTableEnd = nullptr; + const uint8_t *actionTableStart = nullptr; +}; + +#pragma GCC visibility push(hidden) + +#if defined(__aarch64__) +// "MPLJAVA\0" for maple java runtime +static const uint64_t kOurExceptionClass = 0x4d504c4a41564100; +static const uint64_t kGetVendorAndLanguage = 0xFFFFFFFFFFFFFF00; +#elif defined(__arm__) +static const char kOurExceptionClass[kExceptionClassNameLength] = "MPLEC"; +static const uint32_t kGetVendorAndLanguage = 0xFFFFFF00; +#endif + +#pragma GCC visibility pop + +extern "C" { +#if defined(__aarch64__) +__attribute__((__visibility__("default"))) UnwindReasonCode __mpl_personality_v0 (int version, + UnwindAction actions, uintptr_t exceptionClass, const _Unwind_Exception *unwindException, + const _Unwind_Context *context); +#elif defined(__arm__) +__attribute__((__visibility__("default"))) UnwindReasonCode __mpl_personality_v0(_Unwind_State state, + _Unwind_Exception *unwindException, _Unwind_Context *context); +#endif +} // extern "C" +} // namespace maplert +#endif // _MPL_EH_PERSONALITY_H diff --git a/src/mrt/compiler-rt/include/exception/exception_handling.h b/src/mrt/compiler-rt/include/exception/exception_handling.h new file mode 100644 index 0000000000..d067d9de83 --- /dev/null +++ b/src/mrt/compiler-rt/include/exception/exception_handling.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef _MPL_STACK_UNWIND_H_ +#define _MPL_STACK_UNWIND_H_ + +#include +#include +#include +#include +#include "stack_unwinder.h" + +namespace maplert { +const int kMethodDescOffset = 1; + +const int kPrevInsnOffset = 1; + +// offset used to tag a java frame, we adjust return address of a java frame +// with this adderss, so that we can quickly tell whether a frame is java or not. 
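+// A hedged sketch (hypothetical helpers, not used elsewhere): assuming the
+// tag below is simply added to the 4-byte-aligned return address, an odd
+// saved return address identifies a java frame.
+static inline uintptr_t ExampleTagJavaRA(uintptr_t ra) {
+  return ra + 1;  // bias by kJavaFrameRaTag (defined just below)
+}
+static inline bool ExampleIsJavaFrameRA(uintptr_t ra) {
+  return (ra & 1) != 0;  // low bit set => tagged as java
+}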
+const int kJavaFrameRaTag = 1; + +class EHFrameInfo : public JavaFrameInfo { + public: + void LookupExceptionHandler(const _Unwind_Exception &unwindException); + + EHFrameInfo(const JavaFrame &frame, _Unwind_Exception &unwindException) : JavaFrameInfo(frame) { + mUnwindException = &unwindException; + // resolved is false in java function written with assembly + if (resolved) { + LookupExceptionHandler(unwindException); + } + } + EHFrameInfo(const EHFrameInfo&) = default; + ~EHFrameInfo() { + mCatchCode = nullptr; + mCleanupCode = nullptr; + mGeneralHandler = nullptr; + mUnwindException = nullptr; + } + const void *GetGeneralExceptionHandler() const { + return mGeneralHandler; + } + + const void *GetCatchCode() const { + return mCatchCode; + } + + const void *GetReturnAddress() const { + return javaFrame.GetReturnAddress(); + } + + const void *GetFrameAddress() const { + return javaFrame.GetFrameAddress(); + } + + // this will modify the return address of this frame to the exception handler, thus + // after the epilogue is executed, control flow turns to the exception handler. + inline void ChainToGeneralHandler(const void *handler) { + if (mGeneralHandler != nullptr) { + CallChain *fa = javaFrame.GetFrameAddress(); + fa->returnAddress = reinterpret_cast(handler); + } + } + + // frameinfo is usually from the caller frame + inline void ChainToCallerEHFrame(const EHFrameInfo &frameinfo) { + uintptr_t handler = reinterpret_cast(frameinfo.GetGeneralExceptionHandler()); + ChainToGeneralHandler(reinterpret_cast(handler)); + } + + void Dump(const std::string msg, std::stringstream &ss) override { + JavaFrameInfo::Dump(msg, ss); + if (mCleanupCode != nullptr) { + ss << "\t" << "cleanup code : " << std::hex << reinterpret_cast(mCleanupCode) << std::dec; + } + if (mCatchCode != nullptr) { + ss << "\t" << "catch code : " << std::hex << reinterpret_cast(mCatchCode) << std::dec; + } + } + private: + const void *mCatchCode = nullptr; // exception handler + const void *mCleanupCode = nullptr; // extended epilogue (including clean up code) + const void *mGeneralHandler = nullptr; // exception handler or extended epilogue (including clean up code). + _Unwind_Exception *mUnwindException = nullptr; // this value is propagated from EHStackInfo. +}; + +class EHStackInfo { + public: + void Build(_Unwind_Exception &unwindException, bool isRet = false); + void ChainAllEHFrames(const _Unwind_Exception &unwindException, bool isRet = false, bool isImplicitNPE = false); + EHStackInfo() : priUnwindException(nullptr) {} + explicit EHStackInfo(_Unwind_Exception &unwindException) { + priUnwindException = &unwindException; + Build(unwindException); + } + ~EHStackInfo() { + priUnwindException = nullptr; + } + private: + std::vector priEhStackInfo; + BasicFrame C2RStub; + _Unwind_Exception *priUnwindException; +}; + +void RaiseException(struct _Unwind_Exception &unwindException, bool isRet = false, bool isImplicitNPE = false); +} // end namespace maplert + +#endif // _MPL_STACK_UNWIND_H_ diff --git a/src/mrt/compiler-rt/include/exception/mpl_exception.h b/src/mrt/compiler-rt/include/exception/mpl_exception.h new file mode 100644 index 0000000000..c30465c047 --- /dev/null +++ b/src/mrt/compiler-rt/include/exception/mpl_exception.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef _MPL_EXCEPTION_H +#define _MPL_EXCEPTION_H + +#include +#include "cinterface.h" +#include "eh_personality.h" +#include "exception_handling.h" +#include "stack_unwinder.h" +#include "base/logging.h" + +namespace maplert { +extern "C" { +#if defined(__arm__) +enum Register { + kR0 = 0, + kR1 = 1, + KR12 = 12, + kFP = 11, + kLR = 14, + kPC = 15, +}; +inline _Unwind_Ptr _Unwind_GetIP(_Unwind_Context*) { + return 0; +} + +inline void _Unwind_SetGR(struct _Unwind_Context*, int, _Unwind_Word) {} +inline void _Unwind_SetIP(struct _Unwind_Context*, _Unwind_Word) {} +#elif defined(__aarch64__) +enum Register { + kX0 = 0, + kX17 = 17, + kFP = 29, + kLR = 30, +}; +#endif + +// exception header for maple +struct MExceptionHeader { + std::type_info *exceptionType; + void (*exceptionDestructor)(void*); + // top java frame landing pad or clean up entry. + const void *topJavaHandler; + int handlerCount; + int handlerSwitchValue; + const unsigned char *actionRecord; + const unsigned char *languageSpecificData; + // exception handler of the Java frame if caught, + // otherwise the continuation in native-to-java frame + void *realHandler; + uintptr_t tTypeIndex; + // caught by Java context + bool caughtByJava; + // instruction pointer of signaling point for signal handlers + // null if not signal handler context + void *sigIP; +#if defined(__aarch64__) + _Unwind_Exception unwindHeader; +#elif defined(__arm__) + _Unwind_Control_Block unwindHeader; +#endif +}; + +// struct are 16-byte algned, so we need a header and let compiler calculate the offset. +struct MplException { + MExceptionHeader exceptionHeader; + // the reference to the thrown object must be placed here. + // adjust GetThrownException and GetThrownExceptionHeader if you modify + // this layout. + jthrowable thrownObject; +}; + +static inline MplException *MplExceptionFromThrownObject(MrtClass *thrownPtr) { + if (thrownPtr != nullptr) { + return reinterpret_cast(reinterpret_cast(thrownPtr) - 1); + } else { + EHLOG(FATAL) << "thrownPtr should not be null!" << maple::endl; + return nullptr; + } +} + +static inline jthrowable GetThrownObject(const _Unwind_Exception &unwindException) { + if (const_cast<_Unwind_Exception*>(&unwindException) != nullptr) { + jthrowable thrown = *reinterpret_cast(const_cast<_Unwind_Exception*>(&unwindException) + 1); + return thrown; + } else { + EHLOG(FATAL) << "unwindException should not be null!" << maple::endl; + return nullptr; + } +} + +// Get the exception object from the unwind pointer. +// Relies on the structure layout, where the unwind pointer is right in +// front of the user's exception object +static inline MExceptionHeader *GetThrownExceptionHeader(const _Unwind_Exception &unwindException) { + if (const_cast<_Unwind_Exception*>(&unwindException) != nullptr) { + return reinterpret_cast(MplExceptionFromThrownObject(reinterpret_cast( + const_cast<_Unwind_Exception*>(&unwindException) + 1))); + } else { + EHLOG(FATAL) << "unwindException should not be null!" 
<< maple::endl; + return nullptr; + } +} + +static inline MplException *GetThrownException(const _Unwind_Exception &unwindException) { + if (const_cast<_Unwind_Exception*>(&unwindException) != nullptr) { + return MplExceptionFromThrownObject(reinterpret_cast( + const_cast<_Unwind_Exception*>(&unwindException) + 1)); + } else { + EHLOG(FATAL) << "unwindException should not be null!" << maple::endl; + return nullptr; + } +} + +void MplThrow(const MrtClass &thrownObject, bool isRet = false, bool isImplicitNPE = false); + +void MplDumpStack(const std::string &msg = ""); + +void MplCheck(bool ok, const std::string &msg = ""); +} // extern "C" +} // namespace maplert + +#endif // _MPL_EXCEPTION_H diff --git a/src/mrt/compiler-rt/include/exception/mrt_exception.h b/src/mrt/compiler-rt/include/exception/mrt_exception.h new file mode 100644 index 0000000000..24b51cfb33 --- /dev/null +++ b/src/mrt/compiler-rt/include/exception/mrt_exception.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_EXCEPTION_H_ +#define MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_EXCEPTION_H_ + +#include "jni.h" +#include "mrt_object.h" +#include "mrt_string.h" +#include "mrt_array.h" +#include "mrt_reflection.h" +#include "mrt_exception_api.h" // exported API declaration + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif +class ScopedException { + public: + ScopedException() = default; + + ~ScopedException() { + MRT_CheckThrowPendingExceptionRet(); + } +}; + +// record an exception if delayed handling is expetced. +void MRT_ThrowExceptionSafe(jobject ex); // check whether ex is throwable +void MRT_ThrowExceptionUnsafe(jobject ex); // do not check, throw anything + +void MRT_ClearPendingException(); +void MRT_ClearThrowingException(); + +// null if no pending exception. if return value is not null, an exception is +// raised earlier, and should be handled or rethrown some time later. +jobject MRT_PendingException(); +bool MRT_HasPendingException(); +void __MRT_SetPendingException(jobject e); +void MRT_InvokeResetHandler(); + +// Note: these functions do not raise exception immediately. +// It saves exception to so-called pending exception which will be raised when +// control flow leaves a JNI stub frame to return to Java frame. +// ThrowNewExceptionInternalTypeUnw() will be called automatically in JNI stub frame +// which is inserted by maple compiler. +// If you want to bypass JNI stub frame, please do call ThrowNewExceptionInternalTypeUnw() +// when you are ready to return to a Java frame from a native frame. +// For other normal situations, only MRT_ThrowNewException() is allowed to be invoked +// in normal native code. +void MRT_ThrowNewExceptionInternalType(MrtClass classType, const char *msg = "unknown reason"); +void MRT_ThrowNewException(const char *className, const char *msg = "unknown reason"); +// Note: these functions raise exception immediately. 
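+// A minimal usage sketch (illustrative only; the surrounding declarations are real, the helper
+// function and its error class below are hypothetical):
+//
+//   void SomeRuntimeHelper(MrtClass errClass) {
+//     // runtime code that is about to return to a compiled Java frame raises right away:
+//     ThrowNewExceptionInternalTypeUnw(errClass, "helper failed");
+//   }
+//
+// whereas ordinary JNI-style native code would instead record a pending exception via
+// MRT_ThrowNewException(className, "helper failed") and let the compiler-inserted JNI stub
+// frame raise it when control returns to Java.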
+void ThrowExceptionUnw(MrtClass e); +void ThrowNewExceptionInternalTypeUnw(MrtClass classType, const char *kMsg = "unknown reason"); + +void MRT_DecRefThrowExceptionUnw(MrtClass obj, const void *sigIP = nullptr); +void MRT_DecRefThrowExceptionRet(MrtClass obj, bool isImplicitNPE = false, const void *sigIP = nullptr); + +// note JNI native code is different from runtime native code for raising exception +// if we raise exceptions in runtime, it is thrown immediately. On the other hand, +// exceptions raised in native code is delayed until it is prepared to return to its caller. +void MRT_ThrowImplicitNullPointerExceptionUnw(const void *sigIP); +void MRT_ThrowArithmeticExceptionUnw(); +void MRT_ThrowClassCastExceptionUnw(const std::string msg = "unknown reason"); +void MRT_ThrowStringIndexOutOfBoundsExceptionUnw(); +void MRT_ThrowUnsatisfiedLinkErrorUnw(); +void MRT_ThrowArrayStoreExceptionUnw(const std::string msg = "unknown reason"); +void MRT_ThrowInterruptedExceptionUnw(); +void MRT_ThrowExceptionInInitializerErrorUnw(MrtClass cause); +void MRT_ThrowNoClassDefFoundErrorUnw(const char *msg); +void MRT_ThrowNoClassDefFoundErrorClassUnw(const void *classInfo); +void MRT_ThrowNoSuchMethodErrorUnw(const std::string &msg); +void MRT_ThrowNoSuchFieldErrorUnw(const std::string &msg); +void MRT_ThrowVerifyErrorUnw(const std::string &msg); + +// Note: these functions raise exception by pending +void MRT_ThrowNullPointerException(); +void MRT_ThrowImplicitNullPointerException(); +void MRT_ThrowArithmeticException(); +void MRT_ThrowClassCastException(const std::string msg = "unknown reason"); +void MRT_ThrowStringIndexOutOfBoundsException(); +void MRT_ThrowUnsatisfiedLinkError(); +void MRT_ThrowArrayStoreException(const std::string msg = "unknown reason"); +void MRT_ThrowInterruptedException(); +void MRT_ThrowExceptionInInitializerError(MrtClass cause); +void MRT_ThrowNoClassDefFoundError(const std::string &msg); +void MRT_ThrowNoSuchMethodError(const std::string &msg); +void MRT_ThrowNoSuchFieldError(const std::string &msg); +void MRT_ThrowVerifyError(const std::string &msg); + +extern list ehObjectList; +extern std::map ehObjectStackMap; +extern std::mutex ehObjectListLock; +extern std::mutex ehObjectStackMapLock; + +#ifdef __cplusplus +} // namespace maplert +} // extern "C" +#endif + +#endif // MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_EXCEPTION_H_ diff --git a/src/mrt/compiler-rt/include/exception/stack_unwinder.h b/src/mrt/compiler-rt/include/exception/stack_unwinder.h new file mode 100644 index 0000000000..d511650c62 --- /dev/null +++ b/src/mrt/compiler-rt/include/exception/stack_unwinder.h @@ -0,0 +1,627 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +// Stack unwinder +// +// The stack unwinder iterates through the frames of a stack from the top to +// the bottom. The main purpose is: +// +// 1. 
During garbage collection, it reveals the register and stack states at the +// call site of each frame (without modifying the frames) in order to helps the +// stack scanner identify object references held on the stack. +// +// It may also server the following purposes: +// +// 2. During exception handling, it removes stack frames from the top of the +// stack. Alternatively, exception handling can be done using the system ABI. +// +// The design is mainly inspired by: +// +// a) libunwind +// Original implementation: http://www.nongnu.org/libunwind/ +// LLVM implementation: http://releases.llvm.org/download.html +// b) The stack introspection API for the Mu micro virtual machine +// https://gitlab.anu.edu.au/mu/mu-spec/blob/master/api.rst#stack-introspection +// +#ifndef __MAPLERT_STACKUNWINDER_H__ +#define __MAPLERT_STACKUNWINDER_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include "cinterface.h" +#include "linker_api.h" +#include "eh_personality.h" +#include "metadata_layout.h" + +#define MAPLE_STACK_UNWIND_STEP_MAX 8000UL + +namespace maplert { +enum UnwindState { + kUnwindFail = 0, + kUnwindSucc = 1, + kUnwindFinish = 2, +}; + +// this data struct matches the machine frame layout for both aarch64 and arm. +// | ... | +// | ... | +// | lr to caller | return address for this frame +// fp --> | caller fp | +// | ... | +// sp --> | ... | +// pointer to CallChain is actually frame address (fp) +struct CallChain { + CallChain *callerFrameAddress; + const uint32_t *returnAddress; +}; + +// do not ever define any virtual method for BasicFrame +// we should rename BasicFrame to MachineFrame +class BasicFrame { + public: + // fa: frame address of this frame, for now this is frame pointer + CallChain *fa; + + // ip: instruction pointer, the value in PC register when unwinding this frame, + // which points to the instruction when control flow returns from callee to this frame. + const uint32_t *ip; + + // ra: return address of this frame. For a normal frame, this can + // be retrieved from frame address, + const uint32_t *ra; + + // lr: Specific to frames which do not have a normal frame structure. + // This field is 0 for a normal frame, because we can always retrieve return address from CallChain. + // If not null, this field indicates the return address. + // This is necessary for segv occured in a frame without data on stack . 
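+  // If set, the unwinder is assumed to take the caller address from this saved link register
+  // instead of dereferencing fa->returnAddress.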
+ const uint32_t *lr; + + void Reset() { + ip = nullptr; + fa = nullptr; + lr = nullptr; + ra = nullptr; + } + + BasicFrame() { + Reset(); + } + + BasicFrame(const uint32_t *ip, CallChain *fa, const uint32_t *ra) : fa(fa), ip(ip), ra(ra), lr(nullptr) {} + + explicit BasicFrame(const BasicFrame &frame) { + CopyFrom(frame); + } + + BasicFrame& operator=(const BasicFrame &frame) { + CopyFrom(frame); + return *this; + } + + virtual ~BasicFrame() = default; + + void Dump(const std::string msg, std::stringstream &ss) const { + static constexpr size_t defaultBufSize = 128; + char buf[defaultBufSize]; + if (snprintf_s(buf, sizeof buf, sizeof buf, + ": frame pc %p, frame address %p, retrun to %p, link register %p", ip, fa, ra, lr) < 0) { + LOG(ERROR) << "Dump snprintf_s fail" << maple::endl; + } + ss << msg << buf; + } + + void Dump(const std::string msg) const { + std::stringstream ss; + Dump(msg, ss); + EHLOG(INFO) << ss.str() << maple::endl; + } + + void Check(bool val, const char *msg) const; + + const uint32_t *GetFramePC() const { + return ip; + } + + void SetFramePC(const void *pc) { + ip = static_cast(pc); + } + + CallChain *GetFrameAddress() const { + return fa; + } + + const uint32_t *GetReturnAddress() const { + return ra; + } + + const uint32_t *GetLR() const { + return lr; + } + + inline bool IsCompiledFrame() const { + return LinkerAPI::Instance().IsJavaText(ip); + } + + inline bool IsR2CFrame() const { + return (reinterpret_cast(ra) & 1) == 1; + } + + // note: this method takes advantage of JavaMethodHasNoFrame + inline bool HasFrameStructure() const { + return lr == nullptr; + } + + // The anchor frame is conceptually a sentinel frame, whose + // fp/ip is all zeros. This frame does not exists but is natually a good + // choice to detect the completion of unwinding if we initialize fp/ip to null. + inline bool IsAnchorFrame() const { + return (fa == nullptr); + } + + inline void CopyFrom(const BasicFrame &frame) { + ip = frame.ip; + fa = frame.fa; + ra = frame.ra; + lr = frame.lr; + } + + // return kUnwindSucc or kUnwindFinish only, do not return kUnwindFail. + // caller assures this frame is a normal frame. + // we name the direct caller frame in machine stack with "machine caller". + UnwindState UnwindToMachineCaller(BasicFrame &caller) const; +}; + +// JavaFrame should be renamed to CommonFrame or just Frame +class JavaFrame : public BasicFrame { + public: + JavaFrame() : BasicFrame() { + md = nullptr; + } + + // caller assures this frame is a normal frame. 
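+  // A "normal" frame here is taken to mean HasFrameStructure() holds, so the caller can be
+  // recovered from the CallChain layout above, roughly (sketch, not the actual implementation):
+  //   caller.fa = fa->callerFrameAddress;
+  //   caller.ip = fa->returnAddress;            // where this frame returns into the caller
+  //   caller.ra = caller.fa ? caller.fa->returnAddress : nullptr;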
+ UnwindState UnwindToNominalCaller(JavaFrame &caller) const; + UnwindState UnwindToJavaCallerFromRuntime(JavaFrame &frame, bool isEH = false); + + // format: class|method|signature + MRT_EXPORT static bool GetJavaMethodFullName(std::string &name, const uint64_t *md); + + // method|signature + MRT_EXPORT static void GetJavaMethodSignatureName(std::string &name, const void *md); + // method + MRT_EXPORT static void GetJavaMethodName(std::string &name, const void *md); + // class: A.B.C + MRT_EXPORT static void GetJavaClassName(std::string &name, const uint64_t *md); + + MRT_EXPORT static jclass GetDeclaringClass(const uint64_t *md); + + MRT_EXPORT static uint64_t *GetMethodMetadata(const uint32_t *startPC); + MRT_EXPORT static const MethodDesc *GetMethodDesc(const uint32_t *startPC); + + MRT_EXPORT static uint64_t GetRelativePc( + const std::string &soName, uint64_t pc, LinkerMFileInfo *mplInfo = nullptr); + + static bool GetMapleMethodFullName(std::string &name, const uint64_t *md, const void *ip); + // format: || + MRT_EXPORT static bool GetMapleMethodFullName(std::string &name, const uint64_t *md); + + // Maple-mangled format + static void GetMapleClassName(std::string &name, const uint64_t *md); + + // for resolving symbols of call stack + static bool DumpProcInfo(uintptr_t pc, std::string *name = nullptr); + static void DumpProcInfoLog(jlong elem); + + static bool JavaMethodHasNoFrame(const uint32_t *ip); + + inline void ResolveMethodMetadata() { + if (md == nullptr) { + LinkerLocInfo info; + if (LinkerAPI::Instance().LocateAddress(const_cast(ip), info, false)) { + md = static_cast( + GetMethodMetadata(reinterpret_cast(reinterpret_cast(info.addr)))); + } + } + } + + inline jclass GetDeclaringClass() { + ResolveMethodMetadata(); + return GetDeclaringClass(md); + } + + // format: class|method|signature + inline void GetJavaMethodFullName(std::string &name) { + ResolveMethodMetadata(); + (void)GetJavaMethodFullName(name, md); + } + // method|signature + inline void GetJavaMethodSignatureName(std::string &name) { + ResolveMethodMetadata(); + GetJavaMethodSignatureName(name, md); + } + + // method + inline void GetJavaMethodName(std::string &name) { + ResolveMethodMetadata(); + GetJavaMethodName(name, md); + } + + // class: A.B.C + inline void GetJavaClassName(std::string &name) { + ResolveMethodMetadata(); + GetJavaClassName(name, md); + } + + // Maple-mangled format + inline void GetMapleClassName(std::string &name) { + ResolveMethodMetadata(); + GetMapleClassName(name, md); + } + + inline const uint64_t *GetMetadata() const { + return md; + } + + inline void SetMetadata(const uint64_t *metadata) { + md = metadata; + } + private: + const uint64_t *md; // mMD: Metadata for this Java method, nullptr indicates unresolved +}; + +// UnwindData is a helper for retrieving previous java context. +// this data struct should match the layout defined in r2c_stub +struct UnwindData { + const uint32_t *pc; + CallChain *fa; + const uint32_t *lr; + uintptr_t ucstatus; + uintptr_t interpFrame; + uintptr_t directCallJava; +}; + +// R2CFrame is a stub frame constructed by runtime code to invoke compiled java methods. R2CFrame saves +// its java caller context as it is for unwinding. R2CStub is implemented in r2c_stub_xx.S. +// Runtime code must be always compiled with -fno-omit-frame-pointer to maintain standard frame layout according to ABI. 
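+// Orientation sketch (inferred from the UnwindData layout above, not a specification): the saved
+// Java context is assumed to sit one CallChain past the stub's frame pointer, so recovering the
+// Java caller looks roughly like
+//   UnwindData *ud = R2CFrame::GetUnwindData(fa);   // == reinterpret_cast<UnwindData*>(fa + 1)
+//   caller.SetFramePC(ud->pc);   // plus ud->fa / ud->lr for the frame address and link register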
+class R2CFrame : public BasicFrame { + public: + R2CFrame() : BasicFrame() {} + R2CFrame(const uint32_t *ip, CallChain *fa, const uint32_t *ra) : BasicFrame(ip, fa, ra) {} + + // return kUnwindSucc or kUnwindFinish only, do not return kUnwindFail + UnwindState UnwindToJavaCaller(JavaFrame &caller) const; + + static UnwindData *GetUnwindData(CallChain *fa) { + return reinterpret_cast(fa + kOffsetForUnwindData); + } + + UnwindData *GetUnwindData() { + return GetUnwindData(this->fa); + } + private: + static constexpr size_t kOffsetForUnwindData = 1; +}; + +enum UnwindContextStatus { + UnwindContextStatusIsIgnored = 0x12345678, // ignore this status when unwinding to caller context + UnwindContextIsReliable = 1, // we should unwind these frames one by one + UnwindContextIsRisky = 2, // we should cross these frames to get to previous java frame +}; + +extern "C" void MRT_UpdateLastUnwindFrameLR(const uint32_t *lr); +extern "C" void MRT_UpdateLastUnwindContext(const uint32_t *pc, CallChain *fp, UnwindContextStatus status); + +// UnwindContext contains all information needed for stack unwinding. +// *frame* is current frame which is start point for stack unwinding. +// If *status* is UnwindContextIsReliable, caller frame is constructed by inspecting frame structure according to ABI. +// Otherwise, if *status* is UnwindContextIsRisky, caller frame is constructed either from TLS when we get +// last-java-frame, or by inspecting R2J frame structure if *frame* is a R2J frame. +struct UnwindContext { + UnwindContextStatus status = UnwindContextIsRisky; + JavaFrame frame; // BasicFrame + void *interpFrame = nullptr; // if not null, some interpreted frame is the last java frame + + bool hasDirectJavaCallee = false; // tag when interpreted method calls a compiled method, used only by R2C stub + public: + inline void *GetInterpFrame() { + return interpFrame; + } + inline void SetInterpFrame(void *newInterpFrame) { + interpFrame = newInterpFrame; + } + inline bool HasDirectJavaCallee() const { + return hasDirectJavaCallee; + } + inline void TagDirectJavaCallee(bool val) { + hasDirectJavaCallee = val; + } + inline void Reset() { + status = UnwindContextIsRisky; + frame.Reset(); + interpFrame = nullptr; + hasDirectJavaCallee = false; + } + inline void CopyFrom(const UnwindContext &context) { + this->status = context.status; + this->interpFrame = context.interpFrame; + this->hasDirectJavaCallee = context.hasDirectJavaCallee; + this->frame.CopyFrom(context.frame); + } + + bool IsCompiledContext() const { + return frame.IsCompiledFrame(); + } + bool IsInterpretedContext() const { + return (interpFrame != nullptr); + } + bool IsJavaContext() const { + return IsCompiledContext() || IsInterpretedContext(); + } + bool IsStackBottomContext() const; + + UnwindState UnwindToCallerContext(UnwindContext &caller, bool isEH = false) const; + UnwindState UnwindToCallerContextFromR2CStub(UnwindContext &caller) const; + void UpdateFrame(const uint32_t *pc, CallChain *fp); + + void RestoreUnwindContextFromR2CStub(CallChain *fa); // fa is the address of R2CStub frame + static UnwindState GetStackTopUnwindContext(UnwindContext &context); +}; + +struct InitialUnwindContext { + public: + UnwindContext &GetContext() { + return uwContext; + } + + bool IsEffective() const { + return effective.load(std::memory_order_acquire); + } + + void SetEffective(bool value) { + effective.store(value, std::memory_order_release); + } + + void CopyFrom(InitialUnwindContext &context) { + uwContext.CopyFrom(context.GetContext()); + 
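+    // copy the effective flag with explicit acquire/release ordering so a concurrent reader of
+    // this context never observes the freshly copied frame together with a stale flag
+    // (assumed intent of the orderings below).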
effective.store(context.effective.load(std::memory_order_acquire), std::memory_order_release); + } + + private: + UnwindContext uwContext; + std::atomic effective = { true }; +}; + +class JavaFrameInfo { + public: + void VisitGCRoots(const AddressVisitor &func) { + const JavaFrame &thisFrame = GetJavaFrame(); + const MethodDesc *desc = JavaFrame::GetMethodDesc(GetStartPC()); + uintptr_t localRefStart = reinterpret_cast(thisFrame.GetFrameAddress()) + desc->localRefOffset; + for (size_t i = 0; i < desc->localRefNumber; ++i) { + uintptr_t localRefSlot = localRefStart + i * sizeof(address_t); + address_t &localRef = *reinterpret_cast(localRefSlot); + func(localRef); + } + } + + const uint32_t *GetEndPC() const { + return endIp; + } + + void SetEndPC(uint32_t *pc) { + endIp = pc; + } + + const uint32_t *GetStartPC() const { + return startIp; + } + + void SetStartPC(uint32_t *pc) { + startIp = pc; + } + + // These asm frame are in the jave section, but not in any java type. + bool IsAsmFrame() const { + return (startIp == nullptr); + } + + const JavaFrame &GetJavaFrame() const { + return javaFrame; + } + + inline void Build(const JavaFrame &frame) { + this->javaFrame = frame; + ResolveFrameInfo(); + } + + inline void CopyFrom(const JavaFrameInfo &frameInfo) { + this->javaFrame = frameInfo.javaFrame; + this->startIp = frameInfo.startIp; + this->endIp = frameInfo.endIp; + this->resolved = frameInfo.resolved; + } + + JavaFrameInfo(uint32_t *startIP, uint32_t *endIP, const void *pc) { + startIp = startIP; + endIp = endIP; + javaFrame.SetFramePC(pc); + } + + explicit JavaFrameInfo(const JavaFrame &frame) { + Build(frame); + } + + explicit JavaFrameInfo(const JavaFrameInfo &frameInfo) { + CopyFrom(frameInfo); + }; + + virtual ~JavaFrameInfo() = default; + protected: + JavaFrame javaFrame; + uint32_t *startIp; + uint32_t *endIp; // endIp == lsda, when the lsda is next to the .cfi_endproc + bool resolved = false; // true if proc_info_ is valid + + inline void ResolveMethodMetadata() { + if (javaFrame.GetMetadata() == nullptr && startIp != nullptr) { + uint64_t *md = JavaFrame::GetMethodMetadata(reinterpret_cast(reinterpret_cast(startIp))); + javaFrame.SetMetadata(md); + } + } + + void ResolveFrameInfo(); + + virtual void Dump(const std::string msg, std::stringstream &ss) { + std::string name; + javaFrame.GetJavaMethodFullName(name); + ss << msg << " --- java frame info " << "\n" << + "\t" << "name : " << name << "\n" << std::hex << + "\t" << "current pc : " << javaFrame.ip << "\n" << + "\t" << "start pc : " << startIp << "\n" << + "\t" << "end pc : " << endIp << "\n" << + std::dec; + } +}; + +using RemoteUnwindStatus = enum { + kRemoteUnwindIdle, + kRemoteLastJavaFrameUpdataFinish, + kRemoteUnwindFinish, + kRemoteTargetThreadSignalHandlingExited, +}; + +class RemoteUnwinder { + public: + RemoteUnwinder() { + remoteUnwindStatus = kRemoteUnwindIdle; + remoteIdle = true; + unwinderIdle = true; + } + + ~RemoteUnwinder() = default; + + std::mutex &GetRemoteUnwindLock() { + return remoteUnwindLock; + } + + void SetRemoteUnwindStatus(RemoteUnwindStatus status) { + remoteUnwindStatus = status; + } + + void SetRemoteIdle(bool status) { + remoteIdle = status; + } + + void SetUnwinderIdle(bool status) { + unwinderIdle = status; + } + + bool IsUnwinderIdle() const { + return unwinderIdle; + } + + bool WaitForRemoteIdle() const { + uint64_t times = 0; + constexpr uint64_t maxTimes = 500000; + while (remoteIdle != true) { + ++times; + if (times > maxTimes) { + return false; + } + } + return true; + } + + bool 
WaitRemoteUnwindStatus(RemoteUnwindStatus status) const { + uint64_t times = 0; + constexpr uint64_t maxTimes = 500000; + if (remoteIdle == true) { + return false; + } + while (remoteUnwindStatus != status) { + ++times; + if (times > maxTimes) { + return false; + } + } + return true; + } + + private: + std::atomic remoteIdle; + std::atomic unwinderIdle; + std::mutex remoteUnwindLock; + std::atomic remoteUnwindStatus; +}; + +class MapleStack { + public: + MapleStack() = default; + ~MapleStack() = default; + + // the last java frame is on the top of java call stack. + static UnwindState GetLastJavaContext(UnwindContext &context, const UnwindContext &initialContext, uint32_t tid); + static UnwindState GetLastJavaFrame(JavaFrame &frame, const UnwindContext *initialContext = nullptr, + bool isEH = false); + + + const inline std::vector &GetStackFrames() { + return stackFrames; + } + inline std::vector &GetContextStack() { + return uwContextStack; + } + + inline void AddFrame(const JavaFrame &frame) { + stackFrames.push_back(frame); + } + + // visit GC roots of current java thread for tracing GC + static void VisitJavaStackRoots(const UnwindContext &initialContext, const AddressVisitor &func, uint32_t tid); + + MRT_EXPORT static void FastRecordCurrentJavaStack(std::vector &uwContextStack, + size_t steps = MAPLE_STACK_UNWIND_STEP_MAX, + bool resolveMD = false); + + MRT_EXPORT static void FastRecordCurrentJavaStack(MapleStack &callStack, + size_t steps = MAPLE_STACK_UNWIND_STEP_MAX, + bool resolveMD = false) { + FastRecordCurrentJavaStack(callStack.GetContextStack(), steps, resolveMD); + } + + MRT_EXPORT static void *VisitCurrentJavaStack(UnwindContext &uwContext, + std::function filter); + + MRT_EXPORT static void FastRecordCurrentStackPCsByUnwind(std::vector &framePCs, + size_t steps = MAPLE_STACK_UNWIND_STEP_MAX); + + MRT_EXPORT static void FastRecordCurrentStackPCs(std::vector &framePCs, + size_t steps = MAPLE_STACK_UNWIND_STEP_MAX); + + MRT_EXPORT static void RecordStackPCs(std::vector &framePCs, uint32_t tid, + size_t steps = MAPLE_STACK_UNWIND_STEP_MAX); + + private: + std::vector stackFrames; + std::vector uwContextStack; +}; +} // namespace maplert + +#endif // __MAPLERT_STACKUNWINDER_H__ diff --git a/src/mrt/compiler-rt/include/fast_alloc_inline.h b/src/mrt/compiler-rt/include/fast_alloc_inline.h new file mode 100644 index 0000000000..efe732e100 --- /dev/null +++ b/src/mrt/compiler-rt/include/fast_alloc_inline.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_FAST_ALLOC_INLINE_H +#define MAPLE_RUNTIME_FAST_ALLOC_INLINE_H + +#include "chosen.h" +#include "sizes.h" + +namespace maplert { +#if ALLOC_USE_FAST_PATH +// common fast allocation routine, it assumes the size is aligned, and +// it shouldn't be too big (bigger than large obj size) +__attribute__ ((always_inline)) +static address_t MRT_TryNew(size_t alignedObjSize, address_t klass) { + address_t objAddr = (*theAllocator).FastNewObj(alignedObjSize); + if (LIKELY(objAddr != 0)) { + StoreRefField(objAddr, kMrtKlassOffset, klass); + // in rc mode, we need to set cycle bits and rc bits + // this produces very long code, need to optimise + GCTibGCInfo *gcInfo = reinterpret_cast(reinterpret_cast(klass)->GetGctib()); + CHECK(gcInfo != nullptr) << "MRT_TryNew : gcInfo is nullptr"; + GCHeaderLVal(objAddr) = (kAllocatedBit | gcInfo->headerProto); + RefCountLVal(objAddr) = 1 + kWeakRCOneBit; + if (UNLIKELY(FastAllocData::data.isConcurrentMarking)) { + FastAllocData::data.bm->MarkObject(objAddr); + } + } + return objAddr; +} + +// try new object: it takes fast path if possible, otherwise returns 0 +// see slow path code for more info +__attribute__ ((always_inline, used)) +static address_t MRT_TryNewObject(address_t klass) { + if (UNLIKELY(!modifier::IsFastAllocClass(reinterpret_cast(klass)->flag))) { + return 0; + } + MClass *cls = MObject::Cast(klass); + size_t objSize = cls->GetObjectSize(); // must have been aligned + address_t objAddr = MRT_TryNew(objSize, klass); + if (UNLIKELY(objAddr == 0)) { + return 0; + } + + return objAddr; +} + +// try new object: it takes fast path if possible, otherwise returns 0 +// it also accepts an explicit size instead of taking the size from the klass +// useful for allocation of strings (where the klass's size is not accurate) +// see slow path code for more info +__attribute__ ((always_inline, used)) +static address_t MRT_TryNewObject(address_t klass, size_t explicitSize) { + if (UNLIKELY(explicitSize > kFastAllocMaxSize)) { + return 0; + } + // we need to make sure the string class passes the following condition + // somehow it does, coincidently + if (UNLIKELY(!modifier::IsFastAllocClass(reinterpret_cast(klass)->flag))) { + return 0; + } + // we additionally need to align the size, because the explicit size can be any number + // this costs additional time + address_t alignedSize = AllocUtilRndUp(explicitSize, kAllocAlign); + address_t objAddr = MRT_TryNew(alignedSize, klass); + if (UNLIKELY(objAddr == 0)) { + return 0; + } + + return objAddr; +} + +// try new array: it takes fast path if possible, otherwise returns 0 +// see slow path code for more info +// assumption: class initialised +template +__attribute__ ((always_inline, used)) +static address_t MRT_TryNewArray(size_t nElems, address_t klass) { + if (UNLIKELY(nElems > (kFastAllocArrayMaxSize >> static_cast(elemSizeExp)))) { + return 0; + } + size_t contentSize = nElems << static_cast(elemSizeExp); + // this size must be aligned because the allocator fast path takes it as an assumption + size_t objSize = kJavaArrayContentOffset + AllocUtilRndUp(contentSize, kAllocAlign); + // potentially optimise this given the constraint that we are an array class? 
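+  // e.g. (illustrative numbers only) an int[] of 10 elements with elemSizeExp == 2:
+  //   contentSize = 10 << 2 = 40, objSize = kJavaArrayContentOffset + AllocUtilRndUp(40, kAllocAlign)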
+ address_t objAddr = MRT_TryNew(objSize, klass); + if (LIKELY(objAddr != 0)) { + ArrayLength(objAddr) = static_cast(nElems); + // not sure if we want this fence + std::atomic_thread_fence(std::memory_order_release); + } + return objAddr; +} + +// try new array: it takes fast path if possible, otherwise returns 0 +// this function also accepts an elem size variable, in case the elem size is not +// immediately available +// the previous function is preferred over this one +// see slow path code for more info +// assumption: class initialised +__attribute__ ((always_inline, used)) +static address_t MRT_TryNewArray(size_t elemSize, size_t nElems, address_t klass) { + // multiplication cost more cpu clocks, we don't like it, prefer shifting + if (UNLIKELY(nElems > (kFastAllocArrayMaxSize / elemSize))) { + return 0; + } + size_t contentSize = elemSize * nElems; + // this size must be aligned because the allocator fast path takes it as an assumption + size_t objSize = kJavaArrayContentOffset + AllocUtilRndUp(contentSize, kAllocAlign); + // potentially optimise this given the constraint that we are an array class? + address_t objAddr = MRT_TryNew(objSize, klass); + if (LIKELY(objAddr != 0)) { + ArrayLength(objAddr) = static_cast(nElems); + // not sure if we want this fence + std::atomic_thread_fence(std::memory_order_release); + } + return objAddr; +} +#else +inline address_t MRT_TryNewObject(address_t) { + return 0; +} +inline address_t MRT_TryNewObject(address_t, size_t) { + return 0; +} +template +__attribute__ ((always_inline, used)) +static address_t MRT_TryNewArray(size_t, address_t) { + return 0; +} +__attribute__ ((always_inline, used)) +static address_t MRT_TryNewArray(size_t, size_t, address_t) { + return 0; +} +#endif +} + +#endif diff --git a/src/mrt/compiler-rt/include/file_layout.h b/src/mrt/compiler-rt/include/file_layout.h new file mode 100644 index 0000000000..ffc42b797f --- /dev/null +++ b/src/mrt/compiler-rt/include/file_layout.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MIRLAYOUT_H +#define MIRLAYOUT_H +#include + +namespace maple { +// file-layout is shared between maple compiler and runtime, thus not in namespace maplert +enum LayoutType : uint8_t { + kLayoutBootHot, + kLayoutBothHot, + kLayoutRunHot, + kLayoutStartupOnly, + kLayoutUsedOnce, + kLayoutExecuted, // method excuted in some condition + kLayoutUnused, + kLayoutTypeCount +}; + +// this used for c string layout +static uint8_t kCStringShift = 1; +std::string GetLayoutTypeString(uint32_t type); +} // namespace maple +#endif diff --git a/src/mrt/compiler-rt/include/gc_log.h b/src/mrt/compiler-rt/include/gc_log.h new file mode 100644 index 0000000000..22af7911ea --- /dev/null +++ b/src/mrt/compiler-rt/include/gc_log.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_GC_LOG_H +#define MAPLE_RUNTIME_GC_LOG_H + +#include +#include +#include +#include +#include "utils/time_utils.h" +#include "mm_config.h" + +namespace maplert { +enum Logtype : int { + kLogtypeGc = 0, + kLogtypeRcTrace, + kLogtypeRp, + kLogtypeCycle, + kLogTypeAllocFrag, + kLogTypeAllocator, + kLogTypeMix, + kLogtypeGcOrStderr, + kLogtypeNum +}; + +// gclog file stream buffer size. +const int kGcLogBufSize = 4096; + +class GCLogImpl { + public: + GCLogImpl() = default; + ~GCLogImpl() = default; + std::ostream &Stream(uint32_t logType = kLogtypeGc) noexcept { + return *os[logType]; + } + + void Init(bool openGClog); + void OnPreFork(); + void OnPostFork(); + + void OnGCStart(); + void OnGCEnd(); + + bool IsWriteToFile(uint32_t logType = kLogtypeGc) const noexcept { + return writeToFile[logType]; + } + + private: + void OpenFile(); + void CloseFile(); + + void SetFlags(); + + std::ostream *os[kLogtypeNum]; // Current stream + bool doWriteToFile; + bool doOpenFileOnStartup; + + bool writeToFile[kLogtypeNum] = { false }; + bool openFileOnStartup[kLogtypeNum] = { false }; + + std::ofstream file[kLogtypeNum]; // Open file + + char *buffer[kLogtypeNum] = { nullptr }; // buffer for file stream + bool openGCLog = false; +}; + +GCLogImpl &GCLog(); +std::string Pretty(uint64_t number); +std::string Pretty(uint32_t number); +std::string Pretty(int64_t number); +std::string PrettyOrderInfo(uint64_t number, std::string unit); +std::string PrettyOrderMathNano(uint64_t number, std::string unit); + +#if __MRT_DEBUG +#define MRT_PASTE_ARGS_EXPANDED(x, y) x ## y +#define MRT_PASTE(x, y) MRT_PASTE_ARGS_EXPANDED(x, y) +#define MRT_PHASE_TIMER(...) PhaseTimerImpl MRT_PASTE(MRT_pt_, __LINE__)(__VA_ARGS__) +#else +#define MRT_PHASE_TIMER(...) +#endif +class PhaseTimerImpl { + public: + explicit PhaseTimerImpl(std::string name, uint32_t type = kLogtypeGc) : name(name), logType(type) { + if (GCLog().IsWriteToFile(type)) { + startTime = timeutils::MicroSeconds(); + } + } + + ~PhaseTimerImpl() { + if (GCLog().IsWriteToFile(logType)) { + uint64_t stopTime = timeutils::MicroSeconds(); + uint64_t diffTime = stopTime - startTime; + GCLog().Stream(logType) << name << " time: " << Pretty(diffTime) << "us\n"; + } + } + + private: + std::string name; + uint64_t startTime = 0; + uint32_t logType; +}; + +// The LOG2FILE(type) macro can only be used in this fashion: +// +// LOG2FILE(xxxx) << blah << blah << blah; +// +// The if statement in the macro will nullify the effect of the << expression +// following the LOG2FILE macro. 
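+//
+// A practical corollary (illustrative; `verbose` and `n` are hypothetical locals): because the
+// macro expands to a bare `if`, brace it when it appears inside an outer if/else, e.g.
+//   if (verbose) { LOG2FILE(kLogtypeGc) << "collected " << n << " objects\n"; } else { ... }
+// otherwise the `else` would bind to the macro's hidden `if`.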
+#if __MRT_DEBUG +#define LOG2FILE(type) if (GCLog().IsWriteToFile(type)) GCLog().Stream(type) +#define FLUSH2FILE(type) if (GCLog().IsWriteToFile(type)) GCLog().Stream(type).flush() +#else +#define LOG2FILE(type) if (false) GCLog().Stream(type) +#define FLUSH2FILE(type) if (false) GCLog().Stream(type).flush() +#endif +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/heap_stats.h b/src/mrt/compiler-rt/include/heap_stats.h new file mode 100644 index 0000000000..04187ec7be --- /dev/null +++ b/src/mrt/compiler-rt/include/heap_stats.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_HEAP_STATS_H +#define MAPLE_RUNTIME_HEAP_STATS_H + +#include "address.h" + +namespace maplert { +#if PERMISSIVE_HEAP_COND +#define IS_HEAP_ADDR(addr) ((static_cast(addr) - HEAP_START) < (HEAP_END - HEAP_START)) +#define IS_HEAP_OBJ(addr) ((static_cast(addr) % maplert::kMrtHeapAlign == 0) && IS_HEAP_ADDR(addr)) +#else +#define IS_HEAP_ADDR(addr) maplert::HeapStats::IsHeapAddr(addr) +#define IS_HEAP_OBJ(addr) maplert::MRT_FastIsValidObjAddr(addr) +#endif + +// Used by runtime interfaces SetStatsEnabled/ResetStats/GetStats +// for collecting heap allocation stats between StatsEnable/Disable windows. +// On StatsEnable, start heapstats allocation counting. +// On StatsDisable, stop heapstats allocation counting and sum results. 
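+// Sketch of the expected calling sequence (the members named below are real; the workload call
+// is hypothetical):
+//   heapStats.EnableHeapStats();
+//   RunWorkload();                       // allocations in this window are counted
+//   heapStats.DisableHeapStats();
+//   size_t bytes = heapStats.GetAllocSize(), objs = heapStats.GetAllocCount();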
+class HeapStats { + public: + static void OnHeapCreated(address_t startAddr) { + heapStartAddr = startAddr; + heapCurrentSize = 0; + } + static void OnHeapExtended(size_t newSize) { + heapCurrentSize = newSize; + } + static bool IsHeapAddr(address_t addr) { + return (addr - heapStartAddr) < heapCurrentSize; + } + static address_t StartAddr() { + return heapStartAddr; + } + static size_t CurrentSize() { + return heapCurrentSize; + } + + size_t GetAllocSize() const { + return bytesAlloc; + } + + size_t GetAllocCount() const { + return numAlloc; + } + + size_t GetFreeSize() const { + return bytesFreed; + } + + size_t GetFreeCount() const { + return numFreed; + } + + size_t GetNativeAllocBytes() const { + return nativeAllocatedBytes.load(std::memory_order_relaxed); + } + void SetNativeAllocBytes(size_t size) { + nativeAllocatedBytes.store(size, std::memory_order_relaxed); + } + + HeapStats(); + ~HeapStats() = default; + + void PrintHeapStats(); + void ResetHeapStats(); + void EnableHeapStats(); + void DisableHeapStats(); + void SumHeapStats(); + + private: + static address_t heapStartAddr; + static size_t heapCurrentSize; + // allocation stats collected in a time window + size_t bytesAlloc; // total internal bytes allocated + size_t bytesFreed; // total internal bytes freed + size_t numAlloc; // total allocated objects + size_t numFreed; // total freed objects + std::atomic nativeAllocatedBytes; +}; + +extern HeapStats heapStats; +extern std::atomic heapProfile; + +inline int IsHeapStatsEnabled() { + return heapProfile.load(std::memory_order_relaxed); +} +} + +#endif diff --git a/src/mrt/compiler-rt/include/imported.h b/src/mrt/compiler-rt/include/imported.h new file mode 100644 index 0000000000..a0bd1cb24c --- /dev/null +++ b/src/mrt/compiler-rt/include/imported.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_IMPORTED_H +#define MAPLE_RUNTIME_IMPORTED_H + +#include "address.h" + +namespace maplert { +// This file include imported symbols from a specific build of the application. +// +// Currently, the imported symbols include: +// +// - __MRT_GCRootList: List of GC roots. +// - __MRT_PrimordialObjectList: List of primordial objects +// +// DO NOT MAKE THE SYMBOLS BELOW WEAK! They are necessary for the proper +// execution of the program. If there is a linking error, it means the +// application buildilng script does not properly provide the GC root list. The +// building script needs to be fixed. If we make the list empty "by default", +// we may silently ignore a class of building mistakes. +// +// The two global variables are global roots provided by the application. +// Tracing collectors, including the back-up tracer which is currently used in +// the RC collector for cleaning up, work by starting tracing the heap from +// global GC roots and computing its transitive closure. +// +// They are defined in unified.groots.s, which is generated by the script +// mapleall/maplebe/collect/mpl-collect-groot-lists. 
That script contains +// detailed instruction about how to generate the unified.groots.s file. +// +// After generating, assemble and link the unified.groots.s into the executable +// or the shared object. +// +// If you are writing test cases, you can manually define the two lists to +// manually specify GC roots. If you don't have GC roots in your test case, +// make an empty list. +// +// A list of global GC roots. For Java, it is the complete list of static +// variables of reference types. +extern "C" address_t *gcRootNewList[]; + +// The length of the __MRT_GCRootList array. +extern "C" size_t gcRootNewListSize; +} + +#endif diff --git a/src/mrt/compiler-rt/include/java_primitive_ops.h b/src/mrt/compiler-rt/include/java_primitive_ops.h new file mode 100644 index 0000000000..a94b2a6dac --- /dev/null +++ b/src/mrt/compiler-rt/include/java_primitive_ops.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_JAVA_PRIMITIVE_OPS_H +#define MAPLE_RUNTIME_JAVA_PRIMITIVE_OPS_H + +#include +#include +#include + +namespace maplert { +/** + * Convert a floating point value to a signed integral type, using the Java semantics. This function exists mainly + * because on ARMv7, such convertion must be done by software, and there is no equivalent in the ARMv7 instruction set. + * This is a template function that serves all types. If it is desired to call this from assembly/machine code, please + * wrap this function in non-inline functions, such as MCC_JDouble2JLong for AoT-compiled code and DoubleToLong for + * fterp. + * + * @tparam FPType The floating point type, can be float or double. + * @tparam IntType The integral type, can be int32_t or int64_t. + * @param fpValue The input floating-point value. + * @return The value converted to IntType according to the Java semantics. + */ +template +static inline IntType JavaFPToSInt(FPType fpValue) { + static_assert(std::numeric_limits::is_integer, "IntType must be an integral type."); + static_assert(!std::numeric_limits::is_integer, "FPType must be a floating point type."); + static_assert(std::numeric_limits::is_iec559, "Java requires IEC559/IEEE754 floating point format."); + static_assert(std::numeric_limits::round_style == std::round_to_nearest, + "Java requires the round-to-nearest rounding mode."); + + if (std::isnan(fpValue)) { + return IntType(0); + } else if (fpValue >= static_cast(std::numeric_limits::max())) { + return std::numeric_limits::max(); + } else if (fpValue <= static_cast(std::numeric_limits::min())) { + return std::numeric_limits::min(); + } else { + return static_cast(fpValue); + } +} +} // namespace maplert + +#endif // MAPLE_RUNTIME_CINTERFACE_H diff --git a/src/mrt/compiler-rt/include/libs.h b/src/mrt/compiler-rt/include/libs.h new file mode 100644 index 0000000000..3d14324391 --- /dev/null +++ b/src/mrt/compiler-rt/include/libs.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __CG_LIB_H_ +#define __CG_LIB_H_ + +#include +#include +#include +#include "mrt_api_common.h" +#include "mrt_compiler_api.h" + +namespace maplert { +// sync enter, sync exit compiler not used +extern "C" MRT_EXPORT void MRT_BuiltinSyncEnter(address_t obj); +extern "C" MRT_EXPORT void MRT_BuiltinSyncExit(address_t obj); + + +// This macro identifies all Java functions implemented in c++. It can be defined as +// __attribute__((section(".java_text"))),and then you can use the MplLinker: +// IsJavaText interface to identify it.Because these frameworks do not have metedata +// data, they cannot be typed out like normal Java stacks in the process of stacking. +#define MAPLE_JAVA_SECTION_DIRECTIVE + +// size in bytes for encoding one aarch64 instruction +const int kAarch64InsnSize = 4; + +// msg buffer size +constexpr int kBufferSize = 128; + +#define MRT_UNW_GETCALLERFRAME(frame) \ + do { \ + void *ip = __builtin_return_address(0); \ + void *fa = __builtin_frame_address(1); \ + __MRT_ASSERT(fa != nullptr, "frame address is nullptr!"); \ + CallChain *thisFrame = reinterpret_cast(fa); \ + (frame).ip = reinterpret_cast(ip); \ + (frame).fa = thisFrame; \ + (frame).ra = thisFrame->returnAddress; \ + } while (0) + +extern "C" void PrepareArgsForExceptionCatcher(); + +#if defined(__arm__) +extern "C" UnwindReasonCode AdaptationFunc(_Unwind_State, _Unwind_Exception*, _Unwind_Context*); +#endif +extern "C" uintptr_t IsThrowingExceptionByRet(); +} + +extern "C" MRT_EXPORT void MRT_DumpRegisters(); + +// this is helper for logging one line with no more than 256 characters +inline static std::string FormatString(const char *format, ...) { + constexpr size_t defaultBufferSize = 256; + char buf[defaultBufferSize]; + + va_list argList; + va_start(argList, format); + if (vsprintf_s(buf, sizeof(buf), format, argList) == -1) { + return ""; + } + va_end(argList); + + std::string str(buf); + return str; +} + +#endif // __CG_LIB_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker.h b/src/mrt/compiler-rt/include/linker/linker.h new file mode 100644 index 0000000000..756c185c2d --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_H_ +#define MAPLE_RUNTIME_MPL_LINKER_H_ + +#include "linker_model.h" + +#ifdef __cplusplus + +namespace maplert { +// Maple linker tools. 
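+// (Reading of the interface below, not a statement of the implementation: symbol handling is
+// split into Resolve* passes that fill in undefined-symbol addresses and Relocate* passes that
+// patch defined-symbol tables, each available per LinkerMFileInfo or for all loaded files.)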
+class Linker : public FeatureModel { + public: + static FeatureName featureName; + explicit Linker(LinkerInvoker &invoker) : pInvoker(&invoker) {}; + ~Linker() { + pInvoker = nullptr; + } + bool HandleSymbol(); + bool HandleMethodSymbol(); + bool HandleDataSymbol(); + bool HandleSymbol(LinkerMFileInfo &mplInfo); + bool HandleMethodSymbol(LinkerMFileInfo &mplInfo); + bool HandleDataSymbol(LinkerMFileInfo &mplInfo); + bool ResolveMethodSymbol(LinkerMFileInfo &mplInfo); + bool RelocateMethodSymbol(LinkerMFileInfo &mplInfo); + void InitMethodSymbol(LinkerMFileInfo &mplInfo); + bool ResolveVTableSymbol(LinkerMFileInfo &mplInfo); + bool ResolveITableSymbol(LinkerMFileInfo &mplInfo); + bool ResolveDataSymbol(LinkerMFileInfo &mplInfo); + + bool ResolveSuperClassSymbol(LinkerMFileInfo &mplInfo); + bool ResolveGCRootSymbol(LinkerMFileInfo &mplInfo); + bool RelocateDataSymbol(LinkerMFileInfo &mplInfo); + void InitDataSymbol(LinkerMFileInfo &mplInfo); + + void *LookUpSymbolAddress(const void *handle, const MUID &muid); + void *LookUpSymbolAddress(const MUID &muid); + void *LookUpSymbolAddress(LinkerMFileInfo &mplInfo, const MUID &muid); + + bool InitLinkerMFileInfo(LinkerMFileInfo &mplInfo, int32_t pos = -1); + void FreeAllCacheTables(const MObject *classLoader) const; + + void AdjustDefTableAddress(const LinkerMFileInfo &mplInfo, LinkerAddrTableItem &pTable, size_t index); + template + int64_t BinarySearch(T1 &pTable, int64_t start, int64_t end, const T2 &value); + template + int64_t BinarySearch(T1 &pTable, const T2 &pMuidIndexTable, int64_t start, int64_t end, const T3 &value); + + private: + void InitLinkerMFileInfoIgnoreSysCL(LinkerMFileInfo &mplInfo); + void InitLinkerMFileInfoTableAddr(LinkerMFileInfo &mplInfo) const; + bool HandleMethodSymbolNoLazy(LinkerMFileInfo &mplInfo); + bool HandleDataSymbolNoLazy(LinkerMFileInfo &mplInfo); + bool DoResolveSuperClassSymbol(LinkerMFileInfo &mplInfo, IndexSlice &superTableSlice, + const AddrSlice &dataUndefSlice, const AddrSlice &dataDefSlice, size_t i); + bool ResolveUndefVTableSymbol( + LinkerMFileInfo &mplInfo, bool fromUndef, size_t index, VTableSlice &vTableSlice, size_t i); + bool ResolveUndefITableSymbol( + LinkerMFileInfo &mplInfo, bool fromUndef, size_t index, ITableSlice &iTableSlice, size_t i); + +#if defined(LINKER_LAZY_BINDING) && defined(LINKER_32BIT_REF_FOR_DEF_UNDEF) + void InitMethodSymbolLazy32(LinkerMFileInfo &mplInfo, size_t defSize); + void InitDataSymbolLazy32(LinkerMFileInfo &mplInfo, size_t defSize); +#endif // LINKER_32BIT_REF_FOR_DEF_UNDEF LINKER_LAZY_BINDING + + bool methodHasNotResolved = false; + bool dataHasNotResolved = false; + bool methodHasNotRelocated = false; + bool dataHasNotRelocated = false; + bool vtableHasNotResolved = false; + bool itableHasNotResolved = false; + bool superClassHasNotResolved = false; + bool gcRootListHasNotResolved = false; + LinkerInvoker *pInvoker = nullptr; +}; +} // namespace maplert +#endif // __cplusplus +#endif // MAPLE_RUNTIME_MPL_LINKER_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_cache.h b/src/mrt/compiler-rt/include/linker/linker_cache.h new file mode 100644 index 0000000000..4aa7b79b80 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_cache.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_CACHE_H_ +#define MAPLE_RUNTIME_MPL_LINKER_CACHE_H_ + +#include + +#include "linker_model.h" + +namespace maplert { +#ifdef LINKER_RT_CACHE +class MFileCacheInf { + public: + uint16_t Append(const std::string &name, const MUID &muid) { + auto iter = filter.insert(std::make_pair(name, 0)); + if (iter.second) { + size_t size = data.size(); + data.push_back(std::make_pair(name, muid)); + iter.first->second = size; + return size; + } + return iter.first->second; + } + + // Required: id < data.size() + const std::string &GetName(uint16_t id) const { + return data[id].first; + } + const MUID &GetHash(uint16_t id) const { + return data[id].second; + } + + const std::vector> &NameList() const { + return data; + } + + private: + std::unordered_map filter; + std::vector> data; +}; + +class LinkerCache : public FeatureModel { + public: + using MplInfoStore = std::vector; + using LookUpSymbolAddr = LinkerVoidType(maplert::LinkerInvoker::*)(LinkerMFileInfo&, const MUID&, size_t&); + enum CacheIndex { + kMethodUndefIndex = 0, + kMethodDefIndex, + kDataUndefIndex, + kDataDefIndex + }; + struct CacheInfo { + CacheIndex cacheIndex; + LookUpSymbolAddr lookUpSymbolAddress; + MFileInfoFlags notResolved; + MFileInfoFlags cacheValid; + bool isMethod; + bool isUndef; + }; + + static FeatureName featureName; + explicit LinkerCache(LinkerInvoker &invoker) + : pInvoker(invoker), pClSoCnt{ 0 } {} + ~LinkerCache() = default; + void SetPath(const std::string &path); + bool GetPath(LinkerMFileInfo &mplInfo, std::string &path, + LinkerCacheType cacheType, LoadStateType loadState); + void Reset(); + void FreeAllTables(LinkerMFileInfo &mplInfo) { + FreeTable(mplInfo, kMethodUndefIndex); + FreeTable(mplInfo, kMethodDefIndex); + FreeTable(mplInfo, kDataUndefIndex); + FreeTable(mplInfo, kDataDefIndex); + } + bool RemoveAllTables(LinkerMFileInfo &mplInfo) { + RemoveTable(mplInfo, kMethodUndefIndex); + RemoveTable(mplInfo, kMethodDefIndex); + RemoveTable(mplInfo, kDataUndefIndex); + RemoveTable(mplInfo, kDataDefIndex); + return true; + } + + void ResolveMethodSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice); + void FreeMethodUndefTable(LinkerMFileInfo &mplInfo); + + void RelocateMethodSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice); + void FreeMethodDefTable(LinkerMFileInfo &mplInfo); + + void ResolveDataSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice); + void FreeDataUndefTable(LinkerMFileInfo &mplInfo); + + void RelocateDataSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice); + void FreeDataDefTable(LinkerMFileInfo &mplInfo); + + bool DumpTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + + private: + bool GetFastPath(LinkerMFileInfo &mplInfo, LinkerCacheType cacheType, std::string &path); + void FinishPath(LinkerMFileInfo &mplInfo, LinkerCacheType cacheType, std::string &path); + + bool InitDump(LinkerMFileInfo &mplInfo, std::ifstream &in, std::ofstream &out, CacheIndex cacheIndex); + bool DumpMetaValidity(std::stringstream &ss, BufferSlice &buf); + bool DumpMetaVersion(LinkerMFileInfo &mplInfo, std::stringstream &ss, 
BufferSlice &buf); + bool DumpMetaCl(std::stringstream &ss, BufferSlice &buf, CacheIndex cacheIndex); + int DumpMap(std::stringstream &ss, BufferSlice &buf); + bool DumpData(std::stringstream &ss, BufferSlice &buf, size_t mapSize); + bool DumpFile(std::ofstream &out, std::stringstream &ss); + + void UpdateProperty(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + LinkerCacheType GetLinkerCacheType(CacheIndex cacheIndex); + MplCacheMapT *GetCacheMap(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + + bool RemoveTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + void FreeTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + + bool LoadInstallCache(LinkerMFileInfo &mplInfo, LinkerCacheType cacheType); + + bool SaveTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + int GetCacheFd(LinkerMFileInfo &mplInfo, std::string &path, CacheIndex cacheIndex); + void SaveMeta(LinkerMFileInfo &mplInfo, std::string &buffer, CacheIndex cacheIndex); + bool SaveData(LinkerMFileInfo &mplInfo, std::string &buffer, CacheIndex cacheIndex); + bool SaveNameList(std::string &buffer, CacheIndex cacheIndex); + bool WriteTable(int fd, std::string &buffer, CacheIndex cacheIndex); + bool CleanSavingTable(LinkerMFileInfo &mplInfo, int fd, bool res); + + bool LoadTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex); + bool LoadTable(LinkerMFileInfo &mplInfo, const std::string &path, CacheIndex cacheIndex); + void *LoadCache(const std::string &path, size_t &cacheSize); + bool LoadFooter(BufferSlice &buf, CacheIndex cacheIndex); + bool LoadMeta(LinkerMFileInfo &mplInfo, BufferSlice &buf, CacheIndex cacheIndex); + bool LoadData(LinkerMFileInfo &mplInfo, BufferSlice &buf, CacheIndex cacheIndex); + bool LoadNameList(BufferSlice &buf, CacheIndex cacheIndex); + + bool ProcessTable(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice, CacheInfo cacheInfo); + bool LookUpUndefSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice, CacheInfo cacheInfo); + bool UndefSymbolFailHandler(LinkerMFileInfo &mplInfo, uint32_t idx, CacheInfo cacheInfo); + bool LookUpDefSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice, CacheInfo cacheInfo); + bool LookUpUndefAddr(LinkerMFileInfo &mplInfo, const MUID &muid, + LinkerOffsetType &addr, LinkerCacheTableItem &pItem, CacheInfo cacheInfo); + bool LookUpDefAddr(LinkerMFileInfo &mplInfo, const MUID &muid, + LinkerOffsetType &addr, LinkerCacheTableItem &pItem, CacheInfo cacheInfo); + LinkerMFileInfo *FindLinkerMFileInfo(uint16_t soid, const MFileCacheInf &inf, MplInfoStore &store); + LinkerOffsetType GetAddr(LinkerMFileInfo &res, LinkerCacheTableItem &pItem, bool isMethod); + + template + void ParallelLookUp(F const &lookFunc, int numThreads, size_t defSize); + + static constexpr int kLinkerInvalidNameLen = 1; + static constexpr int kMaxCacheIndex = kDataDefIndex + 1; + static const MUID kInvalidHash; + LinkerInvoker &pInvoker; + std::string pCachePath; + MUID pClHash[kMaxCacheIndex]; // Hash code of class loader .so list + uint16_t pClSoCnt[kMaxCacheIndex]; // .so count in class loader + MFileCacheInf pMplCacheInf[kMaxCacheIndex]; + MplInfoStore pMplStore[kMaxCacheIndex]; +}; +#endif // LINKER_RT_CACHE +} // namespace maplert +#endif diff --git a/src/mrt/compiler-rt/include/linker/linker_common.h b/src/mrt/compiler-rt/include/linker/linker_common.h new file mode 100644 index 0000000000..45da7a9709 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_common.h @@ -0,0 +1,571 @@ +/* + * Copyright (c) [2019-2020] 
Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_COMMON_H_ +#define MAPLE_RUNTIME_MPL_LINKER_COMMON_H_ + +#include +#include + +#include "mrt_api_common.h" +#include "jni.h" +#include "linker_compiler.h" +#include "object_type.h" +#include "metadata_layout.h" +#ifdef LINKER_DECOUPLE +#include "decouple/linker_decouple_common.h" +#endif + +#ifdef __cplusplus +#include +#include +#include +#include +#include +#include +namespace maplert { +// For un|define tables' address. +using PtrFuncType = void *(*)(); + +#define LINKER_INSTALL_STATE (maple::MplCacheFirstFile::GetInstallStat()) + +#ifdef LINKER_RT_CACHE +enum class LinkerCacheType : uint8_t { + kLinkerCacheDataUndef = 0, // data undef symbol + kLinkerCacheDataDef = 1, // data def symbol + kLinkerCacheMethodUndef = 2, // method symbol + kLinkerCacheMethodDef = 3, // method symbol + kLinkerCacheVTable = 4, // vtable + kLinkerCacheCTable = 5, // vtable offset table + kLinkerCacheFieldOffset = 6, // field offset + kLinkerCacheFieldTable = 7, // field offset table + kLinkerCacheITable = 8, // itable + kLinkerCacheStaticTable = 9, // static table + kLinkerCacheLazy = 10 // lazy cache +}; + +static constexpr char kLinkerSystemCachePath[] = "/data/system"; +static constexpr char kLinkerRootCachePath[] = "/data/dalvik-cache"; +static constexpr char kLinkerCacheFold[] = "mpl-lnk"; +static constexpr unsigned int kLinkerIndexMax = 0xFFFFFFFF; +static constexpr int kLinkerClHashFlagValue = 0; +static constexpr unsigned int kLinkerClHashFlagIndex = 0xFFFF; +// Crash type(count) kLinkerServiceCrashRecoveryMygote(2) for mygote recovery. +static constexpr int kLinkerServiceCrashRecoveryMygote = 2; +// Crash type(count) kLinkerServiceCrashRecoverySystemServer(3) for sysvr recovery. +static constexpr int kLinkerServiceCrashRecoverySystemServer = 3; + +// Method undefine cache table item. 
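+// Each item records where a cached symbol lives: 'addrIndex' is the slot in the
+// undef/def address table and 'soIndex' identifies the maple .so holding it, plus a
+// 'filled' flag and a name-validity state (valid / invalid / lazy-invalid).
+// Illustrative sketch of the intended flow (placeholder values only):
+//   LinkerCacheTableItem item;
+//   item.SetIds(addrId, soId);   // resolved: remember the table slot and .so id
+//   if (!item.Filled()) {
+//     item.SetFilled();          // mark the item as handled exactly once
+//   }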
+class LinkerCacheTableItem { + public: + LinkerCacheTableItem() : addrIndex(0), soIndex(0), filled(0), state(kValid) {} + + LinkerCacheTableItem(const LinkerCacheTableItem &item) { + addrIndex = item.addrIndex; + soIndex = item.soIndex; + filled = item.filled; + state = item.state; + } + + LinkerCacheTableItem &operator=(const LinkerCacheTableItem &item) { + this->addrIndex = item.addrIndex; + this->soIndex = item.soIndex; + this->filled = item.filled; + this->state = item.state; + return *this; + } + LinkerCacheTableItem(uint32_t aidx, uint16_t soidx) : addrIndex(aidx), soIndex(soidx) { + filled = 0; + state = kValid; + } + ~LinkerCacheTableItem() = default; + uint32_t AddrId() const { + return addrIndex; + } + uint16_t SoId() const { + return soIndex; + } + + void SetFilled() { + filled = 1; + } + bool Filled() const { + return filled == 1; + } + bool LazyInvalidName() const { + return state == kLazyInvalid; + } + bool InvalidName() const { + return state == kInvalid; + } + bool Valid() const { + return state == kValid; + } + + LinkerCacheTableItem &SetIds(size_t addrId, uint16_t soid) { + addrIndex = static_cast(addrId); + soIndex = soid; + state = kValid; + return *this; + } + LinkerCacheTableItem &SetLazyInvalidSoId(uint16_t soidx) { + soIndex = soidx; + state = kLazyInvalid; + return *this; + } + LinkerCacheTableItem &SetInvalidSoId(uint16_t soidx) { + soIndex = soidx; + state = kInvalid; + return *this; + } + private: + uint32_t addrIndex; // Index, the same order as item in undef|def table + uint16_t soIndex; // index for maple so + uint8_t filled; // If this item was handled before. + uint8_t state; // if maple so name valid + + enum SoIdState : uint8_t { + kValid = 0, + kInvalid = 1, + kLazyInvalid = 2, + }; +}; + +using MplCacheMapT = std::unordered_map; + +struct LinkerCacheRep { + MplCacheMapT methodUndefCacheMap; + int64_t methodUndefCacheSize = -1; + MplCacheMapT methodDefCacheMap; + int64_t methodDefCacheSize = -1; + std::string methodUndefCachePath; + std::string methodDefCachePath; + MplCacheMapT dataUndefCacheMap; + int64_t dataUndefCacheSize = -1; + MplCacheMapT dataDefCacheMap; + int64_t dataDefCacheSize = -1; + std::string dataUndefCachePath; + std::string dataDefCachePath; + std::string lazyCachePath; +}; +#endif // LINKER_RT_CACHE + +// Refer to MUIDReplacement::GenRangeTable() for sequence. +enum SectTabIndex { + kRange = 0, + kDecouple, + kVTable, + kITable, + kVTabOffset, + kFieldOffset, + kValueOffset, + kLocalCinfo, + kDataConstStr, + kDataSuperClass, + kDataGcRoot, + kTabClassMetadata, + kClassMetadataBucket, + kJavaText, + kJavaJni, + kJavaJniFunc, + kMethodDef, + kMethodDefOrig, + kMethodInfo, + kMethodUndef, + kDataDef, + kDataDefOrig, + kDataUndef, + kMethodDefMuid, + kMethodUndefMuid, + kDataDefMuid, + kDataUndefMuid, + kMethodMuidIndex, + kMethodProfile, + kDataSection, + kStaticDecoupleKey, + kStaticDecoupleValue, + kBssSection, + kLinkerHashSo, + kArrayClassCacheIndex, + kArrayClassCacheNameIndex, + kFuncIRProfDesc, + kFuncIRProfCounter, + kMaxTableIndexCount +}; + +// Start/end address of table +enum SectTabPairIndex { + kTable1stIndex, + kTable2ndIndex, + kTable2ndDimMaxCount +}; + +// State of address, for lazy binding. +enum BindingState { + kBindingStateUnresolved = 0, // Represents all kinds of unresolved state. 
+ kBindingStateCinfUndef = 1, + kBindingStateCinfDef = 2, + kBindingStateDataUndef = 3, + kBindingStateDataDef = 4, + kBindingStateMethodUndef = 5, + kBindingStateMethodDef = 6, + kBindingStateMax = 4 * 1024, // a page + kBindingStateResolved // Already resolved if great than Max. +}; + +// Type for searching address. +enum AddressRangeType { + kTypeWhole = 0, // The range is the whole MFile. + kTypeText = 1, // The range is Java Text sections + kTypeClass = 2, // The range is Class Metadata sections. + kTypeData = 3 // The range is Data section +}; + +// State of loading process. +enum LoadStateType { + kLoadStateNone = 0, // The state of no loading or loading finished. + kLoadStateBoot = 1, // The state of loading in boot(mygote). + kLoadStateSS = 2, // The state of loading system_server. + kLoadStateApk = 3 // The state of loading APK. +}; + +// State of app loading. +enum AppLoadState { + kAppLoadNone = 0, // Not loading APK for App. + kAppLoadBaseOnlyReady = 1, // Ready to load the base APK + kAppLoadBaseOnly = 2, // Loading the base APK and successive + kAppLoadBaseAndOthers = 3 // Start to load other APKs +}; + +enum MFileInfoSource { + kFromPC = 0, + kFromAddr, + kFromMeta, + kFromHandle, + kFromName, + kFromClass, + kFromClassLoader +}; + +using PtrMatrixType = void *(*)[kTable2ndDimMaxCount]; + +struct FuncProfInfo { + uint32_t callTimes; + uint8_t layoutType; + std::string funcName; + FuncProfInfo(uint32_t callTimes, uint8_t layoutType) : callTimes(callTimes), layoutType(layoutType) {} + FuncProfInfo(uint32_t callTimes, uint8_t layoutType, std::string funcName) + : callTimes(callTimes), layoutType(layoutType), funcName(std::move(funcName)) {} +}; + +struct FuncIRProfInfo { + std::string funcName; + uint64_t hash; + uint32_t start; // indicate the start index in counter Table + uint32_t end; + FuncIRProfInfo(std::string funcName, uint64_t hash, uint32_t start, uint32_t end) + : funcName(std::move(funcName)), hash(hash), start(start), end(end) {} +}; + +struct MFileIRProfInfo { + std::vector descTab; + std::vector counterTab; +}; + +struct LinkerLocInfo { + std::string path; // Path name of maple library + void *addr = nullptr; // Method address + uint32_t size = 0; // Method size + std::string sym; // Method symbol pointer +}; + +using ClassLoaderListT = std::vector; +using ClassLoaderListItT = std::vector::iterator; +using ClassLoaderListRevItT = std::vector::reverse_iterator; + +enum MFileInfoFlags { + // LinkerMFileInfo flags + kIsBoot = 0, + kIsLazy, + kIsRelMethodOnce, + kIsRelDataOnce, + kIsMethodDefResolved, + kIsDataDefResolved, + kIsVTabResolved, + kIsITabResolved, + kIsVTabOffsetResolved, + kIsFieldOffsetResolved, + kIsSuperClassResolved, + kIsGCRootListResolved, + kIsMethodUndefHasNotResolved, + kIsMethodDefHasNotResolved, + kIsDataUndefHasNotResolved, + kIsDataDefHasNotResolved, + kIsMethodRelocated, + kIsDataRelocated, + kIsMethodUndefCacheValid, + kIsMethodCacheValid, + kIsDataUndefCacheValid, + kIsDataCacheValid, + // DecoupleMFileInfo flags + kIsRelDecoupledClassOnce, + kIsDecoupledVTabResolved, + kIsDecoupledFieldOffsetResolved, + kIsFieldOffsetTabResolved, + kIsDecoupledStaticAddrTabResolved, + kIsFieldOffsetTrashCacheFileRemoved, + kIsFieldTableTrashCacheFileRemoved, + kIsStaticTrashCacheFileRemoved, + kIsVTabTrashCacheFileRemoved, + kIsCTabTrashCacheFileRemoved, + kIsITabTrashCacheFileRemoved, + kMFileInfoFlagMaxIndex +}; + +// each interval part for SectTabIndex Item +static constexpr size_t kTableSizeInterval[] = { + 1, 1, sizeof(LinkerVTableItem), 
sizeof(LinkerITableItem), sizeof(LinkerVTableOffsetItem), + sizeof(LinkerFieldOffsetItem), sizeof(int32_t), sizeof(size_t), sizeof(void*), sizeof(LinkerSuperClassTableItem), + sizeof(LinkerGCRootTableItem), 1, 1, 1, 1, 1, sizeof(LinkerAddrTableItem), sizeof(LinkerAddrTableItem), + sizeof(LinkerInfTableItem), sizeof(LinkerAddrTableItem), sizeof(LinkerAddrTableItem), sizeof(LinkerAddrTableItem), + sizeof(LinkerAddrTableItem), sizeof(LinkerMuidTableItem), sizeof(LinkerMuidTableItem), sizeof(LinkerMuidTableItem), + sizeof(LinkerMuidTableItem), sizeof(uint32_t), sizeof(LinkerFuncProfileTableItem), 1, + sizeof(LinkerStaticDecoupleClass), 1, 1, 1, 1, 1, sizeof(LinkerFuncProfDescItem), + sizeof(LinkerFuncProfileTableItem), 1 +}; +// each offset part for SectTabIndex Item +static constexpr size_t kTableSizeOffset[] = { + 0, 0, 0, 0, 0, 0, sizeof(LinkerOffsetKeyTableInfo) / sizeof(int32_t), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sizeof(MplStaticAddrTabHead) / sizeof(LinkerStaticAddrItem), 0, 0, 0, 0, 0, 0, 0 +}; +using MplFuncProfile = std::unordered_map; +class LinkerMFileInfo { + public: + LinkerMFileInfo() { + classLoader = nullptr; + startHotStrTab = nullptr; + bothHotStrTab = nullptr; + runHotStrTab = nullptr; + coldStrTab = nullptr; + coldStrTabEnd = nullptr; + rometadataFieldStart = nullptr; + rometadataFieldEnd = nullptr; + rometadataMethodStart = nullptr; + rometadataMethodEnd = nullptr; + romuidtabStart = nullptr; + romuidtabEnd = nullptr; + } + virtual ~LinkerMFileInfo() {} + void *elfBase = nullptr; // ELF base address. + void *elfStart = nullptr; // ELF tables start address. + void *elfEnd = nullptr; // ELF tables ending address. + std::string name; // Maple file path name. + void *handle = nullptr; // Maple file open handle. + MUID hash; // Hash code of .so. + MUID hashOfDecouple; // Hash code for decouple of .so. + uint32_t rangeTabSize = 0; + uint64_t decoupleStaticLevel = 0; + + PtrMatrixType tableAddr = 0; + + MplFuncProfile funProfileMap; +#ifdef LINKER_RT_CACHE + LinkerCacheRep rep; +#endif + + jobject classLoader; + ClassLoaderListT clList; + + char *startHotStrTab; + char *bothHotStrTab; + char *runHotStrTab; + char *coldStrTab; + char *coldStrTabEnd; + + void *rometadataFieldStart; + void *rometadataFieldEnd; + void *rometadataMethodStart; + void *rometadataMethodEnd; + + void *romuidtabStart; + void *romuidtabEnd; + + MUID vtbCLHash; + MUID ctbCLHash; + MUID fosCLHash; + MUID ftbCLHash; + MUID staticCLHash; + +#if defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + int methodCachingNum = 0; + std::string methodCachingStr; + std::mutex methodCachingLock; +#endif // defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + + bool BelongsToApp(); + void ReleaseReadOnlyMemory(); +#ifdef LINKER_DECOUPLE + virtual uint64_t GetDecoupleLevel() const = 0; + virtual void SetDecoupleLevel(uint64_t level) = 0; +#endif + template + inline Type GetTblBegin(SectTabIndex index) { + assert(tableAddr != nullptr); + index = ((index == kMethodDefOrig || index == kDataDefOrig) && !flag[kIsLazy]) ? + static_cast(static_cast(index) - 1) : index; + return static_cast(reinterpret_cast(tableAddr[index][kTable1stIndex])); + } + template + inline Type GetTblEnd(SectTabIndex index) { + assert(tableAddr != nullptr); + index = ((index == kMethodDefOrig || index == kDataDefOrig) && !flag[kIsLazy]) ? 
+ static_cast(static_cast(index) - 1) : index; + return static_cast(reinterpret_cast(tableAddr[index][kTable2ndIndex])); + } + template + inline Type GetTblSize(SectTabIndex index) { + assert(tableAddr != nullptr); + size_t begin = reinterpret_cast(tableAddr[index][kTable1stIndex]); + size_t end = reinterpret_cast(tableAddr[index][kTable2ndIndex]); + size_t result = (end - begin) / kTableSizeInterval[index]; + size_t offset = kTableSizeOffset[index]; + size_t size = (begin == 0 || end == 0 || result <= offset) ? 0 : (result - offset); + return static_cast(size); + } + inline bool IsFlag(MFileInfoFlags flagIndex) const { + return flag[flagIndex]; + } + inline void SetFlag(MFileInfoFlags flagIndex, bool isTrue) { + flag[flagIndex] = isTrue; + } + private: + bool flag[kMFileInfoFlagMaxIndex] = { false }; +}; + +class LinkerRef : public DataRef { + public: + LinkerRef() = default; + template + explicit LinkerRef(T ref) { + SetDataRef(ref); + } + LinkerRef(const LinkerRef&) = default; + ~LinkerRef() = default; + inline bool IsEmpty() const { + return refVal == 0; + } + inline bool IsVTabIndex() const { // 1 marks a conflict. + return ((refVal & kNegativeNum) == 0) && refVal != 1 && refVal != 0; + } + inline bool IsITabIndex() const { // 1 marks a conflict. + return (refVal & kNegativeNum) == 0 && refVal != 1; + } + inline bool IsIndex() const { + return (refVal & kFromIndexMask) != 0; + } + inline bool IsTabUndef() const { + constexpr uintptr_t kTableUndefBitMask = 0x2; + return (refVal & kTableUndefBitMask) == 0; + } + inline bool IsFromUndef() const { + return (refVal & kLinkerRefUnDef) != 0; + } + inline size_t GetTabIndex() const { + return static_cast((refVal >> 2) - 1); // 2 bits as flag, real index is start with 3 bits; + } + inline size_t GetIndex() const { +#if defined(__aarch64__) + constexpr uintptr_t kAddressMask = 0x0fffffffffffffff; + return static_cast(refVal & kAddressMask); +#elif defined(__arm__) + return static_cast(refVal >> 2); // low 2 bit as flag +#else + return 0; +#endif + } + protected: +#if defined(__aarch64__) + enum LinkerRefFormat { + kLinkerRefAddress = 0ul, // must be 0 + kLinkerRefDef = 1ul << 61, // 61 bit as Def + kLinkerRefUnDef = 1ul << 62, // 62 bit as Undef + kFromIndexMask = kLinkerRefDef | kLinkerRefUnDef + }; +#elif defined(__arm__) + enum LinkerRefFormat { + kLinkerRefAddress = 0ul, // must be 0 + kLinkerRefDef = 1ul, // def + kLinkerRefUnDef = 2ul, // undef + kFromIndexMask = kLinkerRefDef | kLinkerRefUnDef + }; +#endif +}; + +template +class Slice { + public: + Slice() : base(nullptr), num(0) {} + Slice(T *b, size_t n) : base(b), num(n) {} + ~Slice() { + base = nullptr; + num = 0; + } + Slice(const Slice&) = default; + Slice &operator=(const Slice&) = default; + T *Data() { + return base; + } + const T *Data() const { + return base; + } + size_t Size() const { + return num; + } + // Required: n < num. + T &operator[](size_t n) { + return base[n]; + } + const T &operator[](size_t n) const { + return base[n]; + } + // Required: num >= n. 
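+  // Advancing drops the first n elements from the view without copying: e.g. (sketch,
+  // placeholder values) after 'Slice<int> s(buf, 8); s += 2;', s[0] aliases buf[2]
+  // and s.Size() == 6.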
+ Slice &operator+=(size_t n) { + base += n; + num -= n; + return *this; + } + bool Empty() const { + return (base == nullptr || num == 0); + } + void Clear() { + base = nullptr; + num = 0; + } + + private: + T *base; + size_t num; +}; + +using AddrSlice = Slice; +using MuidSlice = Slice; +using MuidIndexSlice = Slice; +using BufferSlice = Slice; +using IndexSlice = Slice; +using VTableSlice = Slice; +using ITableSlice = VTableSlice; +using InfTableSlice = Slice; +} +#endif // __cplusplus +#endif // MAPLE_RUNTIME_MPL_LINKER_COMMON_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_compiler.h b/src/mrt/compiler-rt/include/linker/linker_compiler.h new file mode 100644 index 0000000000..9533685191 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_compiler.h @@ -0,0 +1,274 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_COMPILER_H_ +#define MAPLE_RUNTIME_MPL_LINKER_COMPILER_H_ + +#include "muid.h" +#include "address.h" + +namespace maplert { +#ifndef LINKER_LAZY_BINDING +#define LINKER_LAZY_BINDING +#endif + +#if defined(__ANDROID__) +#define LINKER_RT_CACHE +#define LINKER_DECOUPLE_CACHE +#endif + +#ifdef USE_32BIT_REF +constexpr uint32_t kDsoLoadedAddressStart = 0xC0000000; +constexpr uint32_t kDsoLoadedAddressEnd = 0xF0000000; +#endif + +#ifdef USE_32BIT_REF +constexpr uint32_t kNegativeNum = 0x80000000; +constexpr uint32_t kGoldSymbolTableEndFlag = 0X40000000; +#else +constexpr uint64_t kNegativeNum = 0x8000000000000000; +constexpr uint64_t kGoldSymbolTableEndFlag = 0X4000000000000000; +#endif + +// We must open LINKER_ADDR_FROM_OFFSET and LINKER_32BIT_REF_FOR_DEF_UNDEF +// at the same time for '.long' and '[sym]-.', such as '.long __cinf_Xxx - .', +// or close them for '.quad __cinf_Xxx' +#ifdef USE_32BIT_REF +#define LINKER_32BIT_REF_FOR_DEF_UNDEF +#define LINKER_ADDR_FROM_OFFSET +#endif + +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF // We use exclusive LINKER_32BIT_REF_FOR_DEF_UNDEF for def/undef USE_32BIT_REF +using LinkerOffsetType = int32_t; +using LinkerVoidType = uint32_t; +#else +using LinkerOffsetType = int64_t; +using LinkerVoidType = uint64_t; +#endif // USE_32BIT_REF + +#ifdef USE_32BIT_REF +using VOID_PTR = uint32_t; +using INT_VAL = int32_t; +#else +using VOID_PTR = uint64_t; +using INT_VAL = int64_t; +#endif + +static constexpr const char kPreinitModuleClasses[] = "MRT_PreinitModuleClasses"; +static constexpr const char kRangeTableFunc[] = "MRT_GetRangeTableBegin"; +static constexpr const char kRangeTableEndFunc[] = "MRT_GetRangeTableEnd"; +static constexpr const char kMapleStartFunc[] = "MRT_GetMapleStart"; +static constexpr const char kMapleEndFunc[] = "MRT_GetMapleEnd"; +static constexpr const char kMapleVersionBegin[] = "MRT_GetVersionTabBegin"; +static constexpr const char kMapleVersionEnd[] = "MRT_GetVersionTabEnd"; +static constexpr const char kMapleCompileStatusBegin[] = "MRT_GetMFileStatusBegin"; +static constexpr const char kMapleCompileStatusEnd[] = "MRT_GetMFileStatusEnd"; +static 
constexpr const char kStartHotStrTabBegin[] = "MRT_GetStartHotStrTabBegin"; +static constexpr const char kBothHotStrTabBegin[] = "MRT_GetBothHotStrTabBegin"; +static constexpr const char kRunHotStrTabBegin[] = "MRT_GetRunHotStrTabBegin"; +static constexpr const char kColdStrTabBegin[] = "MRT_GetColdStrTabBegin"; +static constexpr const char kColdStrTabEnd[] = "MRT_GetColdStrTabEnd"; +static constexpr const char kMetadataFieldStart[] = "MRT_GetMFileROMetadataFieldStart"; +static constexpr const char kMetadataFieldEnd[] = "MRT_GetMFileROMetadataFieldEnd"; +static constexpr const char kMetadataMethodStart[] = "MRT_GetMFileROMetadataMethodStart"; +static constexpr const char kMetadataMethodEnd[] = "MRT_GetMFileROMetadataMethodEnd"; +static constexpr const char kMuidTabStart[] = "MRT_GetMFileROMuidtabStart"; +static constexpr const char kMuidTabEnd[] = "MRT_GetMFileROMuidtabEnd"; +static constexpr const char kBBProfileTabBegin[] = "MRT_GetBBProfileTabBegin"; +static constexpr const char kBBProfileTabEnd[] = "MRT_GetBBProfileTabEnd"; +static constexpr const char kBBProfileStrTabBegin[] = "MRT_GetBBProfileStrTabBegin"; +static constexpr const char kBBProfileStrTabEnd[] = "MRT_GetBBProfileStrTabEnd"; + +#pragma pack(4) +struct LinkerOffsetKeyTableInfo { +#ifdef USE_32BIT_REF + int32_t vtableOffsetTable; + int32_t fieldOffsetTable; +#else + int64_t vtableOffsetTable; + int64_t fieldOffsetTable; +#endif + uint32_t vtableOffsetTableSize; + uint32_t fieldOffsetTableSize; +}; + +// vtable offset key table for decoupling +struct LinkerVTableOffsetItem { + size_t index; // pointer to class metadata + int32_t methodName; + int32_t signatureName; + int32_t fieldIndex; +}; + +struct LinkerFieldOffsetItem { + size_t index; // pointer to class metadata + int32_t fieldName; + int32_t fieldType; + int32_t vtableIndex; +}; + +struct LinkerStaticDecoupleClass { + size_t callee; // class index + uint32_t fieldsNum; + uint32_t methodsNum; + uint32_t pad; +}; + +struct LinkerStaticAddrItem { + VOID_PTR address; + VOID_PTR index; + VOID_PTR dcpAddr; + VOID_PTR classInfo; +}; + +struct MplStaticFieldItem { + size_t caller; + uint32_t fieldName; + uint32_t fieldType; + uint32_t fieldIdx; +}; + +struct MplStaticMethodItem { + size_t caller; + uint32_t methodName; + uint32_t methodSignature; + uint32_t methodIdx; +}; + +struct MplStaticAddrTabHead { + union { + uintptr_t mplInfo; + uint64_t pad; // keep 64 bits alignment + }; + VOID_PTR classSize; + VOID_PTR tableSize; + uint64_t magic; +}; + +struct MplStaticFieldInfo { + bool isAccess; + uint64_t fieldAddr; + uint64_t classInfo; +}; + +struct MplFieldInfo { + char *fieldName; + char *fieldType; +}; +#pragma pack() + +struct LinkerAddrTableItem { + LinkerOffsetType addr; // Original method address offset, 64bits + + // We've used AddressFromOffset() as hard code everywhere, + // so we control using offset(implying 32bit in the case of def/undef) + // or not in the function, by LINKER_ADDR_FROM_OFFSET. + void *AddressFromOffset() const { +#ifdef LINKER_ADDR_FROM_OFFSET + LinkerOffsetType offset = addr; + char *base = reinterpret_cast(const_cast(&addr)); + return reinterpret_cast(base + offset); +#else + return reinterpret_cast(static_cast(addr)); +#endif + } + + void *AddressFromBase(void *baseAddr) const { + LinkerOffsetType offset = addr; + char *base = reinterpret_cast(baseAddr); + return reinterpret_cast(base + offset); + } + + void *Address() const { + return reinterpret_cast(static_cast(addr)); + } +}; + +// Method undefine table item. 
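+// Wraps the 128-bit MUID of one symbol; the comparison operators accept a bare MUID so
+// sorted MUID tables can be binary-searched against a MUID key (see Linker::BinarySearch
+// in linker_inline.h). Sketch with placeholder names only:
+//   int64_t pos = BinarySearch(muidTable[0], 0, count - 1, targetMuid);  // -1 if absent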
+struct LinkerMuidTableItem { + MUID muid; // MUID, 128bits + + // MUID comparison + bool operator < (const MUID &tmp) const { + return muid < tmp; + } + bool operator > (const MUID &tmp) const { + return muid > tmp; + } + bool operator == (const MUID &tmp) const { + return muid == tmp; + } +}; + +// Muid sorted index table item for 'LinkerAddrTableItem'. +// Use for SYMBOL/MUID --> ADDRESS. +// See struct 'LinkerAddrTableItem' +struct LinkerMuidIndexTableItem { + uint32_t index; // Address sorted index, 32bits +}; + +struct LinkerFuncProfileTableItem { + uint32_t callTimes; // function call times or bb call times +}; + +struct LinkerFuncProfDescItem { + uint64_t hash; // function hash + // [start,end] indicate the range in profile counter tab + // which belongs to this function + uint32_t start; + uint32_t end; +}; + +struct LinkerVTableItem { +#ifdef USE_32BIT_REF + uint32_t index; // Index in undef table, 32bits +#else + size_t index; // Index in undef table, 64bits +#endif +}; + +using LinkerITableItem = LinkerVTableItem; + +struct LinkerTableItem { + size_t index; +}; + +using LinkerSuperClassTableItem = LinkerTableItem; +using LinkerGCRootTableItem = LinkerTableItem; + +// offset value table for decoupling +struct LinkerOffsetValItem { + int32_t offset; // field offset in object layout, or method offset in vtable +}; + +struct LinkerOffsetValItemLazyLoad { + reffield_t offsetAddr; + uint32_t offset; // field offset in object layout, or method offset in vtable +#ifndef USE_32BIT_REF + int32_t pad; +#endif +}; + +struct MapleVersionT { + uint32_t mplMajorVersion; + uint32_t compilerMinorVersion; +}; + +// Method info. table item. +struct LinkerInfTableItem { + uint32_t size; // Method size, 32bits + int32_t funcNameOffset; // Offset of func name, 32bits +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_MPL_LINKER_COMPILER_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_debug.h b/src/mrt/compiler-rt/include/linker/linker_debug.h new file mode 100644 index 0000000000..1ba9703a22 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_debug.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_MPL_DEBUG_H_ +#define MAPLE_RUNTIME_MPL_DEBUG_H_ + +#include "linker_model.h" + +namespace maplert { +class Debug : public FeatureModel { + public: + static FeatureName featureName; + explicit Debug(LinkerInvoker &invoker) : pInvoker(&invoker) {}; + ~Debug() { + pInvoker = nullptr; + } + void DumpAllMplSectionInfo(std::ostream &os); + void DumpAllMplFuncProfile(std::unordered_map> &funcProfileRaw); + void DumpAllMplFuncIRProfile(std::unordered_map &funcProfileRaw); + uint64_t DumpMetadataSectionSize(std::ostream &os, void *handle, const std::string sectionName); + bool DumpMethodUndefSymbol(LinkerMFileInfo &mplInfo); + bool DumpMethodSymbol(LinkerMFileInfo &mplInfo); + bool DumpDataUndefSymbol(LinkerMFileInfo &mplInfo); + bool DumpDataSymbol(LinkerMFileInfo &mplInfo); + void DumpStackInfoInLog(); + void DumpBBProfileInfo(std::ostream &os); + private: + LinkerInvoker *pInvoker = nullptr; +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_MPL_DEBUG_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_gctib.h b/src/mrt/compiler-rt/include/linker/linker_gctib.h new file mode 100644 index 0000000000..9abb2be93c --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_gctib.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MPL_RUNTIME_LINKER_GCTIB_H +#define MPL_RUNTIME_LINKER_GCTIB_H + +#include "sizes.h" + +namespace maplert { +void RefCal(const MClass &cls, std::vector &refOffsets, std::vector &weakOffsets, + std::vector &unownedOffsets, uint64_t &maxRefOffset); +void DumpGctib(const struct GCTibGCInfo &gctib); +bool ReGenGctib(ClassMetadata *classInfo, bool forceUpdate = true); +} // namespace maplert +#endif // MPL_RUNTIME_LINKER_GCTIB_H diff --git a/src/mrt/compiler-rt/include/linker/linker_hotfix.h b/src/mrt/compiler-rt/include/linker/linker_hotfix.h new file mode 100644 index 0000000000..3b785b0c61 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_hotfix.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_MPL_HOTFIX_H_ +#define MAPLE_RUNTIME_MPL_HOTFIX_H_ + +#include "linker_model.h" + +namespace maplert { +class Hotfix : public FeatureModel { + public: + static FeatureName featureName; + explicit Hotfix(LinkerInvoker &invoker) : pInvoker(&invoker) {} + ~Hotfix() { + pInvoker = nullptr; + } + void SetClassLoaderParent(const MObject *classLoader, const MObject *newParent); + void InsertClassesFront(const MObject *classLoader, const LinkerMFileInfo &mplInfo, const std::string &path); + void SetPatchPath(const std::string &path, int32_t mode); + std::string GetPatchPath(); + bool IsFrontPatchMode(const std::string &path); + bool ResetResolvedFlags(LinkerMFileInfo &mplInfo); + bool ReleaseCaches(LinkerMFileInfo &mplInfo); + private: + int32_t patchMode = 1; // 0 - classloader parent, 1 - dexpath list + std::string patchPath; + LinkerInvoker *pInvoker = nullptr; +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_MPL_HOTFIX_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_info.h b/src/mrt/compiler-rt/include/linker/linker_info.h new file mode 100644 index 0000000000..514278b8a2 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_info.h @@ -0,0 +1,404 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_INFO_H_ +#define MAPLE_RUNTIME_MPL_LINKER_INFO_H_ + +#include +#include + +#include "linker_utils.h" +#include "methodmeta.h" + +namespace maplert { +MUID GetHash(const std::vector &mplInfos, bool getSolidHash); + +class LinkerMFileInfoListT { + public: + using InfoList = std::vector; + + LinkerMFileInfoListT() {} + ~LinkerMFileInfoListT() {} + + LinkerMFileInfoListT(const LinkerMFileInfoListT &other) = delete; + LinkerMFileInfoListT &operator=(const LinkerMFileInfoListT&) = delete; + + void Merge(const InfoList &other) { + std::lock_guard lock(mutex); + (void)infos.insert(infos.end(), other.begin(), other.end()); + } + + void Clear() { + std::lock_guard lock(mutex); + infos.clear(); + } + + std::vector Clone() { + std::lock_guard lock(mutex); + return infos; + } + + LinkerMFileInfo *Get(size_t index) { + std::lock_guard lock(mutex); + return infos[index]; + } + + size_t Size() { + std::lock_guard lock(mutex); + return infos.size(); + } + + bool Empty() { + std::lock_guard lock(mutex); + return infos.empty(); + } + + void Append(LinkerMFileInfo &mplInfo, int32_t pos) { + std::lock_guard lock(mutex); + if (pos <= -1) { + infos.push_back(&mplInfo); + } else { + (void)infos.insert(infos.begin() + pos, &mplInfo); + } + } + template + void ForEach(const UnaryFunction f) { + std::lock_guard lock(mutex); + for (auto mplInfo : infos) { + f(*mplInfo); + } + } + template + bool FindIf(const UnaryPredicate p) { + std::lock_guard lock(mutex); + for (auto mplInfo : infos) { + if (p(*mplInfo)) { + return true; + } + } + return false; + } + template + bool FindIfNot(UnaryPredicate p) { + std::lock_guard lock(mutex); + for (auto mplInfo : infos) { + if (!p(*mplInfo)) { + return false; + } + } + return true; + } + MUID Hash(bool getSolidHash) { + std::lock_guard lock(mutex); + return GetHash(infos, getSolidHash); + } + + private: + std::mutex mutex; + InfoList infos; +}; + +class LinkerMFileInfoListCLMapT { + public: + using ClInfoMapT = std::unordered_map>; + + LinkerMFileInfoListCLMapT() { + (void)pthread_rwlock_init(&lock, nullptr); + } + ~LinkerMFileInfoListCLMapT() {} + + LinkerMFileInfoListCLMapT(const LinkerMFileInfoListCLMapT &other) = delete; + LinkerMFileInfoListCLMapT &operator=(const LinkerMFileInfoListCLMapT&) = delete; + + void Clear() { + (void)(pthread_rwlock_wrlock(&lock)); + infos.clear(); + (void)(pthread_rwlock_unlock(&lock)); + } + void Append(LinkerMFileInfo &mplInfo, int32_t pos) { + (void)(pthread_rwlock_wrlock(&lock)); + const MObject *classLoader = reinterpret_cast(mplInfo.classLoader); + auto &list = infos[classLoader]; + if (pos <= -1) { + list.push_back(&mplInfo); + } else { + (void)list.insert(list.begin() + pos, &mplInfo); + } + (void)(pthread_rwlock_unlock(&lock)); + } + template + bool FindIf(const MObject *classLoader, const UnaryFunction f) { + (void)(pthread_rwlock_rdlock(&lock)); + auto iter = infos.find(classLoader); + if (iter != infos.end()) { + auto &list = iter->second; + for (auto mplInfo : list) { + if (f(*mplInfo)) { + (void)(pthread_rwlock_unlock(&lock)); + return true; + } + } + } + (void)(pthread_rwlock_unlock(&lock)); + return false; + } + template + bool ForEach(const UnaryFunction f) { + (void)(pthread_rwlock_rdlock(&lock)); + for (auto &item : infos) { + auto &list = item.second; + for (auto mplinfo : list) { + f(*mplinfo); + } + } + (void)(pthread_rwlock_unlock(&lock)); + return true; + } + template + bool ForEach(const MObject *classLoader, const UnaryFunction f) { + (void)(pthread_rwlock_rdlock(&lock)); + auto 
iter = infos.find(classLoader); + if (iter != infos.end()) { + auto &list = iter->second; + for (auto mplinfo : list) { + f(*mplinfo); + } + } + (void)(pthread_rwlock_unlock(&lock)); + return true; + } + void FindToExport(const MObject *classLoader, LinkerMFileInfoListT &fileList) { + (void)(pthread_rwlock_rdlock(&lock)); + auto iter = infos.find(classLoader); + if (iter != infos.end()) { + fileList.Merge(iter->second); + } + (void)(pthread_rwlock_unlock(&lock)); + } + bool FindListInfo(const MObject *classLoader, size_t &size, MUID &hash) { + (void)(pthread_rwlock_rdlock(&lock)); + auto iter = infos.find(classLoader); + if (iter != infos.end()) { + auto &list = iter->second; + size = list.size(); + hash = GetHash(list, false); + (void)(pthread_rwlock_unlock(&lock)); + return true; + } + (void)(pthread_rwlock_unlock(&lock)); + return false; + } + + private: + pthread_rwlock_t lock; + ClInfoMapT infos; +}; + +class LinkerMFileInfoNameMapT { + public: + using NameInfoMapT = std::unordered_map; + + void Clear() { + std::lock_guard lock(mutex); + nameInfos.clear(); + } + void Append(LinkerMFileInfo &mplInfo) { + std::lock_guard lock(mutex); + (void)nameInfos.insert(std::make_pair(mplInfo.name, &mplInfo)); + } + bool Find(const std::string &name) { + std::lock_guard lock(mutex); + return nameInfos.find(name) != nameInfos.end(); + } + bool Find(const std::string &name, LinkerMFileInfo *&res) { + std::lock_guard lock(mutex); + auto it = nameInfos.find(name); + if (it != nameInfos.end()) { + res = it->second; + return true; + } + res = nullptr; + return false; + } + LinkerMFileInfo *FindByDecoupleHash(const MUID &hash) { + std::lock_guard lock(mutex); + auto it = nameInfos.begin(); + for (; it != nameInfos.end(); ++it) { + if (it->second->hashOfDecouple == hash) { + return it->second; + } + } + return nullptr; + } + private: + std::mutex mutex; + NameInfoMapT nameInfos; +}; + +class LinkerMFileInfoHandleMapT { + public: + using HandleInfoMapT = std::unordered_map; + + bool Find(const void *handle) { + std::lock_guard lock(mutex); + return handleInfos.find(handle) != handleInfos.end(); + } + bool Find(const void *handle, LinkerMFileInfo *&res) { + std::lock_guard lock(mutex); + auto it = handleInfos.find(handle); + if (it != handleInfos.end()) { + res = it->second; + return true; + } + res = nullptr; + return false; + } + void Clear() { + std::lock_guard lock(mutex); + handleInfos.clear(); + } + void Append(LinkerMFileInfo &mplInfo) { + std::lock_guard lock(mutex); + (void)handleInfos.insert(std::make_pair(mplInfo.handle, &mplInfo)); + } + + private: + std::mutex mutex; + HandleInfoMapT handleInfos; +}; + +struct ElfAddrCmp { + using is_transparent = void; // stl::set search will use this defing, cannot remove + const void *addr = nullptr; + AddressRangeType searchType = kTypeText; + bool operator()(const ElfAddrCmp *leftObj, const LinkerMFileInfo *rightObj) const { + switch (leftObj->searchType) { + case kTypeText: + return leftObj->addr > rightObj->tableAddr[kJavaText][kTable1stIndex]; + case kTypeClass: + return leftObj->addr > rightObj->tableAddr[kTabClassMetadata][kTable1stIndex]; + case kTypeData: + return leftObj->addr > rightObj->tableAddr[kBssSection][kTable1stIndex]; + default: + return leftObj->addr > rightObj->elfStart; + } + } + bool operator()(const LinkerMFileInfo *leftObj, const ElfAddrCmp *rightObj) const { + switch (rightObj->searchType) { + case kTypeText: + return leftObj->tableAddr[kJavaText][kTable1stIndex] > rightObj->addr; + case kTypeClass: + return 
leftObj->tableAddr[kTabClassMetadata][kTable1stIndex] > rightObj->addr; + case kTypeData: + return leftObj->tableAddr[kBssSection][kTable1stIndex] > rightObj->addr; + default: + return leftObj->elfStart > rightObj->addr; + } + } + bool operator()(const LinkerMFileInfo *leftObj, const LinkerMFileInfo *rightObj) const { + // NOTICE: 'tableAddr' must be initialized before here! Check InitLinkerMFileInfo()->GetValidityCode(). + // We expect the ELF files don't overlap with each other, so we just us JAVA TEXT to compare. + return leftObj->tableAddr[kJavaText][kTable1stIndex] > rightObj->tableAddr[kJavaText][kTable1stIndex]; + } + bool operator()(const LinkerMFileInfo *leftObj, const void *address) const { + return leftObj->tableAddr[kJavaText][kTable1stIndex] > address; + } + bool operator()(const void *address, const LinkerMFileInfo *rightObj) const { + return address > rightObj->tableAddr[kJavaText][kTable1stIndex]; + } +}; +class LinkerMFileInfoElfAddrSetT { + public: + using InfoSetT = std::set; + + void Clear() { + std::lock_guard lock(mutex); + infos.clear(); + } + + bool Append(LinkerMFileInfo &mplInfo) { + std::lock_guard lock(mutex); + return infos.insert(&mplInfo).second; + } + + LinkerMFileInfo *Search(const void *pc, AddressRangeType type) { + std::lock_guard lock(mutex); + ElfAddrCmp tmpObj; + tmpObj.addr = pc; + tmpObj.searchType = type; + const void *endAddr = nullptr; + auto iter = infos.lower_bound(&tmpObj); + if (iter == infos.end()) { + return nullptr; + } + switch (type) { + case kTypeText: + endAddr = (*iter)->tableAddr[kJavaText][kTable2ndIndex]; + break; + case kTypeClass: + endAddr = (*iter)->tableAddr[kTabClassMetadata][kTable2ndIndex]; + break; + case kTypeData: + endAddr = (*iter)->tableAddr[kBssSection][kTable2ndIndex]; + break; + default: + endAddr = (*iter)->elfEnd; + break; + } + if (pc <= endAddr) { + return *iter; + } + return nullptr; + } + + LinkerMFileInfo *SearchJavaText(const void *pc) { + if (UNLIKELY(pc == nullptr)) { + return nullptr; + } + auto iter = infos.lower_bound(pc); + if (iter != infos.end() && pc <= (*iter)->tableAddr[kJavaText][kTable2ndIndex]) { + return *iter; + } + return nullptr; + } + + private: + std::mutex mutex; + InfoSetT infos; +}; + +// Map from classloader object to loadClass method. +class MplClassLoaderLoadClassMethodMapT { + public: + // Map from classloader object to loadClass method. + using ClMethodMapT = std::unordered_map; + + MethodMeta *Find(MClass *classLoaderClass) { + std::lock_guard lock(mutex); + auto it = clMethodInfo.find(classLoaderClass); + if (it != clMethodInfo.end()) { + return it->second; + } + return nullptr; + } + void Append(MClass *classLoaderClass, MethodMeta *method) { + std::lock_guard lock(mutex); + (void)clMethodInfo.insert(std::make_pair(classLoaderClass, method)); + } + + private: + std::mutex mutex; + ClMethodMapT clMethodInfo; +}; +} // namespace maplert +#endif diff --git a/src/mrt/compiler-rt/include/linker/linker_inline.h b/src/mrt/compiler-rt/include/linker/linker_inline.h new file mode 100644 index 0000000000..4b833381d1 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_inline.h @@ -0,0 +1,264 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_INLINE_H_ +#define MAPLE_RUNTIME_MPL_LINKER_INLINE_H_ + +#include "linker_api.h" +#include "linker_hotfix.h" +#include "linker_lazy_binding.h" +#include "linker.h" + +namespace maplert { +// Do action for each maple file +template +bool LinkerInvoker::ForEachDoAction(Type obj, F action, const Data &data) { + bool ret = true; + auto handle = [&](LinkerMFileInfo &mplInfo) { + if (!(obj->*action)(mplInfo, *data)) { + ret = false; + } + }; + (void)mplInfoListCLMap.ForEach(handle); + return ret; +} +template +bool LinkerInvoker::ForEachDoAction(Type obj, F action) { + bool ret = true; + auto handle = [&](LinkerMFileInfo &mplInfo) { + if (!(obj->*action)(mplInfo)) { + ret = false; + } + }; + (void)mplInfoListCLMap.ForEach(handle); + return ret; +} +template +bool LinkerInvoker::DoAction(Type obj, F action, LinkerMFileInfo &mplInfo) { + bool ret = true; + ret = (obj->*action)(mplInfo); + return ret; +} + +// Return the position in un|define table, -1 if not found. +// pTable: Main table, maintaining all data. +// pMuidIndexTable: Index table, use it to find the index in muid table. +// start: Start index of sorted table, if index table exists, use index table start index. +// End: End index of sorted table, if index table exists, use index table end index. +// value: Value, to search. +template +int64_t Linker::BinarySearch(T1 &pTable, const T2 &pMuidIndexTable, int64_t start, int64_t end, const T3 &value) { + int64_t mid = 0; + while (start <= end) { + mid = (start + end) / 2; + // reserve index low 31 bits + uint64_t index = (&pMuidIndexTable)[mid].index & ~(1UL << 31); + T1 *tmp = &pTable + index; + if (*tmp < value) { // Less than value + start = mid + 1; + } else if (*tmp > value) { // Greater than value + end = mid - 1; + } else { // Resolved + return static_cast(index); + } + } + return -1; // Not resolved +} + +// Return the position in un|define table, -1 if not found. +// pTable: Main table, maintaining all data. +// start: Start index of sorted table, if index table exists, use index table statrt index. +// End: End index of sorted table, if index table exists, use index table end index. +// value: Value, to search. +template +int64_t Linker::BinarySearch(T1 &pTable, int64_t start, int64_t end, const T2 &value) { + int64_t mid = 0; + while (start <= end) { + mid = (start + end) / 2; + T1 *tmp = &pTable + mid; + if (*tmp < value) { // Less than value + start = mid + 1; + } else if (*tmp > value) { // Greater than value + end = mid - 1; + } else { // Resolved + return mid; + } + } + return -1; // Not resolved +} + +inline bool AddrInScope(size_t addr, size_t start, size_t end, uint32_t index, const int skipInvalidBitsNum) { + return ((!((addr >= start) && (addr < end))) || ((index >> static_cast(skipInvalidBitsNum)) & 1)); +} +// Return the position in un|define table, -1 if not found. +// +// table: Main table, maintaining all data. +// start: Start index of sorted table, if index table exists, use index table statrt index. +// end: End index of sorted table, if index table exists, use index table end index. +// value: Value, to search. +// indexTable: Index table, containing sorted indice. 
+// infoTable: Info. table, containing sorted indice. +// scopeStartAddr: Java text section start address. +// scopeEndAddr: Java text section end address. +template +int64_t LinkerInvoker::BinarySearchIndex(const LinkerMFileInfo &mplInfo, const T1 &table, size_t start, + size_t end, const size_t value, const T2 &indexTable, T3 &infTable, size_t scopeStartAddr, + size_t scopeEndAddr) { + constexpr int skipInvalidBitsNum = 31; + while (AddrInScope(reinterpret_cast(linkerutils::GetDefTableAddress(mplInfo, table, start, true)), + scopeStartAddr, scopeEndAddr, indexTable[start].index, skipInvalidBitsNum) && (start <= end)) { + ++start; // Skip invalid address towards END. Check 32 bit + } + while (AddrInScope(reinterpret_cast(linkerutils::GetDefTableAddress(mplInfo, table, end, true)), + scopeStartAddr, scopeEndAddr, indexTable[end].index, skipInvalidBitsNum) && (end >= start)) { + --end; // Skip invalid address towards START. Check 32 bit + } + while (start <= end) { + size_t mid = (start + end) / 2; + while (AddrInScope(reinterpret_cast(linkerutils::GetDefTableAddress(mplInfo, table, mid, true)), + scopeStartAddr, scopeEndAddr, indexTable[mid].index, skipInvalidBitsNum) && (mid < end)) { + ++mid; // Skip invalid address towards END. Check 32 bit + } + if (mid == end) { // Check if all addresses are invalid between MID and END + mid = (start + end) / 2; + while (AddrInScope(reinterpret_cast(linkerutils::GetDefTableAddress(mplInfo, table, mid, true)), + scopeStartAddr, scopeEndAddr, indexTable[mid].index, skipInvalidBitsNum) && (mid > start)) { + --mid; // Skip invalid address towards START. Check 32 bit + } + // Here, mid is equal to start or a position between start and end. + } + size_t index = mid; + void *addr = linkerutils::GetDefTableAddress(mplInfo, table, index, true); + size_t startAddr = reinterpret_cast(addr); + size_t endAddr = reinterpret_cast(addr) + static_cast(infTable[index].size) - 1; + if (endAddr < value) { // Less than value + start = mid + 1; + while (AddrInScope(reinterpret_cast(linkerutils::GetDefTableAddress(mplInfo, table, start, true)), + scopeStartAddr, scopeEndAddr, indexTable[mid].index, skipInvalidBitsNum) && (start < end)) { + ++start; // Skip invalid address towards END. Check 32 bit + } + } else if (startAddr > value) { // Greater than value + end = mid - 1; + while (AddrInScope(reinterpret_cast(linkerutils::GetDefTableAddress(mplInfo, table, end, true)), + scopeStartAddr, scopeEndAddr, indexTable[mid].index, skipInvalidBitsNum) && (end > start)) { + --end; // Skip invalid address towards START. Check 32 bit + } + } else { + return static_cast(index); // Resolved + } + } + return -1; // Not resolved +} + +// Look up for each maple file. +template +bool LinkerInvoker::ForEachLookUp(const MUID &muid, Type obj, F lookup, LinkerMFileInfo &mplInfo, +#ifdef LINKER_RT_CACHE + LinkerMFileInfo **resInfo, size_t &index, +#endif // LINKER_RT_CACHE + LinkerOffsetType &pAddr) { + // Find LinkerMFileInfo list by class loader. + auto handle = [&](LinkerMFileInfo &item)->bool { + LinkerVoidType addr = 0; + size_t tmp = 0; + addr = (obj->*lookup)(item, muid, tmp); + if (addr != 0) { + *reinterpret_cast(&pAddr) = addr; +#ifdef LINKER_RT_CACHE + *resInfo = &item; + index = tmp; +#endif // LINKER_RT_CACHE + return true; + } + return false; + }; + ClassLoaderListT &classLoaderList = mplInfo.clList; + // Look up the address. 
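+  // Walk the class loader list from the most recently appended loader backwards and
+  // return on the first maple file whose lookup (the 'handle' lambda above) resolves
+  // the MUID.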
+ for (auto iter = classLoaderList.rbegin(); iter != classLoaderList.rend(); ++iter) { + MObject *classLoader = reinterpret_cast(*iter); +#ifdef LINKER_RT_CACHE + // Ignore the LinkerMFileInfos not from system(Boot class loader) for lazy binding with half option. + if (mplInfo.IsFlag(kIsLazy) && !IsBootClassLoader(classLoader) && classLoader != nullptr && + !IsSystemClassLoader(classLoader)) { // Not system + continue; + } +#endif // LINKER_RT_CACHE + if (mplInfoListCLMap.FindIf(classLoader, handle)) { + return true; + } + } + return false; +} + +template +bool LinkerInvoker::ResolveSymbolLazily(LinkerMFileInfo &mplInfo, bool isSuper, const AddrSlice &dstSlice, + size_t index, bool fromUpper, bool fromUndef, TableItem &tableItem, size_t tableIndex, const void *addr) { + size_t muidSize = fromUndef ? + (isSuper ? (mplInfo.GetTblSize(kDataUndefMuid)) : (mplInfo.GetTblSize(kMethodUndefMuid))) : + (isSuper ? (mplInfo.GetTblSize(kDataDefMuid)) : (mplInfo.GetTblSize(kMethodDefMuid))); + MuidSlice muidSlice = fromUndef ? + (isSuper ? (MuidSlice(mplInfo.GetTblBegin(kDataUndefMuid), muidSize)) : + (MuidSlice(mplInfo.GetTblBegin(kMethodUndefMuid), muidSize))) : + (isSuper ? (MuidSlice(mplInfo.GetTblBegin(kDataDefMuid), muidSize)) : + (MuidSlice(mplInfo.GetTblBegin(kMethodDefMuid), muidSize))); + if (isSuper) { + addr = Get()->ResolveClassSymbol(mplInfo, dstSlice, muidSlice, index, nullptr, fromUpper, !fromUndef); + } else { + addr = Get()->ResolveMethodSymbol(mplInfo, dstSlice, muidSlice, index, !fromUndef); + } + if (addr != nullptr) { + __atomic_store_n(&tableItem.index, reinterpret_cast(addr), __ATOMIC_RELEASE); + return false; + } else { + LINKER_LOG(ERROR) << "not resolved lazily, " << fromUndef << ", " << index << ", " << " in " << + mplInfo.name << maple::endl; + return isSuper ? true : ((tableIndex != 0) ? true : false); + } +} + +template +bool LinkerInvoker::ResolveSymbolByClass(LinkerMFileInfo &mplInfo, + TableItem &tableItem, size_t index, size_t tableIndex, bool fromUndef) { + bool fromUpper = false; + void *addr = nullptr; + AddrSlice dstSlice; + bool ret = false; + size_t undefSize = mplInfo.GetTblSize(UndefName); + AddrSlice undefSlice(mplInfo.GetTblBegin(UndefName), undefSize); + size_t defSize = mplInfo.GetTblSize(DefName); + AddrSlice defSlice(mplInfo.GetTblBegin(DefName), defSize); + if (fromUndef && index < undefSize && !undefSlice.Empty()) { + if (mplInfo.IsFlag(kIsLazy) && GetAddrBindingState(undefSlice, index) != kBindingStateResolved) { + dstSlice = undefSlice; + ret = ResolveSymbolLazily( + mplInfo, isSuper, dstSlice, index, fromUpper, fromUndef, tableItem, tableIndex, addr); + } else { + __atomic_store_n(&tableItem.index, reinterpret_cast(undefSlice[index].Address()), __ATOMIC_RELEASE); + } + } else if (!fromUndef && index < defSize && !defSlice.Empty()) { + if (mplInfo.IsFlag(kIsLazy)) { + dstSlice = defSlice; + ret = ResolveSymbolLazily( + mplInfo, isSuper, dstSlice, index, fromUpper, fromUndef, tableItem, tableIndex, addr); + } else { + __atomic_store_n(&tableItem.index, reinterpret_cast(isSuper ? linkerutils::GetDefTableAddress( + mplInfo, defSlice, index, false) : defSlice[index].Address()), __ATOMIC_RELEASE); + } + } else { + return isSuper ? true : ((tableIndex != 0) ? 
true : false); + } + return ret; +} +} // namespace maplert +#endif // MAPLE_RUNTIME_MPL_LINKER_INLINE_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_lazy_binding.h b/src/mrt/compiler-rt/include/linker/linker_lazy_binding.h new file mode 100644 index 0000000000..93ba6c86c6 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_lazy_binding.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_LAZY_H_ +#define MAPLE_RUNTIME_MPL_LINKER_LAZY_H_ + +#include "linker_method_builder.h" + +#ifdef __cplusplus +namespace maplert { +class LazyBinding : public FeatureModel { + public: + static FeatureName featureName; + LazyBinding(LinkerInvoker &invoker, MethodBuilder &build) : pInvoker(&invoker), pMethodBuilder(&build) {}; + ~LazyBinding() { + pInvoker = nullptr; + pMethodBuilder = nullptr; + } + void *SetClassInDefAddrTable(size_t index, const MClass *klass); + inline void *SetAddrInAddrTable(AddrSlice &addrSlice, size_t index, const MClass *addr); + BindingState GetAddrBindingState(LinkerVoidType addr); + BindingState GetAddrBindingState(const AddrSlice &addrSlice, size_t index, bool isAtomic = true); + BindingState GetAddrBindingState(const LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const void *offset); + void LinkClass(MClass *klass); + MClass *LinkDataUndefSuperClassAndInterfaces(LinkerMFileInfo &mplInfo, MObject *candidateClassLoader, size_t index, + AddrSlice dataUndefSlice, MClass *superClassesItem, IndexSlice superTableSlice, uint32_t i, bool fromUndef); + MClass *LinkDataDefSuperClassAndInterfaces(LinkerMFileInfo &mplInfo, size_t index, AddrSlice dataDefSlice, + MClass *superClassesItem, IndexSlice superTableSlice, uint32_t i, bool fromUndef); + void LinkSuperClassAndInterfaces( + const MClass *klass, MObject *candidateClassLoader, bool recursive = true, bool forCold = false); + + void GetAddrAndMuidSlice(LinkerMFileInfo &mplInfo, + BindingState state, AddrSlice &addrSlice, MuidSlice &muidSlice, const void *offset); + void *ResolveClassSymbolClassification( + LinkerMFileInfo &mplInfo, BindingState state, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + const size_t &index, MObject *candidateClassLoader, bool &fromUpper, const void *pc, const void *offset); + bool HandleSymbol(const void *offset, const void *pc, BindingState state, bool fromSignal); + bool HandleSymbolForDecoupling(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, size_t index); + bool HandleSymbol( + LinkerMFileInfo &mplInfo, const void *offset, const void *pc, BindingState state, bool fromSignal); + + size_t GetAddrIndex(const LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const void *offset); + void *SearchInUndef(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, MObject *candidateClassLoader, bool &fromUpper, bool isDef, std::string className); + void *ResolveClassSymbolForAPK(LinkerMFileInfo &mplInfo, size_t index, bool &fromUpper, bool isDef); + 
void *ResolveClassSymbol(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, MObject *candidateClassLoader, bool &fromUpper, bool isDef, bool clinit = false); + void *ResolveClassSymbolInternal(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, MObject *classLoader, bool &fromUpper, bool isDef, const std::string &className); + void *ResolveDataSymbol(const AddrSlice &addrSlice, const MuidSlice &muidSlice, size_t index, bool isDef); + void *ResolveDataSymbol(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, + const MuidSlice &muidSlice, size_t index, bool isDef); + void *ResolveMethodSymbol(const AddrSlice &addrSlice, const MuidSlice &muidSlice, size_t index, bool isDef); + void *ResolveMethodSymbol(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, bool isDef, bool forClass = false); + MClass *GetUnresolvedClass(MClass *klass, bool &fromUpper, bool isDef); + void ResolveMethodsClassSymbol(const LinkerMFileInfo &mplInfo, bool isDef, const void *addr); + void LinkStaticSymbol(LinkerMFileInfo &mplInfo, const MClass *target); + void LinkStaticMethod(LinkerMFileInfo &mplInfo, const MClass *target); + void LinkStaticField(LinkerMFileInfo &mplInfo, const MClass *target); + void LinkStaticMethodSlow(MethodMeta &meta, std::string name, LinkerMFileInfo &mplInfo, + LinkerMFileInfo &oldLinkerMFileInfo, AddrSlice &srcTable, AddrSlice &dstTable); + void LinkStaticMethodFast(MethodMeta &meta, const std::string name, size_t index, LinkerMFileInfo &mplInfo, + const LinkerMFileInfo &oldLinkerMFileInfo, AddrSlice &srcTable, AddrSlice &dstTable); + void LinkStaticFieldSlow(FieldMeta &meta, std::string name, LinkerMFileInfo &mplInfo, + LinkerMFileInfo &oldLinkerMFileInfo, AddrSlice &srcTable, AddrSlice &dstTable); + void LinkStaticFieldFast(FieldMeta &meta, const std::string name, size_t index, LinkerMFileInfo &mplInfo, + const LinkerMFileInfo &oldLinkerMFileInfo, AddrSlice &srcTable, AddrSlice &dstTable); + void LinkClassInternal(MClass *klass, MObject *candidateClassLoader); + void LinkClassInternal(LinkerMFileInfo &mplInfo, MClass *klass, MObject *candidateClassLoader, bool first = true); + void LinkSuperClassAndInterfaces(LinkerMFileInfo &mplInfo, const MClass *klass, MObject *candidateClassLoader, + bool recursive = true, bool forCold = false); + void LinkMethods(LinkerMFileInfo &mplInfo, MClass *klass, bool first, MetaRef index); + inline void LinkFields(const LinkerMFileInfo &mplInfo, MClass *klass); + void *GetClassMetadata(LinkerMFileInfo &mplInfo, size_t classIndex); + void *LookUpDataSymbolAddressLazily(const MObject *classLoader, const MUID &muid, + LinkerMFileInfo **outLinkerMFileInfo = nullptr, bool ignoreClassLoader = false); + void *LookUpMethodSymbolAddressLazily(const MObject *classLoader, const MUID &muid, bool ignoreClassLoader = false); + inline std::string ResolveClassMethod(MClass *klass); + + void PreLinkLazyMethod(LinkerMFileInfo &mplInfo); +#if defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + bool LoadLazyCache(LinkerMFileInfo &mplInfo, std::string &cachingIndex); + bool LoadLazyCacheInternal( + LinkerMFileInfo &mplInfo, std::string &path, std::string &cachingIndex, + char *buf, std::streamsize &index, std::streamsize &byteCount); + bool SaveLazyCache(LinkerMFileInfo &mplInfo, const std::string &cachingIndex); + bool SaveLazyCacheInternal(std::string &path, int &fd, std::vector &bufVector); +#endif // defined(LINKER_RT_CACHE) && 
defined(LINKER_RT_LAZY_CACHE) + private: + LinkerInvoker *pInvoker = nullptr; + MethodBuilder *pMethodBuilder = nullptr; +}; +extern "C" { +#endif // __cplusplus +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif // __cplusplus +#endif // MAPLE_RUNTIME_MPL_LINKER_LAZY_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_method_builder.h b/src/mrt/compiler-rt/include/linker/linker_method_builder.h new file mode 100644 index 0000000000..e78e14c06e --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_method_builder.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_METHOD_H_ +#define MAPLE_RUNTIME_MPL_LINKER_METHOD_H_ + +#include "linker_model.h" +#include "methodmeta.h" + +namespace maplert { +constexpr uint16_t kAdjacencyInvalidValue = static_cast(-1); + +struct MethodItem { + LinkerVoidType methodMeta; // MethodMetaBase* + uint32_t hash; +#ifdef LINKER_RT_LAZY_CACHE + std::vector depth; + uint16_t index; +#endif // LINKER_RT_LAZY_CACHE + bool isInterface; + + public: + void SetMethod(const MethodMetaBase &method) { + methodMeta = static_cast(reinterpret_cast(&method)); + } + + MethodMetaBase *GetMethod() const { + return reinterpret_cast(static_cast(methodMeta)); + } +}; + +struct AdjItem { + uint16_t first; + uint16_t next; + LinkerVoidType methodMeta; // MethodMetaBase* + + public: + AdjItem(uint16_t firstVal, uint16_t nextVal, uint16_t methodVal) + : first(firstVal), next(nextVal), methodMeta(methodVal) {}; + AdjItem() : first(kAdjacencyInvalidValue), next(kAdjacencyInvalidValue), methodMeta(0) {}; + + void SetMethod(const MethodMetaBase &method) { + methodMeta = static_cast(reinterpret_cast(&method)); + } + + MethodMetaBase *GetMethod() const { + return reinterpret_cast(static_cast(methodMeta)); + } +}; + +// Map from class object to its vtab index mapping. 
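Both MethodItem and AdjItem above park a MethodMetaBase pointer in an integer
slot; with the template arguments written out, the round-trip they perform is
roughly the following (a sketch only; LinkerVoidType is assumed to be a
pointer-wide unsigned integer defined in linker_common.h, outside this hunk):

    // Pack a method pointer into the integer slot, and recover it later.
    void SetMethod(const MethodMetaBase &method) {
      methodMeta = static_cast<LinkerVoidType>(reinterpret_cast<uintptr_t>(&method));
    }
    MethodMetaBase *GetMethod() const {
      return reinterpret_cast<MethodMetaBase*>(static_cast<uintptr_t>(methodMeta));
    }

The alias declared next is the per-class container that carries this vtab index
mapping.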
+using MplLazyBindingVTableMapT = std::vector; +class MethodBuilder : public FeatureModel { + public: + static FeatureName featureName; + explicit MethodBuilder(LinkerInvoker &invoker) : pInvoker(&invoker) {}; + ~MethodBuilder() { + pInvoker = nullptr; + } + int32_t UpdateOffsetTableByVTable( + const MClass *klass, MplLazyBindingVTableMapT &adjMethods, + LinkerVTableOffsetItem &vtableOffsetItem, LinkerOffsetValItem &offsetTableItem); + int32_t UpdateOffsetTableByVTable( + const MClass *klass, MplLazyBindingVTableMapT &adjMethods, + LinkerVTableOffsetItem &vtableOffsetItem, LinkerOffsetValItemLazyLoad &offsetTableItem); +#ifdef LINKER_RT_LAZY_CACHE + void BuildMethodByCachingIndex(MClass *klass, const std::string &cachingIndex); +#endif // LINKER_RT_LAZY_CACHE + std::string BuildMethod(MClass *klass); + MplLazyBindingVTableMapT GetMethodVTableMap(MClass *klass); + + private: + unsigned int GetHashIndex(const char *name); + inline uint16_t GetMethodMetaHash(const MethodMetaBase &method); + std::string GetMethodFullName(const MethodMetaBase &method); + inline std::string GetMethodSignature(const MethodMetaBase &method) const; + bool EqualMethod(const MethodMetaBase &method1, const MethodMetaBase &method2); + bool EqualMethod(const MethodMetaBase *method, const char *methodName, const char *methodSignature); + bool CanAccess(const MethodMetaBase &baseMethod, const MethodMetaBase ¤tMethod); + bool CheckMethodMeta(const MClass *klass, std::vector> &methods); + bool CheckMethodMetaNoSort(const MClass *klass, std::vector> &methods); + void OverrideVTabMethod(std::vector &virtualMethods, uint16_t vtabIndex, + const MethodMetaBase &method, const std::vector &depth, uint16_t index, bool isInterface); + void AppendVTabMethod(std::vector &virtualMethods, int16_t vtabIndex, const MethodMetaBase &method, + const std::vector &depth, uint16_t index, bool isInterface, uint16_t hash); + void CollectClassMethodsFast(const MClass *klass, const std::vector &depth, + std::vector &virtualMethods, const std::vector> &methods); + void CollectClassMethodsSlow(const MClass *klass, const std::vector &depth, + std::vector &virtualMethods, const MplLazyBindingVTableMapT &adjMethods, + const std::vector> &methods); + void CollectClassMethodsRecursive(MClass *klass, bool &isDecouple, std::set &checkedClasses, + std::vector &depth, uint16_t superNum, + std::vector &virtualMethods, MplLazyBindingVTableMapT &adjMethods); + void CollectClassMethods(const MClass *klass, const bool &isDecouple, const std::vector &depth, + std::vector &virtualMethods, MplLazyBindingVTableMapT &adjMethods); + void BuildAdjMethodList(std::vector &virtualMethods, MplLazyBindingVTableMapT &adjMethods); + void GenerateAndAttachClassVTable(MClass *klass, std::vector &virtualMethods); + uint32_t ProcessITableFirstTable(std::vector &virtualMethods, + std::vector &firstITabVector, std::vector &firstITabConflictVector, + uint32_t &maxFirstITabIndex); + size_t ProcessITableSecondTable(std::vector &firstITabConflictVector, + std::map &secondITableMap, std::vector &secondITabConflictVector); + inline void GenerateITableFirstTable(LinkerVoidType &itab, const std::vector &firstITabVector, + const std::vector &firstITabConflictVector, uint32_t maxFirstITabIndex) const; + void GenerateITableSecondTable(LinkerVoidType &itab, std::map &secondITableMap, + std::vector &secondITabConflictVector, size_t sizeOfITabNoNames); + void GenerateAndAttachClassITable(MClass *klass, std::vector &virtualMethods); + inline void BuildMethodVTableITable(MClass *klass, std::vector 
&virtualMethods); + inline bool IsValidMethod(const MethodMetaBase &method) const; + int32_t GetVTableItemIndex( + const MClass *klass, MplLazyBindingVTableMapT &adjMethods, const char *methodName, const char *methodSignature); +#ifdef LINKER_RT_LAZY_CACHE + MethodMetaBase *GetMethodByIndex(MClass *klass, uint16_t index); + std::string GetMethodCachingIndexString(MClass *klass, std::vector &virtualMethods); +#endif // LINKER_RT_LAZY_CACHE + LinkerInvoker *pInvoker = nullptr; +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_MPL_LINKER_METHOD_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_model.h b/src/mrt/compiler-rt/include/linker/linker_model.h new file mode 100644 index 0000000000..26258d994b --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_model.h @@ -0,0 +1,274 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MPL_LINKER_MODEL_H_ +#define MAPLE_RUNTIME_MPL_LINKER_MODEL_H_ + +#include "linker_info.h" +#include "linker_api.h" +#include "loader_api.h" +#include "mclass_inline.h" + +#include +#include +#include +namespace maplert { +enum FeatureName { + kFLinker, +#ifdef LINKER_DECOUPLE + kFDecouple, +#endif + kFLinkerCache, + kFDebug, + kFHotfix, + kFLazyBinding, + kFMethodBuilder +}; +class FeatureModel { + public: + FeatureModel() {} + virtual ~FeatureModel() {} + virtual std::string GetName() { + return ""; + } +}; +class LinkerInvoker : public LinkerAPI { + public: + friend class FeatureModel; + struct CodeRange { + uintptr_t start; + uintptr_t end; + LinkerMFileInfo *mplInfo; + }; + struct UpdateNode { + MUID symbolId; + MClass *klass; + uintptr_t oldAddr; + uintptr_t newAddr; + UpdateNode(MClass *cls, const MUID id, uintptr_t oldA, uintptr_t newA) { + klass = cls; + symbolId = id; + oldAddr = oldA; + newAddr = newA; + } + }; + struct LinkerMFileCache { + MClass *clsArray[4] = { nullptr }; // CacheSize is 4 + LinkerMFileInfo *mpArray[4] = { nullptr }; // CacheSize is 4 + uint8_t idx = 0; + }; + // API Interfaces Begin + std::string GetAppInfo() { + return maplert::linkerutils::GetAppInfo(); + } + void SetAppInfo(const char *dataPath, int64_t versionCode) { + maplert::linkerutils::SetAppInfo(dataPath, versionCode); + } + AppLoadState GetAppLoadState() { + return maplert::linkerutils::GetAppLoadState(); + } + std::string GetAppPackageName() { + return maplert::linkerutils::GetAppPackageName(); + } + void *GetSymbolAddr(void *handle, const char *symbol, bool isFunction) { + return maplert::linkerutils::GetSymbolAddr(handle, symbol, isFunction); + } + void GetMplVersion(const LinkerMFileInfo &mplInfo, MapleVersionT &item) { + return maplert::linkerutils::GetMplVersion(mplInfo, item); + } + void GetMplCompilerStatus(const LinkerMFileInfo &mplInfo, uint32_t &status) { + return maplert::linkerutils::GetMplCompilerStatus(mplInfo, status); + } + std::string GetMethodSymbolByOffset(const LinkerInfTableItem &pTable) { + return maplert::linkerutils::GetMethodSymbolByOffset(pTable); + } + bool IsJavaText(const void 
*addr) { + return GetLinkerMFileInfo(kFromPC, addr) != nullptr; + } + LinkerMFileInfo *GetLinkerMFileInfoByName(const std::string &name) { + return GetLinkerMFileInfo(kFromName, name.c_str()); + } + LinkerMFileInfo *GetLinkerMFileInfoByAddress(const void *addr, bool isLazyBinding) { + return GetLinkerMFileInfo(kFromAddr, addr, isLazyBinding); + } + std::string GetMFileNameByPC(const void *pc, bool isLazyBinding) { + return GetMFileName(kFromPC, pc, isLazyBinding); + } + std::string GetMFileNameByClassMetadata(const void *addr, bool isLazyBinding) { + return GetMFileName(kFromMeta, addr, isLazyBinding); + } + LinkerMFileInfo *GetLinkerMFileInfoByClassMetadata(const void *addr, bool isClass) { + bool isLazyBinding = false; + if (isClass) { + isLazyBinding = (reinterpret_cast(addr))->IsLazyBinding(); + } + return GetLinkerMFileInfo(kFromMeta, addr, isLazyBinding); + } + LinkerInvoker(); + virtual ~LinkerInvoker(); + virtual void PreInit(); + virtual void PostInit(); + virtual void UnInit(); +#ifdef LINKER_RT_CACHE + void SetCachePath(const char *path); + bool GetCachePath(LinkerMFileInfo &mplInfo, std::string &path, LinkerCacheType cacheType); +#endif + bool IsFrontPatchMode(const std::string &path); + bool LinkClassLazily(jclass klass); + bool ReGenGctib4Class(jclass classInfo); + uint64_t DumpMetadataSectionSize(std::ostream &os, void *handle, const std::string sectionName); + void DumpAllMplSectionInfo(std::ostream &os); + void DumpAllMplFuncProfile(std::unordered_map> &funcProfileRaw); + void DumpAllMplFuncIRProfile(std::unordered_map &funcProfileRaw); + void DumpBBProfileInfo(std::ostream &os); + void ClearAllMplFuncProfile(); + void ReleaseBootPhaseMemory(bool isZygote, bool isSystemServer); + bool CheckLinkerMFileInfoElfBase(LinkerMFileInfo &mplInfo); + void ClearLinkerMFileInfo(); + bool ContainLinkerMFileInfo(const std::string &name); + bool ContainLinkerMFileInfo(const void *handle); + bool GetJavaTextInfo(const void *addr, LinkerMFileInfo **mplInfo, LinkerLocInfo &info, bool getName); + bool UpdateMethodSymbolAddress(jmethodID method, uintptr_t addr); + jclass GetSuperClass(ClassMetadata **addr); + void GetStrTab(jclass dCl, StrTab &strTab); + char *GetCString(jclass dCl, uint32_t index); + void DestroyMFileCache(); + bool LocateAddress(const void *addr, LinkerLocInfo &info, bool getName); + void ResolveColdClassSymbol(jclass classInfo); + jclass InvokeClassLoaderLoadClass(jobject classLoader, const std::string &className); + void *LookUpSymbolAddress(const MUID &muid); + MUID GetMUID(const std::string symbol, bool forSystem); + void CreateMplInfo(ObjFile &objFile, jobject classLoader); + bool Add(ObjFile &objFile, jobject classLoader); + bool Resolve(); + bool Resolve(LinkerMFileInfo &mplInfo, bool decouple); // Resolve the single maple file. +#ifdef LINKER_DECOUPLE + bool HandleDecouple(std::vector &mplList); +#endif + void FinishLink(jobject classLoader); + bool Link(); + // Resolve the single maple file. MUST invoked Add() before. 
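A hedged usage sketch of the PC-lookup entry points declared above (the helper
function, its name, and the calling context are illustrative assumptions;
LinkerLocInfo is defined elsewhere in the runtime and is assumed here to be a
plain default-constructible struct):

    // Given a PC sampled from a Java frame, find the owning maple file and,
    // optionally, the symbol information covering that address.
    void DescribePC(maplert::LinkerInvoker &invoker, const void *pc) {
      maplert::LinkerMFileInfo *mplInfo = invoker.GetLinkerMFileInfoByAddress(pc, false);
      if (mplInfo == nullptr) {
        return;  // not maple-compiled java text
      }
      maplert::LinkerLocInfo loc;
      if (invoker.LocateAddress(pc, loc, true)) {
        // loc now describes the enclosing method; GetMFileNameByPC(pc, false)
        // would additionally give the owning .so name.
      }
    }

The Link overload that follows resolves a single maple file; as the comment
above notes, Add() must have been invoked for that file first.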
+ bool Link(LinkerMFileInfo &mplInfo, bool decouple); + void SetLoadState(LoadStateType state); + void SetLinkerMFileInfoClassLoader(const ObjFile &objFile, jobject classLoader); + void SetClassLoaderParent(jobject classLoader, jobject newParent); + bool InsertClassesFront(ObjFile &objFile, jobject classLoader); + void SetPatchPath(std::string &path, int32_t mode); + // API Interfaces END +#ifdef LINKER_DECOUPLE + bool IsClassComplete(const MClass &classInfo); +#endif + BindingState GetAddrBindingState(LinkerVoidType addr); + BindingState GetAddrBindingState(const AddrSlice &addrSlice, size_t index, bool isAtomic = true); + void DumpStackInfoInLog(); + void *GetMplOffsetValue(LinkerMFileInfo &mplInfo); + MUID GetValidityCode(LinkerMFileInfo &mplInfo) const; + MUID GetValidityCodeForDecouple(LinkerMFileInfo &mplInfo) const; + void *GetMethodSymbolAddress(LinkerMFileInfo &mplInfo, size_t index); + void *GetDataSymbolAddress(LinkerMFileInfo &mplInfo, size_t index); + void GetClassLoaderList(const LinkerMFileInfo &mplInfo, ClassLoaderListT &out, bool isNewList = true); + void ResetClassLoaderList(const MObject *classLoader); + MObject *GetClassLoaderByAddress(LinkerMFileInfo &mplInfo, const void *addr); + void GetLinkerMFileInfos(LinkerMFileInfo &mplInfo, LinkerMFileInfoListT &fileList, bool isNewList = true); + void *GetClassMetadataLazily(LinkerMFileInfo &mplInfo, size_t classIndex); + void *GetClassMetadataLazily(const void *offsetTable, size_t classIndex); + LinkerMFileInfo *SearchAddress(const void *pc, AddressRangeType type = kTypeWhole, bool isLazyBinding = false); + + bool LocateAddress(const void *handle, const void *addr, LinkerLocInfo &info, bool getName); + bool LocateAddress(LinkerMFileInfo &mplInfo, const void *addr, LinkerLocInfo &info, bool getName); + bool DoLocateAddress(const LinkerMFileInfo &mplInfo, LinkerLocInfo &info, const void *addr, + const AddrSlice &pTable, const InfTableSlice &infTableSlice, int64_t pos, bool getName); + LinkerVoidType LookUpDataSymbolAddress(LinkerMFileInfo &mplInfo, const MUID &muid, size_t &index); + LinkerVoidType LookUpMethodSymbolAddress(LinkerMFileInfo &mplInfo, const MUID &muid, size_t &index); + void ResolveVTableSymbolByClass(LinkerMFileInfo &mplInfo, const MClass *classInfo, bool flag); + void ResolveITableSymbolByClass(LinkerMFileInfo &mplInfo, const MClass *classInfo); + void ResolveSuperClassSymbolByClass(LinkerMFileInfo &mplInfo, const MClass *classInfo); + bool UpdateMethodSymbolAddressDef(const MClass *klass, const MUID &symbolId, const uintptr_t newAddr); + bool UpdateMethodSymbolAddressUndef(LinkerMFileInfo &mplInfo, const UpdateNode &node); + bool UpdateMethodSymbolAddressDecouple(LinkerMFileInfo &mplInfo, const UpdateNode &node); + LinkerMFileInfo *GetLinkerMFileInfo(MFileInfoSource source, const void *key, bool isLazyBinding = false); + std::string GetMFileName(MFileInfoSource source, const void *key, bool isLazyBinding = false); + void InitArrayCache(uintptr_t pc, uintptr_t addr); + template + bool ForEachDoAction(Type obj, F action); + template + bool ForEachDoAction(Type obj, F action, const Data &data); + template + bool DoAction(Type obj, F action, LinkerMFileInfo &mplInfo); + template + bool ForEachLookUp(const MUID &muid, Type obj, F lookup, LinkerMFileInfo &mplInfo, +#ifdef LINKER_RT_CACHE + LinkerMFileInfo **resInfo, size_t &index, +#endif // LINKER_RT_CACHE + LinkerOffsetType &pAddr); + template + int64_t BinarySearchIndex(const LinkerMFileInfo &mplInfo, const T1 &pTable, size_t start, + size_t end, const size_t 
value, const T2 &pIndexTable, T3 &pInfTable, size_t scopeStartAddr, + size_t scopeEndAddr); + template + bool ResolveSymbolLazily(LinkerMFileInfo &mplInfo, bool isSuper, const AddrSlice &dstSlice, size_t index, + bool fromUpper, bool fromUndef, TableItem &tableItem, size_t tableIndex, const void *addr); + template + bool ResolveSymbolByClass(LinkerMFileInfo &mplInfo, TableItem &tableItem, size_t index, size_t tableIndex, + bool fromUndef); + template + T *Get() { + return reinterpret_cast(features[T::featureName]); + } + inline const LoaderAPI *GetLoader() const { + return pLoader; + } + inline bool IsSystemClassLoader(const MObject *classLoader) { + return pLoader->GetSystemClassLoader() == reinterpret_cast(const_cast(classLoader)); + } + inline bool IsBootClassLoader(const MObject *classLoader) { + return pLoader->IsBootClassLoader(reinterpret_cast(const_cast(classLoader))); + } + // Reset class loader hierarchy list + inline void ResetMplInfoClList(LinkerMFileInfo &mplInfo) { + mplInfo.clList.clear(); + } + inline int32_t GetMultiSoPendingCount() const { + return multiSoPendingCount; + } + inline void AddMultiSoPendingCount() { + ++multiSoPendingCount; + } + inline void SubMultiSoPendingCount() { + --multiSoPendingCount; + } + inline uint32_t AddrToUint32(const void *addr) { + if (addr == nullptr) { + LINKER_LOG(FATAL) << "AddrToUint32: addr is nullptr!" << maple::endl; + } + return static_cast(reinterpret_cast(addr)); + } + LinkerMFileInfoListT mplInfoList; + LinkerMFileInfoListCLMapT mplInfoListCLMap; + LinkerMFileInfoNameMapT mplInfoNameMap; + LinkerMFileInfoHandleMapT mplInfoHandleMap; + LinkerMFileInfoElfAddrSetT mplInfoElfAddrSet; + // Compare the lazy binding ELF file only. + LinkerMFileInfoElfAddrSetT mplInfoElfAddrLazyBindingSet; + MplClassLoaderLoadClassMethodMapT mplClassLoaderLoadClassMethodMap; + protected: + std::mutex mLinkLock; + const uint32_t kItabHashSize = 23; + LoaderAPI *pLoader = nullptr; + std::map features; + int32_t multiSoPendingCount = 0; + static constexpr size_t kLeastReleaseMemoryByteSize = 100 * 1024; // 100KiB (1KiB is 1024B) +}; +} // namespace maplert +#endif // MAPLE_RUNTIME_MPL_LINKER_MODEL_H_ diff --git a/src/mrt/compiler-rt/include/linker/linker_utils.h b/src/mrt/compiler-rt/include/linker/linker_utils.h new file mode 100644 index 0000000000..0bc353a689 --- /dev/null +++ b/src/mrt/compiler-rt/include/linker/linker_utils.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_LINKER_UTILS_H_ +#define MAPLE_LINKER_UTILS_H_ + +#include +#include +#include + +#include +#include +#include + +#include "linker_common.h" + +namespace maplert { +namespace linkerutils { +void ReleaseMemory(void *start, void *end); + +void *GetSymbolAddr(void *handle, const char *symbol, bool isFunction); + +int32_t GetMaxVersion(); +int32_t GetMinVersion(); +void GetMplVersion(const LinkerMFileInfo &mplInfo, MapleVersionT &item); +void GetMplCompilerStatus(const LinkerMFileInfo &mplInfo, uint32_t &status); +std::string GetAppPackageName(); +const std::string &GetAppInfo(); +void ClearAppInfo(); +void SetAppInfo(const char *dataPath, int64_t versionCode); +const LoadStateType &GetLoadState(); +void SetLoadState(const LoadStateType state); +const AppLoadState &GetAppLoadState(); +void SetAppLoadState(const AppLoadState state); +const std::string &GetAppBaseStr(); +void SetAppBaseStr(const std::string &str); + +bool NeedRelocateSymbol(const std::string &name); +void *GetDefTableAddress(const LinkerMFileInfo &mplInfo, const AddrSlice &pTable, size_t index, bool forMethod); +std::string GetMethodSymbolByOffset(const LinkerInfTableItem &pTable); +const char *GetMuidDefFuncTableFuncname(const LinkerInfTableItem &pTable); +std::string GetMuidDefFuncTableSigname(const LinkerInfTableItem &pTable); +const char *GetMuidDefFuncTableClassname(const LinkerInfTableItem &pTable); +// Return N microseconds. +// isEnd: false starts counting, true ends counting. +inline long CountTimeConsuming(bool isEnd) { + static struct timespec timeSpec[2]; // 2 means startTimeSpec and endTimeSpec + + if (!isEnd) { + clock_gettime(CLOCK_MONOTONIC, &timeSpec[0]); + return 0; + } else { + clock_gettime(CLOCK_MONOTONIC, &timeSpec[1]); + // 1000000 means MILLION, sec to microsec, 1000 means THOUAND, nsec to micromsec + return (timeSpec[1].tv_sec - timeSpec[0].tv_sec) * 1000000LL + (timeSpec[1].tv_nsec - timeSpec[0].tv_nsec) / 1000LL; + } +} + +inline void DumpMUID(const MUID &muid) { + for (unsigned int i = 0; i < kMuidLength; ++i) { + fprintf(stderr, "\t%02x", muid.data.bytes[i]); + } +} + +inline void GenerateMUID(const void *data, unsigned long size, MUID &muid) { + auto *srcStr = reinterpret_cast(data); + GetMUIDHash(*srcStr, size, muid); +} +inline void GenerateMUID(const char *const str, MUID &muid) { + GenerateMUID(str, strlen(str), muid); +} + +#ifdef LINKER_RT_CACHE +const std::string &GetLinkerCacheTypeStr(LinkerCacheType index); +bool FileExists(const char *name); +bool FolderExists(const char *name); +bool PrepareFolder(const std::string &dir); +bool GetInstallCachePath(LinkerMFileInfo &mplInfo, std::string &path, LinkerCacheType cacheType); +#endif +} +} +#endif // MAPLE_LINKER_UTILS_H_ diff --git a/src/mrt/compiler-rt/include/loader/hash_pool.h b/src/mrt/compiler-rt/include/loader/hash_pool.h new file mode 100644 index 0000000000..8e204c5101 --- /dev/null +++ b/src/mrt/compiler-rt/include/loader/hash_pool.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef __MAPLE_LOADER_HASH_POOL__ +#define __MAPLE_LOADER_HASH_POOL__ + +#include + +#include "gc_roots.h" +#include "mrt_object.h" +#include "mrt_reflection_class.h" +#include "allocator/page_allocator.h" +#include "mclass_inline.h" +#include +namespace maplert { +#ifndef __ANDROID__ +using ClassPtr = uintptr_t; +#else +using ClassPtr = uint32_t; +#endif + +#if defined(__arm__) +using BucketType = uint32_t; +#else +using BucketType = uint64_t; +#endif +struct ConflictClass { + std::vector conflictData; +}; +template +using HashPoolVector = std::vector>; +using NameCompare = std::function; +enum MClassFlag { + kIsConflict = 1ul, // low 1 bit as conflict flag + kIsDexClassOffset = 2ul, // low 2 bit as dexClassDef flag +}; + +class MClassHashPool { + public: + MClassHashPool(); + ~MClassHashPool(); + + void Create(uint32_t bucketCount); + void Destroy(); + size_t CalcMemoryCost() const; + double GetHashConflictRate() const; + double GetClassConflictRate() const; + void InitClass(uint32_t hashIndex); + void Collect(); + void Set(uint32_t hashIndex, ClassPtr classInfo); + void VisitClasses(const maple::rootObjectFunc &func); + MClass *Get(const std::string &name) const; + ClassPtr Get(const std::string &name, NameCompare cmp) const; + inline void Set(const std::string &name, ClassPtr classInfo) { + uint32_t hashIndex = GetHashIndex32(name); + Set(hashIndex, classInfo); + } + inline void Set(uint32_t hashIndex, const MClass &klass) { + Set(hashIndex, static_cast(reinterpret_cast(&klass))); + } + inline uint16_t GetHashIndex16(const std::string &name) const { + // 211 is a proper prime, which can reduce the conflict rate. + return static_cast(0xFFFF & BKDRHash(name, 211)); + } + inline uint32_t GetHashIndex32(const std::string &name) const { + // 211 is a proper prime, which can reduce the conflict rate. + return BKDRHash(name, 211); + } + protected: + inline uint32_t BitCount(uint32_t n) const; + inline uint32_t BitCount(uint64_t n) const; + size_t FindNextPrime(size_t nr) const; + inline uint32_t NextPrime(size_t n) const; + inline uint32_t BKDRHash(const std::string &key, uint32_t seed) const { + size_t len = key.length(); + size_t idx = 0; + uint32_t hash = 0; + while (idx < len) { + hash = hash * seed + static_cast(key[idx++]); + } + return hash; + } + + private: + static constexpr uint32_t kBucketBitNum = sizeof(BucketType) << 3; // calculate the number of bits + static constexpr uint32_t kMaxExtended = 20; + static constexpr ClassPtr kMClassAddressMask = 0x3; + uint32_t mBucketCount = 0; + uint32_t mClassCount = 0; + uint32_t mConflictBucketCount = 0; + uint32_t mFillBucketCount = 0; + HashPoolVector bitCounts; + HashPoolVector bitBuckets; + HashPoolVector bucketData; // 2-x : Ptr to Data, 1: DexClassOffset flag, 0: conflict Flag +}; +} // namespace maplert +#endif // __MAPLE_LOADER_HASH_POOL__ diff --git a/src/mrt/compiler-rt/include/loader/loader_utils.h b/src/mrt/compiler-rt/include/loader/loader_utils.h new file mode 100644 index 0000000000..86a460655e --- /dev/null +++ b/src/mrt/compiler-rt/include/loader/loader_utils.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __MAPLE_LOADER_UTILS__ +#define __MAPLE_LOADER_UTILS__ + +#include + +#include "loader_api.h" +#include "version.h" +#include "base/macros.h" + +namespace maplert { +namespace loaderutils { + std::string GetNicePath(const std::string &fullPath); + std::string Dex2MplPath(const std::string &filePath); + bool CheckVersion(const ObjFile &mplFile, const maplert::LinkerMFileInfo &mplInfo); + bool CheckCompilerStatus(const ObjFile &mplFile, const maplert::LinkerMFileInfo &mplInfo); +} +} // end namespace maple +#endif // endif __MAPLE_LOADER_UTILS__ diff --git a/src/mrt/compiler-rt/include/loader/object_loader.h b/src/mrt/compiler-rt/include/loader/object_loader.h new file mode 100644 index 0000000000..b0fda3590f --- /dev/null +++ b/src/mrt/compiler-rt/include/loader/object_loader.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __MAPLE_LOADER_OBJECT_LOADER__ +#define __MAPLE_LOADER_OBJECT_LOADER__ + +#include "object_locator.h" +#include "gc_roots.h" +#include "loader/loader_utils.h" + +namespace maplert { +enum FieldName { + kFieldParent = 0, + kFieldClassTable +}; + +class ObjectLoader : public LoaderAPI { + public: + ObjectLoader(); + virtual ~ObjectLoader(); + // API Interfaces Begin + virtual void PreInit(IAdapterEx interpEx) override; + virtual void PostInit(jobject systemClassLoader) override; + virtual void UnInit() override; + jobject GetCLParent(jobject classLoader) override; + void SetCLParent(jobject classLoader, jobject parentClassLoader) override; + bool IsBootClassLoader(jobject classLoader) override; + bool LoadClasses(jobject classLoader, ObjFile &objFile) override; + // get registered mpl file by exact path-name: the path should be canonicalized + size_t GetLoadedClassCount() override; + size_t GetAllHashMapSize() override; + bool IsLinked(jobject classLoader) override; + void SetLinked(jobject classLoader, bool isLinked) override; + bool GetClassNameList(jobject classLoader, ObjFile &objFile, std::vector &classVec) override; + void ReTryLoadClassesFromMplFile(jobject jobject, ObjFile &mplFile) override; + // MRT_EXPORT Split + // visit class loader gc roots + void VisitGCRoots(const RefVisitor &visitor) override; + jobject GetSystemClassLoader() override; + jobject GetBootClassLoaderInstance() override; + void SetClassCL(jclass klass, jobject classLoader) override; + IObjectLocator GetCLClassTable(jobject classLoader) override; + void SetCLClassTable(jobject classLoader, IObjectLocator classLocator) override; + // API Interfaces End + MObject *GetClassCL(MClass *klass); + void SetCLParent(const MObject *classLoader, const MObject *parentClassLoader); + void 
RegisterDynamicClass(const MObject *classLoader, const MClass *klass); + void UnregisterDynamicClass(const MObject *classLoader, const MClass *klass); + virtual MClass *GetPrimitiveClass(const std::string &mplClassName) = 0; + virtual void VisitPrimitiveClass(const maple::rootObjectFunc &func) = 0; + virtual MClass *CreateArrayClass(const std::string &mplClassName, MClass &componentClass) = 0; + IAdapterEx GetAdapterEx() override { + return pAdapterEx; + } + protected: + void UnloadClasses(const MObject *classLoader); + uint16_t PresetClassCL(const MObject *classLoader); + bool LocateIndex(const MObject *classLoader, int16_t &pos) const; + bool RecycleIndex(const MObject *classLoader); + void VisitClassesByLoader(const MObject *loader, const maple::rootObjectFunc &func); + bool LoadClassesFromMplFile(const MObject *classLoader, ObjFile &mplFile, + std::vector &infoList, bool hasSiblings = false); + bool LoadClasses(const MObject *classLoader, ObjFile &objFile); + bool LoadClasses(const MObject *classLoader, std::vector &objList); + + LinkerAPI *pLinker = nullptr; + AdapterExAPI *pAdapterEx = nullptr; + static const int kMaxClassloaderNum = 256; + const unsigned int kClIndexValueMask = 0x00FF; + const unsigned int kClIndexFlagMask = 0xFF00; + const unsigned int kClIndexFlag = 0xAB00; + const unsigned int kClIndexInintValue = 0xABCD; + // mCLTable[] stores all class loader, except index 0. + // In each class loader, classTable filed will store its classlocator instance. + // 'Cause NULL is for Bootstrap class loader, mCLTable[0]'d point to locator directly. + // Index ---> Value + // --- + // [0] -------------------------> MClassLocator_0_for_boot_class_loader + // [1] ---> CLASS_LOADER_1 ===> MClassLocator_1 + // [2] ---> CLASS_LOADER_2 ===> MClassLocator_2 + // ... ---> .............. ===> ... + // [255] ---> CLASS_LOADER_255 ===> MClassLocator_255 + const MObject *mCLTable[kMaxClassloaderNum] = { 0 }; + MObject *mBootClassLoader = nullptr; + MObject *mSystemClassLoader = nullptr; + std::vector primitiveClasses; + private: + jfieldID GetCLField(const FieldName &name); +}; +} // end namespace maple +#endif // endif __MAPLE_LOADER_OBJECT_LOADER__ diff --git a/src/mrt/compiler-rt/include/loader/object_locator.h b/src/mrt/compiler-rt/include/loader/object_locator.h new file mode 100644 index 0000000000..34f2501d37 --- /dev/null +++ b/src/mrt/compiler-rt/include/loader/object_locator.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __MAPLE_LOADER_OBJECT_LOCATOR__ +#define __MAPLE_LOADER_OBJECT_LOCATOR__ + +#include +#include +#include +#include +#include + +#include "object_base.h" +#include "hash_pool.h" +#include "loader_api.h" +#include "linker_api.h" + +namespace maplert { +// this is for using class-name as the hash key. 
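A hedged sketch of the name-keyed lookup that the ObjCmpStr comparator declared
next supports (the value type, the helper function, and the example key are
illustrative assumptions; the real container aliases follow further below):

    #include <map>
    #include <string>
    // Dynamic classes are registered and found by their internal class name,
    // ordered by ObjCmpStr's string compare().
    using ExampleClassMap = std::map<std::string, MClass*, ObjCmpStr>;
    bool IsRegistered(const ExampleClassMap &classes, const std::string &internalName) {
      return classes.find(internalName) != classes.end();
    }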
+struct ObjCmpStr { + bool operator()(const std::string &a, const std::string &b) const { + return a.compare(b) < 0; + } +}; + +struct ObjFileCmp { + bool operator()(const ObjFile *a, const ObjFile *b) const { + if (a == nullptr) { + return false; + } else if (b == nullptr) { + return true; + } + return a->GetUniqueID() < b->GetUniqueID(); + } +}; + +using ClassMapT = std::map; +using ClassMapIterT = std::map::iterator; +using ClassMapConstIterT = std::map::const_iterator; +using ObjFileHashPool = std::map; +// A MFileClasslocator loads class info from MFile. +class ClassLocator { + friend class MClassLocatorInterpEx; + public: + ClassLocator() = default; + ~ClassLocator() = default; + // Load classes from MFiles + bool LoadClasses(std::vector &objList, uint16_t clindex); + // Load classes from one MFile + bool LoadClasses(ObjFile &objFile, uint16_t clindex); + MClass *InquireClass(const std::string &internalName, SearchFilter &filter); + uint32_t GetClassCount(const ObjFile &objFile); + void UnloadClasses(); + void VisitClasses(const maple::rootObjectFunc &f, bool isBoot = false); + + bool GetClassNameList(const ObjFile &objFile, std::vector &classVec); + void UnregisterDynamicClassImpl(const ClassMapIterT &it) const; + // from classes loaded by this class-loader + bool RegisterDynamicClass(const std::string &className, MClass &classObj); + // MFileClassloader manage dynamic class info + bool UnregisterDynamicClass(const std::string &className); + // Unregister all dynamic classes + void UnregisterDynamicClasses(); + size_t GetLoadedClassCount() { + maple::SpinAutoLock lock(spLock); + return mLoadedClassCount; + } + size_t GetClassHashMapSize(); + + bool IsLinked() const { + return mLinked; + } + + void SetLinked(bool isLinked) { + mLinked = isLinked; + } + protected: + bool InitClasses(std::vector &objList, MClassHashPool &pool); + void InitClasses(const ObjFile &objFile, MClassHashPool &pool); + void SetClasses(std::vector &objList, MClassHashPool &pool, uint16_t clIndex); + void SetClasses(const ObjFile &objFile, MClassHashPool &pool, uint16_t clIndex); + private: + // Find a loaded class from mpl(all) or dex(on-demand) in the order of mhashpools. + MClass *FindClassInternal(const std::string &className, SearchFilter &filter); + // Find a loaded class from mpl(all) and dex(on-demand), or created array class in runtime. + MClass *FindLoadedClass(const MClassHashPool &pool, const std::string &className, SearchFilter &filter); + MClass *FindRuntimeClass(const std::string &mplClassName); + MClass *CreateArrayClassRecursively(const std::string &mplClassName, SearchFilter &filter); + + maple::SpinLock spLock; + ObjFileHashPool mHashPools; + pthread_rwlock_t dClassMapLock = PTHREAD_RWLOCK_INITIALIZER; + ClassMapT dClassMap; // map for dynamic classInfo + size_t mLoadedClassCount = 0; + bool mLinked = false; +}; +} // namespace maplert +#endif // __MAPLE_LOADER_OBJECT_LOCATOR__ diff --git a/src/mrt/compiler-rt/include/metadata_inline.h b/src/mrt/compiler-rt/include/metadata_inline.h new file mode 100644 index 0000000000..0bdf67a32a --- /dev/null +++ b/src/mrt/compiler-rt/include/metadata_inline.h @@ -0,0 +1,414 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef METADATA_INLINE_H +#define METADATA_INLINE_H + +#include "metadata_layout.h" + +template +inline T DataRef32::GetDataRef() const { + uint32_t format = refVal & kDataRefBitMask; + switch (format) { + case kDataRefIsDirect: { + // Note if *refVal* is a memory address, it should not be sign-extended + return reinterpret_cast(static_cast(refVal)); + } + case kDataRefIsOffset: { + // Note if *refVal* is an offset, it is a signed integer and should be sign-extended + int32_t offset = static_cast(refVal & ~static_cast(kDataRefBitMask)); + intptr_t addr = reinterpret_cast(this) + static_cast(offset); + return reinterpret_cast(addr); + } + case kDataRefIsIndirect: { + // Note if *refVal* is an offset, it is a signed integer and should be sign-extended + int32_t offset = static_cast(refVal & ~static_cast(kDataRefBitMask)); + intptr_t addr = reinterpret_cast(this) + static_cast(offset); + uintptr_t *pRef = reinterpret_cast(addr); + return reinterpret_cast(*pRef); + } + default: { + std::abort(); + } + } +} + +template +inline void DataRef32::SetDataRef(T ref, DataRefFormat format) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + switch (format) { + case kDataRefIsDirect: { + // Note if *refVal* is a memory address, it should be an unsigned integer. + uintptr_t val = reinterpret_cast(ref); + if ((val & kDataRefBitMask) == 0 && val <= UINT32_MAX) { + refVal = static_cast(val); + return; + } else { + std::abort(); + } + } + case kDataRefIsOffset: { + intptr_t offset = static_cast(reinterpret_cast(ref)) - reinterpret_cast(this); + if ((offset & kDataRefBitMask) == 0 && INT32_MIN <= offset && offset <= INT32_MAX) { + refVal = static_cast(offset | kDataRefIsOffset); + return; + } else { + std::abort(); + } + } + default: { + std::abort(); + } + } +} + +template +inline T DataRef32::GetRawValue() const { + return reinterpret_cast(refVal); +} + +template +inline T DataRef::GetDataRef() const { + intptr_t ref = static_cast(refVal); + intptr_t format = ref & kDataRefBitMask; + switch (format) { + case kDataRefIsDirect: return reinterpret_cast(refVal); + case kDataRefIsOffset: { + ref &= ~static_cast(kDataRefBitMask); + ref += reinterpret_cast(this); + return reinterpret_cast(ref); + } + case kDataRefIsIndirect: { + ref &= ~static_cast(kDataRefBitMask); + ref += reinterpret_cast(this); + uintptr_t *pRef = reinterpret_cast(ref); + return reinterpret_cast(*pRef); + } + default: { + std::abort(); + } + } +} + +template +inline void DataRef::SetDataRef(const T ref, const DataRefFormat format) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + uintptr_t uRef = reinterpret_cast(ref); + switch (format) { + case kDataRefIsDirect: { + refVal = reinterpret_cast(uRef); + return; + } + case kDataRefIsOffset: { + intptr_t offset = static_cast(uRef) - reinterpret_cast(this); + if ((offset & kDataRefBitMask) == 0) { + refVal = static_cast(offset | kDataRefIsOffset); + return; + } else { + std::abort(); + } + } + default: { + std::abort(); + } + } +} + +template +inline T DataRef::GetRawValue() const { + return reinterpret_cast(refVal); +} + +template +inline T GctibRef32::GetGctibRef() const { + uint32_t format = refVal & 
kGctibRefBitMask; + switch (format) { + case kGctibRefIsOffset: { + // Note if *refVal* is an offset, it is a signed integer and should be sign-extended + int32_t offset = static_cast(refVal); + intptr_t addr = reinterpret_cast(this) + static_cast(offset); + return reinterpret_cast(addr); + } + case kGctibRefIsIndirect: { + // Note if *refVal* is an offset, it is a signed integer and should be sign-extended + int32_t offset = static_cast(refVal & ~static_cast(kGctibRefBitMask)); + intptr_t addr = reinterpret_cast(this) + static_cast(offset); + uintptr_t *pRef = reinterpret_cast(addr); + return reinterpret_cast(*pRef); + } + default: { + std::abort(); + } + } +} + +template +inline void GctibRef32::SetGctibRef(T ref, GctibRefFormat format) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + switch (format) { + case kGctibRefIsOffset: { + intptr_t offset = reinterpret_cast(ref) - reinterpret_cast(this); + if ((offset & kGctibRefBitMask) == 0 && INT32_MIN <= offset && offset <= INT32_MAX) { + refVal = static_cast(offset); + return; + } else { + std::abort(); + } + } + default: { + std::abort(); + } + } +} + +template +inline T GctibRef::GetGctibRef() const { + intptr_t ref = static_cast(refVal); + intptr_t format = ref & kGctibRefBitMask; + switch (format) { + case kGctibRefIsOffset: { + ref += reinterpret_cast(this); + return reinterpret_cast(ref); + } + case kGctibRefIsIndirect: { + ref &= ~static_cast(kGctibRefBitMask); + ref += reinterpret_cast(this); + uintptr_t *pRef = reinterpret_cast(ref); + return reinterpret_cast(*pRef); + } + default: { + std::abort(); + } + } +} + +template +inline void GctibRef::SetGctibRef(const T ref, const GctibRefFormat format) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + switch (format) { + case kGctibRefIsOffset: { + intptr_t offset = static_cast(reinterpret_cast(ref)) - reinterpret_cast(this); + if ((offset & kDataRefBitMask) == 0) { + refVal = static_cast(offset); + return; + } else { + std::abort(); + } + } + default: { + std::abort(); + } + } +} + +#if defined(__aarch64__) +inline bool MByteRef::IsOffset() const { + uintptr_t offset = reinterpret_cast(refVal); + return (kEncodedPosOffsetMin < offset) && (offset < kEncodedPosOffsetMax); +} + +template +inline T MByteRef::GetRef() const { + if (IsOffset()) { + uint32_t offset = static_cast(reinterpret_cast(refVal) & ~kPositiveOffsetBias); + intptr_t ref = reinterpret_cast(this) + static_cast(offset); + return reinterpret_cast(ref); + } + uintptr_t ref = reinterpret_cast(refVal); + return reinterpret_cast(ref); +} + +template +inline void MByteRef::SetRef(const T ref) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + refVal = reinterpret_cast(ref); + if (IsOffset()) { + std::abort(); + } +} +#elif defined(__arm__) +inline bool MByteRef::IsOffset() const { + intptr_t offset = static_cast(refVal); + return (kEncodedOffsetMin < offset) && (offset < kEncodedOffsetMax); +} + +template +inline T MByteRef::GetRef() const { + if (IsOffset()) { + int32_t offset = static_cast(static_cast(refVal) - kPositiveOffsetBias); + intptr_t ref = reinterpret_cast(this) + static_cast(offset); + return reinterpret_cast(ref); + } + uintptr_t ref = reinterpret_cast(refVal); + return reinterpret_cast(ref); +} + +template +inline void MByteRef::SetRef(const T ref) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + refVal = reinterpret_cast(ref); + if (IsOffset()) { + std::abort(); + } +} +#endif + +inline bool MByteRef32::IsPositiveOffset() const { + return 
(kEncodedPosOffsetMin < refVal) && (refVal < kEncodedPosOffsetMax); +} + +inline bool MByteRef32::IsNegativeOffset() const { + int32_t offset = static_cast(refVal); + return (kNegativeOffsetMin <= offset) && (offset < kNegativeOffsetMax); +} + +inline bool MByteRef32::IsOffset() const { + return IsNegativeOffset() || IsPositiveOffset(); +} + +template +inline T MByteRef32::GetRef() const { + if (IsPositiveOffset()) { + uint32_t offset = refVal & ~kPositiveOffsetBias; + intptr_t ref = reinterpret_cast(this) + static_cast(offset); + return reinterpret_cast(ref); + } else if (IsNegativeOffset()) { + int32_t offset = static_cast(refVal); + intptr_t ref = reinterpret_cast(this) + static_cast(offset); + return reinterpret_cast(ref); + } + + uintptr_t ref = static_cast(refVal); + return reinterpret_cast(ref); +} + +template +inline void MByteRef32::SetRef(T ref) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + uintptr_t addr = reinterpret_cast(ref); + refVal = static_cast(addr); +#if defined(__aarch64__) + if (IsOffset()) { + std::abort(); + } +#endif +} + +template +inline T DataRefOffset32::GetDataRef() const { + if (refOffset == 0) { + return 0; + } + intptr_t ref = static_cast(refOffset); + ref += reinterpret_cast(this); + return reinterpret_cast(static_cast(ref)); +} + +inline int32_t DataRefOffset32::GetRawValue() const { + return refOffset; +} + +inline void DataRefOffset32::SetRawValue(int32_t value) { + refOffset = value; +} + +// specialized for int32_t, check for out-of-boundary +template +inline void DataRefOffset32::SetDataRef(T ref) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + if (ref == 0) { + refOffset = 0; + return; + } + uintptr_t uRef = reinterpret_cast(ref); + intptr_t offset = static_cast(uRef) - reinterpret_cast(this); + if (INT32_MIN <= offset && offset <= INT32_MAX) { + refOffset = static_cast(offset); + } else { + std::abort(); + } +} + +template +inline T DataRefOffsetPtr::GetDataRef() const { + if (refOffset == 0) { + return 0; + } + intptr_t ref = static_cast(refOffset); + ref += reinterpret_cast(this); + return reinterpret_cast(static_cast(ref)); +} + +template +inline void DataRefOffsetPtr::SetDataRef(T ref) { + static_assert(sizeof(T) == sizeof(uintptr_t), "wrong type."); + uintptr_t uRef = reinterpret_cast(ref); + refOffset = static_cast(uRef) - reinterpret_cast(this); +} + +inline intptr_t DataRefOffsetPtr::GetRawValue() const { + return refOffset; +} + +inline void DataRefOffsetPtr::SetRawValue(intptr_t value) { + refOffset = value; +} + +template +inline T DataRefOffset::GetDataRef() const { + return refOffset.GetDataRef(); +} + +template +inline void DataRefOffset::SetDataRef(T ref) { + refOffset.SetDataRef(ref); +} + +inline intptr_t DataRefOffset::GetRawValue() const { + return refOffset.GetRawValue(); +} + +inline void DataRefOffset::SetRawValue(intptr_t value) { + refOffset.SetRawValue(value); +} + +template +inline T MethodFieldRef::GetDataRef() const { + return refOffset.GetDataRef(); +} + +template +inline void MethodFieldRef::SetDataRef(T ref) { + refOffset.SetDataRef(ref); +} + +inline intptr_t MethodFieldRef::GetRawValue() const { + return refOffset.GetRawValue(); +} + +inline void MethodFieldRef::SetRawValue(intptr_t value) { + refOffset.SetRawValue(value); +} + +inline bool MethodFieldRef::IsCompact() const { + return (static_cast(refOffset.GetRawValue()) & kMethodFieldRefIsCompact) == kMethodFieldRefIsCompact; +} + +template +inline T MethodFieldRef::GetCompactData() const { + uint8_t *ref = GetDataRef(); + 
uintptr_t isCompact = kMethodFieldRefIsCompact; + return reinterpret_cast(reinterpret_cast(ref) & (~isCompact)); +} +#endif // METADATA_INLINE_H diff --git a/src/mrt/compiler-rt/include/metadata_layout.h b/src/mrt/compiler-rt/include/metadata_layout.h new file mode 100644 index 0000000000..86b975177a --- /dev/null +++ b/src/mrt/compiler-rt/include/metadata_layout.h @@ -0,0 +1,352 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef METADATA_LAYOUT_H +#define METADATA_LAYOUT_H +#include + +// metadata layout is shared between maple compiler and runtime, thus not in namespace maplert +// some of the reference field of metadata is stored as relative offset +// for example, declaring class of Fields/Methods +// which can be negative +#ifdef USE_32BIT_REF +using MetaRef = uint32_t; // consistent with reffield_t in address.h +#else +using MetaRef = uintptr_t; // consistent iwth reffield_t in address.h +#endif // USE_32BIT_REF + +// DataRefOffset aims to represent a reference to data in maple file, which is already an offset. +// DataRefOffset is meant to have pointer size. +// All Xx32 data types defined in this file aim to use 32 bits to save 64-bit address, and thus are +// specific for 64-bit platforms. +struct DataRefOffset32 { + int32_t refOffset; + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline int32_t GetRawValue() const; + inline void SetRawValue(int32_t value); +}; + +struct DataRefOffsetPtr { + intptr_t refOffset; + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline intptr_t GetRawValue() const; + inline void SetRawValue(intptr_t value); +}; + +struct DataRefOffset { +#ifdef USE_32BIT_REF + DataRefOffset32 refOffset; +#else + DataRefOffsetPtr refOffset; +#endif + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline intptr_t GetRawValue() const; + inline void SetRawValue(intptr_t value); +}; + +struct MethodFieldRef { + // MethodFieldRef aims to represent a reference to fields/methods in maple file, which is already an offset. + // also, offset LSB may set 1, to indicate that it is compact fields/methods. + enum MethodFieldRefFormat { + kMethodFieldRefIsCompact = 1, + }; + DataRefOffsetPtr refOffset; + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline bool IsCompact() const; + template + inline T GetCompactData() const; + inline intptr_t GetRawValue() const; + inline void SetRawValue(intptr_t value); +}; + +// DataRef aims for reference to data in maple file (generated by maple compiler) and is aligned to at least 4 bytes. +// Perhaps MDataRef is more fit, still DataRef is chosen to make it common. +// DataRef allows 4 formats of value: +// 0. "label_name" for direct reference +// 1. "label_name - . + 1" for padding unused +// 2. "label_name - . + 2" for reference in offset format +// 3. "indirect.label_name - . 
+ 3" for indirect reference +// this format aims to support lld which does not support expression "global_symbol - ." +// DataRef is self-decoded by also encoding the format and is defined for binary compatibility. +// If no compatibility problem is involved, DataRefOffsetPtr is preferred. +enum DataRefFormat { + kDataRefIsDirect = 0, // must be 0 + kDataRefPadding = 1, // unused + kDataRefIsOffset = 2, + kDataRefIsIndirect = 3, // read-only + kDataRefBitMask = 3, +}; + +struct DataRef32 { + // be careful when *refVal* is treated as an offset which is a signed integer actually. + uint32_t refVal; + template + inline T GetDataRef() const; + template + inline void SetDataRef(T ref, DataRefFormat format = kDataRefIsDirect); + template + inline T GetRawValue() const; +}; + +struct DataRef { + uintptr_t refVal; + template + inline T GetDataRef() const; + template + inline void SetDataRef(const T ref, const DataRefFormat format = kDataRefIsDirect); + template + inline T GetRawValue() const; +}; +// GctibRef aims to represent a reference to gctib in maple file, which is an offset by default. +// GctibRef is meant to have pointer size and aligned to at least 4 bytes. +// GctibRef allows 2 formats of value: +// 0. "label_name - . + 0" for reference in offset format +// 1. "indirect.label_name - . + 1" for indirect reference +// this format aims to support lld which does not support expression "global_symbol - ." +// GctibRef is self-decoded by also encoding the format and is defined for binary compatibility. +// If no compatibility problem is involved, DataRef is preferred. +enum GctibRefFormat { + kGctibRefIsOffset = 0, // default + kGctibRefIsIndirect = 1, + kGctibRefBitMask = 3 +}; + +struct GctibRef32 { + // be careful when *refVal* is treated as an offset which is a signed integer actually. + uint32_t refVal; + template + inline T GetGctibRef() const; + template + inline void SetGctibRef(T ref, GctibRefFormat format = kGctibRefIsOffset); +}; + +struct GctibRef { + uintptr_t refVal; + template + inline T GetGctibRef() const; + template + inline void SetGctibRef(const T ref, const GctibRefFormat format = kGctibRefIsOffset); +}; + +// MByteRef is meant to represent a reference to data defined in maple file. It is a direct reference or an offset. +// MByteRef is self-encoded/decoded and aligned to 1 byte. +// Unlike DataRef, the format of MByteRef is determined by its value. 
+struct MByteRef { + uintptr_t refVal; // initializer prefers this field to be a pointer + +#if defined(__arm__) || defined(USE_ARM32_MACRO) + // assume address range 0 ~ 256MB is unused in arm runtime + // kEncodedOffsetMin ~ kEncodedOffsetMax is the value range of encoded offset + static constexpr intptr_t kOffsetBound = 128 * 1024 * 1024; + static constexpr intptr_t kOffsetMin = -kOffsetBound; + static constexpr intptr_t kOffsetMax = kOffsetBound; + + static constexpr intptr_t kPositiveOffsetBias = 128 * 1024 * 1024; + static constexpr intptr_t kEncodedOffsetMin = kPositiveOffsetBias + kOffsetMin; + static constexpr intptr_t kEncodedOffsetMax = kPositiveOffsetBias + kOffsetMax; +#else + enum { + kBiasBitPosition = sizeof(refVal) * 8 - 4, // the most significant 4 bits + }; + + static constexpr uintptr_t kOffsetBound = 256 * 1024 * 1024; // according to kDsoLoadedAddessEnd = 0xF0000000 + static constexpr uintptr_t kPositiveOffsetMin = 0; + static constexpr uintptr_t kPositiveOffsetMax = kOffsetBound; + + static constexpr uintptr_t kPositiveOffsetBias = static_cast(6) << kBiasBitPosition; + static constexpr uintptr_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias; + static constexpr uintptr_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias; +#endif + + template + inline T GetRef() const; + template + inline void SetRef(const T ref); + inline bool IsOffset() const; +}; + +struct MByteRef32 { + uint32_t refVal; + static constexpr uint32_t kOffsetBound = 256 * 1024 * 1024; // according to kDsoLoadedAddessEnd = 0xF0000000 + static constexpr uint32_t kPositiveOffsetMin = 0; + static constexpr uint32_t kPositiveOffsetMax = kOffsetBound; + + static constexpr uint32_t kPositiveOffsetBias = 0x60000000; // the most significant 4 bits 0110 + static constexpr uint32_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias; + static constexpr uint32_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias; + + static constexpr uint32_t kDirectRefMin = 0xC0000000; // according to kDsoLoadedAddessStart = 0xC0000000 + static constexpr uint32_t kDirectRefMax = 0xF0000000; // according to kDsoLoadedAddessEnd = 0xF0000000 + + static constexpr int32_t kNegativeOffsetMin = -(256 * 1024 * 1024); // -kOffsetBound + static constexpr int32_t kNegativeOffsetMax = 0; + + template + inline T GetRef() const; + template + inline void SetRef(T ref); + inline bool IsOffset() const; + inline bool IsPositiveOffset() const; + inline bool IsNegativeOffset() const; +}; + +// MethodMeta defined in methodmeta.h +// FieldMeta defined in fieldmeta.h +// MethodDesc contains MethodMetadata and stack map +struct MethodDesc { + // relative offset for method metadata relative to current PC. + // method metadata is in compact format if this offset is odd. + uint32_t metadataOffset; + + int16_t localRefOffset; + uint16_t localRefNumber; + + // stack map for a methed might be placed here +}; + +// Note: class init in maplebe and cg is highly dependent on this type. +// update aarch64rtsupport.h if you modify this definition. 
+struct ClassMetadataRO { + MByteRef className; + MethodFieldRef fields; // point to info of fields + MethodFieldRef methods; // point to info of methods + union { // Element classinfo of array, others parent classinfo + DataRef superclass; + DataRef componentClass; + }; + + uint16_t numOfFields; + uint16_t numOfMethods; + +#ifndef USE_32BIT_REF + uint16_t flag; + uint16_t numOfSuperclasses; + uint32_t padding; +#endif // !USE_32BIT_REF + + uint32_t mod; + DataRefOffset32 annotation; + DataRefOffset32 clinitAddr; +}; + +static constexpr size_t kPageSize = 4096; +static constexpr size_t kCacheLine = 64; + +// according to kSpaceAnchor and kFireBreak defined in bp_allocator.cpp +// the address of this readable page is set as kProtectedMemoryStart for java class +static constexpr uintptr_t kClInitStateAddrBase = 0xc0000000 - (1u << 20) * 2; + +// In Kirin 980, 2 mmap memory address with odd number of page distances may have unreasonable L1&L2 cache conflict. +// kClassInitializedState is used as the init state for class that has no method, it's will be loaded in many +// place for Decouple build App. if we set the value to kClInitStateAddrBase(0xbfe00000), it may conflict with the +// yieldpoind test address globalPollingPage which is defined in yieldpoint.cpp. +// Hence we add 1 cache line (64 byte) offset here to avoid such conflict +static constexpr uintptr_t kClassInitializedState = kClInitStateAddrBase + kCacheLine; + +extern "C" uint8_t classInitProtectRegion[]; + +// Note there is no state to indicate a class is already initialized. +// Any state beyond listed below is treated as initialized. +enum ClassInitState { + kClassInitStateMin = 0, + kClassUninitialized = 1, + kClassInitializing = 2, + kClassInitFailed = 3, + kClassInitialized = 4, + kClassInitStateMax = 4, +}; + +enum SEGVAddr { + kSEGVAddrRangeStart = kPageSize + 0, + + // Note any readable address is treated as Initialized. + kSEGVAddrForClassInitStateMin = kSEGVAddrRangeStart + kClassInitStateMin, + kSEGVAddrForClassUninitialized = kSEGVAddrForClassInitStateMin + kClassUninitialized, + kSEGVAddrForClassInitializing = kSEGVAddrForClassInitStateMin + kClassInitializing, + kSEGVAddrForClassInitFailed = kSEGVAddrForClassInitStateMin + kClassInitFailed, + kSEGVAddrFoClassInitStateMax = kSEGVAddrForClassInitStateMin + kClassInitStateMax, + + kSEGVAddrRangeEnd, +}; + +struct ClassMetadata { + // object common fields + MetaRef shadow; // point to classinfo of java/lang/Class + int32_t monitor; + + // other fields + uint16_t clIndex; // 8bit ClassLoader index, used for querying the address of related ClassLoader instance. 
+ union { + uint16_t objSize; + uint16_t componentSize; + } sizeInfo; + +#ifdef USE_32BIT_REF // for alignment purpose + uint16_t flag; + uint16_t numOfSuperclasses; +#endif // USE_32BIT_REF + + DataRef iTable; // iTable of current class, used for interface call, will insert the content into classinfo + DataRef vTable; // vTable of current class, used for virtual call, will insert the content into classinfo + GctibRef gctib; // for rc + +#ifdef USE_32BIT_REF + DataRef32 classInfoRo; + DataRef32 cacheFalseClass; +#else + DataRef classInfoRo; +#endif + + union { + uintptr_t initState; // a readable address for initState means initialized + DataRef cacheTrueClass; + }; + + public: + static inline intptr_t OffsetOfInitState() { + ClassMetadata *base = nullptr; + return reinterpret_cast(&(base->initState)); + } + + uintptr_t GetInitStateRawValue() const { + return __atomic_load_n(&initState, __ATOMIC_ACQUIRE); + } + + template + void SetInitStateRawValue(T val) { + __atomic_store_n(&initState, reinterpret_cast(val), __ATOMIC_RELEASE); + } +}; + +// function to set Class/Field/Method metadata's shadow field to avoid type conversion +// Note 1: here we don't do NULL-check and type-compatibility check +// NOte 2: C should be of jclass/ClassMetata* type +template +static inline void MRTSetMetadataShadow(M *meta, C cls) { + meta->shadow = static_cast(reinterpret_cast(cls)); +} + +#endif // METADATA_LAYOUT_H diff --git a/src/mrt/compiler-rt/include/mm_config.h b/src/mrt/compiler-rt/include/mm_config.h new file mode 100644 index 0000000000..9b6858fc02 --- /dev/null +++ b/src/mrt/compiler-rt/include/mm_config.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MM_CONFIG_H +#define MAPLE_RUNTIME_MM_CONFIG_H + +#include +#include +#include +#include +#include "securec.h" +#include "mrt_mm_config_common.h" +#include "base/logging.h" + +// Enable debug features. Codes that implement debug features are guarded by +// this macro. When this macro is set to 0, related codes will become no-ops +// and will have no impact on performance or code size. Affected features +// include (but are not limited to): +// +// - Assertion using __MRT_ASSERT (panic.h) +// - ROS allocator debug features: +// - Assertion, verification, dumping +// - Bitmap debug in rosallocator +// - Part of the statistics +// - Naive RC collector debug features: +// - Inc/Dec from zero detection (?) +// - Weak RC checking +// - LoadIncRef starvation detection +// - Saferegion checks +// - GC phase timer +// +#ifndef __MRT_DEBUG +#define __MRT_DEBUG 1 +#endif // __MRT_DEBUG + +// Environment variable-affected configuration +// +// Enable EnvConf. This may create a "undisclosed interface" (i.e. security +// exploit) in production, so we provide a way to easily disable it when +// compiling for production. 
+#ifndef MRT_ALLOW_ENVCONF +#define MRT_ALLOW_ENVCONF 1 +#endif + +#if MRT_ALLOW_ENVCONF +namespace maplert { +long MrtEnvConf(const char *name, long defaultValue); // do not use directly +} + +// Use this macro to get config from environment variable, defaulting to the +// value of a macro. +// +// For example: MRT_ENVCONF(MRT_GCLOG_USE_FILE, MRT_GCLOG_USE_FILE_DEFAULT) +// Will first check if an environment variable "MRT_GCLOG_USE_FILE" is present, +// and is a valid integer, and use its value. If not present or is not a valid +// integer, it will fall back to the current value of the MRT_GCLOG_USE_FILE +// macro. This lets the user override configuration at run time, most useful +// for debugging. +#define MRT_ENVCONF(conf, defaultValue) maplert::MrtEnvConf(#conf, defaultValue) +#else +#define MRT_ENVCONF(conf) (conf) +#endif + +// configuration control for Maple runtime memory manager +// HEAP_OBJ_CHECK, used for debugging. +#ifndef HEAP_OBJ_CHECK // When set, INC, DEC and other RC operations will print +#define HEAP_OBJ_CHECK 0 // detailed error messages when encountering a non-heap obj. +#endif + +#ifndef HEAP_PROFILE +#define HEAP_PROFILE 0 +#endif // HEAP_PROFILE + +#ifndef SAFEREGION_CHECK // When set, RC will print backtrace and abort the program +#define SAFEREGION_CHECK __MRT_DEBUG_COND_FALSE // if mutator execute inc, dec or other RC operations in saferegion. +#endif + +#ifndef RC_PROFILE +#define RC_PROFILE 0 +#endif // RC_PROFILE + +#ifndef ENABLE_HPROF +#define ENABLE_HPROF 1 +#endif + +#ifndef LOG_ALLOC_TIMESTAT // Set to 1 to log time stats for obj allocation +#define LOG_ALLOC_TIMESTAT 0 // and release in allocator +#endif + +// Set to 1 to enable collecting hot object data for RC. +#ifndef RC_HOT_OBJECT_DATA_COLLECT +#define RC_HOT_OBJECT_DATA_COLLECT 0 +#endif // RC_HOT_OBJECT_DATA_COLLECT + +#ifndef BT_CLEANUP_PROFILE // Back-up tracing cleanup profiling +#define BT_CLEANUP_PROFILE 1 +#endif // BT_CLEANUP_PROFILE + +#ifndef RC_TRACE_OBJECT +#define RC_TRACE_OBJECT __MRT_DEBUG_COND_FALSE +#endif // RC_TRACE_OBJECT + +#ifndef PATTERN_FROM_BACKUP_TRACING_DEFAULT +#define PATTERN_FROM_BACKUP_TRACING_DEFAULT 0 +#endif + +// Enforcing MRT_GCFini{Global,ThreadLocal} +#ifndef MRT_ENFORCE_FINI // When 1, not calling MRT_GCFini{Global,ThreadLocal} will abort and print error +#define MRT_ENFORCE_FINI 0 // When 0, We will call MRT_GCFini{Global,ThreadLocal} for the higher level +#endif // MRT_ENFORCE_FINI + +// Selection of different allocator +// define MRT_ALLOCATOR_MALLOC 1 +#define MRT_ALLOCATOR_ROS 3 + +#ifndef MRT_ALLOCATOR +#define MRT_ALLOCATOR MRT_ALLOCATOR_ROS +#endif + +// Enable/disable systrace in the allocator slow path +#ifndef MRT_SYSTRACE_ALLOCATOR +#define MRT_SYSTRACE_ALLOCATOR 0 +#endif + +// configuration for use-after-free debugging +// +// When enabled, newobj, freeobj, inc and dec will check if the object address +// to work with actually points to a live object. +#ifndef MRT_REF_VALIDITY_CHECK +#define MRT_REF_VALIDITY_CHECK 1 // Do checking by default. Can be disabled. +#endif // MRT_REF_VALIDITY_CHECK + +// This enables assertions at obj free +#ifndef MRT_DEBUG_DOUBLE_FREE +#define MRT_DEBUG_DOUBLE_FREE 1 +#endif + +// GCLog: logs specific to GC +// +// If this is on, write GC log to a disk file. This is useful when debugging, +// because logcat may be flooded by messages from other processes. +// +// If off, GCLog will not do anything. +// +// Can be overridden by the environment variable of the same name. 
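[Editor's note] MrtEnvConf is only declared above; for illustration, a minimal definition consistent with the documented behaviour (use the environment variable only when it is present and parses as an integer, otherwise return the default) might look like the sketch below. This is an assumption, not the runtime's actual implementation:

    // Hedged sketch of MrtEnvConf; the real definition lives in the runtime sources.
    #include <cstdlib>
    #include <cerrno>

    namespace maplert {
    long MrtEnvConf(const char *name, long defaultValue) {
      const char *env = std::getenv(name);          // e.g. "MRT_GCLOG_USE_FILE"
      if (env == nullptr || *env == '\0') {
        return defaultValue;                        // variable not present
      }
      char *end = nullptr;
      errno = 0;
      long value = std::strtol(env, &end, 0);       // accept decimal/hex/octal
      if (errno != 0 || end == env || *end != '\0') {
        return defaultValue;                        // not a valid integer
      }
      return value;
    }
    } // namespace maplert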
+#ifndef MRT_GCLOG_USE_FILE_DEFAULT +#define MRT_GCLOG_USE_FILE_DEFAULT 0 +#endif // MRT_GCLOG_USE_FILE_DEFAULT + +// If true, the process will open the GC log file and write to it on startup. +// Otherwise, it will only open GC log file on fork(), i.e. when Zygote creates +// a sub-process. +// +// Can be overridden by the environment variable of the same name. +#ifndef MRT_GCLOG_OPEN_ON_STARTUP_DEFAULT +#define MRT_GCLOG_OPEN_ON_STARTUP_DEFAULT 0 +#endif // MRT_GCLOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_RCTRACELOG_USE_FILE_DEFAULT +#define MRT_RCTRACELOG_USE_FILE_DEFAULT RC_TRACE_OBJECT +#endif // MRT_RCTRACELOG_USE_FILE_DEFAULT + +#ifndef MRT_RCTRACELOG_OPEN_ON_STARTUP_DEFAULT +#define MRT_RCTRACELOG_OPEN_ON_STARTUP_DEFAULT RC_TRACE_OBJECT +#endif // MRT_RCTRACELOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_RPLOG_USE_FILE_DEFAULT +#define MRT_RPLOG_USE_FILE_DEFAULT 0 +#endif // MRT_RPLOG_USE_FILE_DEFAULT + +#ifndef MRT_RPLOG_OPEN_ON_STARTUP_DEFAULT +#define MRT_RPLOG_OPEN_ON_STARTUP_DEFAULT 0 +#endif // MRT_RPLOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_CYCLELOG_USE_FILE_DEFAULT +#define MRT_CYCLELOG_USE_FILE_DEFAULT 0 +#endif // MRT_CYCLELOG_USE_FILE_DEFAULT + +#ifndef MRT_CYCLELOG_OPEN_ON_STARTUP_DEFAULT +#define MRT_CYCLELOG_OPEN_ON_STARTUP_DEFAULT 0 +#endif // MRT_CYCLELOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_ALLOCFRAGLOG_USE_FILE_DEFAULT +#define MRT_ALLOCFRAGLOG_USE_FILE_DEFAULT 0 +#endif // MRT_ALLOCFRAGLOG_USE_FILE_DEFAULT + +#ifndef MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP_DEFAULT +#define MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP_DEFAULT 0 +#endif // MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_ALLOCATORLOG_USE_FILE_DEFAULT +#define MRT_ALLOCATORLOG_USE_FILE_DEFAULT 0 +#endif // MRT_ALLOCFRAGLOG_USE_FILE_DEFAULT + +#ifndef MRT_ALLOCATOR_OPEN_ON_STARTUP_DEFAULT +#define MRT_ALLOCATOR_OPEN_ON_STARTUP_DEFAULT 0 +#endif // MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_MIXLOG_USE_FILE_DEFAULT +#define MRT_MIXLOG_USE_FILE_DEFAULT 0 +#endif // MRT_ALLOCFRAGLOG_USE_FILE_DEFAULT + +#ifndef MRT_MIXLOG_OPEN_ON_STARTUP_DEFAULT +#define MRT_MIXLOG_OPEN_ON_STARTUP_DEFAULT 0 +#endif // MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP_DEFAULT + +#ifndef MRT_STDERRLOG_USE_FILE +#define MRT_STDERRLOG_USE_FILE 0 +#endif // MRT_ALLOCFRAGLOG_USE_FILE + +#ifndef MRT_STDERR_OPEN_ON_STARTUP +#define MRT_STDERR_OPEN_ON_STARTUP 0 +#endif // MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP + +// if set to 1 by the environment variable, MarkSweepCollector::concurrentMark and concurrentSweep will be false +#ifndef MRT_IS_NON_CONCURRENT_GC_DEFAULT +#define MRT_IS_NON_CONCURRENT_GC_DEFAULT 0 +#endif // MRT_IS_NON_CONCURRENT_GC_DEFAULT + +#define LOAD_INC_RC_MASK 1 // can only use lower 3bits on 64bit-platform + +#ifdef USE_32BIT_REF +#define GRT_DEADVALUE (0xdeaddeadul) +#else +#define GRT_DEADVALUE (0xdeaddeaddeaddeadul) +#endif // USE_32BIT_REF + +// --------------------------------------------------- +// NOTE: Don't forget update 'duplicateFunc.s' +// if you changed HEAP_START or HEAP_END. +// --------------------------------------------------- + +#ifdef __aarch64__ +// The first 64KB is protected by SELinux. +#define HEAP_START (1u << 16) +#define HEAP_END (1ul << 31) // max of 2GB space +#define PERM_BEGIN (9ul << 28) // perm start from 2.25G +#define PERM_END (3ul << 30) // perm end at 3G +#elif defined(__arm__) +// The first 64KB is protected by SELinux. +// 0~256MB is reserved to encode offset in mfile. 
+#define PERM_BEGIN (256ul << 20) +#define PERM_END (1ul << 30) +#define HEAP_START PERM_END +#define HEAP_END (2ul << 30) +#endif // __aarch64__ + +#ifndef PERMISSIVE_HEAP_COND +#define PERMISSIVE_HEAP_COND 0 // disabled for portability reason +#endif +namespace maplert { +constexpr uint64_t kMrtHeapAlign = 8; +// Maple uses i32 for array length and can be negative +constexpr uint32_t kMrtMaxArrayLength = ((1UL << 31) - 1); // the max of int32 is 2 ^ 31 - 1 +constexpr uint32_t kMfileHighAddress = 0xfe000000; +} + +#ifdef USE_32BIT_REF +#define ALLOC_USE_FAST_PATH (!CONFIG_JSAN && \ + !RC_HOT_OBJECT_DATA_COLLECT && \ + !RC_TRACE_OBJECT && \ + !LOG_ALLOC_TIMESTAT) +#else +#define ALLOC_USE_FAST_PATH false +#endif + +#ifndef LIKELY +#define LIKELY(x) __builtin_expect(!!(x), 1) +#endif + +#ifndef UNLIKELY +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#endif + +#endif // MAPLE_RUNTIME_MMCONFIG_H diff --git a/src/mrt/compiler-rt/include/mm_utils.h b/src/mrt/compiler-rt/include/mm_utils.h new file mode 100644 index 0000000000..0fb877b94e --- /dev/null +++ b/src/mrt/compiler-rt/include/mm_utils.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MM_UTILS_H +#define MAPLE_RUNTIME_MM_UTILS_H + +#include +#include +#include +#include +#include +#include + +#include "gc_log.h" + +using address_t = uintptr_t; + +namespace maplert { +namespace util { +void PrintPCSymbol(const void *pc); + +void PrintPCSymbolToLog(const void *pc, uint32_t logtype = kLogtypeRcTrace, bool printBt = true); + +void PrintPCSymbolToLog(const void *pc, std::ostream &ofs, bool printBt); + +void PrintBacktrace(); + +void PrintBacktrace(int32_t logFile); + +void PrintBacktrace(size_t limit, int32_t logFile); + +extern "C" void DumpObject(address_t obj, std::ostream &ofs); + +void WaitUntilAllThreadsStopped(); + +std::string GetLogDir(); +} // namespace util +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/mrt_common.h b/src/mrt/compiler-rt/include/mrt_common.h new file mode 100644 index 0000000000..eeace7987b --- /dev/null +++ b/src/mrt/compiler-rt/include/mrt_common.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_COMMON_H +#define MRT_COMMON_H + +// This file is used to declare some common use variables between mrt and mapleall +// For decoupling load +constexpr uint32_t kMplStaticDecoupleMagicNumber = 0x1a0; // For non-lazy static decouple trigger SIGSEGV +constexpr uint32_t kMplLazyStaticDecoupleMagicNumber = 0x1a1; // For lazy static decouple trigger SIGSEGV +constexpr uint32_t kMplLazyLoadMagicNumber = 0x1a2; // For lazy decouple trigger SIGSEGV +constexpr uint64_t kMplLazyLoadSentryNumber = 0x1a27b10d10810ade; // Sentry for offset table +constexpr uint64_t kMplStaticLazyLoadSentryNumber = 0x1a27b10d10810ad1; // Sentry for static offset table +constexpr uint32_t kMplArrayClassCacheMagicNumber = 0x1a3; // For init array class cache trigger SIGSEGV +#endif // MRT_COMMON_H diff --git a/src/mrt/compiler-rt/include/muid.h b/src/mrt/compiler-rt/include/muid.h new file mode 100644 index 0000000000..8fa6889594 --- /dev/null +++ b/src/mrt/compiler-rt/include/muid.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MUID_H +#define MUID_H + +// This is shared between maple compiler and runtime. +#include +#include +#include +#include + +#ifdef USE_32BIT_REF +#define USE_64BIT_MUID +#endif // USE_32BIT_REF + +constexpr unsigned int kSystemNamespace = 0xc0; +constexpr unsigned int kApkNamespace = 0x80; +constexpr unsigned int kBitMask = 0x3f; +constexpr unsigned int kGroupSize = 64; +constexpr unsigned int kShiftAmount = 32; +constexpr unsigned int kBlockLength = 16; +constexpr unsigned int kByteLength = 8; +constexpr unsigned int kNumLowAndHigh = 2; + +#ifdef USE_64BIT_MUID +constexpr unsigned int kMuidLength = 8; +#else +constexpr unsigned int kMuidLength = 16; +#endif // USE_64BIT_MUID + +constexpr unsigned int kDigestShortHashLength = 8; +constexpr unsigned int kDigestHashLength = 16; +union DigestHash { + uint8_t bytes[kDigestHashLength]; + struct { + uint64_t first; + uint64_t second; + } data; +}; + +// muid-related files are shared between maple compiler and runtime, thus not in +// namespace maplert +struct MuidContext { + unsigned int a; + unsigned int b; + unsigned int c; + unsigned int d; + unsigned int count[kNumLowAndHigh]; + unsigned int block[kBlockLength]; + unsigned char buffer[kGroupSize]; +}; + +class MUID { + public: + union { +#ifdef USE_64BIT_MUID + uint32_t words[kNumLowAndHigh] = { 0 }; + uint8_t bytes[kMuidLength]; + uint64_t raw; +#else + uint64_t words[kNumLowAndHigh] = { 0 }; + uint8_t bytes[kMuidLength]; +#endif // USE_64BIT_MUID + } data; + + inline bool IsSystemNameSpace() const { + return (data.bytes[kMuidLength - 1] & ~kBitMask) == kSystemNamespace; + } + inline bool IsApkNameSpace() const { + return (data.bytes[kMuidLength - 1] & ~kBitMask) == kApkNamespace; + } + inline void SetSystemNameSpace() { + data.bytes[kMuidLength - 1] &= kBitMask; + data.bytes[kMuidLength - 1] |= kSystemNamespace; + } + inline void SetApkNameSpace() { + data.bytes[kMuidLength - 1] &= kBitMask; + data.bytes[kMuidLength - 1] |= 
kApkNamespace; + } + bool operator<(const MUID &muid) const { + return (data.words[1] < muid.data.words[1] || + (data.words[1] == muid.data.words[1] && data.words[0] < muid.data.words[0])); + } + bool operator>(const MUID &muid) const { + return (data.words[1] > muid.data.words[1] || + (data.words[1] == muid.data.words[1] && data.words[0] > muid.data.words[0])); + } + bool operator==(const MUID &muid) const { + return data.words[1] == muid.data.words[1] && data.words[0] == muid.data.words[0]; + } + bool operator!=(const MUID &muid) const { + return data.words[1] != muid.data.words[1] || data.words[0] != muid.data.words[0]; + } + std::string ToStr() const { + std::stringstream sbuf; +#ifdef USE_64BIT_MUID + // 8 spaces to 64 bit + sbuf << std::setfill('0') << std::setw(8) << std::hex << data.words[1] << std::setfill('0') << std::setw(8) << + std::hex << data.words[0]; +#else + // 16 spaces to 32 bit + sbuf << std::setfill('0') << std::setw(16) << std::hex << data.words[1] << std::setfill('0') << std::setw(16) << + std::hex << data.words[0]; +#endif // USE_64BIT_MUID + return sbuf.str(); + } +}; + +void MuidInit(MuidContext &status); +void MuidDecode(MuidContext &status, const unsigned char &data, uint64_t size); + +template +void FullEncode(T &result, MuidContext &status); +void MuidEncode(unsigned char (&result)[kDigestShortHashLength], MuidContext &status); +void MuidEncode(unsigned char (&result)[kDigestHashLength], MuidContext &status, bool use64Bit = false); + +void GetMUIDHash(const unsigned char &data, size_t size, MUID &muid); +DigestHash GetDigestHash(const unsigned char &bytes, uint32_t len); +MUID GetMUID(const std::string &symbolName, bool forSystem = true); +#endif diff --git a/src/mrt/compiler-rt/include/mutator_list.h b/src/mrt/compiler-rt/include/mutator_list.h new file mode 100644 index 0000000000..d4655f225a --- /dev/null +++ b/src/mrt/compiler-rt/include/mutator_list.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MUTATOR_LIST_H +#define MAPLE_RUNTIME_MUTATOR_LIST_H + +#include +#include +#include +#include +#include +#include "mm_config.h" +#include "collector/collector.h" +#include "panic.h" +#include "syscall.h" +#include "allocator/page_allocator.h" + +#ifndef MRT_DEBUG_MUTATOR_LIST +#define MRT_DEBUG_MUTATOR_LIST __MRT_DEBUG_COND_FALSE +#endif + +namespace maplert { +class MutatorList { + public: + MutatorList() : lockOwnerTid(0) {} + ~MutatorList() = default; + // Gets the singleton instance. + static inline MutatorList &Instance() { + return *instance; + } + + // Replace the singleton instance with a new one. + // This should be called after forked from parent process. + static void reset(); + + // Add a mutator to the list. 
+ void AddMutator(Mutator &mutator) { + std::lock_guard lock(mutatorListMutex); + lockOwnerTid = static_cast(maple::GetTid()); + mutator.active = kMutatorDebugTrue; + mutator.Init(); + mutatorList.push_back(&mutator); + lockOwnerTid = 0; +#if MRT_DEBUG_MUTATOR_LIST + auto result = debugMap.insert({ &mutator, mutator.tid }); + if (UNLIKELY(!result.second)) { + // insert failed, means there is a record leaved by an abnormlly exited mutator. + LOG(FATAL) << "Found abnormally exited mutator!" << + " mutator: " << result.first->first << + " tid: " << result.first->second << maple::endl; + } +#endif + } + + // Remove a mutator from the list. + template + void RemoveMutator(Mutator &mutator, Func &&func) { + std::lock_guard lock(mutatorListMutex); + lockOwnerTid = static_cast(maple::GetTid()); + size_t sizeBeforeRemove __MRT_UNUSED = mutatorList.size(); + mutatorList.remove(&mutator); +#if __MRT_DEBUG + size_t removedCount = sizeBeforeRemove - mutatorList.size(); + if (UNLIKELY(removedCount != 1)) { + LOG(FATAL) << "RemoveMutator: invalid removed count: " << removedCount << maple::endl; + } +#endif + func(&mutator); + mutator.active = kMutatorDebugFalse; + lockOwnerTid = 0; +#if MRT_DEBUG_MUTATOR_LIST + size_t removed = debugMap.erase(&mutator); + if (UNLIKELY(removed != 1)) { + LOG(FATAL) << "Remove an invalid mutator" << maple::endl; + } +#endif + } + + // Do things when lock is hold. + template + void LockGuard(Func &&func) { + std::lock_guard lock(mutatorListMutex); + lockOwnerTid = static_cast(maple::GetTid()); + func(); + lockOwnerTid = 0; + } + + // Lock the mutex. + void Lock() { + mutatorListMutex.lock(); + lockOwnerTid = static_cast(maple::GetTid()); + } + + // Unlock the mutex. + bool TryLock() { + if (mutatorListMutex.try_lock()) { + lockOwnerTid = static_cast(maple::GetTid()); + return true; + } + return false; + } + + // Unlock the mutex. + void Unlock() { + lockOwnerTid = 0; + mutatorListMutex.unlock(); + } + + // Number of mutators in the list. + size_t Size() const { + return mutatorList.size(); + } + + // Gets the mutator list. + const std::list> &List() const { + return mutatorList; + } + + // Before using this interface, you need to call instance.Lock, + // and after using mutator, you should call instance.Unlock. + Mutator *GetMutator(uint32_t tid) { + __MRT_ASSERT(MutatorList::Instance().IsLockedBySelf() == true, "mutator list is not locked"); + for (auto &mutator : mutatorList) { + if (mutator->GetTid() == tid) { + return mutator; + } + } + return nullptr; + } + + // Visit all mutators, should be called + // in collector thread after the world stopped. + // Func: void func(Mutator* mutator); + template + void VisitMutators(Func &&func) { + for (auto &mutator : mutatorList) { + func(mutator); + } + } + + // Get stack begin with given tid + uintptr_t GetStackBeginByTid(uint32_t tid) { + for (auto &mutator : mutatorList) { + if (mutator->GetTid() == tid) { + return mutator->GetStackBegin(); + } + } + return 0; + } + + void DebugShowCurrentMutators(); + + bool IsLockedBySelf() const { + if (lockOwnerTid == 0) { + return false; + } + return lockOwnerTid == static_cast(maple::GetTid()); + } + + uint32_t LockOwner() const { + return lockOwnerTid; + } + + private: + // mutex use to protect mutatorList. + // we also use it to block all mutators when the world is stopped. + std::mutex mutatorListMutex; + + // list of all mutators, protected by mutatorListMutex. + std::list> mutatorList; + + // holder for this lock + volatile uint32_t lockOwnerTid; + + // The Singleton instance. 
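[Editor's note] As a usage sketch (not part of this patch): a caller that wants a consistent snapshot of all mutators takes the list lock via LockGuard and only touches mutators inside the callback, matching the locking rules documented above. InSaferegion() is assumed here from the Mutator interface used later in saferegion.h:

    // Hedged sketch: count mutators currently inside saferegion, holding the list lock
    // for the whole traversal.
    static size_t CountMutatorsInSaferegion() {
      size_t count = 0;
      MutatorList::Instance().LockGuard([&count]() {
        MutatorList::Instance().VisitMutators([&count](Mutator *mutator) {
          if (mutator->InSaferegion()) {   // assumed accessible from this context
            ++count;
          }
        });
      });
      return count;
    }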
+ __attribute__((visibility("default"))) static ImmortalWrapper instance; + +#if MRT_DEBUG_MUTATOR_LIST + // mutator --> tid map, for debug purpose. + std::unordered_map debugMap; +#endif +}; +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/namemangler.h b/src/mrt/compiler-rt/include/namemangler.h new file mode 100644 index 0000000000..d95e725b7d --- /dev/null +++ b/src/mrt/compiler-rt/include/namemangler.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef NAMEMANGLER_H +#define NAMEMANGLER_H +#include +#include + +// This is a general name mangler which is shared between maple compiler and runtime. +// maple-compiler-specific data structure may NOT be used here. +namespace namemangler { +#define TO_STR(s) TO_STR2(s) +#define TO_STR2(s) #s + + +#define VTAB_PREFIX __vtb_ +#define ITAB_PREFIX __itb_ +#define VTAB_AND_ITAB_PREFIX __vtb_and_itb_ +#define ITAB_CONFLICT_PREFIX __itbC_ +#define CLASSINFO_PREFIX __cinf_ +#define CLASSINFO_RO_PREFIX __classinforo__ +#define SUPERCLASSINFO_PREFIX __superclasses__ +#define PRIMITIVECLASSINFO_PREFIX __pinf_ +#define CLASS_INIT_BRIDGE_PREFIX __ClassInitBridge__ +#define GCTIB_PREFIX MCC_GCTIB__ +#define REF_PREFIX REF_ +#define JARRAY_PREFIX A + +#define VTAB_PREFIX_STR TO_STR(VTAB_PREFIX) +#define ITAB_PREFIX_STR TO_STR(ITAB_PREFIX) +#define VTAB_AND_ITAB_PREFIX_STR TO_STR(VTAB_AND_ITAB_PREFIX) +#define ITAB_CONFLICT_PREFIX_STR TO_STR(ITAB_CONFLICT_PREFIX) +#define CLASSINFO_PREFIX_STR TO_STR(CLASSINFO_PREFIX) +#define CLASSINFO_RO_PREFIX_STR TO_STR(CLASSINFO_RO_PREFIX) +#define SUPERCLASSINFO_PREFIX_STR TO_STR(SUPERCLASSINFO_PREFIX) +#define PRIMITIVECLASSINFO_PREFIX_STR TO_STR(PRIMITIVECLASSINFO_PREFIX) +#define CLASS_INIT_BRIDGE_PREFIX_STR TO_STR(CLASS_INIT_BRIDGE_PREFIX) +#define GCTIB_PREFIX_STR TO_STR(GCTIB_PREFIX) +#define REF_PREFIX_STR TO_STR(REF_PREFIX) +#define JARRAY_PREFIX_STR TO_STR(JARRAY_PREFIX) + +// Names of all compiler-generated tables and accessed by runtime +static constexpr const char kMuidPrefixStr[] = "__muid_"; +static constexpr const char kMuidRoPrefixStr[] = "__muid_ro"; +static constexpr const char kMuidFuncDefTabPrefixStr[] = "__muid_func_def_tab"; +static constexpr const char kMuidFuncDefOrigTabPrefixStr[] = "__muid_ro_func_def_orig_tab"; +static constexpr const char kMuidFuncInfTabPrefixStr[] = "__muid_ro_func_inf_tab"; +static constexpr const char kMuidFuncMuidIdxTabPrefixStr[] = "__muid_ro_func_muid_idx_tab"; +static constexpr const char kMuidDataDefTabPrefixStr[] = "__muid_data_def_tab"; +static constexpr const char kMuidDataDefOrigTabPrefixStr[] = "__muid_ro_data_def_orig_tab"; +static constexpr const char kMuidFuncUndefTabPrefixStr[] = "__muid_func_undef_tab"; +static constexpr const char kMuidDataUndefTabPrefixStr[] = "__muid_data_undef_tab"; +static constexpr const char kMuidFuncDefMuidTabPrefixStr[] = "__muid_ro_func_def_muid_tab"; +static constexpr const char kMuidDataDefMuidTabPrefixStr[] = "__muid_ro_data_def_muid_tab"; +static 
constexpr const char kMuidFuncUndefMuidTabPrefixStr[] = "__muid_ro_func_undef_muid_tab"; +static constexpr const char kMuidDataUndefMuidTabPrefixStr[] = "__muid_ro_data_undef_muid_tab"; +static constexpr const char kMuidVtabAndItabPrefixStr[] = "__muid_vtab_and_itab"; +static constexpr const char kMuidItabConflictPrefixStr[] = "__muid_itab_conflict"; +static constexpr const char kMuidColdVtabAndItabPrefixStr[] = "__muid_cold_vtab_and_itab"; +static constexpr const char kMuidColdItabConflictPrefixStr[] = "__muid_cold_itab_conflict"; +static constexpr const char kMuidVtabOffsetPrefixStr[] = "__muid_vtab_offset_tab"; +static constexpr const char kMuidFieldOffsetPrefixStr[] = "__muid_field_offset_tab"; +static constexpr const char kMuidVtabOffsetKeyPrefixStr[] = "__muid_vtable_offset_key_tab"; +static constexpr const char kMuidFieldOffsetKeyPrefixStr[] = "__muid_field_offset_key_tab"; +static constexpr const char kMuidValueOffsetPrefixStr[] = "__muid_offset_value_table"; +static constexpr const char kMuidLocalClassInfoStr[] = "__muid_local_classinfo_tab"; +static constexpr const char kMuidSuperclassPrefixStr[] = "__muid_superclass"; +static constexpr const char kMuidGlobalRootlistPrefixStr[] = "__muid_globalrootlist"; +static constexpr const char kMuidClassMetadataPrefixStr[] = "__muid_classmetadata"; +static constexpr const char kMuidClassMetadataBucketPrefixStr[] = "__muid_classmetadata_bucket"; +static constexpr const char kMuidJavatextPrefixStr[] = "java_text"; +static constexpr const char kMuidDataSectionStr[] = "__data_section"; +static constexpr const char kMuidRangeTabPrefixStr[] = "__muid_range_tab"; +static constexpr const char kMuidConststrPrefixStr[] = "__muid_conststr"; +static constexpr const char kVtabOffsetTabStr[] = "__vtable_offset_table"; +static constexpr const char kFieldOffsetTabKeyStr[] = "__field_offset_key_table"; +static constexpr const char kFieldOffsetTabStr[] = "__field_offset_table"; +static constexpr const char kVtableKeyOffsetTabStr[] = "__vtable_offset_key_table"; +static constexpr const char kVtableOffsetTabKeyStr[] = "__vtable_offset_key_table"; +static constexpr const char kFieldKeyOffsetTabStr[] = "__field_offset_table"; +static constexpr const char kOffsetTabStr[] = "__offset_value_table"; +static constexpr const char kInlineCacheTabStr[] = "__inline_cache_table"; +static constexpr const char kLocalClassInfoStr[] = "__local_classinfo_table"; +static constexpr const char kMethodsInfoPrefixStr[] = "__methods_info__"; +static constexpr const char kMethodsInfoCompactPrefixStr[] = "__methods_infocompact__"; +static constexpr const char kFieldsInfoPrefixStr[] = "__fields_info__"; +static constexpr const char kFieldsInfoCompactPrefixStr[] = "__fields_infocompact__"; +static constexpr const char kFieldOffsetDataPrefixStr[] = "__fieldOffsetData__"; +static constexpr const char kMethodAddrDataPrefixStr[] = "__methodAddrData__"; +static constexpr const char kMethodSignaturePrefixStr[] = "__methodSignature__"; +static constexpr const char kParameterTypesPrefixStr[] = "__parameterTypes__"; +static constexpr const char kRegJNITabPrefixStr[] = "__reg_jni_tab"; +static constexpr const char kRegJNIFuncTabPrefixStr[] = "__reg_jni_func_tab"; +static constexpr const char kReflectionStrtabPrefixStr[] = "__reflection_strtab"; +static constexpr const char kReflectionStartHotStrtabPrefixStr[] = "__reflection_start_hot_strtab"; +static constexpr const char kReflectionBothHotStrTabPrefixStr[] = "__reflection_both_hot_strtab"; +static constexpr const char 
kReflectionRunHotStrtabPrefixStr[] = "__reflection_run_hot_strtab"; +static constexpr const char kReflectionNoEmitStrtabPrefixStr[] = "__reflection_no_emit_strtab"; +static constexpr const char kMarkMuidFuncDefStr[] = "muid_func_def:"; +static constexpr const char kMarkMuidFuncUndefStr[] = "muid_func_undef:"; +static constexpr const char kGcRootList[] = "gcRootNewList"; +static constexpr const char kDecoupleOption[] = "__decouple_option"; +static constexpr const char kDecoupleStr[] = "__decouple"; +static constexpr const char kCompilerVersionNum[] = "__compilerVersionNum"; +static constexpr const char kCompilerVersionNumStr[] = "__compilerVersionNumTab"; +static constexpr const char kCompilerMfileStatus[] = "__compiler_mfile_status"; +static constexpr const char kMapleGlobalVariable[] = "maple_global_variable"; +static constexpr const char kMapleLiteralString[] = "maple_literal_string"; + +static constexpr const char kSourceMuid[] = "__sourceMuid"; +static constexpr const char kSourceMuidSectionStr[] = "__sourceMuidTab"; +static constexpr const char kDecoupleStaticKeyStr[] = "__staticDecoupleKeyOffset"; +static constexpr const char kDecoupleStaticValueStr[] = "__staticDecoupleValueOffset"; +static constexpr const char kMarkDecoupleStaticStr[] = "decouple_static:"; +static constexpr const char kClassInfoPrefix[] = "__cinf"; +static constexpr const char kBssSectionStr[] = "__bss_section"; +static constexpr const char kLinkerHashSoStr[] = "__linkerHashSo"; + +static constexpr const char kStaticFieldNamePrefixStr[] = "__static_field_name"; +static constexpr const char kMplSuffix[] = ".mpl"; +static constexpr const char kClinvocation[] = ".clinvocation"; +static constexpr const char kPackageNameSplitterStr[] = "_2F"; +static constexpr const char kFileNameSplitterStr[] = "$$"; +static constexpr const char kNameSplitterStr[] = "_7C"; // 7C is the ascii code for | +static constexpr const char kRigthBracketStr[] = "_29"; // 29 is the ascii code for ) +static constexpr const char kClassNameSplitterStr[] = "_3B_7C"; +static constexpr const char kJavaLangClassStr[] = "Ljava_2Flang_2FClass_3B"; +static constexpr const char kJavaLangObjectStr[] = "Ljava_2Flang_2FObject_3B"; +static constexpr const char kJavaLangClassloader[] = "Ljava_2Flang_2FClassLoader_3B"; +static constexpr const char kJavaLangObjectStrJVersion[] = "Ljava/lang/Object;"; +static constexpr const char kJavaLangStringStr[] = "Ljava_2Flang_2FString_3B"; +static constexpr const char kJavaLangExceptionStr[] = "Ljava_2Flang_2FException_3B"; +static constexpr const char kThrowClassStr[] = "Ljava_2Flang_2FThrowable_3B"; +static constexpr const char kReflectionClassesPrefixStr[] = "Ljava_2Flang_2Freflect_2F"; +static constexpr const char kReflectionClassMethodStr[] = "Ljava_2Flang_2Freflect_2FMethod_241_3B"; +static constexpr const char kClassMetadataTypeName[] = "__class_meta__"; +static constexpr const char kPtrPrefixStr[] = "_PTR"; +static constexpr const char kClassINfoPtrPrefixStr[] = "_PTR__cinf_"; +static constexpr const char kArrayClassInfoPrefixStr[] = "__cinf_A"; +static constexpr const char kShadowClassName[] = "shadow_24__klass__"; +static constexpr const char kClinitSuffix[] = "_7C_3Cclinit_3E_7C_28_29V"; +static constexpr const char kCinitStr[] = "_7C_3Cinit_3E_7C_28"; +static constexpr const char kClinitSubStr[] = "7C_3Cinit_3E_7C"; + +static constexpr const char kPreNativeFunc[] = "MCC_PreNativeCall"; +static constexpr const char kPostNativeFunc[] = "MCC_PostNativeCall"; +static constexpr const char kDecodeRefFunc[] = 
"MCC_DecodeReference"; +static constexpr const char kFindNativeFunc[] = "MCC_FindNativeMethodPtr"; +static constexpr const char kFindNativeFuncNoeh[] = "MCC_FindNativeMethodPtrWithoutException"; +static constexpr const char kDummyNativeFunc[] = "MCC_DummyNativeMethodPtr"; +static constexpr const char kCheckThrowPendingExceptionFunc[] = "MCC_CheckThrowPendingException"; +static constexpr const char kCallFastNative[] = "MCC_CallFastNative"; +static constexpr const char kCallFastNativeExt[] = "MCC_CallFastNativeExt"; +static constexpr const char kCallSlowNativeExt[] = "MCC_CallSlowNativeExt"; +static constexpr const char kSetReliableUnwindContextFunc[] = "MCC_SetReliableUnwindContext"; + +static constexpr const char kArrayClassCacheTable[] = "__arrayClassCacheTable"; +static constexpr const char kArrayClassCacheNameTable[] = "__muid_ro_arrayClassCacheNameTable"; +static constexpr const char kFunctionLayoutStr[] = "__func_layout__"; + +static constexpr const char kFunctionProfileTabPrefixStr[] = "__muid_profile_func_tab"; + +static constexpr const char kBBProfileTabPrefixStr[] = "__muid_prof_counter_tab"; +static constexpr const char kFuncIRProfInfTabPrefixStr[] = "__muid_prof_ir_desc_tab"; + +static constexpr const char kBindingProtectedRegionStr[] = "__BindingProtectRegion__"; + +static constexpr const char kClassNamePrefixStr[] = "L"; +static constexpr const char kClassMethodSplitterStr[] = "_3B"; +static constexpr const char kFuncGetCurrentCl[] = "MCC_GetCurrentClassLoader"; +// Serve as a global flag to indicate whether frequent strings have been compressed +extern bool doCompression; + +// Return the input string if the compression is not on; otherwise, return its compressed version +std::string GetInternalNameLiteral(std::string name); +std::string GetOriginalNameLiteral(std::string name); + +std::string EncodeName(const std::string &name); +std::string DecodeName(const std::string &name); +void DecodeMapleNameToJavaDescriptor(const std::string &nameIn, std::string &nameOut); + +std::string NativeJavaName(const std::string &name, bool overLoaded = true); + +__attribute__((visibility("default"))) unsigned UTF16ToUTF8(std::string &str, const std::u16string &str16, + unsigned short num = 0, bool isBigEndian = false); +__attribute__((visibility("default"))) unsigned UTF8ToUTF16(std::u16string &str16, const std::string &str, + unsigned short num = 0, bool isBigEndian = false); +void GetUnsignedLeb128Encode(std::vector &dest, uint32_t value); +uint32_t GetUnsignedLeb128Decode(const uint8_t **data); +uint64_t GetUleb128Encode(uint64_t val); +uint64_t GetSleb128Encode(int64_t val); +uint64_t GetUleb128Decode(uint64_t val); +int64_t GetSleb128Decode(uint64_t val); +size_t GetUleb128Size(uint64_t val); +size_t GetSleb128Size(int32_t val); +bool NeedConvertUTF16(const std::string &str8); +} // namespace namemangler + +#endif diff --git a/src/mrt/compiler-rt/include/panic.h b/src/mrt/compiler-rt/include/panic.h new file mode 100644 index 0000000000..6deb6c0840 --- /dev/null +++ b/src/mrt/compiler-rt/include/panic.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_PANIC_H +#define MAPLE_RUNTIME_PANIC_H + +#include +#include "mm_config.h" +#include "mm_utils.h" + + +extern "C" void abort_saferegister(void *addr); + +namespace maplert { +void MRT_Panic() __attribute__((noreturn)); + +// This macro controls all mrt debugging code +// submodules might define their own debug macro; they should use the format: +// define SUBMODULE_DEBUG __MRT_DEBUG_COND_TRUE +// if you want to enable the submodule debugging code, or +// define SUBMODULE_DEBUG __MRT_DEBUG_COND_FALSE +// if you want to turn off the submodule debugging code +// +// explanation: __MRT_DEBUG_COND_FALSE is always false +// __MRT_DEBUG_COND_TRUE is only true when __MRT_DEBUG is true +// have a look at ROSIMPL_ASSERT for example +#if __MRT_DEBUG +void __MRT_AssertBreakPoint() __attribute__((noinline)); +#define __MRT_DEBUG_COND_TRUE (true) +#define __MRT_DEBUG_COND_FALSE (false) +#ifdef __ANDROID__ +#define __MRT_ASSERT(p, msg) \ + do { \ + if (!(p)) { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ << ":" << msg << maple::endl; \ + maplert::__MRT_AssertBreakPoint(); \ + maplert::MRT_Panic(); \ + } \ + } while (0) +#define __MRT_ASSERT_ADDR(p, msg, addr) \ + do { \ + if (!(p)) { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ << ":" << msg << maple::endl; \ + mapelrt::__MRT_AssertBreakPoint(); \ + abort_saferegister(addr); \ + } \ + } while (0) +#else +#define __MRT_ASSERT(p, msg) \ + do { \ + if (!(p)) { \ + (void)printf("%s:%d:%s", __FILE__, __LINE__, msg); \ + util::PrintBacktrace(); \ + maplert::MRT_Panic(); \ + } \ + } while (0) +#define __MRT_ASSERT_ADDR(p, msg, addr) \ + do { \ + if (!(p)) { \ + (void)printf("%s:%d:%s", __FILE__, __LINE__, msg); \ + util::PrintBacktrace(); \ + abort_saferegister(addr); \ + } \ + } while (0) +#endif +#define __MRT_UNUSED +#define __MRT_CallAndAssertTrue(fcall, msg) __MRT_ASSERT(fcall, msg) +#define __MRT_CallAndAssertFalse(fcall, msg) __MRT_ASSERT(!fcall, msg) +#else +#define __MRT_AssertBreakPoint() +#define __MRT_DEBUG_COND_TRUE (false) +#define __MRT_DEBUG_COND_FALSE (false) +#define __MRT_ASSERT(p, msg) +#define __MRT_UNUSED __attribute__((unused)) +#define __MRT_CallAndAssertTrue(fcall, msg) (void) fcall +#define __MRT_CallAndAssertFalse(fcall, msg) (void) fcall +#endif +} // namespace maplert + +#endif // MAPLE_RUNTIME_PANIC_H + diff --git a/src/mrt/compiler-rt/include/profile.h b/src/mrt/compiler-rt/include/profile.h new file mode 100644 index 0000000000..45370a4420 --- /dev/null +++ b/src/mrt/compiler-rt/include/profile.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_PROFILE_H +#define MAPLE_RUNTIME_PROFILE_H + +#include +#include + +#include "address.h" + +#define RECORD_FUNC_NAME 0 + +namespace maplert { +void DumpRCAndGCPerformanceInfo(std::ostream &os); +void RecordMethod(uint64_t faddr, std::string &func, std::string &soname); +bool CheckMethodResolved(uint64_t faddr); +void DumpMethodUse(std::ostream &os); + +void RecordStaticField(address_t *addr, const std::string name); +void DumpStaticField(std::ostream &os); +void ClearFuncProfile(); +} + +#endif diff --git a/src/mrt/compiler-rt/include/profile_type.h b/src/mrt/compiler-rt/include/profile_type.h new file mode 100644 index 0000000000..26e59216c3 --- /dev/null +++ b/src/mrt/compiler-rt/include/profile_type.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef PROFILE_TYPE_H +#define PROFILE_TYPE_H + +static constexpr uint8_t kProfileMagic[] = { 'm', 'a', 'p', 'l', 'e', '.', 'p', 'r', 'o', 'f', 'i', 'l', 'e', '\0' }; +static constexpr uint8_t kVer[] = { 0, 0, 1 }; + +enum ProfileFileType : uint8_t { + kSystemServer = 0x00, + kApp = 0x01 +}; + +enum ProfileType : uint8_t { + kFunction = 0x00, + kClassMeta = 0x01, + kFieldMeta = 0x02, + kMethodMeta = 0x03, + kReflectionStr = 0x04, + kLiteral = 0x05, + kBBInfo = 0x06, + kIRCounter = 0x07, + kAll = 0x08, + kMethodSig = 0x09, + kFileDesc = 0xFF +}; + +enum FuncIRItemIndex : uint8_t { + kFuncIRItemNameIndex, + kCounterStartIndex, + kCounterEndIndex, + kHashIndex +}; + +enum FuncItemIndex : uint8_t { + kFuncItemNameIndex, + kFuncTypeIndex, + kFuncCallTimesIndex +}; + +enum FuncNameIndex : uint8_t { + kClassNameIndex, + kFuncNameIndex, + kSignatureNameIndex, +}; + +struct ProfileDataInfo { + uint32_t profileDataOff; + uint8_t profileType; + uint8_t mapleFileNum; + uint16_t pad = 0; + ProfileDataInfo() = default; + ProfileDataInfo(uint32_t profileDataOff, uint8_t profileType, uint8_t mapleFileNum) + : profileDataOff(profileDataOff), profileType(profileType), mapleFileNum(mapleFileNum) {} +}; + +struct FunctionItem { + uint32_t classIdx; + uint32_t methodIdx; + uint32_t sigIdx; + uint32_t callTimes; + uint8_t type; + FunctionItem(uint32_t classIdx, uint32_t methodIdx, uint32_t sigIdx, uint32_t callTimes, uint8_t type) + : classIdx(classIdx), methodIdx(methodIdx), sigIdx(sigIdx), callTimes(callTimes), type(type) {} +}; + +struct FunctionIRProfItem { + uint64_t hash; + uint32_t classIdx; + uint32_t methodIdx; + uint32_t sigIdx; + uint32_t counterStart; + uint32_t counterEnd; + FunctionIRProfItem(uint64_t hash, uint32_t classIdx, uint32_t methodIdx, uint32_t sigIdx, uint32_t start, + uint32_t end) + : hash(hash), classIdx(classIdx), methodIdx(methodIdx), sigIdx(sigIdx), counterStart(start), counterEnd(end) {} +}; + +struct FuncCounterItem { + uint32_t callTimes; + FuncCounterItem(uint32_t callTimes) : callTimes(callTimes) {} +}; + +struct MetaItem { + uint32_t idx; + MetaItem(uint32_t idx) : idx(idx) {} +}; + +struct MethodSignatureItem { + uint32_t methodIdx; + uint32_t sigIdx; 
+ MethodSignatureItem(uint32_t methodIdx, uint32_t sigIdx) : methodIdx(methodIdx), sigIdx(sigIdx) {} +}; + +struct ReflectionStrItem { + uint8_t type; + uint32_t idx; + ReflectionStrItem(uint32_t idx, uint8_t type) : type(type), idx(idx) {} +}; + +struct MapleFileProf { + uint32_t idx; + uint32_t num; + uint32_t size; + MapleFileProf(uint32_t idx, uint32_t num, uint32_t size) : idx(idx), num(num), size(size) {} +}; + +constexpr int kMagicNum = 14; +constexpr int kVerNum = 3; +constexpr int kCheckSumNum = 4; +struct Header { + uint8_t magic[kMagicNum] = {}; + uint8_t ver[kVerNum] = {}; + uint8_t checkSum[kCheckSumNum] = {}; + uint8_t profileNum = 0; + uint8_t profileFileType = 0; + uint8_t pad = 0; + uint32_t headerSize = 0; + uint32_t stringCount = 0; + uint32_t stringTabOff = 0; + ProfileDataInfo data[1] = {}; // profile data info detemined by runtime +}; + +#endif diff --git a/src/mrt/compiler-rt/include/saferegion.h b/src/mrt/compiler-rt/include/saferegion.h new file mode 100644 index 0000000000..cb7b659810 --- /dev/null +++ b/src/mrt/compiler-rt/include/saferegion.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_SAFEREGION_H +#define MAPLE_RUNTIME_SAFEREGION_H + +#include "tls_store.h" +#include "collector/collector.h" +#include "mrt_exception_api.h" + +const uint64_t kSafeRegionStateMagic1 = 0x1122334455667788; +const uint64_t kSafeRegionStateMagic2 = 0x8877665544332211; + +// Saferegion inline functions +namespace maplert { +// CurrentMutator() is similar to TLMutator() but +// returns TLS mutator with the base class. +// we can use this function to avoid unnecessary dependence. +static inline Mutator &CurrentMutator() { + return *(reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotMutator))); +} + +static inline Mutator *CurrentMutatorPtr() { + if (LIKELY(maple::tls::HasTLS())) { + return reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotMutator)); + } + return nullptr; +} + +// The global saferegion state. +class SaferegionState { + public: + uint64_t magic1 = kSafeRegionStateMagic1; + union { + struct { + // we assume little endian. + uint32_t saferegionCount; // number of mutators in saferegion. + uint32_t pendingCount; // number of mutators pending for stop. + } asStruct; + std::atomic asAtomic; + uint64_t asUint64; + }; + uint64_t magic2 = kSafeRegionStateMagic2; + + // default ctor (value is undefined). + __attribute__((always_inline)) + SaferegionState() = default; + + __attribute__((always_inline)) + ~SaferegionState() = default; + + // init with value. + __attribute__((always_inline)) + explicit SaferegionState(uint64_t value) : asUint64(value) {} + + // copy ctor. + __attribute__((always_inline)) + SaferegionState(const SaferegionState &state) : asUint64(state.asUint64) {} + + // copy assign. 
+ __attribute__((always_inline)) + SaferegionState &operator=(const SaferegionState &state) { + asUint64 = state.asUint64; + return *this; + } + + // we treat all mutator stopped when pendingCount == saferegionCount. + __attribute__((always_inline)) + inline bool AllMutatorStopped() const { + return asStruct.pendingCount == asStruct.saferegionCount; + } + + // static functions + __attribute__((always_inline)) + static inline int *SaferegionCountAddr() { + return reinterpret_cast(&instance.asStruct.saferegionCount); + } + + __attribute__((always_inline)) + static inline int *PendingCountAddr() { + return reinterpret_cast(&instance.asStruct.pendingCount); + } + + // Atomic load saferegion state. + __attribute__((always_inline)) + static inline SaferegionState Load() { + instance.CheckMagic(); + return SaferegionState(instance.asAtomic.load(std::memory_order_acquire)); + } + + // Set pending count by CAS, return the old saferegion state. + __attribute__((always_inline)) + static inline SaferegionState SetPendingCount(uint32_t pendingCount) { + SaferegionState newState; + uint64_t oldState = instance.asAtomic.load(std::memory_order_relaxed); + do { + newState.asUint64 = oldState; + newState.asStruct.pendingCount = pendingCount; + } while (!SaferegionState::instance.asAtomic.compare_exchange_weak(oldState, newState.asUint64, + std::memory_order_release, std::memory_order_relaxed)); + return SaferegionState(oldState); + } + + // Atomic Inc saferegion count, return the old saferegion state. + __attribute__((always_inline)) + static inline SaferegionState IncSaferegionCount() { + instance.CheckMagic(); + uint64_t oldState = instance.asAtomic.fetch_add(1, std::memory_order_release); + return SaferegionState(oldState); + } + + // Atomic Dec saferegion count, return the old saferegion state. + __attribute__((always_inline)) + static inline SaferegionState DecSaferegionCount() { + instance.CheckMagic(); + uint64_t oldState = instance.asAtomic.fetch_sub(1, std::memory_order_release); + return SaferegionState(oldState); + } + + private: + // global instance of saferegion state. + MRT_EXPORT static SaferegionState instance; + + __attribute__((always_inline)) + inline void CheckMagic() { + if (UNLIKELY(magic1 != kSafeRegionStateMagic1 || magic2 != kSafeRegionStateMagic2)) { + CheckMagicFailed(); + } + } + + MRT_EXPORT void CheckMagicFailed(); +}; + +// slow path of enter saferegion. +MRT_EXPORT void EnterSaferegionSlowPath(); + +// slow path of leave saferegion. +MRT_EXPORT void LeaveSaferegionSlowPath(); + +MRT_EXPORT void StackScanBarrierSlowPath(); + +// Force this mutator enter saferegion, internal use only. +inline void Mutator::DoEnterSaferegion() { +#if __MRT_DEBUG + if (UNLIKELY(!IsActive())) { + LOG(FATAL) << "DoEnterSaferegion on inactive mutator" << maple::endl; + } +#endif + + // increase saferegionCount and load pendingCount. + SaferegionState state = SaferegionState::IncSaferegionCount(); + + // let inc saferegionCount visible when we see saferegion state set to true. + std::atomic_thread_fence(std::memory_order_release); + + // set current mutator in saferegion. + SetInSaferegion(true); + + // compare old saferegionCount and pendingCount. + if (UNLIKELY(state.asStruct.saferegionCount + 1 == state.asStruct.pendingCount)) { + // slow path: + // if this is the last mutator entering saferegion, wakeup StopTheWorld(). + EnterSaferegionSlowPath(); + } +} + +// Force this mutator leave saferegion, internal use only. 
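[Editor's note] To make the counter protocol above concrete: a stop-the-world initiator (sketched below, not the code in this patch) would publish the number of mutators it waits for via SetPendingCount and then wait until every active mutator has entered saferegion, i.e. until AllMutatorStopped() holds. The sketch assumes <thread> is available and polls instead of using the runtime's real wakeup path:

    // Hedged sketch of the initiator side; the real StopTheWorld implementation lives in the
    // collector and is woken by EnterSaferegionSlowPath() rather than polling.
    static void WaitForAllMutatorsInSaferegion(uint32_t numActiveMutators) {
      // Tell mutators how many of them must be in saferegion before the world counts as stopped.
      (void)SaferegionState::SetPendingCount(numActiveMutators);
      while (!SaferegionState::Load().AllMutatorStopped()) {
        std::this_thread::yield();  // the last mutator entering saferegion takes the slow path and wakes the initiator
      }
    }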
+inline void Mutator::DoLeaveSaferegion() { +#if __MRT_DEBUG + if (UNLIKELY(!IsActive())) { + LOG(FATAL) << "DoLeaveSaferegion on inactive mutator" << maple::endl; + } +#endif + + // decrease saferegion count and load pendingCount. + SaferegionState state = SaferegionState::DecSaferegionCount(); + // go slow path if pendingCount is set. + if (UNLIKELY(state.asStruct.pendingCount != 0)) { + // slow path: + // pendingCount is set, this means the world is stopping + // or stoppped, we should block until the world start. + LeaveSaferegionSlowPath(); + } + + // set in_saferegion flag to false. + SetInSaferegion(false); + + // check if need to help gc thread to do stack scan + StackScanBarrier(); +} + +// Let this mutator enter saferegion. +inline bool Mutator::EnterSaferegion(bool rememberLastJavaFrame) { + // if mutator not in saferegion. + if (LIKELY(!InSaferegion())) { + // save current stack pointer as the stack scan end pointer. + void *stackPointer = nullptr; + __asm__ volatile ("mov %0, sp" : "=r" (stackPointer)); + SaveStackEnd(stackPointer); + + if (rememberLastJavaFrame) { + const uint32_t *ip = reinterpret_cast(__builtin_return_address(0)); // caller pc + CallChain *fa = reinterpret_cast(__builtin_frame_address(1)); // caller frame + MRT_UpdateLastUnwindContextIfReliable(ip, fa); + } + + // let mutator enter saferegion. + DoEnterSaferegion(); + // mutator in_saferegion state changed. + return true; + } + // mutator in_saferegion state not changed. + return false; +} + +// Let this mutator leave saferegion. +inline bool Mutator::LeaveSaferegion() { + // if mutator in saferegion. + if (LIKELY(InSaferegion())) { + // let mutator leave saferegion. + DoLeaveSaferegion(); + // mutator in_saferegion state changed. + return true; + } + // mutator in_saferegion state not changed. + return false; +} + +inline void Mutator::StackScanBarrier() { + if (scanState.load() != kFinishScan) { + StackScanBarrierSlowPath(); + } +} +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/sizes.h b/src/mrt/compiler-rt/include/sizes.h new file mode 100644 index 0000000000..5cc6b14ff9 --- /dev/null +++ b/src/mrt/compiler-rt/include/sizes.h @@ -0,0 +1,738 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_SIZES_H +#define MAPLE_RUNTIME_SIZES_H + +#include +#include +#include +#include + +#ifdef OBJSCAN_DEBUG +#include +#include +#endif + +#include "thread_offsets.h" +#include "mm_utils.h" +#include "address.h" +#include "chelper.h" +#include "exception/mrt_exception.h" + +namespace maplert { +// Paranoid assertions. Please find clues in language specifications or ABIs +// that these predicates must hold. +#if defined(__aarch64__) +static_assert(sizeof(uint64_t) == sizeof(size_t), + "size_t does not have the same size as uint64_t. We are probably not " + "working on a 64-bit platform."); + +static_assert(sizeof(int64_t) == sizeof(ssize_t), + "ssize_t does not have the same size as int64_t. 
We are probably not " + "working on a 64-bit platform."); +#elif defined(__arm__) +static_assert(sizeof(uint32_t) == sizeof(size_t), + "size_t does not have the same size as uint32_t. We are probably not " + "working on a 32-bit platform."); + +static_assert(sizeof(int32_t) == sizeof(ssize_t), + "ssize_t does not have the same size as int32_t. We are probably not " + "working on a 64-bit platform."); +#endif + +static_assert(sizeof(uintptr_t) == sizeof(size_t), + "size_t does not have the same size as uintptr_t."); + +static_assert(sizeof(intptr_t) == sizeof(ssize_t), + "ssize_t does not have the same size as intptr_t."); + +// AArch64-specific sizes. +const size_t kHWordBytes = 2; +const size_t kWordBytes = 4; +const size_t kDWordBytes = 8; +const size_t kQWordBytes = 16; + +#if CONFIG_JSAN +const int kJsanHeaderSize = kDWordBytes; + +const offset_t kOffsetJsanHeader = -(kDWordBytes + kDWordBytes); +const offset_t kOffsetObjStatus = -(kDWordBytes + kWordBytes); + +static constexpr uint32_t kObjMagic = 0xabcabcab; +static constexpr uint32_t kObjStatusUnknown = 0x0; +static constexpr uint32_t kObjStatusAllocated = 0x1; +static constexpr uint32_t kObjStatusQuarantined = 0x2; + +static inline void JSANSetAllocated(address_t objAddr) { + *reinterpret_cast(objAddr + kOffsetJsanHeader) = kObjMagic; + *reinterpret_cast(objAddr + kOffsetObjStatus) = kObjStatusAllocated; +} + +static inline void JSANSetQuarantined(address_t objAddr) { + *reinterpret_cast(objAddr + kOffsetObjStatus) = kObjStatusQuarantined; +} + +static inline void JSANSetFreed(address_t objAddr) { + *reinterpret_cast(objAddr + kOffsetJsanHeader) = 0; + *reinterpret_cast(objAddr + kOffsetObjStatus) = kObjStatusUnknown; +} + +static inline bool JSANIsValidObjAddress(address_t objAddr) { + return *reinterpret_cast(objAddr + kOffsetJsanHeader) == kObjMagic; +} + +static inline uint32_t JSANGetObjStatus(address_t objAddr) { + if (!JSANIsValidObjAddress(objAddr)) { + return kObjStatusUnknown; + } + return *reinterpret_cast(objAddr + kOffsetObjStatus); +} +#else +const int kJsanHeaderSize = 0; +#endif + +#if RC_HOT_OBJECT_DATA_COLLECT +const size_t kTrackFrameNum = 4; +const size_t kRcHotHeaderSize = kDWordBytes + kDWordBytes * kTrackFrameNum; + +// The offset of the RC Operation count field. +const offset_t kOffsetRcOperationCount = -(kWordBytes + kDWordBytes); + +// The offset of the GC count field. 
+#if RC_HOT_OBJECT_DATA_COLLECT
+const size_t kTrackFrameNum = 4;
+const size_t kRcHotHeaderSize = kDWordBytes + kDWordBytes * kTrackFrameNum;
+
+// The offset of the RC operation count field.
+const offset_t kOffsetRcOperationCount = -(kWordBytes + kDWordBytes);
+
+// The offset of the GC count field.
+const offset_t kOffsetGcCountCount = -(kDWordBytes + kDWordBytes);
+
+const offset_t kOffsetTrackPC = -(kDWordBytes + kDWordBytes + kDWordBytes * kTrackFrameNum);
+
+static inline uint32_t RCOperationCount(address_t objAddr) {
+  return (*reinterpret_cast<uint32_t*>(objAddr + kOffsetRcOperationCount));
+}
+
+static inline uint32_t GCCount(address_t objAddr) {
+  return (*reinterpret_cast<uint32_t*>(objAddr + kOffsetGcCountCount));
+}
+
+static inline std::atomic<uint32_t> &RCOperationCountAtomicLVal(address_t objAddr) {
+  return AddrToLValAtomic<uint32_t>(objAddr + kOffsetRcOperationCount);
+}
+
+static inline std::atomic<uint32_t> &GCCountAtomicLVal(address_t objAddr) {
+  return AddrToLValAtomic<uint32_t>(objAddr + kOffsetGcCountCount);
+}
+
+extern std::atomic totalSkipedRCOpCount;
+extern std::atomic totalRCOperationCount;
+
+static inline bool IsRCSkiped(address_t obj);
+
+static inline void StatsRCOperationCount(address_t obj) {
+  totalRCOperationCount.fetch_add(1, memory_order_relaxed);
+  if (IsRCSkiped(obj)) {
+    totalSkipedRCOpCount.fetch_add(1, memory_order_relaxed);
+  } else {
+    if (RCOperationCount(obj) < std::numeric_limits<uint32_t>::max()) {
+      RCOperationCountAtomicLVal(obj).fetch_add(1, memory_order_relaxed);
+    }
+  }
+}
+
+inline void StatsGCCount(address_t obj) {
+  if (GCCount(obj) < std::numeric_limits<uint32_t>::max()) {
+    GCCountAtomicLVal(obj).fetch_add(1, memory_order_relaxed);
+  }
+}
+#else
+const int kRcHotHeaderSize = 0;
+#define StatsRCOperationCount(obj)
+#endif
+
+// Heap object header size.
+const size_t kHeaderSize = kDWordBytes + kJsanHeaderSize + kRcHotHeaderSize;
+
+// The offset of the reference count field.
+const offset_t kOffsetRefCount = -kWordBytes;
+// The offset of the GC header.
+const offset_t kOffsetGCHeader = -kDWordBytes;
+
+// Object Header bits
+// Total 64 bits, split into two 32-bit words (RC Header/GC Header)
+// RC Header: records reference counting state
+// GC Header: records misc information: attributes, GCTIB prototypes
+//
+// RC Header
+// [15 - 0]  Strong RC
+// [21 - 16] Resurrectable Weak RC: soft/weak/global weak reference
+// [28 - 22] Weak RC: phantom reference/weak annotation
+// [29 - 29] Weak Collected
+// [31 - 30] RC Cycle Collect Color
+//
+// Strong RC: normal references such as stack references, normal field
+// references and runtime references (string table, classloader table, ...).
+// In the normal case an object can be released when its Strong RC reaches zero.
+// If the Weak RC is not zero, the release of the object is handled differently
+// according to the Weak RC's type.
+//
+// Weak RC: if an object has a Weak RC, it comes from a phantom reference or
+// a @weak annotation. The object can be released immediately when its Strong RC
+// is zero even while the Weak RC is not zero.
+//
+// Resurrectable Weak RC: special weak references coming from soft/weak/weak-global
+// references. The object can not be released immediately when its Strong RC is zero
+// while the Resurrectable Weak RC is not zero, because these references have cache
+// semantics and need to be resurrectable via Reference.get(). Objects with a
+// Resurrectable Weak RC are collected periodically by the reference processor thread.
+//
+// Weak Collected: if set, weak references to this object are invalidated and
+// Reference.get() returns null.
+//
+// RC Cycle Collect Color: color used by cycle pattern collection. The color is atomically
+// set and checked to avoid race conditions between cycle pattern matching and mutator
+// operations.
+//
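// Worked example (illustrative, not from the original sources): with the masks defined
// just below, an RC header word of 0x00410003 decodes to Strong RC = 3 (bits [15:0]),
// Resurrectable Weak RC = 1 (bits [21:16]) and Weak RC = 1 (bits [28:22]); the color
// bits [31:30] are zero, i.e. black. DumpRCWord is a hypothetical helper that shows the
// same decomposition using the accessors declared further down in this header:
#if 0
static void DumpRCWord(uint32_t rcHeader) {                           // e.g. rcHeader == 0x00410003
  uint32_t strong = GetRCFromRCHeader(rcHeader);                      // -> 3
  uint32_t resurrectWeak = GetResurrectWeakRCFromRCHeader(rcHeader);  // -> 1
  uint32_t weak = GetWeakRCFromRCHeader(rcHeader);                    // -> 1
  (void)strong; (void)resurrectWeak; (void)weak;
}
#endif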
+static constexpr uint32_t kRCBits = 0x0000ffff;
+static constexpr uint32_t kRCBitsMsb = 0x00008000;
+
+static constexpr uint32_t kResurrectWeakRcBits = 0x003f0000;
+static constexpr uint32_t kResurrectWeakOneBit = 0x00010000;
+static constexpr uint32_t KResurrectWeakRcBitsShift = 16;
+static constexpr uint32_t kMaxResurrectWeakRC = kResurrectWeakRcBits >> KResurrectWeakRcBitsShift;
+
+static constexpr uint32_t kWeakRcBits = 0x1fc00000;
+static constexpr uint32_t kWeakRCOneBit = 0x00400000;
+static constexpr uint32_t kWeakRcBitsShift = 22;
+static constexpr uint32_t kMaxWeakRC = kWeakRcBits >> kWeakRcBitsShift;
+
+static constexpr uint32_t kWeakCollectedBit = 0x20000000;
+
+static constexpr uint32_t kRCCycleColorMask = 0xc0000000;
+static constexpr uint32_t kRCCycleColorBlack = 0x00000000;
+static constexpr uint32_t kRCCycleColorGray = 0x40000000;
+static constexpr uint32_t kRCCycleColorWhite = 0x80000000;
+static constexpr uint32_t kRCCycleColorBrown = 0xc0000000; // color for decref, debug only
+
+// GC Header
+// [0]  Allocated: flag that tells whether the object was allocated *by the allocator*
+// [1]  Dirty: tracing-queued bit used in concurrent marking
+// [2]  Release: object released by RC during backup-tracing concurrent marking
+// [3]  RCTracing: all inc/dec operations on an object with this bit are traced, debug only
+// [4]  WrapWeakRef: object needs to be wrapped in a weak reference for periodic dead-object checks
+// [5]  HasChildRef: object has references to other objects
+// ...
+// [20] ReferenceActive: reference was accessed recently; used by the reference processor
+// [21] EnqueuedFinalizable: debug only, finalizable object has been enqueued
+// [22] Array: object is an array
+// [23] Reference: object is a reference type
+// [24] Finalizable: object is finalizable
+// [27-25] Cycle pattern RC min value
+// [30-28] Cycle pattern RC max value
+// [31] Cycle pattern: an object with this bit is a candidate for cycle collection
+//
+// The bottom three bits of GCHeader(obj) are used for color.
+// NOTE: Only used by cycle detection, not mark-sweep.
+//
+// Bits for the flags word (-8 offset)
+static constexpr uint32_t kAllocatedBit = 0x1;
+static constexpr uint32_t kDirtyBit = 0x2; // set if child refs modified during concurrent marking.
+static constexpr uint32_t kReleasedBit = 0x4; // set if freed during concurrent marking.
+static constexpr uint32_t kRCTracingBit = 0x8; +static constexpr uint32_t kHasChildRef = 0x40; // object has no child reference, need update compiler +static constexpr uint32_t kMygoteObjBit = 0x80; // object is mygote object, no free, likely immutable + +static constexpr uint32_t kReferenceActiveBit = 0x00100000; // mark if reference is got recently and clear in rp +static constexpr uint32_t kEnqueuedFinalizableBit = 0x00200000; +static constexpr uint32_t kArrayBit = 0x00400000; +static constexpr uint32_t kReferenceBit = 0x00800000; +static constexpr uint32_t kFinalizableBit = 0x01000000; +static constexpr uint32_t kCycleRCMinMask = 0x0e000000; +static constexpr uint32_t kCycleRCMaxMask = 0x70000000; +static constexpr uint32_t kCyclePatternBit = 0x80000000; + +static constexpr uint32_t kCycleRCMinShift = 25; +static constexpr uint32_t kCycleRCMaxShift = 28; +#define CYCLE_MIN_RC(header) (((static_cast(header)) & kCycleRCMinMask) >> kCycleRCMinShift) +#define CYCLE_MAX_RC(header) (((static_cast(header)) & kCycleRCMaxMask) >> kCycleRCMaxShift) + +// ------------------------------------------------------------------ +// About kHasChildRef and kArrayBit: +// kHasChildRef kArrayBit +// object without child ref 0 0 +// object with child ref 1 0 +// primitive array 0 1 +// object array 1 1 +// ------------------------------------------------------------------ +const size_t kJavaObjAlignment = kDWordBytes; + +// Array content is 8 bytes align for long/double array type +const offset_t kJavaArrayLengthOffset = 12; // shadow + monitor + [padding], fixed. +const offset_t kJavaArrayContentOffset = 16; // fixed for 8B alignment + +struct DecoupleAllocHeader { + using FieldType = uint32_t; + FieldType size; + FieldType tag; + // Object header align to 8 , header align to 8 too + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kSizeOffset = kHeaderSize; + static constexpr size_t kTagOffset = kHeaderSize - sizeof(FieldType); + +#if defined(__aarch64__) + static constexpr size_t kMaxSize = (1UL << 32); // 4GB +#elif defined(__arm__) + static constexpr size_t kMaxSize = (1UL << 31); // lower 2GB address space +#endif + + static inline size_t GetSize(address_t objAddr) { + return static_cast(*reinterpret_cast(objAddr - kSizeOffset)); + } + + static inline void SetSize(address_t objAddr, size_t size) { + *reinterpret_cast(objAddr - kSizeOffset) = static_cast(size); + } + + static inline int GetTag(address_t objAddr) { + return static_cast(*reinterpret_cast(objAddr - kTagOffset)); + } + + static inline void SetTag(address_t objAddr, int tag) { + *reinterpret_cast(objAddr - kTagOffset) = static_cast(tag); + } + + static inline void SetHeader(address_t objAddr, int tag, size_t size) { + SetTag(objAddr, tag); + SetSize(objAddr, size); + } +}; + +#define HEADER_CHECK_VALID_HEAP_OBJECT __MRT_DEBUG_COND_FALSE +#if HEADER_CHECK_VALID_HEAP_OBJECT +static inline void CheckValidHeapObject(address_t obj) { + if (!IS_HEAP_OBJ(obj)) { + LOG(FATAL) << "invalid heap obj addr: " << std::hex << obj << maple::endl; + } +} +#define CHECK_HEADER_VALID_OBJ(obj) CheckValidHeapObject(obj) +#else +#define CHECK_HEADER_VALID_OBJ(obj) +#endif + +// Accessors to Java array-specific fields +static inline uint32_t &ArrayLength(address_t objAddr) { + return AddrToLVal(objAddr + kJavaArrayLengthOffset); +} + +// Accessors for gctib pointer from class metadata +static inline address_t GCTibPtr(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return 
reinterpret_cast(MObject::Cast(objAddr)->GetClass()->GetGctib()); +} + +static inline GCTibGCInfo &GCInfo(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return AddrToLVal(GCTibPtr(objAddr)); +} + +static inline uint32_t GCHeader(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return *reinterpret_cast(objAddr + kOffsetGCHeader); +} + +static inline uint32_t &GCHeaderLVal(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return AddrToLVal(objAddr + kOffsetGCHeader); +} + +static inline std::atomic &GCHeaderAtomic(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return AddrToLValAtomic(objAddr + kOffsetGCHeader); +} + +static inline void SetGCHeader(address_t objAddr, uint32_t val) { + CHECK_HEADER_VALID_OBJ(objAddr); + uint32_t *gcHeaderAddr = reinterpret_cast(objAddr + kOffsetGCHeader); + *gcHeaderAddr = val; +} + +static inline uint32_t RCHeader(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return *reinterpret_cast(objAddr + kOffsetRefCount); +} + +static inline uint32_t GetRCFromRCHeader(uint32_t rcHeader) { + return rcHeader & kRCBits; +} + +static inline uint32_t GetResurrectWeakRCFromRCHeader(uint32_t rcHeader) { + return (rcHeader & kResurrectWeakRcBits) >> KResurrectWeakRcBitsShift; +} + +static inline uint32_t GetWeakRCFromRCHeader(uint32_t rcHeader) { + return (rcHeader & kWeakRcBits) >> kWeakRcBitsShift; +} + +static inline uint32_t GetTotalWeakRCFromRCHeader(uint32_t rcHeader) { + return (rcHeader & (kWeakRcBits | kResurrectWeakRcBits)); +} + +static inline uint32_t GetTotalRCFromRCHeader(uint32_t rcHeader) { + return (rcHeader & (kWeakRcBits | kResurrectWeakRcBits | kRCBits)); +} + +static inline bool IsWeakCollectedFromRCHeader(uint32_t rcHeader) { + return (rcHeader & kWeakCollectedBit) != 0; +} + +static inline bool IsRCCollectableFromRCHeader(uint32_t rcHeader) { + uint32_t totalRC = GetTotalRCFromRCHeader(rcHeader); + return (totalRC == 0) || ((totalRC == kWeakRCOneBit) && !IsWeakCollectedFromRCHeader(rcHeader)); +} + +static inline uint32_t RefCount(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return GetRCFromRCHeader(RCHeader(obj)); +} + +static inline uint32_t ResurrectWeakRefCount(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return GetResurrectWeakRCFromRCHeader(RCHeader(obj)); +} + +static inline uint32_t WeakRefCount(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return GetWeakRCFromRCHeader(RCHeader(obj)); +} + +static inline uint32_t TotalRefCount(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return GetTotalRCFromRCHeader(RCHeader(obj)); +} + +static inline uint32_t TotalWeakRefCount(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return GetTotalWeakRCFromRCHeader(RCHeader(obj)); +} + +static inline bool IsRCCollectable(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return IsRCCollectableFromRCHeader(RCHeader(obj)); +} + +static inline bool IsWeakCollected(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return IsWeakCollectedFromRCHeader(RCHeader(obj)); +} + +static inline uint32_t& RefCountLVal(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return AddrToLVal(objAddr + kOffsetRefCount); +} + +static inline std::atomic& RefCountAtomicLVal(address_t objAddr) { + CHECK_HEADER_VALID_OBJ(objAddr); + return AddrToLValAtomic(objAddr + kOffsetRefCount); +} + +// quickly check if need check cycle pattern for this object +// 1. has cycle pattern +// 2. 
rc in range +static inline bool IsValidForCyclePatterMatch(uint32_t rcFlags, uint32_t curRC) { + return (rcFlags & kCyclePatternBit) && (curRC <= CYCLE_MAX_RC(rcFlags)) && (curRC >= CYCLE_MIN_RC(rcFlags)); +} + +static inline void ClearCyclePatternBit(address_t objAddr) { + (void)GCHeaderAtomic(objAddr).fetch_and((~kCyclePatternBit), std::memory_order_release); +} + +static inline uint32_t SetCycleMaxRC(uint32_t oldRcFlags, uint32_t max) { + return (oldRcFlags & ~kCycleRCMaxMask) | (max << kCycleRCMaxShift); +} + +static inline uint32_t SetCycleMinRC(uint32_t oldRcFlags, uint32_t min) { + return (oldRcFlags & ~kCycleRCMinMask) | (min << kCycleRCMinShift); +} + +static inline void InitWithAllocatedBit(address_t objAddr) { + GCHeaderLVal(objAddr) = kAllocatedBit; +} + +static inline void ClearAllocatedBit(address_t objAddr) { + GCHeaderLVal(objAddr) &= ~kAllocatedBit; +} + +static inline bool IsAllocatedByAllocator(address_t objAddr) { + return (GCHeader(objAddr) & kAllocatedBit) != 0; +} + +static inline bool IsDirty(address_t objAddr) { + uint32_t header = GCHeaderAtomic(objAddr).load(std::memory_order_acquire); + return (header & kDirtyBit) != 0; +} + +static inline void SetDirty(address_t objAddr) { + (void)GCHeaderAtomic(objAddr).fetch_or(kDirtyBit, std::memory_order_release); +} + +static inline void ClearDirtyBit(address_t objAddr) { + // this is called when the world is stopped, so no need atomic. + GCHeaderLVal(objAddr) &= (~kDirtyBit); +} + +static inline void SetReleasedBit(address_t objAddr) { + (void)GCHeaderAtomic(objAddr).fetch_or(kReleasedBit, std::memory_order_release); +} + +static inline bool HasReleasedBit(address_t objAddr) { + return (GCHeader(objAddr) & kReleasedBit) != 0; +} + +static inline void SetMygoteBit(address_t objAddr) { + GCHeaderLVal(objAddr) |= kMygoteObjBit; // not clearable +} + +static inline bool IsMygoteObj(address_t objAddr) { + return (GCHeader(objAddr) & kMygoteObjBit) != 0; +} + +static inline void SetReferenceActive(address_t objAddr) { + if (!IsMygoteObj(objAddr)) { + GCHeaderLVal(objAddr) |= kReferenceActiveBit; + } +} + +static inline void ClearReferenceActive(address_t objAddr) { + if (!IsMygoteObj(objAddr)) { + GCHeaderLVal(objAddr) &= ~(kReferenceActiveBit); + } +} + +static inline bool IsReferenceActive(address_t objAddr) { + return (GCHeader(objAddr) & kReferenceActiveBit) != 0; +} + +static inline void SetObjFinalizable(address_t objAddr) { + // should be set once at creation time. 
+ GCHeaderLVal(objAddr) |= kFinalizableBit; +} + +static inline void SetEnqueuedObjFinalizable(address_t objAddr) { + (void)GCHeaderAtomic(objAddr).fetch_or(kEnqueuedFinalizableBit, std::memory_order_relaxed); +} + +static inline void ClearObjFinalizable(address_t objAddr) { + // should be cleared only by FinalizerThread + (void)GCHeaderAtomic(objAddr).fetch_and(~(kFinalizableBit | kEnqueuedFinalizableBit), std::memory_order_relaxed); +} + +static inline bool IsEnqueuedObjFinalizable(address_t objAddr) { + return (GCHeader(objAddr) & kEnqueuedFinalizableBit) != 0; +} + +static inline bool IsObjFinalizable(address_t objAddr) { + return (GCHeader(objAddr) & kFinalizableBit) != 0; +} + +static inline bool IsObjResurrectable(address_t objAddr) { + return IsObjFinalizable(objAddr) && !IsEnqueuedObjFinalizable(objAddr); +} + +// Trace Bit is settting for debugging and trace object life cycle +static inline void SetTraceBit(address_t objAddr) { + (void)GCHeaderAtomic(objAddr).fetch_or(kRCTracingBit, std::memory_order_relaxed); +} + +static inline void ClearTraceBit(address_t objAddr) { + (void)GCHeaderAtomic(objAddr).fetch_and((~kRCTracingBit), std::memory_order_relaxed); +} + +static inline bool IsTraceObj(address_t objAddr) { + if (!IS_HEAP_OBJ(objAddr)) { + return false; + } + if (TotalRefCount(objAddr) == 0) { + return false; + } + return (GCHeader(objAddr) & kRCTracingBit) != 0; +} + +static inline bool HasChildRef(address_t objAddr) { + return (GCHeader(objAddr) & kHasChildRef) != 0; +} + +static inline void SetObjReference(address_t objAddr) { + GCHeaderLVal(objAddr) |= kReferenceBit;; +} + +static inline bool IsObjReference(address_t objAddr) { + return (GCHeader(objAddr) & kReferenceBit) != 0; +} + +static inline bool IsArray(address_t objAddr) { + return (GCHeader(objAddr) & kArrayBit) != 0; +} + +static inline bool IsObjectArray(address_t objAddr) { + return (GCHeader(objAddr) & (kArrayBit | kHasChildRef)) == (kArrayBit | kHasChildRef); +} + +static inline bool SkipRC(uint32_t rcHeader) { + return (rcHeader & kRCBitsMsb) != 0; +} + +static inline bool IsRCOverflow(uint32_t rc) { + return SkipRC(rc); +} + +static inline bool IsRCSkiped(address_t obj) { + CHECK_HEADER_VALID_OBJ(obj); + return SkipRC(RCHeader(obj)); +} + +static inline void SetRCOverflow(address_t objAddr) { + if (!IsRCSkiped(objAddr)) { + RefCountLVal(objAddr) |= kRCBitsMsb; + } +} + +extern "C" { +extern void MRT_BuiltinAbortSaferegister(maple::address_t addr, const char *clsName); +} + +#ifdef __ANDROID__ +static void inline AbortWithHeader(address_t obj) { + char *clsName = nullptr; + static constexpr address_t lowerMetaBound = 0x80000000; + static constexpr address_t higherMetaBound = 0xdfffffff; + if (IS_HEAP_OBJ(obj)) { + __MRT_ASSERT(MObject::Cast(obj) != nullptr, "AbortWithHeader: obj is a nullptr."); + MClass *cls = MObject::Cast(obj)->GetClass(); + address_t clsAddr = reinterpret_cast(cls); + if ((clsAddr >= lowerMetaBound) && (clsAddr < higherMetaBound) && + ((clsAddr % kJavaObjAlignment) == 0) && MRT_IsValidClass(*cls)) { + clsName = cls->GetName(); + } + } + MRT_BuiltinAbortSaferegister(obj, clsName); +} +#else +static void inline AbortWithHeader(address_t obj) { + __MRT_ASSERT(MObject::Cast(obj) != nullptr, "AbortWithHeader: obj is a nullptr."); + MClass *cls = MObject::Cast(obj)->GetClass(); + std::cout << "Inc/Dec from 0 " << std::hex << obj << " " << GCHeader(obj) << + " " << RCHeader(obj) << " lock " << *(reinterpret_cast(obj + kLockWordOffset)) << + std::dec << cls->GetName() << std::endl; + + 
MRT_BuiltinAbortSaferegister(obj, nullptr); +} +#endif + +#if MRT_DEBUG_DOUBLE_FREE && !ALLOC_USE_FAST_PATH +static inline void CheckDoubleFree(address_t objAddr) { + if (UNLIKELY(!IsAllocatedByAllocator(objAddr))) { + LOG(ERROR) << "double freeing " << objAddr; + AbortWithHeader(objAddr); + } +} +#else +#define CheckDoubleFree(x) +#endif + +// Object reference field iteration utilities, used in +// Recursive RC update when release +// Backup tracing mark/sweep +static const uint64_t kNotRefBits = 0; +static const uint64_t kNormalRefBits = 1; +static const uint64_t kWeakRefBits = 2; +static const uint64_t kUnownedRefBits = 3; +static const uint64_t kRefBitsMask = 3; +static const uint32_t kBitsPerRefWord = 2; + +constexpr uint32_t kBitsPerByte = 8; +static const uint32_t kRefWordPerMapWord = ((sizeof(uint64_t) * kBitsPerByte) / kBitsPerRefWord); + +template +void ForEachRefFieldNonArrayObj(address_t objAddr, RefFunc &&refFunc) { + struct GCTibGCInfo &gcInf = GCInfo(objAddr); + + // number of bitmap words from GCTIB. + uint32_t bitmapWordsCount = gcInf.nBitmapWords; + uint64_t *bitmapWords = gcInf.bitmapWords; + bool allNormal = Collector::Instance().Type() == kMarkSweep; + + // start address of fields. + address_t baseAddr = objAddr; + // for each bitmap word. + for (size_t i = 0; i < bitmapWordsCount; ++i) { + uint64_t bitmapWord = bitmapWords[i]; + address_t fieldAddr = baseAddr; + + // for each bit in bitmap. + while (LIKELY(bitmapWord != 0)) { + uint64_t wordBits = bitmapWord & kRefBitsMask; + if (wordBits != kNotRefBits) { + if (allNormal) { + wordBits = kNormalRefBits; + } + refFunc(AddrToLVal(fieldAddr), wordBits); + } + // go next ref word. + bitmapWord >>= kBitsPerRefWord; + fieldAddr += sizeof(reffield_t); + } + + // go next bitmap word. + baseAddr += (sizeof(reffield_t) * kRefWordPerMapWord); + } +} + +// Call func on each element in an object array. +template +void ForEachObjectArrayElement(address_t objAddr, UnaryFunction &&func) { + // we assume that both kHasChildRef & kArrayBit are set. + // take array length and content. + uint32_t arrayLengthVal = AddrToLVal(objAddr + kJavaArrayLengthOffset); + reffield_t *arrayContent = reinterpret_cast(objAddr + kJavaArrayContentOffset); + + // for each object in array. + for (uint32_t i = 0; i < arrayLengthVal; ++i) { + func(arrayContent[i], kNormalRefBits); + } +} + +// Use this macro to ensure that kHasChildRef & kArrayBit +// is checked before __ForEachXXXX() get called +// function should accept 'reffield_t&' as argument +// pattern for the call-back function: +// ForEachRefField(objAddr, [](reffield_t &field) { address_t ref = LoadRefField(field); } +// similar constraint applies to the other 2 macros. +#define ForEachRefField(obj, refFunc) do { \ + if (maplert::HasChildRef(obj)) { \ + if (UNLIKELY(IsObjectArray(obj))) { \ + ForEachObjectArrayElement(obj, refFunc); \ + } else { \ + ForEachRefFieldNonArrayObj(obj, refFunc); \ + } \ + } \ +} while (0) + +// Use this macro if kHasChildRef already checked. 
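// A slightly fuller sketch of the pattern described above (illustrative only; CountNormalRefs
// is a hypothetical helper, not part of this patch). The callback receives the reference slot
// and the 2-bit kind decoded from the GCTIB bitmap (kNormalRefBits/kWeakRefBits/kUnownedRefBits):
#if 0
static size_t CountNormalRefs(address_t obj) {
  size_t count = 0;
  ForEachRefField(obj, [&count](reffield_t &field, uint64_t kind) {
    if (kind == kNormalRefBits && LoadRefField(field) != 0) {
      ++count;  // non-null normal (strong) reference slot
    }
  });
  return count;
}
#endif
// The same callback shape works with DoForEachRefField below, which skips the
// kHasChildRef/array check.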
+#define DoForEachRefField(obj, refFunc) do { \ + if (UNLIKELY(IsObjectArray(obj))) { \ + ForEachObjectArrayElement(obj, refFunc); \ + } else { \ + ForEachRefFieldNonArrayObj(obj, refFunc); \ + } \ +} while (0) +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/include/version.h b/src/mrt/compiler-rt/include/version.h new file mode 100644 index 0000000000..cf59484897 --- /dev/null +++ b/src/mrt/compiler-rt/include/version.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef VERSION_H +#define VERSION_H + +namespace Version { +static constexpr const int kMajorMplVersion = 4; +static constexpr const int kMinorCompilerVersion = 0; +static constexpr const int kMinorRuntimeVersion = 0; +} + +#endif diff --git a/src/mrt/compiler-rt/include/yieldpoint.h b/src/mrt/compiler-rt/include/yieldpoint.h new file mode 100644 index 0000000000..0ca73ae29e --- /dev/null +++ b/src/mrt/compiler-rt/include/yieldpoint.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_YIELDPOINT_H +#define MAPLE_RUNTIME_YIELDPOINT_H + +#include +#include "mrt_api_common.h" +#include "collector/collector.h" + +// Yieldpoint runtime APIs +namespace maplert { +// Signal handler for yieldpoint. +// return true if signal is triggered by yieldpoint. +bool YieldpointSignalHandler(int sig, siginfo_t *info, ucontext_t *ctx); + +// reset yieldpoint module for forked process. +void YieldpointInitAfterFork(); + +// Initialize yieldpoint for mutator. +void InitYieldpoint(Mutator &mutator); + +// Finalize yieldpoint for mutator. +void FiniYieldpoint(Mutator &mutator); + +// Stop all muators. +MRT_EXPORT void StopTheWorld(); + +// Start all mutators suspended by StopTheWorld(). +MRT_EXPORT void StartTheWorld(); + +bool WorldStopped(); + +void LockStopTheWorld(); + +void UnlockStopTheWorld(); + +void DumpMutatorsListInfo(bool isFatal); +// Scoped stop the world, +class ScopedStopTheWorld { + public: + __attribute__ ((always_inline)) + explicit ScopedStopTheWorld() { + StopTheWorld(); + } + + __attribute__ ((always_inline)) + ~ScopedStopTheWorld() { + StartTheWorld(); + } +}; + +// Scoped start the world. 
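// Usage sketch for the scoped stop-the-world helpers (illustrative only; the collection
// routine below is a hypothetical caller, not part of this patch):
#if 0
static void CollectWithWorldStopped() {
  ScopedStopTheWorld stw;  // StopTheWorld() in the constructor
  // ... work that requires every mutator to be parked (root scanning, phase flips) ...
}                          // StartTheWorld() in the destructor, even on early return
#endif
// ScopedStartTheWorld, declared next, is the inverse guard: it temporarily restarts the
// world inside a scope where the world is already stopped.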
+class ScopedStartTheWorld { + public: + __attribute__ ((always_inline)) + explicit ScopedStartTheWorld() { + StartTheWorld(); + } + + __attribute__ ((always_inline)) + ~ScopedStartTheWorld() { + StopTheWorld(); + } +}; + +// Scoped lock stop-the-world, this prevent other +// thread stop-the-world during the current scope. +class ScopedLockStopTheWorld { + public: + __attribute__ ((always_inline)) + explicit ScopedLockStopTheWorld() { + LockStopTheWorld(); + } + + __attribute__ ((always_inline)) + ~ScopedLockStopTheWorld() { + UnlockStopTheWorld(); + } +}; +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/public-headers/file_adapter.h b/src/mrt/compiler-rt/public-headers/file_adapter.h new file mode 100644 index 0000000000..ed5294957b --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/file_adapter.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __MAPLE_LOADER_FILE_ADAPTER__ +#define __MAPLE_LOADER_FILE_ADAPTER__ + +#include + +#include "linker/linker_common.h" +#include "utils/string_utils.h" +#include "allocator/page_allocator.h" +#include "utils/time_utils.h" +namespace maplert { +enum FileType { + kMFile, + kDFile, + kUnknown +}; +// file manager for (all) maple supported class-file types +// one instance per class-file +class ObjFile { + public: + ObjFile(jobject classLoader, const std::string &path, FileType fileType) + : fileAddr(nullptr), + classLoader(classLoader), + name(path), + fileType(fileType), + fileSize(0), + lazyBinding(false), + classTableLoaded(false), + mplInfo(nullptr) { + static int32_t globalUniqueKey = 0x100000; + uniqueID = __atomic_add_fetch(&globalUniqueKey, 1, __ATOMIC_ACQ_REL); + } + + virtual bool Open() = 0; + virtual bool Close() = 0; + virtual void Load() = 0; + + // check if the jni class in the maple file by jni class name + virtual bool CanRegisterNativeMethods(const std::string &jniClassName) = 0; + // if the register success, it will return true, othewise false; + virtual bool RegisterNativeMethods(IEnv env, jclass javaClass, const std::string &jniClassName, + INativeMethod methods, int32_t methodCount, bool fake) = 0; + virtual void DumpUnregisterNativeFunc(std::ostream &os) = 0; + + virtual void *GetHandle() const { // different File manager might use different handle + return fileAddr; + } + + virtual ~ObjFile() { + fileAddr = nullptr; + classLoader = nullptr; + mplInfo = nullptr; + }; + + inline int32_t GetUniqueID() const { + return uniqueID; + } + + inline LinkerMFileInfo *GetMplInfo() const { + return mplInfo; + } + + inline void SetMplInfo(LinkerMFileInfo &outMplInfo) { + mplInfo = &outMplInfo; + } + inline const std::string &GetName() const { + return name; + } + + inline const jobject &GetClassLoader() const { + return classLoader; + } + + inline void SetClassLoader(jobject loader) { + classLoader = loader; + } + + inline void SetUniqueID(int32_t id) { + this->uniqueID = id; + } + + inline FileType GetFileType() const { + return fileType; + } + + inline size_t 
GetFileSize() const { + return fileSize; + }; + + inline bool IsLazyBinding() const { + return lazyBinding; + } + + inline void SetLazyBinding() { + lazyBinding = true; + } + + inline bool IsClassTableLoaded() const { + return classTableLoaded; + } + + inline void SetClassTableLoaded() { + classTableLoaded = true; + } + + void *fileAddr; // dlopen/mmaped address + + protected: // need to be accessed by derived class + jobject classLoader; // class loader to load classes in the file + int32_t uniqueID; // use createTime to sort + std::string name; // name of class file to open + FileType fileType; // type of file + size_t fileSize; // file size + bool lazyBinding; // it's lazy binding file. + bool classTableLoaded; // whether the class table has been loaded + LinkerMFileInfo *mplInfo; // memory map for objfile +}; +class FileAdapter { + public: + template + using ObjFileMap = std::unordered_map, std::equal_to, + StdContainerAllocator, kClassLoaderAllocator>>; + FileAdapter() {}; + MRT_EXPORT FileAdapter(std::string srcPath); + MRT_EXPORT FileAdapter(std::string srcPath, IAdapterEx adapter); + + ~FileAdapter() { + mMplLibs.clear(); + mMplSeqList.clear(); + } + size_t GetRegisterSize() const { + std::lock_guard lock(mMplLibLock); + return mMplLibs.size(); + } + size_t GetSize() const { + return mMplSeqList.size(); + } + const std::vector &GetMplFiles() const { + return mMplSeqList; + } + const std::string &GetOriginPath() const { + return originalPath; + } + const std::string &GetConvertPath() const { + return convertPath; + } + bool IsThirdApp() const { + return isThirdApp; + } + bool IsPartialAot() const { + return isPartialAot; + } + bool HasStartUp() const { + return hasStartUp; + } + bool HasSiblings() const { + return hasSiblings; + } + void SetInterpExAPI(const IAdapterEx api) { + pAdapterEx = api; + } + ObjFile *OpenObjectFile(jobject classLoader, bool isFallBack = false, const std::string specialPath = ""); + bool CloseObjectFile(ObjFile &objFile); + void GetObjFiles(std::vector &bootClassPath); + void GetObjLoaders(std::set &classLoaders); + const ObjFile *Get(const std::string &path); + void Put(const std::string &path, const ObjFile &objFile); + bool Register(IEnv env, jclass javaClass, const std::string &jniClassName, + INativeMethod methods, int32_t methodCount, bool fake); + void DumpUnregisterNativeFunc(std::ostream &os); + void DumpMethodName(); + // Get MFile list from filePath, and store split file path into pathList + // returns: true if "startup MFile" found in the filePath false if not found in the filePath + void GetObjFileList(std::vector &pathList, bool isFallBack); + protected: + const ObjFile *GetLocked(const std::string &path); + bool GetObjFileListInternal(std::vector &pathList); + + std::string originalPath; + std::string convertPath; + // If it is third App + bool isThirdApp = false; + // If it is partial AOT + bool isPartialAot = false; + // If it is has startup so + bool hasStartUp = false; + // If it is has mulito so + bool hasSiblings = false; + // Input dex path + IAdapterEx pAdapterEx; + mutable std::mutex mMplLibLock; + // the string represents mplefile libpath + ObjFileMap mMplLibs; + std::vector mMplSeqList; +}; +} // namespace maplert +#endif // __MAPLE_LOADER_FILE_ADAPTER__ diff --git a/src/mrt/compiler-rt/public-headers/file_loader.h b/src/mrt/compiler-rt/public-headers/file_loader.h new file mode 100644 index 0000000000..b2637e626d --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/file_loader.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) [2020] 
Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __MAPLE_LOADER_FILE_LOADER__ +#define __MAPLE_LOADER_FILE_LOADER__ + +#include + +#include "base/macros.h" +namespace maple { +class IMplLoader { + public: + virtual void *Open(const std::string &name) = 0; + virtual void Close(void *handle) = 0; + virtual ~IMplLoader() = default; +}; +} // end namespace maple +#endif // endif __MAPLE_LOADER_FILE_LOADER__ diff --git a/src/mrt/compiler-rt/public-headers/gc_callback.h b/src/mrt/compiler-rt/public-headers/gc_callback.h new file mode 100644 index 0000000000..adcc95f34f --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/gc_callback.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_GCCALLBACK_H +#define MAPLE_RUNTIME_GCCALLBACK_H + +#include + +namespace maplert { +// This call back function type is called when GC finished. +using GCFinishCallbackFunc = std::function; + +using address_t = uintptr_t; +using offset_t = intptr_t; + +#ifdef USE_32BIT_REF +using reffield_t = uint32_t; +#else +using reffield_t = address_t ; +#endif // USE_32BIT_REF + +#ifdef USE_32BIT_REF +#define DEADVALUE (0xdeaddeadul) // only stores 32bit value +#else // !USE_32BIT_REF +#define DEADVALUE (0xdeaddeaddeaddeadul) +#endif // USE_32BIT_REF + +using AddressVisitor = std::function; +using RefVisitor = std::function; +using HeapRefVisitor = std::function; +} + +#endif // MAPLE_RUNTIME_GCCALLBACK_H diff --git a/src/mrt/compiler-rt/public-headers/gc_reason.h b/src/mrt/compiler-rt/public-headers/gc_reason.h new file mode 100644 index 0000000000..52556c5782 --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/gc_reason.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLE_RUNTIME_GCREASON_H
+#define MAPLE_RUNTIME_GCREASON_H
+
+#include
+#include "globals.h"
+
+namespace maplert {
+constexpr float kGCWaterLevelLow = 1.1; // aggressive gc water level for sys app
+constexpr float kGCWaterLevelGCOnly = 1.15;
+constexpr float kGCWaterLevel = 1.2; // normal gc water level for sys app
+constexpr float kHeapWaterLevel = 0.5; // more aggressive when space capacity exceeds 50% of max
+constexpr float kAppGCWaterLevelLow = 1.4; // aggressive gc water level for normal app
+constexpr float kAppGCWaterLevel = 1.6; // normal gc water level for normal app
+constexpr float kAppStartupHeapHeurstic = 1.5; // startup requires less gc
+constexpr uint64_t kMaxGCThresholdDelta = 15 * maple::MB;
+constexpr uint64_t kInitSystemGCThreshold = 80 * maple::MB;
+constexpr uint64_t kInitGCThreshold = 20 * maple::MB;
+
+// Used by Collector::TriggerGC and its wrapper MRT_TriggerGC.
+// It tells the backup tracer why GC is triggered.
+//
+// [B] blocking: calling Collector::TriggerGC or MRT_TriggerGC will block until
+// the current GC finishes.
+//
+// [LB] lite-blocking: just wait for one gc request. Mainly for heuristic & native gc.
+// It's helpful to reduce fragmentation in both the java heap and the native heap.
+//
+// [NB] non-blocking: the caller of Collector::TriggerGC or MRT_TriggerGC will
+// continue, and the GC will try to stop the world using yieldpoints.
+// Non-blocking GC is triggered asynchronously; IsBlockingGCReason() is deprecated.
+enum GCReason : int {
+  kInvalidGCReason = -1,
+  kGCReasonUser = 0, // [B] Triggered by user (System.gc(), etc.)
+  kGCReasonUserNi = 1, // [B] Triggered by user and not interactive (System.gc(), etc.)
+  kGCReasonOOM = 2, // [B] Out of memory. Failed to allocate object.
+  kGCReasonForceGC = 3, // [B] A special reason that forces GC.
+  kGCReasonTransistBG = 4, // [B] App's processState changed to JankImperceptible(background).
+  kGCReasonHeu = 5, // [NB] Statistics show it is worth doing GC. Does not have to be immediate.
+  kGCReasonNative = 6, // [NB] Native-Allocation-Registry shows it's worth doing GC.
+  kGCReasonHeuBlocking = 7, // [LB] Just wait for one gc request to reduce java heap fragmentation.
+  kGCReasonNativeBlocking = 8, // [LB] Just wait for one gc request to reduce native heap consumption.
+  kGCReasonMax = 9,
+};
+
+enum GCReleaseSoType : unsigned int {
+  kReleaseNone = 0, // do not release any maple so's read-only sections
+  kReleaseAppSo, // release only app maple so's read-only sections, keep sys maple so's
+  kReleaseAll, // release all maple so's read-only sections
+};
+
+struct GCReasonConfig {
+  static int64_t lastGCTimestamp; // timestamp of the last GC, of any kind
+  const GCReason reason;
+  const char *name; // human-readable name of the GC reason
+ const bool isBlocking; + const bool isLiteBlocking; + const bool isNonBlocking; + const bool isConcurrent; + const int64_t minIntervelNs; + int64_t lastTriggerTimestamp; + + inline bool IsFrequentGC() const; + inline bool IsFrequentAsyncGC() const; + inline bool IsFrequentHeuristicGC() const; + bool ShouldIgnore() const; + void IgnoreCallback() const; + GCReleaseSoType ShouldReleaseSo() const; + bool ShouldTrimHeap() const; + bool ShouldCollectCycle() const; + void SetLastTriggerTime(int64_t timestamp); + bool IsLiteBlockingGC() const { + return isLiteBlocking; + } + bool IsBlockingGC() const { + return isBlocking; + } + bool IsNonBlockingGC() const { + return isNonBlocking; + } +}; + +// Defined in compiler-rt/src/gcreason.cpp +extern GCReasonConfig reasonCfgs[]; + +// The process state passed in from the activity manager, used to determine +// when to do cycle.pattern saving or other cases. +enum ProcessState { + kProcessStateJankPerceptible = 0, + kProcessStateJankImperceptible = 1, +}; +} // namespace maplert + +#endif diff --git a/src/mrt/compiler-rt/public-headers/linker_api.h b/src/mrt/compiler-rt/public-headers/linker_api.h new file mode 100644 index 0000000000..427d5fcbcd --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/linker_api.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef __MAPLE_LINKER_API__ +#define __MAPLE_LINKER_API__ + +#include "linker/linker_utils.h" +#include "file_adapter.h" + +#ifdef __cplusplus +namespace maplert { +#define LINKER_LOG(level) LOG(level) << __FUNCTION__ << "(MplLinker), " +#define LINKER_VLOG(module) VLOG(module) << __FUNCTION__ << "(MplLinker), " +#define LINKER_DLOG(module) DLOG(module) << __FUNCTION__ << "(MplLinker), " +struct SignalInfo { + SignalInfo(void *pc, void *offset) : pc(pc), offset(offset) {}; + void *pc; + void *offset; +}; +struct StrTab { + char *startHotStrTab = nullptr; + char *bothHotStrTab = nullptr; + char *runHotStrTab = nullptr; + char *coldStrTab = nullptr; +}; +class LinkerAPI { + public: + MRT_EXPORT static LinkerAPI &Instance(); + template + static T As() { + return reinterpret_cast(Instance()); + } + LinkerAPI() {}; + virtual ~LinkerAPI() = default; + virtual void PreInit() = 0; + virtual void PostInit() = 0; + virtual void UnInit() = 0; + // reference by stack_unwinder.h | jsan.cpp | jsan_lite.cpp | mm_utils.cpp | fault_handler_arm64.cc | + // fault_handler_linux.cc + virtual bool IsJavaText(const void *addr) = 0; + // reference by stack_unwinder.h | eh_personality.cpp | jsan.cpp | jsan_lite.cpp | libs.cpp | + // mm_utils.cpp | stack_unwinder.cpp | mpl_linkerTest.cpp + virtual bool LocateAddress(const void *addr, LinkerLocInfo &info, bool getName) = 0; + // reference by chelper.cpp | mclass.cpp | mrt_class_init.cpp | mrt_mclasslocatormanager.cpp | + // mrt_reflection_class.cpp + virtual bool LinkClassLazily(jclass klass) = 0; + // reference by chelper.cpp | mrt_reflection_class | mpl_field_gctib.cpp | mclass.cpp + virtual void ResolveColdClassSymbol(jclass classinfo) = 0; + // reference by cinterface.cpp | collector_ms.cpp + virtual void ReleaseBootPhaseMemory(bool isZygote, bool isSystemServer) = 0; + // reference by collector_ms.cpp + virtual void ClearAllMplFuncProfile() = 0; + // reference by libs.cpp | dalvik_system_VMStack.cc | jvm.cpp + virtual std::string GetMFileNameByPC(const void *pc, bool isLazyBinding) = 0; + // reference by mm_utils.cpp | runtime.cc | mpl_linkerTest.cpp + virtual std::string GetAppInfo() = 0; + // reference by stack_unwinder.cpp + virtual LinkerMFileInfo *GetLinkerMFileInfoByName(const std::string &name) = 0; + virtual bool CheckLinkerMFileInfoElfBase(LinkerMFileInfo &mplInfo) = 0; + // reference by fieldmeta.cpp | methodmeta.cpp | mrt_annotation.cpp + virtual void GetStrTab(jclass dCl, StrTab &strTab) = 0; + virtual char *GetCString(jclass dCl, uint32_t index) = 0; + virtual void DestroyMFileCache() = 0; + // reference by mrt_class_init.cpp | mrt_profile.cpp + virtual LinkerMFileInfo *GetLinkerMFileInfoByAddress(const void *addr, bool isLazyBinding) = 0; + // reference by mrt_mclasslocatormanager.cpp + virtual jclass InvokeClassLoaderLoadClass(jobject classLoader, const std::string &className) = 0; + // reference by mrt_profile.cpp + virtual void DumpAllMplFuncProfile( + std::unordered_map> &funcProfileRaw) = 0; + virtual void DumpBBProfileInfo(std::ostream &os) = 0; + virtual void DumpAllMplFuncIRProfile(std::unordered_map &funcProfileRaw) = 0; + // reference by mrt_reflection_method.cpp + virtual bool UpdateMethodSymbolAddress(jmethodID method, uintptr_t addr) = 0; + // reference by dalvik_system_VMStack.cc | class_linker.cc | jvm.cpp + virtual LinkerMFileInfo *GetLinkerMFileInfoByClassMetadata(const void *addr, bool isClass) = 0; + // reference by class_linker.cc | mpl_linkerTest.cpp + virtual void *GetSymbolAddr(void *handle, const char *symbol, bool 
isFunction) = 0; + // reference by mpl_file_checker.cc + virtual void GetMplVersion(const LinkerMFileInfo &mplInfo, MapleVersionT &item) = 0; + virtual void GetMplCompilerStatus(const LinkerMFileInfo &mplInfo, uint32_t &status) = 0; + // reference by mpl_native_stack.cpp + virtual bool GetJavaTextInfo( + const void *addr, LinkerMFileInfo **mplInfo, LinkerLocInfo &info, bool getName) = 0; + // reference by signal_catcher.cc | mpl_linkerTest.cpp + virtual void DumpAllMplSectionInfo(std::ostream &os) = 0; + // reference by mpl_linkerTest.cpp | NativeEntry.cpp + virtual bool ContainLinkerMFileInfo(const std::string &name) = 0; + virtual bool ContainLinkerMFileInfo(const void *handle) = 0; + virtual void SetAppInfo(const char *dataPath, int64_t versionCode) = 0; + virtual AppLoadState GetAppLoadState() = 0; + virtual uint64_t DumpMetadataSectionSize(std::ostream &os, void *handle, const std::string sectionName) = 0; + virtual std::string GetMethodSymbolByOffset(const LinkerInfTableItem &pTable) = 0; + // reference by mclass_inline.h + virtual jclass GetSuperClass(ClassMetadata **addr) = 0; + // reference by jvm.cpp + virtual std::string GetMFileNameByClassMetadata(const void *addr, bool isLazyBinding) = 0; + // reference by mrt_prifole.cpp + virtual std::string GetAppPackageName() = 0; + // reference by mrt_mclasslocator_interpreter.cpp + virtual bool ReGenGctib4Class(jclass classInfo) = 0; + // reference by NativeEntry.cpp of cbg + virtual void *LookUpSymbolAddress(const MUID &muid) = 0; + virtual MUID GetMUID(const std::string symbol, bool forSystem) = 0; + virtual bool IsFrontPatchMode(const std::string &path) = 0; + + virtual bool Add(ObjFile &objFile, jobject classLoader) = 0; + virtual bool Resolve() = 0; + // Resolve the single maple file. + virtual bool Resolve(LinkerMFileInfo &mplInfo, bool decouple) = 0; +#ifdef LINKER_DECOUPLE + virtual bool HandleDecouple(std::vector &mplList) = 0; +#endif + virtual void FinishLink(jobject classLoader) = 0; + virtual bool Link() = 0; + // Resolve the single maple file. MUST invoked Add() before. 
+ virtual bool Link(LinkerMFileInfo &mplInfo, bool decouple) = 0; + virtual void SetLoadState(LoadStateType state) = 0; + virtual void SetLinkerMFileInfoClassLoader(const ObjFile &objFile, jobject classLoader) = 0; + virtual void SetClassLoaderParent(jobject classLoader, jobject newParent) = 0; + virtual bool InsertClassesFront(ObjFile &objFile, jobject classLoader) = 0; + virtual void SetPatchPath(std::string &path, int32_t mode) = 0; + virtual void InitArrayCache(uintptr_t pc, uintptr_t addr) = 0; +#ifdef LINKER_RT_CACHE + // reference by runtime.cc + virtual void SetCachePath(const char *path) = 0; +#endif // LINKER_RT_CACHE + protected: + static LinkerAPI *pInstance; +}; +extern "C" { +#endif +extern uint8_t __BindingProtectRegion__[]; + +bool MRT_RequestLazyBindingForSignal(const SignalInfo &data); +bool MRT_RequestLazyBindingForInitiative(const void *data); +bool MRT_RequestLazyBinding(const void *offset, const void *pc, bool fromSignal); +void MRT_FixOffsetTableLazily(LinkerOffsetValItemLazyLoad &offsetEntry); +void MRT_FixStaticAddrTableLazily(LinkerStaticAddrItem &addrTableItem); +void MRT_FixStaticAddrTable(LinkerStaticAddrItem &addrTableItem); +void InitProtectedRegion(); +bool MRT_IsLazyBindingState(const uint8_t *address); +bool MRT_RequestInitArrayCache(SignalInfo *info); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif +#endif // __MAPLE_LINKER_API__ diff --git a/src/mrt/compiler-rt/public-headers/loader_api.h b/src/mrt/compiler-rt/public-headers/loader_api.h new file mode 100644 index 0000000000..49690f369e --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/loader_api.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef __MAPLE_LOADER_API__ +#define __MAPLE_LOADER_API__ + +#include "file_adapter.h" +#include "gc_roots.h" + +namespace maplert { +#define CL_LOG(level) LOG(level) << __FUNCTION__ << "(ClassLoader), " +#define CL_VLOG(module) VLOG(module) << __FUNCTION__ << "(ClassLoader), " +#define CL_DLOG(module) DLOG(module) << __FUNCTION__ << "(ClassLoader), " +enum AdapterFileList { + kMplFileBootList, + kMplFileOtherList +}; +class SearchFilter { + public: + jobject specificCL = nullptr; // assign by 'User', immutable + jobject contextCL = nullptr; // assign by 'Module', low mutable + jobject currentCL = nullptr; // assign by 'Condition', high mutable + ObjFile *outFile = nullptr; + ObjFile *specificFile = nullptr; // assign by 'User', immutable + ObjFile *currentFile = nullptr; // assign by 'Condition', high mutable + bool isInternalName = false; + bool isLowerDelegate = false; + bool isDelegateLast = false; + bool ignoreBootSystem = false; + SearchFilter() {}; + SearchFilter(jobject loader) : specificCL(loader) {}; + SearchFilter(jobject specific, jobject context) : specificCL(specific), contextCL(context) {}; + SearchFilter(jobject loader, bool lowerDelegate, ObjFile *file) + : specificCL(loader), + specificFile(file), + isLowerDelegate(lowerDelegate) {}; + SearchFilter(jobject loader, bool internalName, bool lowerDelegate, ObjFile *file) + : specificCL(loader), + specificFile(file), + isInternalName(internalName), + isLowerDelegate(lowerDelegate) {}; + SearchFilter(jobject loader, bool internalName, bool lowerDelegate, bool delegateLast) + : specificCL(loader), + isInternalName(internalName), + isLowerDelegate(lowerDelegate), + isDelegateLast(delegateLast) {}; + SearchFilter(const SearchFilter &filter) + : specificCL(filter.specificCL), + specificFile(filter.specificFile), + isInternalName(filter.isInternalName), + isLowerDelegate(filter.isLowerDelegate), + isDelegateLast(filter.isDelegateLast), + ignoreBootSystem(filter.ignoreBootSystem) {}; + ~SearchFilter() { + outFile = nullptr; + specificFile = nullptr; + currentFile = nullptr; + specificCL = nullptr; + currentCL = nullptr; + contextCL = nullptr; + } + inline SearchFilter &Clear() { + currentFile = nullptr; + currentCL = nullptr; + return *this; + } + inline SearchFilter &Reset() { + currentFile = specificFile; + currentCL = contextCL; + return *this; + } + inline SearchFilter &ClearFile() { + currentFile = nullptr; + return *this; + } + inline SearchFilter &ResetFile() { + currentFile = specificFile; + return *this; + } + inline SearchFilter &ResetClear() { + currentFile = nullptr; + currentCL = contextCL; + return *this; + } + inline bool IsBootOrSystem(const jobject systemLoader) const { + return (contextCL == nullptr) || (contextCL == systemLoader); + } + inline bool IsNullOrSystem(const jobject systemLoader) const { + return (systemLoader == nullptr) || (contextCL == systemLoader); + } + inline void Dump(const std::string &msg, const jclass klass) { + CL_LOG(INFO) << msg << ", isInternalName=" << isInternalName << ", isLowerDelegate=" << isLowerDelegate << + ", isDelegateLast=" << isDelegateLast << ", ignoreBootSystem=" << ignoreBootSystem << ", specificCL=" << + specificCL << ", contextCL=" << contextCL << ", currentCL=" << currentCL << ", outFile=" << outFile << + ", specificFile=" << specificFile << ", currentFile=" << currentFile << ", klass=" << klass << maple::endl; + } +}; +class LoaderAPI { + public: + MRT_EXPORT static LoaderAPI &Instance(); + template + static T As() { + return reinterpret_cast(Instance()); + } + 
LoaderAPI() {}; + virtual ~LoaderAPI() {}; + virtual void PreInit(IAdapterEx interpEx) = 0; + virtual void PostInit(jobject systemClassLoader) = 0; + virtual void UnInit() = 0; + virtual jobject GetCLParent(jobject classLoader) = 0; + virtual void SetCLParent(jobject classLoader, jobject parentClassLoader) = 0; + virtual bool IsBootClassLoader(jobject classLoader) = 0; + virtual bool LoadMplFileInBootClassPath(const std::string &pathString) = 0; +#ifndef __ANDROID__ + virtual bool LoadMplFileInUserClassPath(const std::string &pathString) = 0; +#endif + virtual bool LoadMplFileInAppClassPath(jobject classLoader, FileAdapter &adapter) = 0; + virtual bool LoadClasses(jobject classLoader, ObjFile &objFile) = 0; + virtual void RegisterMplFile(const ObjFile &mplFile) = 0; + virtual bool UnRegisterMplFile(const ObjFile &mplFile) = 0; + virtual bool RegisterJniClass(IEnv env, jclass javaClass, const std::string &filterName, + const std::string &jniClassName, INativeMethod methods, int32_t methodCount, bool fake) = 0; + virtual jclass FindClass(const std::string &className, const SearchFilter &filter) = 0; + // get registered mpl file by exact path-name: the path should be canonicalized + virtual const ObjFile *GetMplFileRegistered(const std::string &name) = 0; + virtual const ObjFile *GetAppMplFileRegistered(const std::string &package) = 0; + virtual size_t GetListSize(AdapterFileList type) = 0; + virtual size_t GetLoadedClassCount() = 0; + virtual size_t GetAllHashMapSize() = 0; + virtual void DumpUnregisterNativeFunc(std::ostream &os) = 0; + virtual void VisitClasses(maple::rootObjectFunc &func) = 0; + virtual bool IsLinked(jobject classLoader) = 0; + virtual void SetLinked(jobject classLoader, bool isLinked) = 0; + virtual bool GetClassNameList(jobject classLoader, ObjFile &objFile, std::vector &classVec) = 0; + virtual bool GetMappedClassLoaders(const jobject classLoader, + std::vector> &mappedPairs) = 0; + virtual bool GetMappedClassLoader(const std::string &fileName, jobject classLoader, jobject &realClassLoader) = 0; + virtual void ReTryLoadClassesFromMplFile(jobject classLoader, ObjFile &mplFile) = 0; + virtual bool RegisterNativeMethods(ObjFile &objFile, jclass klass, INativeMethod methods, int32_t methodCount) = 0; + // MRT_EXPORT Split + virtual void VisitGCRoots(const RefVisitor &visitor) = 0; + virtual jobject GetSystemClassLoader() = 0; + virtual jobject GetBootClassLoaderInstance() = 0; + virtual void SetClassCL(jclass klass, jobject classLoader) = 0; + virtual IObjectLocator GetCLClassTable(jobject classLoader) = 0; + virtual void SetCLClassTable(jobject classLoader, IObjectLocator classLocator) = 0; + virtual jclass LocateClass(const std::string &className, const SearchFilter &filter) = 0; + virtual IAdapterEx GetAdapterEx() = 0; + virtual void ResetCLCache() = 0; + virtual jclass GetCache(const jclass contextClass, const std::string &className, uint32_t &index, bool&) = 0; + virtual void WriteCache(const jclass klass, const jclass contextClass, uint32_t index) = 0; + protected: + static LoaderAPI *pInstance; +}; +class IAdapterExObj { + public: + IAdapterExObj() = default; + virtual ~IAdapterExObj() = default; + virtual bool IsSystemServer() = 0; + virtual bool IsGcOnly() = 0; + virtual bool IsStarted() = 0; +}; +template +class AdapterExObj : public IAdapterExObj { + public: + using TypeFlagCall = bool (Type::*)() const; + AdapterExObj(Type &obj, TypeFlagCall api) { + this->obj = &obj; + this->isSystemServer = api; + } + ~AdapterExObj() { + obj = nullptr; + isSystemServer = 
nullptr; + } + bool IsSystemServer() { + return (obj->*isSystemServer)(); + } + bool IsGcOnly() { + return (obj->*isGcOnly)(); + } + bool IsStarted() { + return (obj->*isStarted)(); + } + protected: + Type *obj; + union { + TypeFlagCall isSystemServer; + TypeFlagCall isGcOnly; + TypeFlagCall isStarted; + }; +}; +class AdapterExAPI { + public: + AdapterExAPI() + : openDexFile(nullptr), + openMplFile(nullptr), + enableMygote(nullptr), + getDexFileList(nullptr), + createThread(nullptr), + isSystemServer(nullptr), + isGcOnly(nullptr), + isStarted(nullptr) {}; + ~AdapterExAPI() { + openDexFile = nullptr; + openMplFile = nullptr; + enableMygote = nullptr; + getDexFileList = nullptr; + createThread = nullptr; + delete isSystemServer; + delete isGcOnly; + delete isStarted; + isSystemServer = nullptr; + isGcOnly = nullptr; + isStarted = nullptr; + } + using MplFileOpenCall = ObjFile *(*)(jobject classLoader, const std::string &path); + using LoadMplFileCall = bool (*)(LoaderAPI &loader, ObjFile &mplFile, jobject classLoader); + using EnableMygoteCall = int (*)(bool enable, const std::string &message); + using GetDexFileListCall = void (*)(const std::string &fileName, std::vector &fileList); + using CreateThreadCall = void (*)(void* (*loadingCallback)(void*), void* args); + using DexFileOpenCall = MplFileOpenCall; + void RegisterOpenDexFileAPI(DexFileOpenCall api) { + openDexFile = api; + } + void RegisterOpenMplFileAPI(MplFileOpenCall api) { + openMplFile = api; + } + void RegisterEnableMygoteAPI(EnableMygoteCall api) { + enableMygote = api; + } + void RegisterGetDexFileListAPI(GetDexFileListCall api) { + getDexFileList = api; + } + void RegisterCreateThreadAPI(CreateThreadCall api) { + createThread = api; + } + template + void RegisterIsSystemServerAPI(AdapterExObj &obj) { + isSystemServer = &obj; + } + template + void RegisterIsGconlyAPI(AdapterExObj &obj) { + isGcOnly = &obj; + } + template + void RegisterIsStartedAPI(AdapterExObj &obj) { + isStarted = &obj; + } + ObjFile *OpenDexFile(jobject classLoader, const std::string &path) const { + return (*openDexFile)(classLoader, path); + } + ObjFile *OpenMplFile(jobject classLoader, const std::string &path) const { + return (*openMplFile)(classLoader, path); + } + int EnableMygote(bool enable, const std::string &message) const { + return (*enableMygote)(enable, message); + } + void GetListOfDexFileToLoad(const std::string &fileName, std::vector &fileList) const { + (*getDexFileList)(fileName, fileList); + } + void CreateThreadAndLoadFollowingClasses(void* (*loadingCallback)(void*), void* args) const { + (*createThread)(loadingCallback, args); + } + bool IsSystemServer() const { + return isSystemServer->IsSystemServer(); + } + bool IsGcOnly() const { + return isGcOnly->IsGcOnly(); + } + bool IsStarted() const { + return isStarted->IsStarted(); + } + protected: + MplFileOpenCall openDexFile; + MplFileOpenCall openMplFile; + EnableMygoteCall enableMygote; + GetDexFileListCall getDexFileList; + CreateThreadCall createThread; + IAdapterExObj *isSystemServer; + IAdapterExObj *isGcOnly; + IAdapterExObj *isStarted; +}; +} // namespace maplert +#endif // __MAPLE_LOADER_API__ diff --git a/src/mrt/compiler-rt/public-headers/object_type.h b/src/mrt/compiler-rt/public-headers/object_type.h new file mode 100644 index 0000000000..b6ea3d4b51 --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/object_type.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef __MAPLE_LOADER_OBJECT_TYPE__ +#define __MAPLE_LOADER_OBJECT_TYPE__ + +namespace maplert { +// compiler-rt API use IObject for JNI type (jclass/IObject) or Java type (MClass/MObject) and so +#define TYPE_CAST(T, D) reinterpret_cast(D) +#define CONST_TYPE_CAST(T, D) const_cast(reinterpret_cast(D)) +class IObject { + public: + IObject() = default; + ~IObject() { + data = nullptr; + } + IObject(const IObject &c) { + this->data = c.data; + } + IObject &operator=(const IObject &obj) { + this->data = obj.data; + return *this; + } + bool operator==(const IObject obj) const { + return data == obj.data; + } + bool operator!=(const IObject obj) const { + return data != obj.data; + } + bool operator<(const IObject obj) const { + return data < obj.data; + } + template + IObject(const Type &c) { + this->data = CONST_TYPE_CAST(void*, c); + } + template + Type As() { + return TYPE_CAST(Type, data); + } + template + Type As() const { + return TYPE_CAST(Type, data); + } + template + IObject &operator=(const Type &obj) { + data = CONST_TYPE_CAST(void*, obj); + return *this; + } + template + bool operator==(const Type obj) const { + return data == CONST_TYPE_CAST(void*, obj); + } + template + bool operator!=(const Type obj) const { + return data != CONST_TYPE_CAST(void*, obj); + } + template + bool operator<(const Type obj) const { + return data < CONST_TYPE_CAST(void*, obj); + } + bool Empty() const { + return data == nullptr; + } + const void *Get() const { + return data; + } + protected: + void *data = nullptr; +}; +using IEnv = IObject; +using INativeMethod = IObject; +using IAdapterEx = IObject; +using IObjectLocator = IObject; +} // namespace maplert +#endif // __MAPLE_LOADER_OBJECT_TYPE__ + diff --git a/src/mrt/compiler-rt/public-headers/tracer.h b/src/mrt/compiler-rt/public-headers/tracer.h new file mode 100644 index 0000000000..eacb348baa --- /dev/null +++ b/src/mrt/compiler-rt/public-headers/tracer.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef __MAPLE_RUNTIME_TRACER_H
+#define __MAPLE_RUNTIME_TRACER_H
+
+#include <string>
+#include "mrt_api_common.h"
+
+namespace maplert {
+class Tracer {
+ public:
+  // event, 0 means function enter, 1 means function exit
+  virtual void LogMethodTraceEvent(const std::string funcName, int event) = 0;
+  virtual ~Tracer() = default;
+};
+
+MRT_EXPORT void SetTracer(Tracer *tracer);
+
+MRT_EXPORT Tracer *GetTracer();
+
+static inline bool IsTracingEnabled() {
+  // tracing is only meaningful once a tracer has been registered via SetTracer()
+  return (GetTracer() != nullptr);
+}
+}  // namespace maplert
+
+#endif  // __MAPLE_RUNTIME_TRACER_H
diff --git a/src/mrt/compiler-rt/src/allocator.cpp b/src/mrt/compiler-rt/src/allocator.cpp
new file mode 100644
index 0000000000..94ae6fb49c
--- /dev/null
+++ b/src/mrt/compiler-rt/src/allocator.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "allocator.h"
+
+#include
+#include
+#include
+
+#include "mm_config.h"
+#include "address.h"
+#include "sizes.h"
+#include "imported.h"
+#include "lock_word.h"
+#include "deps.h"
+#include "object_base.h"
+#include "allocator/page_allocator.h"
+
+namespace maplert {
+using namespace std;
+
+void Allocator::ReleaseResource(address_t obj) {
+  if (maple::ObjectBase::ReleaseResource(obj)) {
+    return;
+  }
+  MClass *classInfo = MObject::Cast(obj)->GetClass();
+  // failed to release resource means fatal error.
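+  // Reaching this point means ObjectBase::ReleaseResource() has already failed for this object;
+  // both branches below abort via LOG(FATAL), printing the class name when the metadata is available.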
+ if (classInfo == nullptr) { + LOG(FATAL) << "classInfo is nullptr in ReleaseResource, obj = " << obj << maple::endl; + } + LOG(FATAL) << "obj = " << obj << ", class name = " << classInfo->GetName() << maple::endl; +} + +Allocator::Allocator() +#if ALLOC_ENABLE_LOCK_CONTENTION_STATS + : oome(nullptr), oomeCreated(false), globalLock("allocator lock", maple::kAllocatorLock) {} +#else + : oome(nullptr), oomeCreated(false) {} +#endif + +void Allocator::NewOOMException() { + bool oldOOMECreated = false; + if (oome == nullptr && oomeCreated.compare_exchange_strong(oldOOMECreated, true)) { + ScopedObjectAccess soa; + MClass *exClass = MClass::JniCast(MRT_ReflectClassForCharName("java/lang/OutOfMemoryError", true, nullptr)); + if (exClass == nullptr) { + LOG(FATAL) << "New OOM exception object failed, because exClass is nullptr" << maple::endl; + return; + } + size_t size = exClass->GetObjectSize(); + // leave cause and stack null is fine, it will get default empty stack and null cause + oome = MObject::Cast(NewObj(size)); + if (oome == nullptr) { + LOG(FATAL) << "New OOM exception object failed, because oome is nullptr" << maple::endl; + return; + } + MRT_SetJavaClass(oome->AsUintptr(), exClass->AsUintptr()); + } +} + +// PageAlllocator +// the input parameter cat should be guaranteed in the range of value of enum type AllocationTag by +// external invoker, in order to avoid exceed the border of matrix +AggregateAllocator &AggregateAllocator::Instance(AllocationTag tag) { + static ImmortalWrapper instance[kMaxAllocationTag]; + return *(instance[tag]); +} + +// PagePool +PagePool &PagePool::Instance() { + static ImmortalWrapper instance; + return *instance; +} +} diff --git a/src/mrt/compiler-rt/src/allocator/bp_allocator.cpp b/src/mrt/compiler-rt/src/allocator/bp_allocator.cpp new file mode 100644 index 0000000000..99323d2254 --- /dev/null +++ b/src/mrt/compiler-rt/src/allocator/bp_allocator.cpp @@ -0,0 +1,209 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "allocator/bp_allocator.h" +#include "allocator/bp_allocator_inlined.h" +#include "collector/collector_tracing.h" + +namespace maplert { +// vaddr starts from (kSpaceAnchor - kMaxSpaceSize) +const uint32_t BumpPointerAlloc::kInitialSpaceSize = (1u << 18); // 256KB initially +const size_t BumpPointerAlloc::kExtendedSize = (1u << 16); // extend 64KB each time + +// The reason with this mem map is that we use multiple bp allocators, each +// having a mem map. If we let them map random addresses one by one, there +// is a good chance the attempted mmap() clobbers one another, and fails +// constantly, thus impeding performance. +// The solution is to reserve enough space for all of them, then each of +// them uses another mmap (with flag MAP_FIXED) to place their mappings +// in the reserved space in order. 
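+// Concretely (using the names defined below): ReserveAddrSpace() performs one large PROT_NONE mapping
+// up front; each BumpPointerAlloc::Init(growthLimit) then places its own mapping at reservedAddr with
+// MAP_FIXED and advances reservedAddr by growthLimit + kOffHeapSpaceGap, so the next allocator lands
+// in the following slice of the reservation.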
+// See the usage of flag MAP_FIXED: +// http://man7.org/linux/man-pages/man2/mmap.2.html +// Alternatively, we can make changes to MemMap so it coordinates +// multiple random mappings better, but by intuition that's more expensive. +static MemMap *reservedAddrSpace = nullptr; +static address_t reservedAddr = 0; +static address_t reservedEnd = 0; + +static inline void ReserveAddrSpace() { + if (reservedAddr != 0) { + return; + } + MemMap::Option opt = MemMap::kDefaultOptions; + opt.reqRange = true; + opt.lowestAddr = PERM_BEGIN; + opt.highestAddr = PERM_END - BumpPointerAlloc::kFireBreak; + opt.tag = "maple_alloc_reserved"; + // this must succeed otherwise it won't return + reservedAddrSpace = MemMap::MapMemory(kOffHeapSpaceSize, 0, opt); + reservedAddr = reinterpret_cast(reservedAddrSpace->GetBaseAddr()); + reservedEnd = reinterpret_cast(reservedAddrSpace->GetMappedEndAddr()); +} + +BumpPointerAlloc::BumpPointerAlloc(const string &name, size_t maxSize) + : memMap(nullptr), startAddr(0), currentAddr(0), endAddr(0), showmapName(name) { + // this should be called at a separate time! + Init(maxSize); +} + +BumpPointerAlloc::~BumpPointerAlloc() { + LOG2FILE(kLogTypeAllocator) << "Destructing BumpPointerAlloc" << std::endl; + MemMap::DestroyMemMap(memMap); +} + +void BumpPointerAlloc::Init(size_t growthLimit) { + ReserveAddrSpace(); + if (reservedAddr == 0 || reservedAddr > reservedEnd - growthLimit) { + LOG(FATAL) << "space size inconsistency: reserved from " << std::hex << reservedAddr << + " to " << reservedEnd << ", size requested " << growthLimit; + } + MemMap::Option opt = MemMap::kDefaultOptions; + opt.reqRange = false; + opt.tag = showmapName.c_str(); + opt.reqBase = reinterpret_cast(reservedAddr); + opt.flags |= MAP_FIXED; + reservedAddr += growthLimit + kOffHeapSpaceGap; + if (reservedAddr == reservedEnd + kOffHeapSpaceGap) { + MemMap::DestroyMemMap(reservedAddrSpace); + } + memMap = MemMap::MapMemory(growthLimit, kInitialSpaceSize, opt); + startAddr = reinterpret_cast(memMap->GetBaseAddr()); + currentAddr = startAddr; + endAddr = reinterpret_cast(memMap->GetCurrEnd()); +} + +void BumpPointerAlloc::Dump(std::basic_ostream &os) { + if (memMap == nullptr) { + os << "Fail to dump permanent space because memMap is null!" 
<< "\n"; + return; + } + // invoked in stw + size_t usedSize = currentAddr - startAddr; + os << showmapName << " space used: " << usedSize << ", left: " << ((memMap->GetMappedSize()) - usedSize) << "\n"; + +#if BPALLOC_DEBUG + DumpUsage(os); +#endif +} + +address_t BumpPointerAlloc::Alloc(size_t size) { + size_t allocSize = AllocUtilRndUp(size, kBPAllocObjAlignment); + address_t resultAddr = AllocInternal(allocSize); + return resultAddr; +} + +bool BumpPointerAlloc::Contains(address_t obj) const { + if ((obj < currentAddr) && (obj >= startAddr)) { + return true; + } else { + return false; + } +} + +DecoupleAllocator::DecoupleAllocator(const string &name, size_t maxSize) : BumpPointerAlloc(name, maxSize) {} + +address_t DecoupleAllocator::Alloc(size_t size, DecoupleTag tag) { + address_t resultAddr = 0U; + if (UNLIKELY(size >= DecoupleAllocHeader::kMaxSize)) { + return resultAddr; + } + size_t allocSize = GetAllocSize(size); + resultAddr = AllocInternal(allocSize); + address_t allocAddr = BPALLOC_GET_OBJ_FROM_ADDR(resultAddr); + DecoupleAllocHeader::SetHeader(allocAddr, static_cast(tag), size); + return allocAddr; +} + +bool DecoupleAllocator::ForEachObj(function visitor) { + address_t objAddr = startAddr + DecoupleAllocHeader::kHeaderSize; + while (objAddr < currentAddr) { + size_t size = DecoupleAllocHeader::GetSize(objAddr); + if (size >= DecoupleAllocHeader::kMaxSize) { + // size broken! heap corruption detected. + return false; + } + visitor(objAddr); + size_t allocSize = GetAllocSize(size); + if (currentAddr - objAddr <= allocSize) { + break; + } + objAddr += allocSize; + } + return true; +} + +constexpr int kDecoupleTagNamesAlign = 20; + +void DecoupleAllocator::DumpUsage(std::basic_ostream &os) { + std::vector> profile; + for (int i = 0; i < static_cast(DecoupleTag::kTagMax); ++i) { + profile.push_back(std::make_pair(0, 0)); + } + address_t objAddr = startAddr + DecoupleAllocHeader::kHeaderSize; + while (objAddr < currentAddr) { + int tag = DecoupleAllocHeader::GetTag(objAddr); + if (tag < static_cast(DecoupleTag::kTagMax)) { + profile[tag].first++; + } else { + os << "tag broken at " << objAddr << ", tag " << tag << "\n"; + break; + } + size_t size = DecoupleAllocHeader::GetSize(objAddr); + if (size < DecoupleAllocHeader::kMaxSize) { + profile[tag].second += size; + } else { + os << "size broken at " << objAddr << ", tag " << tag << ", size " << size << "\n"; + break; + } + size_t allocSize = GetAllocSize(size); + if (currentAddr - objAddr <= allocSize) { + break; + } + objAddr += allocSize; + } + os << showmapName << " space usage:\n"; + for (uint16_t i = 0; i < DecoupleTag::kTagMax; ++i) { + os << " " << std::setw(kDecoupleTagNamesAlign) << kDecoupleTagNames[i] << ": count " << profile[i].first << + ", total size " << profile[i].second << "\n"; + } +} + +address_t MetaAllocator::Alloc(size_t size, MetaTag metaTag) { + size_t allocSize = AllocUtilRndUp(size, kBPAllocObjAlignment); + address_t resultAddr = AllocInternal(allocSize); + sizeUsed[metaTag] += allocSize; + return resultAddr; +} + +void MetaAllocator::DumpUsage(std::basic_ostream &os) { + os << "Class meta: " << sizeUsed[kClassMetaData] << "\n"; + os << "Field meta: " << sizeUsed[kFieldMetaData] << "\n"; + os << "Method meta: " << sizeUsed[kMethodMetaData] << "\n"; + os << "VTable meta: " << sizeUsed[kITabMetaData] << "\n"; + os << "Native string: " << sizeUsed[kNativeStringData] << "\n"; +} + +void ZterpStaticRootAllocator::VisitStaticRoots(const RefVisitor &visitor) { + std::lock_guard lock(globalLock); + size_t 
spaceAddr = startAddr; + while (spaceAddr < currentAddr) { + address_t *rootAddr = *(reinterpret_cast(spaceAddr)); + LinkerRef ref(rootAddr); + if (!ref.IsIndex()) { + visitor(*rootAddr); + } + spaceAddr += singleObjSize; + } +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/allocator/mem_map.cpp b/src/mrt/compiler-rt/src/allocator/mem_map.cpp new file mode 100644 index 0000000000..f364a260ff --- /dev/null +++ b/src/mrt/compiler-rt/src/allocator/mem_map.cpp @@ -0,0 +1,224 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "allocator/mem_map.h" + +#include +#include +#include "sizes.h" +#include "syscall.h" +#include "libs.h" + +namespace maplert { +using namespace std; + +// not thread safe, do not call from multiple threads +MemMap *MemMap::MapMemory(size_t reqSize, size_t initSize, const Option &opt) { + void *mappedAddr = MAP_FAILED; + reqSize = AllocUtilRndUp(reqSize, ALLOCUTIL_PAGE_SIZE); + + LOG2FILE(kLogTypeAllocator) << "MemMap::MapMemory size " << reqSize << std::endl; + if (kEnableRange && opt.reqRange) { + // repeatedly map and guarantee the result is in the specified range + mappedAddr = MemMapInRange(opt.lowestAddr, opt.highestAddr, reqSize, PROT_NONE, opt.flags); + } else { + mappedAddr = mmap(opt.reqBase, reqSize, PROT_NONE, opt.flags, -1, 0); + } + + bool failure = false; + if (mappedAddr != MAP_FAILED) { + MRT_PRCTL(mappedAddr, reqSize, opt.tag); + // if protAll, all memory is protected at creation, and we never change it (save time) + size_t protSize = opt.protAll ? 
reqSize : initSize; + if (!ProtectMemInternal(mappedAddr, protSize, opt.prot)) { + failure = true; + LOG(ERROR) << "MemMap::MapMemory mprotect failed" << maple::endl; + ALLOCUTIL_MEM_UNMAP(mappedAddr, reqSize); + } + } else { + failure = true; + } + if (failure) { + LOG(FATAL) << "MemMap::MapMemory failed reqSize: " << reqSize << " initSize: " << initSize << + " reqRange: " << opt.reqRange; + } + + LOG2FILE(kLogTypeAllocator) << "MemMap::MapMemory size " << reqSize << + " successful at " << mappedAddr << std::endl; + return new MemMap(mappedAddr, initSize, reqSize, opt.protAll, opt.prot); +} + +MemMap *MemMap::CreateMemMapAtExactAddress(void *addr, size_t size, const Option &opt) { + const size_t pageSize = static_cast(sysconf(_SC_PAGESIZE)); + uintptr_t uaddr = reinterpret_cast(addr); + if (uaddr % pageSize != 0) { + LOG(FATAL) << "try to mmap at address " << std::hex << uaddr << std::dec << " which is not page-aligned" << + maple::endl; + } + + void *mappedAddr = mmap(addr, size, opt.prot, opt.flags | MAP_FIXED, -1, 0); + if (mappedAddr != MAP_FAILED && uaddr == reinterpret_cast(mappedAddr)) { + MRT_PRCTL(mappedAddr, size, opt.tag); + return new MemMap(mappedAddr, size, size, opt.protAll, opt.prot); + } else { + LOG(FATAL) << "MemMap::CreateMemMapAtExactAddress failed to map memory at 0x: " << std::hex << uaddr << + std::dec << " size: " << size << maple::endl; + } + + return nullptr; +} + +MemMap::MemMap(void *baseAddr, size_t initSize, size_t mappedSize, bool protAll, int prot) + : memBaseAddr(baseAddr), + memCurrSize(initSize), + memMappedSize(mappedSize), + protOnce(protAll), + memProt(prot) { + memCurrEndAddr = reinterpret_cast(reinterpret_cast(memBaseAddr) + memCurrSize); + memMappedEndAddr = reinterpret_cast(reinterpret_cast(memBaseAddr) + memMappedSize); +} + +bool MemMap::ProtectMemInternal(void *addr, size_t size, int prot) { + LOG2FILE(kLogTypeAllocator) << "MemMap::ProtectMem " << addr << ", size " << + size << ", prot " << prot << std::endl; + int ret = mprotect(addr, size, prot); + return (ret == 0); +} + +bool MemMap::ProtectMem(address_t addr, size_t size, int prot) const { + if (addr >= reinterpret_cast(memBaseAddr) && + addr + size <= reinterpret_cast(memCurrEndAddr)) { + return ProtectMemInternal(reinterpret_cast(addr), size, prot); + } + return false; +} + +void MemMap::UpdateCurrEndAddr() { + address_t startAddr = (reinterpret_cast(memBaseAddr)); + memCurrEndAddr = reinterpret_cast(startAddr + memCurrSize); +} + +bool MemMap::Extend(size_t reqSize) { + if (memCurrSize >= memMappedSize) { + LOG(ERROR) << "MemMap::Extend failed, curr size " << memCurrSize << + ", mapped size " << memMappedSize << maple::endl; + return false; + } + size_t newCurrSize = memCurrSize + reqSize; + if (newCurrSize > memMappedSize) { + LOG(ERROR) << "MemMap::Extend invalid new size " << newCurrSize << + ", mapped size " << memMappedSize << maple::endl; + return false; + } + if (!protOnce && !ProtectMemInternal(memCurrEndAddr, reqSize, memProt)) { + LOG(ERROR) << "MemMap::Extend mprotect failed" << maple::endl; + return false; + } + memCurrSize = newCurrSize; + UpdateCurrEndAddr(); + LOG2FILE(kLogTypeAllocator) << "MemMap::Extend successful, curr size " << + memCurrSize << ", mapped size " << memMappedSize << std::endl; + return true; +} + +bool MemMap::ReleaseMem(address_t releaseBeginAddr, size_t size) const { + address_t baseAddr = reinterpret_cast(memBaseAddr); + address_t currEndAddr = reinterpret_cast(memCurrEndAddr); + address_t releaseEndAddr = releaseBeginAddr + size; + if 
(releaseBeginAddr < baseAddr || releaseEndAddr > currEndAddr) { + return false; + } + ALLOCUTIL_MEM_MADVISE(releaseBeginAddr, size, MADV_DONTNEED); + return true; +} + +// resize the memory map by releasing the pages at the end (munmap) +bool MemMap::Shrink(size_t newSize) { + if (newSize >= memMappedSize) { + return false; + } + address_t newEndAddr = reinterpret_cast(memBaseAddr) + newSize; + memMappedEndAddr = reinterpret_cast(newEndAddr); + ALLOCUTIL_MEM_UNMAP(memMappedEndAddr, memMappedSize - newSize); + memMappedSize = newSize; + memCurrSize = std::min(newSize, memCurrSize); + return true; +} + +void *MemMap::MemMapInRange(address_t lowestAddr, address_t highestAddr, + size_t reqSize, int prot, int flags, int fd, int offset) { + lowestAddr = AllocUtilRndUp(lowestAddr, ALLOCUTIL_PAGE_SIZE); + highestAddr = AllocUtilRndDown(highestAddr, ALLOCUTIL_PAGE_SIZE); + if (lowestAddr >= highestAddr || highestAddr - lowestAddr < reqSize) { + LOG(ERROR) << "MemMap::MemMapInRange illegal range [" << lowestAddr << + ", " << highestAddr << "]" << maple::endl; + return MAP_FAILED; + } + void *mappedAddr = MAP_FAILED; + highestAddr -= reqSize; + AllocUtilRand randAddr(lowestAddr, highestAddr); + if (kEnableRandomMemStart) { + // The hint doesn't guarantee the actually mapped address is in the specified range. + // If the hint address is already occupied, mmap will try other arbitrary addresses. + // We need to check the result, and repeat if fails. + int repeat = 10; + while (mappedAddr == MAP_FAILED && (--repeat) != 0) { + address_t hint = AllocUtilRndUp(randAddr.next(), ALLOCUTIL_PAGE_SIZE); + mappedAddr = MemMapInRangeInternal(hint, lowestAddr, highestAddr, + reqSize, prot, flags | MAP_FIXED, fd, offset); + } + } + if (mappedAddr == MAP_FAILED) { + for (address_t addr = lowestAddr; addr <= highestAddr; addr += ALLOCUTIL_PAGE_SIZE) { + mappedAddr = MemMapInRangeInternal(addr, lowestAddr, highestAddr, + reqSize, prot, flags | MAP_FIXED, fd, offset); + if (mappedAddr != MAP_FAILED) { + break; + } + } + } + if (mappedAddr == MAP_FAILED) { + LOG(ERROR) << "MemMap::MemMapInRange failed" << maple::endl; + } else { + LOG2FILE(kLogTypeAllocator) << "MemMap::MemMapInRange returns " << mappedAddr << std::endl; + } + return mappedAddr; +} + +void *MemMap::MemMapInRangeInternal(address_t hint, + address_t lowestAddr, address_t highestAddr, + size_t reqSize, int prot, int flags, + int fd, int offset) { + void *mappedAddr = mmap(reinterpret_cast(hint), reqSize, prot, flags, fd, offset); + if (mappedAddr == MAP_FAILED) { + LOG2FILE(kLogTypeAllocator) << + FormatString("map memory at %p: hint %p, low %p, high %p, size %zu, prot %x, flags %x, fd %d, offset %d", + mappedAddr, hint, lowestAddr, highestAddr, reqSize, prot, flags, fd, offset); + } else if (reinterpret_cast(mappedAddr) < lowestAddr || + reinterpret_cast(mappedAddr) > highestAddr) { + LOG2FILE(kLogTypeAllocator) << + FormatString("map memory at %p: hint %p, low %p, high %p, size %zu, prot %x, flags %x, fd %d, offset %d", + mappedAddr, hint, lowestAddr, highestAddr, reqSize, prot, flags, fd, offset); + ALLOCUTIL_MEM_UNMAP(mappedAddr, reqSize); // roll back + mappedAddr = MAP_FAILED; + } + return mappedAddr; +} + +MemMap::~MemMap() { + memBaseAddr = nullptr; + memCurrEndAddr = nullptr; + memMappedEndAddr = nullptr; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/allocator/page_map.cpp b/src/mrt/compiler-rt/src/allocator/page_map.cpp new file mode 100644 index 0000000000..acbcccbef9 --- /dev/null +++ 
b/src/mrt/compiler-rt/src/allocator/page_map.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "allocator/page_map.h" +#include "allocator/alloc_config.h" + +// Definitions of page map +namespace maplert { +PageMap::PageMap() + : spaceBeginAddr(0), + spaceEndAddr(0), + maxMapSize(0), + pageMapSize(0), + memMap(nullptr), + map(nullptr) { +} + +PageMap::~PageMap() { + ROSIMPL_VERIFY_DUMP_PG_TABLE; + spaceBeginAddr = 0; + spaceEndAddr = 0; + MemMap::DestroyMemMap(memMap); + // memory will be released by memMap + map = nullptr; +} + +void PageMap::Init(address_t baseAddr, size_t maxSize, size_t size) { + spaceBeginAddr = baseAddr; + spaceEndAddr = baseAddr + size; + maxMapSize = ALLOCUTIL_PAGE_BYTE2CNT(maxSize); + pageMapSize = ALLOCUTIL_PAGE_BYTE2CNT(size); + + size_t reqSize = ALLOCUTIL_PAGE_RND_UP(maxMapSize); + MemMap::Option opt = MemMap::kDefaultOptions; + opt.tag = "maple_alloc_ros_pm"; + opt.reqBase = nullptr; + opt.reqRange = false; + memMap = MemMap::MapMemory(reqSize, reqSize, opt); + map = static_cast(memMap->GetBaseAddr()); + if (UNLIKELY(map == nullptr)) { + LOG(FATAL) << "page map initialisation failed"; + } + SetRange(0, maxMapSize, kPReleased); + + // As for now, the assignment to maxMapSize in this function is the only + // assignment. This means the heap memory mapped to the address space never + // exceeds this size. We initialize finProf to create a table of this size, + // so that the table is big enough even when the heap size is changed. + finProf.Init(maxMapSize); + + LOG2FILE(kLogTypeAllocator) << "[PageMap] number of entries " << pageMapSize << ". 
address " << map << std::endl; +} + +constexpr uint32_t kMaxNumPerLine = 10; + +void PageMap::Dump() { + LOG2FILE(kLogTypeAllocator) << "[PageMap] number of entries: " << pageMapSize << "\n"; + LOG2FILE(kLogTypeAllocator) << " max number of entries: " << maxMapSize << "\n"; + LOG2FILE(kLogTypeAllocator) << " spaceBeginAddr = 0x" << std::hex << spaceBeginAddr << "\n"; + LOG2FILE(kLogTypeAllocator) << " spaceEndAddr = 0x" << std::hex << spaceEndAddr << "\n"; + std::stringstream ss; + bool skipFreePages = true; + if (map != nullptr) { + for (IntType i = 0 ; i < GetMapSize(); ++i) { + if (skipFreePages && static_cast(map[i]) <= static_cast(kPFree)) { + continue; + } + if (i % kMaxNumPerLine == 0) { + ss << "\n [" << i << "]:" << static_cast(map[i]); + continue; + } + ss << " [" << i << "]:" << static_cast(map[i]); + } + } + LOG2FILE(kLogTypeAllocator) << ss.str().c_str() << std::endl; +} + +void PageMap::DumpFinalizableInfo(std::ostream &ost) { + ost << "Dumping finalizable information for each page" << std::endl; + + IntType touched = 0; + IntType hasFins = 0; + + for (IntType index = 0; index < maxMapSize; ++index) { + FinalizableProf::PageEntry &entry = finProf.GetEntry(index); +#if MRT_RESURRECTION_PROFILE == 1 + size_t incs = entry.incs.load(std::memory_order_relaxed); + size_t decs = entry.decs.load(std::memory_order_relaxed); +#endif // MRT_RESURRECTION_PROFILE + uint16_t fins = entry.fins.load(std::memory_order_relaxed); + if ( +#if MRT_RESURRECTION_PROFILE == 1 + incs == 0 && decs == 0 && +#endif // MRT_RESURRECTION_PROFILE + fins == 0 + ) { + continue; + } + + ++touched; + + if (fins != 0) { + ++hasFins; + } + + ost << " index: " << index << +#if MRT_RESURRECTION_PROFILE == 1 + " incs:" << incs << + " decs:" << decs << +#endif // MRT_RESURRECTION_PROFILE + " fins:" << fins << + std::endl; + } + + ost << "maxMapSize: " << maxMapSize << std::endl; + ost << "touched: " << touched << std::endl; + ost << "hasFins: " << hasFins << std::endl; + ost << "pageMapSize: " << pageMapSize << std::endl; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/allocator/ros_allocator.cpp b/src/mrt/compiler-rt/src/allocator/ros_allocator.cpp new file mode 100644 index 0000000000..334bcbe1ff --- /dev/null +++ b/src/mrt/compiler-rt/src/allocator/ros_allocator.cpp @@ -0,0 +1,1556 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include "chosen.h" +#include "allocator/alloc_callbacks.h" +#include "allocator/ros_allocator_inlined.h" +#include "yieldpoint.h" +#include "exception/mpl_exception.h" +#include "mstring_inline.h" + +namespace maplert { +using namespace maple; + +uint8_t RosAllocImpl::kRunMagic; + +// REMEMBER TO CHANGE kRunConfigs WHEN YOU ADD/REMOVE CONFIGS +// this stores a config for each kind of run (represented by an index) +const RunConfigType RunConfig::kCfgs[kRunConfigs] = { + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 16 }, + + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 24 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 32 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 40 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 48 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 56 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 64 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 72 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 80 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 88 }, + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 96 }, + // [1] this size must be the same with kROSAllocLocalSize + // [2] all sizes smaller than this must also use local + // [3] all sizes smaller must increment by 8 + // ([2, 3] are optimisations, rather than restrictions) + { true, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 104 }, + + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 112 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 120 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 128 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 136 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 144 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 152 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 160 }, + + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 168 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 176 }, + + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 184 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 192 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 200 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 208 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 216 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 224 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 232 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 240 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 248 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 256 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 264 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 272 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 280 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 288 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 296 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 304 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 312 }, + { false, 
kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 320 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 328 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 336 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 344 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 352 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 360 }, + + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 400 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 448 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 504 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 576 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 672 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 800 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 1008 }, + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 1344 }, + // this size must be the same with kROSAllocLargeSize + { false, kRosimplDefaultMaxCacheRun, kRosimplDefaultPagePerRun, 2016 } +}; +// this map maps a size ((size >> 3 - 1) to be precise) to a run config +// this map takes 4 * kMaxRunConfigs == 1k +uint32_t RunConfig::size2idx[kMaxRunConfigs] = { 0 }; // all zero-initialised + +// this function inits the config map using the configs above +// so that for any size there is a config for it +// if the size doesn't match any of the configs exactly, we choose +// the closest config with a greater size, e.g., +// size2idx[456 >> 3 - 1] == size2idx[464 >> 3 - 1] == .. == size2idx[504 >> 3 - 1] +static void InitRunConfigMap() { + constexpr uint32_t runSizeShift = 3; + ROSIMPL_ASSERT(RunConfig::kRunConfigs <= RunConfig::kMaxRunConfigs, "too many configs"); + uint32_t idx = RunConfig::kRunConfigs; + uint32_t nextSize = RunConfig::kCfgs[RunConfig::kRunConfigs - 1].size; + ROSIMPL_ASSERT(nextSize <= (RunConfig::kMaxRunConfigs << runSizeShift), "size too big in config"); + uint32_t i = (RunConfig::kMaxRunConfigs - 1); + while (true) { + if (((i + 1) << runSizeShift) > nextSize) { + if (idx < RunConfig::kRunConfigs) { + RunConfig::size2idx[i] = idx; + } + } else { + ROSIMPL_ASSERT(((i + 1) << runSizeShift) == nextSize, "init run config error"); + ROSIMPL_ASSERT(idx > 0, "init run config error"); + RunConfig::size2idx[i] = --idx; + if (idx > 0 && idx < RunConfig::kRunConfigs) { + ROSIMPL_ASSERT(static_cast(RunConfig::kCfgs[idx - 1].size) < nextSize, "not in ascending order"); + nextSize = RunConfig::kCfgs[idx - 1].size; + } else { + nextSize = 0; + } + } + + if (i == 0) { + break; + } else { + --i; + } + } + +#if ROSIMPL_ENABLE_ASSERTS + for (i = 0; i <= ROSIMPL_RUN_IDX(kROSAllocLocalSize); ++i) { + ROSIMPL_ASSERT(ROSIMPL_IS_LOCAL_RUN_IDX(i), "run config inconsistent: small idx not local"); + } +#endif + ROSIMPL_ASSERT(ROSIMPL_RUN_IDX(kROSAllocLargeSize) + 1 == RunConfig::kRunConfigs, + "run config inconsistent: large size"); +} + +size_t RunSlots::maxSlots[RunConfig::kRunConfigs] = {}; + +// max number of pages for a single parallel task. 
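+// The heap's page range is split into chunks of at most kMaxPagesPerTask pages; ParallelFreeAllIf and
+// ParallelForEachObj both compute chunkSize = min(lastPageIndex / (maxThreads + 1) + 1, kMaxPagesPerTask),
+// so a large heap produces enough tasks for the thread pool to balance the work.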
+constexpr size_t kMaxPagesPerTask = 256; // 4K * 256 = 1M + +// FreeList +void FreeList::Init(address_t baseAddr, size_t slotSize, size_t slotCount) { + ROSIMPL_ASSERT(slotCount > 0, "cannot init free list with 0 slot"); + SetHead(baseAddr); + Slot *lastSlot = reinterpret_cast(baseAddr); + address_t currAddr = baseAddr; + for (size_t ind = 1; ind < slotCount; ++ind) { +#if (!ROSIMPL_MEMSET_AT_FREE) +#if CONFIG_JSAN || RC_HOT_OBJECT_DATA_COLLECT + // SetNext() will fail to clear the 'allocated' bit in jsan, so we manually clear it + ClearAllocatedBit(ROSIMPL_GET_OBJ_FROM_ADDR(currAddr)); +#endif +#endif + currAddr += slotSize; + lastSlot->SetNext(currAddr); + lastSlot = reinterpret_cast(currAddr); + } + + SetTail(currAddr); + lastSlot->SetNext(0); +#if (!ROSIMPL_MEMSET_AT_FREE) +#if CONFIG_JSAN || RC_HOT_OBJECT_DATA_COLLECT + // SetNext() will fail to clear the 'allocated' bit in jsan, so we manually clear it + ClearAllocatedBit(ROSIMPL_GET_OBJ_FROM_ADDR(currAddr)); +#endif +#endif +} + +// RunSlots +RunSlots::RunSlots(uint32_t idx) : padding(0) { +#if ROSIMPL_MEMSET_AT_FREE + ROSIMPL_ASSERT(magic == 0U, "initializing run with dirty memory"); +#endif + magic = RosAllocImpl::kRunMagic; + mIdx = static_cast(idx); + flags = 0; + SetNext(nullptr); + SetPrev(nullptr); +} + +void RunSlots::Init(bool setInitRelease) { + size_t slotsCount = GetMaxSlots(); + address_t slotAddr = GetBaseAddress(); + + freeList.Init(slotAddr, GetRunSize(), slotsCount); + nFree = static_cast(slotsCount); + if (setInitRelease) { + SetInitRelease(); + } else { + SetInit(); + } +} + +void RunSlots::ForEachObj(function visitor, OnlyVisit onlyVisit, size_t hint) { + size_t runUnitSize = GetRunSize(); + size_t slotsCount = GetMaxSlots(); + address_t slotAddr = GetBaseAddress(); + + if (UNLIKELY(slotsCount == 0)) { + return; + } + + // Hoist the test for onlyVisit outside the loop. This may result in some + // code duplication. + if (onlyVisit == OnlyVisit::kVisitFinalizable) { + size_t toVisit = hint; + for (size_t idx = 0; idx < slotsCount; ++idx) { + address_t objAddr = ROSIMPL_GET_OBJ_FROM_ADDR(slotAddr); + if (IsAllocatedByAllocator(objAddr) && IsObjResurrectable(objAddr)) { + visitor(objAddr); + --toVisit; + if (toVisit == 0) { + break; + } + } + slotAddr += runUnitSize; + } + } else { + for (size_t idx = 0; idx < slotsCount; ++idx) { + address_t objAddr = ROSIMPL_GET_OBJ_FROM_ADDR(slotAddr); + if (IsAllocatedByAllocator(objAddr)) { + visitor(objAddr); + } + slotAddr += runUnitSize; + } + } +} + +// return true if the given address represents a live object +bool RunSlots::IsLiveObjAddr(address_t objAddr) const { + // since there is no lock, a mutator might be allocating/freeing from this run + // at the same time, so there is a slight inaccuracy that the caller should be aware of + address_t slotAddr = ROSIMPL_GET_ADDR_FROM_OBJ(objAddr); + address_t firstSlotAddr = GetBaseAddress(); + size_t slotsCount = GetMaxSlots(); + address_t lastSlot = firstSlotAddr + (slotsCount - 1) * GetRunSize(); + if (!(slotAddr <= lastSlot && slotAddr >= firstSlotAddr)) { + return false; + } + if (((slotAddr - firstSlotAddr) % GetRunSize()) != 0) + return false; + + return IsAllocatedByAllocator(objAddr); +} + +bool RunSlots::Sweep(RosAllocImpl& allocator) { + // get & check sweep context. + SweepContext &context = allocator.sweepContext; + + // get page index of this run page. + size_t pageIndex = allocator.pageMap.GetPageIndex(reinterpret_cast(this)); + + // try to set page to sweeping state. 
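+  // SetSweeping is expected to atomically claim the page for this sweeper; when it fails, another
+  // thread has already swept (or is sweeping) this run, so there is nothing left to do here.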
+ PageLabel pageType = kPMax; + if (!context.SetSweeping(pageIndex, pageType)) { + // page already swept. + return false; + } + + ROSIMPL_ASSERT(pageType == kPRun, "Sweep: incorrect page type"); + + // if set sweeping state success, run sweep for this run page. + return DoSweep(allocator, context, pageIndex); +} + +bool RunSlots::DoSweep(RosAllocImpl &allocator, SweepContext &context, size_t pageIndex) { + ROSIMPL_ASSERT(HasInit(), "run not initialised"); + const bool fullBeforeSweep = IsFull(); + const size_t slotSize = GetRunSize(); + std::vector deadNeighbours; + size_t releasedCount = 0; + size_t releasedBytes = 0; + bool updateState = false; + + // count scaned runs. + (void)context.scanedRuns.fetch_add(1, std::memory_order_relaxed); + + if (IsEmpty()) { + // if run is empty, do nothing but set page swept. + context.SetSwept(pageIndex); + return false; + } + + // for all dead objects in this RunSlots. + ForEachObj([this, &allocator, &deadNeighbours, &releasedCount, &releasedBytes, slotSize](address_t obj) { + // skip live objects. + if (!Collector::Instance().IsGarbage(obj)) { + return; + } + +#if CONFIG_JSAN + // when JSAN is enabled, skip objects held by JSAN. + if (JSANGetObjStatus(obj) == kObjStatusQuarantined) { + return; + } +#endif + + Collector::Instance().HandleNeighboursForSweep(obj, deadNeighbours); + + allocator.SweepSlot(*this, ROSIMPL_GET_ADDR_FROM_OBJ(obj)); + ++releasedCount; + releasedBytes += slotSize; + }); + + // To prevent dead lock, we do not release dead neighbours here, + // but save them to sweep context, so that we can release them later. + if (UNLIKELY(!deadNeighbours.empty())) { + context.AddDeadNeighbours(deadNeighbours); + } + + // if there are released objects. + if (releasedCount > 0) { + // statistics in sweep context. + (void)context.sweptRuns.fetch_add(1, std::memory_order_relaxed); + (void)context.releasedObjects.fetch_add(releasedCount, std::memory_order_relaxed); + (void)context.releasedBytes.fetch_add(releasedBytes, std::memory_order_relaxed); + FAST_ALLOC_ACCOUNT_SUB(releasedBytes); + + updateState = (*theAllocator).UpdateGlobalsAfterFree(*this, fullBeforeSweep); + } + + // set page swept. 
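+  // Once the page is marked swept, any mutator racing into Sweep() for the same run sees SetSweeping
+  // fail and returns without scanning the run again.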
+ context.SetSwept(pageIndex); + + return updateState; +} + +// RosAllocImpl +// the global mutator is a proxy for non-local allocation +// this mutator doesn't need to be initialised or finalised, we only need its lists/runs +ROSAllocMutator RosAllocImpl::globalMutator; + +#if ALLOC_ENABLE_LOCK_CONTENTION_STATS +uint32_t RosAllocImpl::pageLockContentionRec = 0; +uint64_t RosAllocImpl::pageLockWaitTimeRec = 0U; +#endif + +FastAllocData FastAllocData::data; + +FragmentationRecord fragRec; + +RosAllocImpl::RosAllocImpl() + : Allocator(), + pageMap(), + allocSpace("ROS memory space", "maple_alloc_ros", true, pageMap) { // managed space + LOG2FILE(kLogTypeAllocator) << "Initializing RosAllocImpl" << std::endl; + InitRunConfigMap(); + + // initialize run array to be full + for (int i = 0; i < kNumberROSRuns; ++i) { + size_t maxBytes = ALLOCUTIL_PAGE_CNT2BYTE(ROSIMPL_N_PAGES_PER_RUN(i)) - + RunSlots::GetContentOffset(); + RunSlots::maxSlots[i] = maxBytes / ROSIMPL_RUN_SIZE(i); + } + LOG2FILE(kLogTypeAllocator) << "Finished Initializing RosAllocImpl" << std::endl; +} + +void RosAllocImpl::Init(const VMHeapParam &vmHeapParam) { + AllocUtilRand rand(0, std::numeric_limits::max()); + kRunMagic = static_cast(rand.next()); + Space::SetHeapStartSize(vmHeapParam.heapStartSize); + Space::SetHeapSize(vmHeapParam.heapSize); + // We should use this one in future + Space::SetHeapGrowthLimit(vmHeapParam.heapGrowthLimit); + // this overwrites the previous Set(), because currently we don't + // distinguish large-heap processes from small-heap processes + Space::SetHeapGrowthLimit(vmHeapParam.heapSize); + Space::SetHeapMinFree(vmHeapParam.heapMinFree); + Space::SetHeapMaxFree(vmHeapParam.heapMaxFree); + Space::SetHeapTargetUtilization(vmHeapParam.heapTargetUtilization); + Space::SetIgnoreMaxFootprint(vmHeapParam.ignoreMaxFootprint); + allocSpace.Init(); + pageMap.Init(reinterpret_cast(allocSpace.GetBegin()), + allocSpace.GetMaxCapacity(), allocSpace.GetSize()); + if (VLOG_IS_ON(jsanlite)) { + JsanliteInit(); + } +} + +RosAllocImpl::~RosAllocImpl() { + LOG2FILE(kLogTypeAllocator) << "Destructing RosAllocImpl" << std::endl; +} + +address_t RosAllocImpl::AllocPagesInternal(size_t reqSize, size_t &actualSize, int forceLevel) { + actualSize = ALLOCUTIL_PAGE_RND_UP(reqSize); + // we allow extension, assuming that concurrent gc has done + // everything it can to reduce footprint + size_t oldSize = allocSpace.GetSizeRelaxed(); + address_t retAddress = allocSpace.Alloc(actualSize, true); + // update page map if the heap was extended + size_t newSize = allocSpace.GetSizeRelaxed(); + if (UNLIKELY(newSize > oldSize)) { + pageMap.UpdateSize(newSize); + // on successful extension, check the heap growth against the threshold + // and trigger non-blocking concurrent gc if condition met + // don't trigger if forceLevel too high (indicating other gc must have been done) + if (forceLevel < static_cast(kEagerLevelExtend)) { + size_t threshold = stats::gcStats->CurGCThreshold(); + size_t allocated = AllocatedMemory(); + if (allocated >= threshold) { + // we are holding the global lock, and the object hasn't been properly + // initialised, this is considered unsafe + Collector::Instance().InvokeGC(kGCReasonHeu, true); + } + } + } + + return retAddress; +} + +address_t RosAllocImpl::NewObj(size_t size) { + size_t allocSize = AllocUtilRndUp((size + ROSIMPL_HEADER_ALLOC_SIZE + JsanliteGetPayloadSize(size)), + kAllocAlign); + address_t allocAddr = AllocInternal(allocSize); + if (UNLIKELY(allocAddr == 0)) { + return allocAddr; + } + 
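+  // allocSize above already accounts for ROSIMPL_HEADER_ALLOC_SIZE; ROSIMPL_GET_OBJ_FROM_ADDR converts
+  // the raw slot address into the object address handed back to callers.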
address_t resultAddr = ROSIMPL_GET_OBJ_FROM_ADDR(allocAddr); + + // Free slots in RoS begin with a pointer, which lowest 3 bits are always 0. + // By setting one of them 1 (currently the lowest bit), we can tell if a slot + // has been allocated. + // This bit is automatically cleared when a slot is created/freed. + // For large objects, we also set this bit, but we use + // other ways to tell if it is allocated by us (page type). + InitWithAllocatedBit(resultAddr); + + // When concurrent mark is running, we need to set the newly + // allocated object as marked to prevent it be swept by GC. + Collector::Instance().PostNewObject(resultAddr); + +#if ALLOC_USE_FAST_PATH + if (allocSize > kLargeObjSize) { + // this is a bit ugly but in fast-alloc mode, small alloc's stats is managed + // differently, we only need to account for the non-local allocs here + FAST_ALLOC_ACCOUNT_ADD(allocSize); + } +#else + PostObjAlloc(resultAddr, size, allocSize); +#endif + ROSIMPL_ASSERT(!IsMygotePageAlloc(reinterpret_cast(resultAddr)), "do not allocat in mygote page"); + return resultAddr; +} + +void RosAllocImpl::FreeObj(address_t objAddr) { + __MRT_ASSERT(!IsMygotePageAlloc(reinterpret_cast(objAddr)), "do not free object in mygote page"); +#if !CONFIG_JSAN + __MRT_ASSERT(Collector::Instance().Type() == kNaiveRC, "unexpected type"); +#endif +#if LOG_ALLOC_TIMESTAT + TheAllocMutator *mut = TLAllocMutatorPtr(); + if (mut != nullptr && mut->DoFreeObjTimeStat()) { + mut->StartTimer(); + } +#endif + // release monitor + maplert::Allocator::ReleaseResource(objAddr); + + size_t objSize = PreObjFree(objAddr); + size_t freedBytes = 0U; + JsanliteFree(objAddr); + JSAN_FREE(objAddr, FreeInternal, freedBytes); + if (freedBytes) { + PostObjFree(objAddr, objSize, freedBytes); + } +#if LOG_ALLOC_TIMESTAT + int typeInd = kTimeFreeGlobal; + if (mut != nullptr && mut->DoFreeObjTimeStat()) { + if (ROSIMPL_IS_LOCAL_RUN_SIZE(freedBytes)) { + typeInd = kTimeFreeLocal; + } else if (freedBytes > kLargeObjSize) { + typeInd = kTimeFreeLarge; + } + mut->StopTimer(typeInd); + } +#endif +} + +class FreeTask : public MplTask { + public: + FreeTask(RosAllocImpl &allocatorVal, PageMap &pageMapVal, size_t beginVal, + size_t endVal, const function &shouldFreeVal) + : allocator(allocatorVal), pageMap(pageMapVal), begin(beginVal), end(endVal), shouldFree(shouldFreeVal) {} + virtual ~FreeTask() {} + void Execute(size_t workerID __attribute__((unused))) override { + PageLabel pageType = kPReleased; + for (size_t pageIndex = begin; pageIndex < end; ++pageIndex) { + pageType = pageMap.GetType(pageIndex); + if (LIKELY(pageType == kPRun)) { + address_t runAddr = pageMap.GetPageAddr(pageIndex); + RunSlots *runSlots = reinterpret_cast(runAddr); + ROSIMPL_ASSERT(runSlots->HasInit(), "run not initialised"); + ROSIMPL_DEBUG(allocator.CheckRunMagic(*runSlots)); + ROSIMPL_ASSERT(runSlots->mIdx < RosAllocImpl::kNumberROSRuns, + "runSlots returned has wrong index"); + if (runSlots->IsLocal()) { + allocator.SweepLocalRun(*runSlots, shouldFree); + } else { + allocator.SweepRun(*runSlots, shouldFree); + } + } else if (UNLIKELY(pageType == kPLargeObj)) { + address_t memAddr = pageMap.GetPageAddr(pageIndex); + address_t lrgAddr = ROSIMPL_GET_OBJ_FROM_ADDR(memAddr); + if (shouldFree(lrgAddr)) { + maplert::Allocator::ReleaseResource(lrgAddr); + size_t objSize = allocator.PreObjFree(lrgAddr); + size_t freedBytes = 0U; +#if !ROSIMPL_MEMSET_AT_FREE + TagGCFreeObject(lrgAddr); +#endif + { + // FreeLargeObj will change page map, so we guard it with lock. 
+ allocator.FreeLargeObj(lrgAddr, freedBytes); + } + allocator.PostObjFree(lrgAddr, objSize, freedBytes); + } + } + } + } + + private: + RosAllocImpl &allocator; + PageMap &pageMap; + size_t begin; + size_t end; + function shouldFree; +}; + +bool RosAllocImpl::ParallelFreeAllIf(MplThreadPool &threadPool, const function &shouldFree) { + ROSIMPL_ASSERT(WorldStopped(), "Invalid invoke"); + + const int32_t threadCount = threadPool.GetMaxThreadNum() + 1; + const size_t lastPageIndex = pageMap.GetPageIndex(GetSpaceEnd()); + const size_t chunkSize = std::min(lastPageIndex / static_cast(threadCount) + 1, kMaxPagesPerTask); + for (size_t pageIndex = 0; pageIndex < lastPageIndex;) { + const size_t delta = std::min(lastPageIndex - pageIndex, chunkSize); + threadPool.AddTask(new FreeTask(*this, pageMap, pageIndex, pageIndex + delta, shouldFree)); + pageIndex += delta; + } + threadPool.SetMaxActiveThreadNum(threadCount - 1); + threadPool.Start(); + threadPool.WaitFinish(true); + return true; +} + +class ForEachTask : public MplTask { + public: + struct Stats { + size_t pagesVisited = 0; + size_t pagesSkipped = 0; + size_t totalFinalizable = 0; + }; + ForEachTask(RosAllocImpl &allocatorVal, PageMap &pageMapVal, size_t beginVal, size_t endVal, + const function &visitorVal, OnlyVisit onlyVisitVal, + const function &onFinishVal) + : allocator(allocatorVal), + pageMap(pageMapVal), + begin(beginVal), + end(endVal), + visitor(visitorVal), + onlyVisit(onlyVisitVal), + onFinish(onFinishVal) {} + + ~ForEachTask() { + onFinish(stats); + } + + void Execute(size_t workerID __attribute__((unused))) override { + PageLabel pageType = kPReleased; + for (size_t pageIndex = begin; pageIndex < end; ++pageIndex) { + pageType = pageMap.GetType(pageIndex); + if (LIKELY(pageType == kPRun || pageType == kPMygoteRun)) { + address_t runAddr = pageMap.GetPageAddr(pageIndex); + size_t cnt = pageMap.RunPageCount(runAddr); + RunSlots *runSlots = reinterpret_cast(runAddr); + ROSIMPL_ASSERT(runSlots->HasInit(), "run not initialised"); + ROSIMPL_ASSERT(cnt > 0, "incorrect run page count"); + if (ShouldSkipThisRun(pageIndex)) { + stats.pagesSkipped += cnt; + pageIndex += cnt - 1; // skip all the kPRunRem + continue; + } + ROSIMPL_DEBUG(allocator.CheckRunMagic(*runSlots)); + ROSIMPL_ASSERT(runSlots->mIdx < RosAllocImpl::kNumberROSRuns, + "runSlots returned has wrong index"); + // Limit the number of object to be visited. 
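+        // The hint is taken from pageMap.NumOfFinalizableObjectsInRun below; RunSlots::ForEachObj uses
+        // it to stop scanning a run early once that many finalizable objects have been visited.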
+ size_t hint = numeric_limits::max(); + if (onlyVisit == OnlyVisit::kVisitFinalizable) { + hint = pageMap.NumOfFinalizableObjectsInRun(pageIndex); + } + allocator.ForEachObjInRun(*runSlots, visitor, onlyVisit, hint); + stats.pagesVisited += cnt; + pageIndex += cnt - 1; // skip all the kPRunRem + } else if (UNLIKELY(pageType == kPLargeObj || pageType == kPMygoteLargeObj)) { + if (ShouldSkipThisPage(pageIndex)) { + continue; + } + address_t memAddr = pageMap.GetPageAddr(pageIndex); + address_t lrgAddr = ROSIMPL_GET_OBJ_FROM_ADDR(memAddr); + visitor(lrgAddr); + } + } + } + + private: + inline bool ShouldSkipThisPage(size_t pageIndex) { + if (onlyVisit == OnlyVisit::kVisitFinalizable) { + if (pageMap.PageHasFinalizableObject(pageIndex)) { + stats.totalFinalizable += pageMap.NumOfFinalizableObjectsInPage(pageIndex); + ++stats.pagesVisited; + return false; + } else { + ++stats.pagesSkipped; + return true; + } + } else { + ++stats.pagesVisited; + return false; + } + } + inline bool ShouldSkipThisRun(size_t pageIndex) { + if (onlyVisit == OnlyVisit::kVisitFinalizable) { + size_t n = pageMap.NumOfFinalizableObjectsInRun(pageIndex); + if (n > 0) { + stats.totalFinalizable += n; + return false; + } else { + return true; + } + } else { + return false; + } + } + + RosAllocImpl &allocator; + PageMap &pageMap; + size_t begin; + size_t end; + function visitor; + OnlyVisit onlyVisit; + Stats stats; + function onFinish; +}; + +bool RosAllocImpl::ParallelForEachObj(MplThreadPool &threadPool, VisitorFactory visitorFactory, + OnlyVisit onlyVisit) { + ROSIMPL_ASSERT(WorldStopped(), "ParallelForEachObj can only be invoked while world stopped"); + +#if CONFIG_JSAN + auto originalVisitorFactory = visitorFactory; + visitorFactory = [&originalVisitorFactory]() { + auto originalVisitor = originalVisitorFactory(); + auto visitor = [originalVisitor](address_t obj) { // NOTE: intentionally capture by value + if (JSANGetObjStatus(obj) != kObjStatusQuarantined) { + originalVisitor(obj); + } + }; + // NOTE: The variable originalVisitor goes out of scope here. 
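+    // The wrapper lambda above captures originalVisitor by value, so the wrapped visitor remains valid
+    // after this factory returns and originalVisitor itself has gone out of scope.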
+ return visitor; + }; +#endif + + mutex statsMutex; + ForEachTask::Stats overallStats; + + function onFinish = [&statsMutex, &overallStats]( + const ForEachTask::Stats &taskStats) { + lock_guard lg(statsMutex); + overallStats.pagesVisited += taskStats.pagesVisited; + overallStats.pagesSkipped += taskStats.pagesSkipped; + overallStats.totalFinalizable += taskStats.totalFinalizable; + }; + + const int32_t threadCount = threadPool.GetMaxThreadNum() + 1; + const size_t lastPageIndex = pageMap.GetPageIndex(GetSpaceEnd()); + const size_t chunkSize = std::min(lastPageIndex / static_cast(threadCount) + 1, kMaxPagesPerTask); + for (size_t pageIndex = 0; pageIndex < lastPageIndex;) { + const size_t delta = std::min(lastPageIndex - pageIndex, chunkSize); + threadPool.AddTask(new ForEachTask(*this, pageMap, pageIndex, pageIndex + delta, + visitorFactory(), onlyVisit, onFinish)); + pageIndex += delta; + } + threadPool.SetMaxActiveThreadNum(threadCount - 1); + threadPool.Start(); + threadPool.WaitFinish(true); + + ostringstream ost; + ost << "ParallelForEachObj:\n"; + ost << " Visited pages: " << overallStats.pagesVisited << '\n'; + ost << " Skipped pages: " << overallStats.pagesSkipped << '\n'; + if (onlyVisit == OnlyVisit::kVisitFinalizable) { + ost << " Total finalizable objs (from counter): " << overallStats.totalFinalizable << '\n'; + } + LOG2FILE(kLogtypeGc) << ost.str() << std::endl; + + return true; +} + +void RosAllocImpl::ForEachObjUnsafe(const function &visitor, OnlyVisit onlyVisit) { + PageLabel pageType = kPReleased; + size_t endIndex = pageMap.GetPageIndex(GetSpaceEnd()); + for (size_t index = 0; index < endIndex; ++index) { + // ConcurrentPrepareResurrection bug. This synchronises with SetTypeRelease + // when allocating large objs. This ensures that when the heap scan finds a + // large obj page, it must look like an unallocated page at first (allocated bit is 0). + // If memset is done earlier, then there might not be a problem at all, + // assuming reorders don't pass through global locks. + // Caution, this atomic op is very expensive according to flame graphs. 
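+    // With ROSIMPL_MEMSET_AT_FREE the relaxed GetType read is considered sufficient; otherwise the
+    // GetTypeAcquire load below pairs with the release store made when a large-object page is
+    // published, as described in the comment above.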
+#if ROSIMPL_MEMSET_AT_FREE + pageType = pageMap.GetType(index); +#else + pageType = pageMap.GetTypeAcquire(index); +#endif + if (LIKELY(pageType == kPRun || pageType == kPMygoteRun)) { + address_t runAddr = pageMap.GetPageAddr(index); + RunSlots &run = *reinterpret_cast(runAddr); + + if (onlyVisit == OnlyVisit::kVisitFinalizable && + pageMap.NumOfFinalizableObjectsInRun(index) == 0) { + continue; + } + + // unsafe mode, the finalizable count is inaccurate + ForEachObjInRunUnsafe(run, visitor, onlyVisit, std::numeric_limits::max()); + } else if (pageType == kPLargeObj || pageType == kPMygoteLargeObj) { + address_t pageAddr = pageMap.GetPageAddr(index); + address_t largeObjAddr = ROSIMPL_GET_OBJ_FROM_ADDR(pageAddr); + visitor(largeObjAddr); + } + } +} + +bool RosAllocImpl::ForEachObj(const function &visitor, bool debug) { + return ForPartialRunsObj(visitor, []() { return 1; }, debug); +} +// Sample heaps, used in cycle pattern learning +// Enumerate a subset of pages +bool RosAllocImpl::ForPartialRunsObj(function visitor, + const function &stepFunc, bool debug) { + ROSIMPL_ASSERT(WorldStopped(), "_ForEachObj can only be invoked while world stopped"); + +#if CONFIG_JSAN + auto orginalVisitor = visitor; + visitor = [&orginalVisitor](address_t obj) { + if (JSANGetObjStatus(obj) != kObjStatusQuarantined) { + orginalVisitor(obj); + } + }; +#endif + + PageLabel pageType = kPReleased; + size_t pageIndex = 0; + if (UNLIKELY(debug && + (pageMap.GetBeginAddr() != HeapStats::StartAddr() || + pageMap.GetEndAddr() != HeapStats::StartAddr() + HeapStats::CurrentSize()))) { + return false; + } + size_t endIndex = pageMap.GetPageIndex(GetSpaceEnd()); + while (pageIndex < endIndex) { + pageType = pageMap.GetType(pageIndex); + if (UNLIKELY(debug && + (pageMap.GetBeginAddr() != HeapStats::StartAddr() || + pageMap.GetEndAddr() != HeapStats::StartAddr() + HeapStats::CurrentSize()))) { + return false; + } + address_t pageAddr = pageMap.GetPageAddr(pageIndex); + if (LIKELY(pageType == kPRun || pageType == kPMygoteRun)) { + RunSlots *runSlots = reinterpret_cast(pageAddr); + ROSIMPL_ASSERT(runSlots->HasInit(), "run not initialised"); + ROSIMPL_DEBUG(CheckRunMagic(*runSlots)); + ROSIMPL_ASSERT(runSlots->mIdx < kNumberROSRuns, + "runSlots returned has wrong index"); + ForEachObjInRun(*runSlots, visitor); + } else if (UNLIKELY(pageType == kPLargeObj || pageType == kPMygoteLargeObj)) { + address_t lrgAddr = ROSIMPL_GET_OBJ_FROM_ADDR(pageAddr); + visitor(lrgAddr); + } + pageIndex += stepFunc(); + } + return true; +} + +// AccurateIsValidObjAddr and AccurateIsValidObjAddrConcurrent are used in +// conservative stack scan to identify valid obj addresses from random numbers +// +// in other times, we can theoretically assume non-heap objs do not share the +// same address range with heap objs, so we can just use a range-based check +// to distinguish them (FastIsValidObjAddr) +// +// check if an address is of an valid obj, only used in stw (parallel gc) +bool RosAllocImpl::AccurateIsValidObjAddr(address_t addr) { + ROSIMPL_ASSERT(WorldStopped(), "AccurateIsValidObjAddr invoked at non-STW"); + if (!FastIsValidObjAddr(addr)) { + return false; + } + return AccurateIsValidObjAddrUnsafe(addr); +} + +// check if an address is of an valid obj, used during concurrent marking (concurrent gc) +bool RosAllocImpl::AccurateIsValidObjAddrConcurrent(address_t addr) { + ROSIMPL_ASSERT(!WorldStopped(), "AccurateIsValidObjAddrConcurrent invoked at STW," + "please use the STW version instead"); + if (!FastIsValidObjAddr(addr)) { + return 
false; + } + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + return AccurateIsValidObjAddrUnsafe(addr); +} + +address_t RosAllocImpl::HeapLowerBound() const { + return reinterpret_cast(allocSpace.GetBegin()); +} + +address_t RosAllocImpl::HeapUpperBound() const { + return reinterpret_cast(allocSpace.GetEnd()); +} + +// this is called by mutator before free a large object +// when concurrent sweep is running. +void RosAllocImpl::SweepLargeObj(address_t objAddr) { + address_t pageAddr = ROSIMPL_GET_ADDR_FROM_OBJ(objAddr); + const size_t pageIndex = pageMap.GetPageIndex(pageAddr); + PageLabel pageType = kPMax; + + // try to set page to sweeping state. + if (!sweepContext.SetSweeping(pageIndex, pageType)) { + // page already swept. + return; + } + + __MRT_ASSERT(pageType == kPLargeObj, "incorrect large object page type"); + __MRT_ASSERT(!Collector::Instance().IsGarbage(objAddr), "mutator free a dead large object"); + + // set page to swept state. + sweepContext.SetSwept(pageIndex); +} + +void RosAllocImpl::FreeLargeObj(address_t objAddr, size_t &internalSize, bool delayFree) { + address_t memAddr = ROSIMPL_GET_ADDR_FROM_OBJ(objAddr); + ROSIMPL_ASSERT((memAddr & 0xfff) == 0, "big obj addr is not page aligned"); + size_t pageCnt = pageMap.ClearLargeObjPageAndCount(memAddr, false); + size_t totalObjSize = ALLOCUTIL_PAGE_CNT2BYTE(pageCnt); + CheckDoubleFree(objAddr); + + // ensure header is cleared in 8 bytes + *reinterpret_cast(memAddr) = 0; + +#if ROSIMPL_MEMSET_AT_FREE + ROSALLOC_MEMSET_S(memAddr, totalObjSize, 0, totalObjSize); +#endif + if (!delayFree) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + allocSpace.FreeRegion(memAddr, pageCnt); + } + internalSize += totalObjSize; +} + +address_t RosAllocImpl::AllocLargeObject(size_t &allocSize, int forceLevel) { + size_t actualSize = 0U; + address_t objAddr = AllocPagesInternal(allocSize, actualSize, forceLevel); + if (LIKELY(objAddr != 0U)) { + allocSize = actualSize; + size_t pgCnt = ALLOCUTIL_PAGE_BYTE2CNT(actualSize); + size_t converted = pageMap.SetAsLargeObjPage(objAddr, pgCnt); + // record the number of pages fetched from the kernel (previously released) + allocSpace.RecordReleasedToNonReleased(converted); + } + return objAddr; +} + +void RosAllocImpl::FreeRun(RunSlots &runSlots, bool delayFree) { + size_t pgCnt = static_cast(GetPagesPerRun(runSlots.mIdx)); + size_t totalRunSize = ALLOCUTIL_PAGE_CNT2BYTE(pgCnt); + address_t memAddr = reinterpret_cast(&runSlots); + +#if ALLOC_ENABLE_LOCK_CONTENTION_STATS + // this run is going to be deleted; we retrieve its lock stats first + RosAllocImpl::pageLockContentionRec += runSlots.lock.GetContentionCount(); + RosAllocImpl::pageLockWaitTimeRec += runSlots.lock.GetWaitTime(); +#endif +#if ROSIMPL_MEMSET_AT_FREE + // runSlots is created by placement new, we should explicity call + // destructor to ensure resources (such as mutex) are properly released. + runSlots.~RunSlots(); + ROSALLOC_MEMSET_S(memAddr, totalRunSize, 0, totalRunSize); +#else + constexpr size_t runHeaderSize = RunSlots::GetHeaderSize(); + // runSlots is created by placement new, we should explicity call + // destructor to ensure resources (such as mutex) are properly released. 
+ runSlots.~RunSlots(); + ROSALLOC_MEMSET_S(memAddr, runHeaderSize, 0, runHeaderSize); +#endif + (void)totalRunSize; + // page map need not be cleared in lock: + // this assumes that all unsafe heap visit is during concurrent marking, + // where there can be no freeing of anything + pageMap.ClearRunPage(memAddr, pgCnt, false); + if (!delayFree) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + allocSpace.FreeRegion(memAddr, pgCnt); + } +} + +void RosAllocImpl::HandleAllocFailure(size_t, int& forceLevel) { + forceLevel += 1; + if (forceLevel == static_cast(kEagerLevelExtend)) { + Collector::Instance().InvokeGC(kGCReasonHeuBlocking); + return; + } + stats::gcStats->OnAllocAnomaly(); // already tried extending addr space so update stats + if (static_cast(kEagerLevelOOM) == forceLevel) { + Collector::Instance().InvokeGC(kGCReasonOOM); + // should revoke local run here + return; + } +} + +void RosAllocImpl::DumpStackBeforeOOM(size_t allocSize) { + // DFX utitlies, if OOM happen and allocate size is large than 10M + // print stack + constexpr size_t largeObjectSize = 10 * 1024 * 1024; + if (allocSize > largeObjectSize) { + LOG(ERROR) << "large object allocation fail before OOM" << maple::endl; + MplDumpStack("ALLOCATOR_OMM"); + } +} + +#if __MRT_DEBUG +void RosAllocImpl::GetMemoryInfoBeforeOOM(size_t allocSize, size_t newLargestChunk){ + // current fail alloc size + LOG(ERROR) << "alloc size : " << allocSize << maple::endl; + // heap size + uint64_t heapSize = GetCurrentSpaceCapacity(); + LOG(ERROR) << "heap size : " << heapSize << maple::endl; + // actual size of heap pages backed by physical memory + size_t actualHeapSize = GetActualSize(); + LOG(ERROR) << "actualHeapSize : " << actualHeapSize << maple::endl; + // total survived + LOG(ERROR) << "total bytes survived: " << AllocatedMemory() << maple::endl; + // largest chunk + LOG(ERROR) << "largest chunk : " << newLargestChunk << maple::endl; + // reference-collector queues + MRT_logRefqueuesSize(); +} +#else +void RosAllocImpl::GetMemoryInfoBeforeOOM(size_t, size_t) {} +#endif + +void RosAllocImpl::ForEachObjInRun(RunSlots &runSlots, + function visitor, + OnlyVisit onlyVisit, + size_t numHint) const { + ROSIMPL_ASSERT(WorldStopped(), "ForEachObjInRun must be invoked in STW"); + ForEachObjInRunUnsafe(runSlots, visitor, onlyVisit, numHint); +} + +void RosAllocImpl::VisitGCRoots(const RefVisitor &visitor) { + visitor(reinterpret_cast(oome)); +} + +// release the physical memory of free pages, using madvise() +bool RosAllocImpl::ReleaseFreePages(bool aggressive) { + { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + size_t releasedBytes = allocSpace.ReleaseFreePages(aggressive); + if (releasedBytes == 0U) { + return aggressive ? 
true : false; + } + } + return true; +} + +void RosAllocImpl::OutOfMemory(bool isJNI) { + RosBasedMutator &mutator = reinterpret_cast(TLAllocMutator()); + if (mutator.throwingOOME) { + // this thread is already in the state of throwing OOME + // this means this is a nested OOM call from another OOM call (which news stuff) + // when this happens, throw the OOME object prepared earlier to end the recursion + // this object will contain less information about this incident though +#if UNIT_TEST + LOG(FATAL) << "out of memory"; + __MRT_Panic(); +#endif + __MRT_ASSERT(oome != nullptr, "OOME object null"); + RC_LOCAL_INC_REF(oome); + if (isJNI) { + MRT_ThrowExceptionSafe(oome->AsJobject()); + return; + } else { + ThrowExceptionUnw(oome); + __builtin_unreachable(); + } + } + mutator.throwingOOME = true; + LOG(ERROR) << "The heap is out of space. Allocator failed to allocate memory for objects." << maple::endl; + if (isJNI) { + MRT_ThrowNewException("java/lang/OutOfMemoryError", nullptr); + } else { + MRT_ThrowNewExceptionUnw("java/lang/OutOfMemoryError", nullptr); + } + mutator.throwingOOME = false; +} + +void RosAllocImpl::ForEachMutator(std::function visitor) { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + for (auto mutator : allocatorMutators) { + visitor(*mutator); + } +} + +void RosAllocImpl::GetInstances(const MClass *klass, bool includeAssignable, + size_t maxCount, vector &instances) { + (void)ForEachObj([&klass, &includeAssignable, &maxCount, &instances](address_t obj) { + if (maxCount == 0 || (maxCount > instances.size())) { + MObject *currObj = MObject::Cast(obj); + if (currObj->IsInstanceOf(*klass)) { + instances.push_back(currObj->AsJobject()); + } else if (includeAssignable) { + if (klass->IsAssignableFrom(*currObj->GetClass())) { + instances.push_back(currObj->AsJobject()); + } + } + } + }); +} + +void RosAllocImpl::ClassInstanceNum(map &objNameCntMp) { + bool tmpResult = ForEachObj([&objNameCntMp](address_t obj) { + MClass *cl = reinterpret_cast(obj)->GetClass(); + string name = cl->GetName(); + ++objNameCntMp[name]; + }); + if (UNLIKELY(!tmpResult)) { + LOG(ERROR) << "ForEachObj(true) in RosAllocImpl::ClassInstanceNum() return false." << maple::endl; + } +} + +// This function requires stw or global lock. +// When using global lock, this can be inaccurate, but it's fine because it's debug only. 
+void RosAllocImpl::RecordFragment() { + RunSlots *run = nullptr; + address_t pageAddr; + size_t freeSlots = 0; + fragRec.Reset(); + fragRec.RecordInternalFrag(AllocatedMemory(), RequestedMemory()); + + address_t allocSpaceEndAddr = reinterpret_cast(allocSpace.GetEnd()); + size_t endIndex = pageMap.GetPageIndex(allocSpaceEndAddr); + // walk the heap to account for external fragmentation + for (size_t i = 0; i < endIndex; ++i) { + pageAddr = pageMap.GetPageAddr(i); + PageLabel ptype = pageMap.GetType(i); + if (ptype == kPFree) { + fragRec.IncFreePages(ALLOCUTIL_PAGE_SIZE); + } else if (ptype == kPRun) { + run = reinterpret_cast(pageAddr); + bool isFree = false; + size_t runMaxNSlots = run->GetMaxSlots(); + size_t slotSize = run->GetRunSize(); + if (!run->IsFull()) { + // add the free slots to the fragmentations + freeSlots = run->nFree; + fragRec.IncFreeSlots(slotSize * freeSlots); + if (freeSlots == runMaxNSlots) { + isFree = true; + } + } + size_t bytesPerRun = ALLOCUTIL_PAGE_CNT2BYTE(GetPagesPerRun(run->mIdx)); + // add the header of the run and the trailing fragmen + fragRec.IncRunOverhead(bytesPerRun - (slotSize * runMaxNSlots)); + fragRec.IncSlots(slotSize * runMaxNSlots); + fragRec.IncRun(bytesPerRun, isFree, run->IsLocal()); + } + if (ptype != kPReleased) { + fragRec.IncPages(static_cast(ALLOCUTIL_PAGE_SIZE)); + } + } +} + +// This function should only be invoked in stw or global lock. +// It can be inaccurate in global, but it's fine because it's debug only. +void RosAllocImpl::PrintPageFragment(std::basic_ostream &os, std::string tag) { + uint32_t nRuns = 0; // number of runs, caches excluded + uint16_t nRunsFull[kNumberROSRuns]; // number of full runs + uint16_t nRunsNonfull[kNumberROSRuns]; // number of partial full runs + uint16_t nRunsCached [kNumberROSRuns]; // number of cached runs (all free) + uint32_t sumNonfullRunsUsedslots[kNumberROSRuns]; + uint32_t sumNonfullRunsFreeslots[kNumberROSRuns]; + uint32_t largeobjs = 0; + uint32_t largeobjsPages = 0; + uint32_t pagesFreed = 0; + uint32_t pagesReleased = 0; + uint32_t pagesFull = 0; + uint32_t pagesNonfull = 0; + uint32_t pagesCached = 0; + + uint32_t sumRunsObjInternalBytesUsed = 0; + uint32_t sumRunsObjInternalBytesFree = 0; + uint32_t sumRunsOverheadBytes = 0; // run header + end of page remainder gap + + (void)memset_s(&nRunsFull[0], sizeof(nRunsFull), 0, sizeof(nRunsFull)); + (void)memset_s(&nRunsNonfull[0], sizeof(nRunsNonfull), 0, sizeof(nRunsNonfull)); + (void)memset_s(&nRunsCached[0], sizeof(nRunsCached), 0, sizeof(nRunsCached)); + (void)memset_s(&sumNonfullRunsUsedslots[0], sizeof(sumNonfullRunsUsedslots), 0, + sizeof(sumNonfullRunsUsedslots)); + (void)memset_s(&sumNonfullRunsFreeslots[0], sizeof(sumNonfullRunsFreeslots), 0, + sizeof(sumNonfullRunsFreeslots)); + + std::string date = timeutils::GetDigitDate(); + os << "Allocator Fragmentation Log Start - " << date << std::endl; + RecordFragment(); + fragRec.Print(os, tag); + + os << "Non-released page size [" << tag << "]: " << allocSpace.GetNonReleasedPageSize() << "\n"; + os << "Current heap size [" << tag << "]: " << allocSpace.GetSize() << "\n"; + + os << "Page fragment [" << tag << "]: "; + for (address_t i = reinterpret_cast(allocSpace.GetBegin()); + i < reinterpret_cast(allocSpace.GetEnd());) { + size_t pageIdx = pageMap.GetPageIndex(i); + PageLabel ptype = pageMap.GetType(pageIdx); + switch (ptype) { + case kPReleased: + ++pagesReleased; + os << " " << int(ptype); + break; + case kPFree: + ++pagesFreed; + os << " " << int(ptype); + break; + case kPLargeObj: + 
++largeobjs; + FALLTHROUGH_INTENDED; + case kPLargeObjRem: + ++largeobjsPages; + os << " " << int(kPLargeObj); + break; + case kPRun: { + RunSlots *run = reinterpret_cast(i); + os << " " << int(ptype) << " [" << run->GetRunSize() << "]("; + size_t freeSlots = run->nFree; + os << (run->GetRunSize() * freeSlots) << "/" << (run->GetRunSize() * run->GetMaxSlots()) << ")"; + + int idx = run->mIdx; + if (freeSlots != 0) { + if (freeSlots == run->GetMaxSlots()) { + // completely free runs are either kept as cached runs or freed + ++nRunsCached[idx]; + } else { + ++nRunsNonfull[idx]; + sumNonfullRunsFreeslots[idx] += freeSlots; + sumNonfullRunsUsedslots[idx] += run->GetMaxSlots()-freeSlots; + } + } else { + nRunsFull[idx] += 1; + } + break; + } + case kPRunRem: + os << " " << int(kPRun); + break; + default: + LOG(ERROR) << "Unknown page type for print" << maple::endl; + break; + } + i += ALLOCUTIL_PAGE_SIZE; + } + os << "\n"; + + // Additional logging + os << "Extended Logging Start\n"; + os << "Bracket|Full |NonFull|Full Runs |NonFull Runs |NonFullRuns |Partial+Full Runs |Cached\n"; + os << "Size |Runs |Runs |Used SlotBytes|Used SlotBytes|Avail SlotBytes|TotalOverheadBytes|Free Runs\n"; + + for (unsigned int i = 0; i < kNumberROSRuns; ++i) { + size_t bracketSize = GetRunSize(i); + size_t slotCapacity = RunSlots::maxSlots[i]; + uint32_t pagesPerRun = GetPagesPerRun(i); + size_t runOverheadBytes = ALLOCUTIL_PAGE_CNT2BYTE(pagesPerRun) - bracketSize * slotCapacity; + + os << "[" << setw(4) << bracketSize << "]" << + setw(5) << nRunsFull[i] << " " << + setw(5) << nRunsNonfull[i] << " " << + setw(15) << (bracketSize * nRunsFull[i] * slotCapacity) << " " << + setw(15) << (bracketSize * sumNonfullRunsUsedslots[i]) << " " << + setw(15) << (bracketSize * sumNonfullRunsFreeslots[i]) << " " << + setw(15) << (runOverheadBytes * (nRunsFull[i] + nRunsNonfull[i]) * pagesPerRun) << " " << + setw(10) << nRunsCached[i] << "\n"; + + nRuns += nRunsFull[i] + nRunsNonfull[i]; + pagesFull += nRunsFull[i] * pagesPerRun; + pagesNonfull += nRunsNonfull[i] * pagesPerRun; + pagesCached += nRunsCached[i] * pagesPerRun; + sumRunsObjInternalBytesUsed += bracketSize * + ((slotCapacity * nRunsFull[i]) + sumNonfullRunsUsedslots[i]); + sumRunsObjInternalBytesFree += bracketSize * sumNonfullRunsFreeslots[i]; + sumRunsOverheadBytes += runOverheadBytes * (nRunsFull[i] + nRunsNonfull[i]); + } + uint64_t runPagePadding = sumRunsOverheadBytes - RunSlots::GetHeaderSize() * nRuns; + uint32_t runPages = pagesFull + pagesNonfull + pagesCached; + uint32_t heapPages = runPages + largeobjsPages + pagesFreed; + + uint64_t totalObjs = account.GetNetObjs(); // runObjs+lgeObjs + uint64_t totalObjBytes = account.GetNetObjBytes(); // newobj req java obj bytes + uint64_t totalBytes = account.GetNetBytes(); + uint64_t largeObjBytes = account.GetNetLargeObjBytes(); + uint64_t largeObjWastage = ALLOCUTIL_PAGE_CNT2BYTE(largeobjsPages) - largeObjBytes; + uint64_t internalFragment = totalBytes - totalObjBytes - (totalObjs * ROSIMPL_HEADER_ALLOC_SIZE); + uint64_t externalFragment = sumRunsObjInternalBytesFree + sumRunsOverheadBytes + + ALLOCUTIL_PAGE_CNT2BYTE(pagesFreed + pagesCached); +#define FRAG_PRINT_PAGE(title, pageCount) title << \ + std::setw(10) << ALLOCUTIL_PAGE_CNT2BYTE((pageCount)) << " Bytes / " << \ + std::setw(8) << (pageCount) << " pages" +#define FRAG_PRINT_SIZE(title, size) title << std::setw(10) << (size) << " Bytes" +#define FRAG_PRINT(title, num) title << std::setw(10) << (num) + os << "SUMMARY:\n"; + os << " Allocator Heap Size\n"; + os 
<< "1 mapped (addr space):\n"; + os << FRAG_PRINT_PAGE("2 populated(run+lge+freed):", heapPages) << "\n"; + os << FRAG_PRINT_PAGE("3 pages freed: ", pagesFreed) << "\n"; + os << " Runs and run objects:\n"; + os << FRAG_PRINT_PAGE("4 pages full: ", pagesFull) << "\n"; + os << FRAG_PRINT_PAGE("5 pages partially full: ", pagesNonfull) << "\n"; + os << FRAG_PRINT_PAGE("6 pages cached: ", pagesCached) << "\n"; + os << FRAG_PRINT_SIZE("7 obj internal bytes used: ", sumRunsObjInternalBytesUsed) << "\n"; + os << FRAG_PRINT_SIZE("8 obj internal bytes free: ", sumRunsObjInternalBytesFree) << "\n"; + os << FRAG_PRINT_SIZE("9 Run page overheads: ", sumRunsOverheadBytes) << " (full+paritial full runs)\n"; + os << FRAG_PRINT_SIZE(" Run page end wastages: ", runPagePadding) << " (9)-run headers\n"; + os << " Large Objects:\n"; + os << FRAG_PRINT("10 objs: ", largeobjs) << "\n"; + os << FRAG_PRINT_SIZE("11 obj req bytes: ", largeObjBytes) << "\n"; + os << FRAG_PRINT_PAGE("12 obj internal bytes: ", largeobjsPages) << " (11)+(13)\n"; + os << FRAG_PRINT_SIZE("13 obj page end wastages: ", largeObjWastage) << "\n"; + os << "\n"; + os << FRAG_PRINT("14 Total Objs (incl largeobj):", totalObjs) << "\n"; + os << FRAG_PRINT_SIZE("15 Sum Objs Req Bytes: ", totalObjBytes) << "\n"; + os << FRAG_PRINT_SIZE("16 Sum Objs Internal Bytes: ", totalBytes) << " (7)+(12)\n"; + os << FRAG_PRINT("17 Sum Objs RC Header Bytes: ", account.GetNetObjs() * ROSIMPL_HEADER_ALLOC_SIZE) << "\n"; + os << FRAG_PRINT(" Internal Fragmentation: ", internalFragment) << " (16)-(15)-(17)\n"; + os << FRAG_PRINT(" External Fragmentation: ", externalFragment) << " (8)+(9)+(6)+(3)\n"; + os << "\n"; + os << FRAG_PRINT_SIZE(" CurrentSpaceCapacity: ", GetCurrentSpaceCapacity()) << "\n"; + os << FRAG_PRINT_SIZE(" AllocatedMemory: ", AllocatedMemory()) << " (16)\n"; + os << "Extended Logging End\n"; + os << "Allocator Fragmentation Log End\n"; + os << "\n"; + os.flush(); +} + +void RosAllocImpl::RevokeLocalRuns(RosBasedMutator &mutator) { + for (int i = 0; i < static_cast(kROSAllocLocalRuns); ++i) { + address_t localAddress = mutator.GetLocalAddress(i); + if (UNLIKELY(localAddress == 0)) { + continue; + } + RunSlots &run = *(reinterpret_cast(localAddress)); + if (UNLIKELY(IsConcurrentSweepRunning())) { + bool ROSIMPL_DUNUSED(needRemove) = run.Sweep(*this); + ROSIMPL_ASSERT(needRemove == false, "mutator run swept to empty"); + } + mutator.EnableLocalAfterSweep(i); + RevokeLocalRun(mutator, run); + } +} + +void RosAllocImpl::OnFinalizableObjCreated(address_t addr) { + pageMap.OnFinalizableObjCreated(addr); +} + +void RosAllocImpl::OnFinalizableObjResurrected(address_t addr) { + pageMap.OnFinalizableObjResurrected(addr); +} + +void RosAllocImpl::Debug_DumpFinalizableInfo(ostream& ost) { + size_t endIndex = pageMap.GetPageIndex(GetSpaceEnd()); + ost << "Last page:" << endIndex << "\n"; + pageMap.DumpFinalizableInfo(ost); +} + +// prepare concurrent sweep, run when world stopped. +void RosAllocImpl::PrepareConcurrentSweep() { + // Init sweep context + sweepContext.Init(*this, GetEndPageIndex(), pageMap); + + for (auto mutator : allocatorMutators) { + RosBasedMutator &m = *reinterpret_cast(mutator); + if (!m.useLocal) { + continue; + } + for (int i = 0; i < static_cast(kROSAllocLocalRuns); ++i) { + // temporarily disable fast alloc so that mutator can help sweep + // recover when sweeping done + m.DisableLocalBeforeSweep(i); + } + } + + // set sweep running flag. 
+ SetConcurrentSweepRunning(true); +} + +// sweep from the page at pageIndex: +// if it's a run, sweep the run page(s); if it's a large obj, sweep its page(s). +// return whether the page becomes free, update the swept page count +bool RosAllocImpl::SweepPage(size_t pageIndex, size_t &pageCount) { + SweepContext &context = sweepContext; + + // try to set page to sweeping state. + PageLabel pageType = kPMax; + if (!context.SetSweeping(pageIndex, pageType)) { + // page already swept. + pageCount = 1; + return false; + } + + __MRT_ASSERT(pageType == pageMap.GetType(pageIndex), "page type changed before sweep"); + if (pageType == kPRun) { + // sweep run page. + address_t pageAddr = pageMap.GetPageAddr(pageIndex); + RunSlots *runSlots = reinterpret_cast(pageAddr); + uint8_t idx = runSlots->mIdx; + + // sweep the run. + if (runSlots->DoSweep(*this, context, pageIndex)) { + FreeRun(*runSlots, true); + pageCount = ROSIMPL_N_PAGES_PER_RUN(idx); + return true; + } + } else if (pageType == kPLargeObj) { + // sweep large object. + address_t pageAddr = pageMap.GetPageAddr(pageIndex); + address_t largeObjAddr = ROSIMPL_GET_OBJ_FROM_ADDR(pageAddr); + + // check if the large object dead. + // this should be done before SetSwept(), because the large + // object may be released by other thread after SetSwept(). + bool isDead = Collector::Instance().IsGarbage(largeObjAddr); + + // set large object page to swept state. + // this should be called before release the page, because + // mutator may reuse the released page that swept state not set. + context.SetSwept(pageIndex); + + // release the object if it is dead. + if (isDead) { + // for dead large object, + // dec neighbours before release it. + std::vector deadNeighbours; + Collector::Instance().HandleNeighboursForSweep(largeObjAddr, deadNeighbours); + + // To prevent dead lock, we do not release dead neighbours here, + // but save them to sweep context, so that we can release them later. + if (UNLIKELY(!deadNeighbours.empty())) { + context.AddDeadNeighbours(deadNeighbours); + } + + // release the large object. + maplert::Allocator::ReleaseResource(largeObjAddr); + size_t objSize = PreObjFree(largeObjAddr); + size_t internalSize = 0U; +#if !ROSIMPL_MEMSET_AT_FREE + TagGCFreeObject(largeObjAddr); +#endif + FreeLargeObj(largeObjAddr, internalSize, true); + PostObjFree(largeObjAddr, objSize, internalSize); + + // count released large objects and bytes. 
+ (void)context.releasedLargeObjects.fetch_add(1, std::memory_order_relaxed); + (void)context.releasedBytes.fetch_add(internalSize, std::memory_order_relaxed); + pageCount = ALLOCUTIL_PAGE_BYTE2CNT(internalSize); + return true; + } + } else { + __MRT_ASSERT(pageType == kPMygoteRun || pageType == kPMygoteLargeObj, + "ConcurrentSweep: incorrect page type"); + } + pageCount = 1; + return false; +} + +void RosAllocImpl::SweepPages(size_t pageBegin, size_t pageEnd) { + // the cost of freeing a single region is around the us magnitude + // so freeing 64 regions together will likely not cause frame loss + // also the buffer size is exactly 1 page for 64-bit system + const uint32_t bufferSize = 64; + size_t freeBuffer[bufferSize][2] = {}; // all zero + auto clearBufferFunc = [this, &freeBuffer]() { + ALLOC_LOCK_TYPE guard(ALLOC_CURRENT_THREAD globalLock); + for (uint32_t idx = 0; idx < bufferSize; ++idx) { + if (freeBuffer[idx][1] == 0) { + return; + } + allocSpace.FreeRegion(pageMap.GetPageAddr(freeBuffer[idx][0]), freeBuffer[idx][1]); + freeBuffer[idx][1] = 0; + } + }; + + uint32_t regionCount = 0; + size_t adjacentIdx = pageBegin; + size_t adjacentSize = 0; + for (size_t idx = pageBegin; idx < pageEnd;) { + size_t pageCount = 1; + bool isFreed = SweepPage(idx, pageCount); + if (isFreed) { + if (adjacentSize == 0) { + adjacentIdx = idx; + } + adjacentSize += pageCount; + } else { + if (adjacentSize != 0) { + freeBuffer[regionCount][0] = adjacentIdx; + freeBuffer[regionCount++][1] = adjacentSize; + if (regionCount == bufferSize) { + clearBufferFunc(); + regionCount = 0; + } + adjacentSize = 0; + } + } + idx += pageCount; + } + if (adjacentSize != 0) { + freeBuffer[regionCount][0] = adjacentIdx; + freeBuffer[regionCount][1] = adjacentSize; + } + if (freeBuffer[0][1] != 0) { + clearBufferFunc(); + } +} + +// Note: we assume that heap will not trim when concurrent sweep is running. +void RosAllocImpl::ConcurrentSweep(MplThreadPool *threadPool) { + // sweepContext should be created by PrepareConcurrentSweep(). + ROSIMPL_ASSERT(IsConcurrentSweepRunning(), "sweep flag not set for concurrent sweep"); + + SweepContext& context = sweepContext; + const size_t endPageIndex = context.highestPageIndex; + + // sweep pages. + if (threadPool != nullptr) { + // parallel sweep pages. + MRT_PHASE_TIMER("Parallel Sweep Pages"); + const size_t threadCount = static_cast(threadPool->GetMaxActiveThreadNum()) + 1; + const size_t chunkSize = std::min(endPageIndex / threadCount + 1, kMaxPagesPerTask); + for (size_t pageIndex = 0; pageIndex < endPageIndex;) { + const size_t delta = std::min(endPageIndex - pageIndex, chunkSize); + threadPool->AddTask(new MplLambdaTask([this, pageIndex, delta](size_t) { + SweepPages(pageIndex, pageIndex + delta); + })); + pageIndex += delta; + } + + threadPool->Start(); + threadPool->WaitFinish(true); + } else { + // serial sweep pages. + MRT_PHASE_TIMER("Sweep Pages"); + SweepPages(0, endPageIndex); + } + + // release dead neighbours. + // only rc collector has work to do + { + MRT_PHASE_TIMER("Release dead neighbours"); + for (auto obj : context.deadNeighbours) { + __MRT_ASSERT(IsRCCollectable(obj), "not zero RC in dead_neighbours"); + RCReferenceProcessor::Instance().AddAsyncReleaseObj(obj, false); + } + } + + // unset sweep running flag. + SetConcurrentSweepRunning(false); + + // update gc statistic. 
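Aside (illustration only): SweepPages above bounds global-lock hold time by coalescing adjacent freed page ranges into a small fixed buffer and flushing the whole batch under one lock acquisition. A standalone sketch of that batching pattern; BatchedFreer and FreeRegion are hypothetical stand-ins, not the allocator's real interfaces:

#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>

class BatchedFreer {
 public:
  explicit BatchedFreer(size_t capacity) : capacity_(capacity) {}

  // Record a freed range; merge it with the previous range when contiguous.
  void OnPagesFreed(size_t firstPage, size_t pageCount) {
    if (!batch_.empty() && batch_.back().first + batch_.back().second == firstPage) {
      batch_.back().second += pageCount;  // coalesce with the previous range
      return;
    }
    batch_.emplace_back(firstPage, pageCount);
    if (batch_.size() >= capacity_) {
      Flush();
    }
  }

  // Return all batched ranges to the space under a single lock acquisition.
  void Flush() {
    std::lock_guard<std::mutex> guard(globalLock_);
    for (const auto &range : batch_) {
      FreeRegion(range.first, range.second);
    }
    batch_.clear();
  }

 private:
  void FreeRegion(size_t, size_t) {}  // placeholder for the real Space::FreeRegion

  size_t capacity_;
  std::vector<std::pair<size_t, size_t>> batch_;
  std::mutex globalLock_;
};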
+ stats::gcStats->CurrentGCRecord().objectsCollected = context.releasedObjects + context.releasedLargeObjects; + stats::gcStats->CurrentGCRecord().bytesCollected = context.releasedBytes; + stats::gcStats->CurrentGCRecord().bytesSurvived = context.oldAllocatedBytes - context.releasedBytes; + + // GC logging. + LOG2FILE(kLogtypeGc) << "End of concurrent sweeping.\n" << + " pages before swept: " << Pretty(context.highestPageIndex) << '\n' << + " bytes before swept: " << Pretty(context.oldAllocatedBytes) << '\n' << + " swept small objects: " << Pretty(context.releasedObjects) << '\n' << + " swept large objects: " << Pretty(context.releasedLargeObjects) << '\n' << + " swept bytes: " << Pretty(context.releasedBytes) << '\n' << + " swept dead neighbours: " << Pretty(context.deadNeighbours.size()) << '\n' << + " swept run pages: " << Pretty(context.sweptRuns) << "/" << Pretty(context.scanedRuns) << '\n' << + " swept to empty runs: " << Pretty(context.emptyRuns) << '\n' << + " swept full runs: " << Pretty(context.nonFullRuns) << '\n'; + sweepContext.Release(); +} + +void RosAllocImpl::OnPreFork() { + if (!hasForked) { + // ignore all rc operations for mygote objs + auto visitor = [](address_t objAddr) { + SetRCOverflow(objAddr); + SetMygoteBit(objAddr); + }; + (*theAllocator).ForEachObjUnsafe(visitor, OnlyVisit::kVisitAll); + + // mark all mygote pages with a special page type + pageMap.SetAllAsMygotePage(); + // release all local run + globalMutator.ResetRuns(); + RosBasedMutator &mutator = static_cast(TLAllocMutator()); + mutator.ResetRuns(); + // release all nonfull run + for (size_t i = 0; i < kNumberROSRuns; ++i) { + nonFullRuns[i].Release(); + } + hasForked = true; + } +} + +template<> +void RosBasedMutator::Init() { + LOG2FILE(kLogTypeAllocator) << "Alloc Mutator " << this << " initializing." << std::endl; + int pid = getpid(); + int tid = maple::GetTid(); + static const int prioThreadAllowed = 3; + if (tid != 0 && tid < pid + prioThreadAllowed) { + // allow local allocation for main threads directly (no better ways to identify them than tid) + useLocal = true; + } + // we leave saferegion here to prevent Init() be interrupted by GC. + ScopedObjectAccess soa; + (*theAllocator).AddAllocMutator(*this); + LOG2FILE(kLogTypeAllocator) << "Alloc Mutator " << this << " initialized." << std::endl; +} + +template<> +void RosBasedMutator::Fini() { + LOG2FILE(kLogTypeAllocator) << "ROSAllocMutator::Fini() " << this << " started Fini." << std::endl; + // we leave saferegion here to prevent Fini() be interrupted by GC. + ScopedObjectAccess soa; + (*theAllocator).RemoveAllocMutator(*this); + + LOG2FILE(kLogTypeAllocator) << "ROSAllocMutator::Fini() " << this << " finished Fini." << std::endl; +} + +template<> void ROSAllocMutator::Init() {} +template<> void ROSAllocMutator::Fini() {} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/allocator/space.cpp b/src/mrt/compiler-rt/src/allocator/space.cpp new file mode 100644 index 0000000000..3292d554e7 --- /dev/null +++ b/src/mrt/compiler-rt/src/allocator/space.cpp @@ -0,0 +1,292 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "allocator/space.h" + +#include +#include +#include +#include +#include "chosen.h" +#include "collector/stats.h" +#include "yieldpoint.h" + +namespace maplert { +using namespace std; + +bool Space::kReleasePageAtFree = kRosimplReleasePageAtFree; +bool Space::kReleasePageAtTrim = kRosimplReleasePageAtTrim; +// this duplicates with heap initial size +size_t Space::kHeapStartSize = ROSIMPL_DEFAULT_HEAP_START_SIZE; +size_t Space::kHeapSize = ROSIMPL_DEFAULT_HEAP_SIZE; +size_t Space::kHeapGrowthLimit = ROSIMPL_DEFAULT_HEAP_GROWTH_LIMIT; +size_t Space::kHeapMinFree = ROSIMPL_DEFAULT_HEAP_MIN_FREE; +size_t Space::kHeapMaxFree = ROSIMPL_DEFAULT_HEAP_MAX_FREE; +// the following is unused +float Space::kHeapTargetUtilization = kRosimplDefaultHeapTargetUtilization; +bool Space::kIgnoreMaxFootprint = kRosimplDefaultIgnoreMaxFootprint; + +// SpacePageManager +uint32_t SpacePageManager::CalcPageIdx(uintptr_t pageAddr) const { + return static_cast(ALLOCUTIL_PAGE_BYTE2CNT(pageAddr - baseAddress)); +} + +// given the first page and the size of region in bytes, we add it +// to the set +void SpacePageManager::AddRegion(uintptr_t regionAddr, size_t regionSize) { + uint32_t pageCount = CalcChunkSize(regionSize); + AddPages(regionAddr, pageCount); +} + +// add contiguous pages of count page_count starting with first_page +void SpacePageManager::AddPages(uintptr_t firstPage, uint32_t pageCount) { + // we should check that the page is within the limit in future + uint32_t pageIdx = CalcPageIdx(firstPage); + // add the new entry to the set + if (UNLIKELY(!pageCTree.MergeInsert(pageIdx, pageCount))) { + LOG(ERROR) << "free page insertion failed, inconsistency, " << pageIdx << ", " << pageCount << maple::endl; + } +} + +void SpacePageManager::ReleaseAllFreePages(size_t freeBytes, const MemMap &memMap, bool isAggressive) { + if (pageCTree.Empty()) { + return; + } + uint64_t beginTime = timeutils::MicroSeconds(); + TreeType::Iterator it(pageCTree); + auto next = it.Next(); + size_t totalReleasedBytes = 0U; + size_t targetFreeBytes = allocSpace.TargetFreePageSizeByUtilization(); + size_t targetReleaseBytes = freeBytes > targetFreeBytes ? 
freeBytes - targetFreeBytes : 0U; + while (next != nullptr) { + // control the size of unreleased pages + uintptr_t releaseBeginAddr = GetRegAddrFromIdx(next->Idx()); + size_t releaseBytes = ALLOCUTIL_PAGE_CNT2BYTE(next->Cnt()); + if (UNLIKELY(!isAggressive && // aggressive trim ignores these conditions + (timeutils::MicroSeconds() - beginTime > Space::kMaxTrimTime || + totalReleasedBytes >= targetReleaseBytes))) { + // after we release enough many pages, we stop (reserve a certain number of free pages) + // also, 10ms timeout + break; + } + + // release + uint32_t relEndIdx = next->Idx() + next->Cnt(); + size_t c = allocSpace.pageMap.SetRangeAndCount(next->Idx(), relEndIdx, kPReleased); + if (c) { + (void)memMap.ReleaseMem(releaseBeginAddr, releaseBytes); + totalReleasedBytes += ALLOCUTIL_PAGE_CNT2BYTE(c); + } + next = it.Next(); + } + allocSpace.nonReleasedPageSize -= totalReleasedBytes; + size_t allocated = stats::gcStats->CurAllocBytes(); + float utilization = static_cast(allocated) / allocSpace.nonReleasedPageSize; + LOG2FILE(kLogTypeAllocFrag) << + (isAggressive ? " A" : "Non-a") << "ggressive trim time " << + (timeutils::MicroSeconds() - beginTime) << "us, released " << totalReleasedBytes << + ", post-trim utilization (" << allocated << + "/" << allocSpace.allocatedPageSize << + "/" << allocSpace.nonReleasedPageSize << ") " << + std::setprecision(2) << std::fixed << utilization << + " (target " << Space::kHeapTargetUtilization << ")\n"; +} + +// Given a request size in bytes, we split the first chunk and return the +// address to the first page +uintptr_t SpacePageManager::GetChunk(size_t reqSize) { + uint32_t idx = 0U; + return pageCTree.Find(idx, CalcChunkSize(reqSize)) ? GetRegAddrFromIdx(idx) : 0U; +} + +// given size in bytes we return the size normalized to pages (4k) +uint32_t SpacePageManager::CalcChunkSize(size_t regionSize) const { + return static_cast(ALLOCUTIL_PAGE_BYTE2CNT(ALLOCUTIL_PAGE_RND_UP(regionSize))); +} + +// given index of a region, calculate the page address +uintptr_t SpacePageManager::GetRegAddrFromIdx(uint32_t idx) const { + size_t offset = ALLOCUTIL_PAGE_CNT2BYTE(idx); + return offset + baseAddress; +} + +// Returns the size of largest chunk currently in the page set. +size_t SpacePageManager::GetLargestChunkSize() { + return ALLOCUTIL_PAGE_CNT2BYTE(pageCTree.Top()); +} + +// Space +Space::Space(const std::string &fullname, const std::string &tagVal, bool managed, PageMap &pageMapVal) + : name(fullname), + tag(tagVal), + isManaged(managed), + maxCapacity(0), + memMap(nullptr), + pageMap(pageMapVal), + begin(nullptr), + end(0), + allocatedPageSize(0), + nonReleasedPageSize(0), + pageManager(*this, 0U), + lastTrimTime(0) { +} + +void Space::Init(uint8_t *reqBegin) { + LOG2FILE(kLogTypeAllocator) << "Initializing the heap.." 
<< std::endl; + LOG2FILE(kLogTypeAllocator) << " heap start size: " << Space::kHeapStartSize << std::endl; + LOG2FILE(kLogTypeAllocator) << " heap maximum size: " << Space::kHeapSize << std::endl; + LOG2FILE(kLogTypeAllocator) << " heap growth limit: " << Space::kHeapGrowthLimit << std::endl; + LOG2FILE(kLogTypeAllocator) << " heap minimum free size: " << Space::kHeapMinFree << std::endl; + LOG2FILE(kLogTypeAllocator) << " heap maximum free size: " << Space::kHeapMaxFree << std::endl; + LOG2FILE(kLogTypeAllocator) << " heap target utilization rate: " << Space::kHeapTargetUtilization << std::endl; + LOG2FILE(kLogTypeAllocator) << " is ignoring max footprint: " << Space::kIgnoreMaxFootprint << std::endl; + maxCapacity = Space::kHeapGrowthLimit; + MemMap::Option opt = MemMap::kDefaultOptions; + opt.tag = tag.c_str(); + opt.reqBase = reinterpret_cast(reqBegin); +#if !ROSIMPL_ENABLE_ASSERTS + opt.protAll = true; +#endif + memMap = MemMap::MapMemory(maxCapacity, Space::kHeapStartSize, opt); + begin = reqBegin; + + uint8_t *baseAddress = reinterpret_cast(memMap->GetBaseAddr()); + SetBegin(baseAddress); + SetEnd(reinterpret_cast(memMap->GetCurrEnd())); + uintptr_t startAddress = reinterpret_cast(baseAddress); + allocatedPageSize = 0U; + nonReleasedPageSize = 0U; + pageManager.pageCTree.Init(maxCapacity); + pageManager.SetBaseAddress(startAddress); + pageManager.AddRegion(startAddress, GetSize()); +} + + +// returns the number of pages free within the reserved memory space +size_t Space::GetAvailPageCount() const { + return ALLOCUTIL_PAGE_BYTE2CNT(GetSize() - allocatedPageSize); +} + +uintptr_t Space::GetChunk(size_t reqSize) { + uintptr_t addr = pageManager.GetChunk(reqSize); + if (addr != 0) { +#if DEBUG_RUNLOCK_OWNER + if (UNLIKELY(!memMap->ProtectMem(addr, reqSize, PROT_READ | PROT_WRITE))) { + LOG(FATAL) << "failed to mprotect " << std::hex << addr << " of size " << std::dec << reqSize; + } +#endif + allocatedPageSize += reqSize; + } + return addr; +} + +size_t Space::GetLargestChunkSize() { + return pageManager.GetLargestChunkSize(); +} + +size_t Space::TargetFreePageSizeByUtilization() const { + size_t allocated = stats::gcStats->CurAllocBytes(); + // this should be guaranteed by the option parser + __MRT_ASSERT(kHeapTargetUtilization > 0, "invalid utilization"); + __MRT_ASSERT(kHeapTargetUtilization < 1, "invalid utilization"); + size_t targetTotal = static_cast(allocated / kHeapTargetUtilization); + size_t targetFree = std::max(targetTotal, allocatedPageSize) - allocatedPageSize; + // we want to cap this: for larger heap, assuming usage do not grow that much faster + return std::min(targetFree, TargetFreePageSize()); +} + +void Space::Extend(size_t deltaSize) { + LOG2FILE(kLogTypeAllocator) << "extending space size delta " << deltaSize << std::endl; + size_t reqSize = ALLOCUTIL_PAGE_RND_UP(deltaSize); + size_t currentSize = GetSize(); + if (currentSize >= maxCapacity || reqSize > maxCapacity) { + LOG(ERROR) << "space exceeded maximum capacity." << maple::endl; + return; + } + size_t oneTimeReq = ALLOCUTIL_PAGE_CNT2BYTE(kPagesOneTime); + size_t remainingSize = maxCapacity - currentSize; + + size_t expandValue = std::min(remainingSize, std::max(reqSize, oneTimeReq)); + LOG2FILE(kLogTypeAllocator) << " requesting " << expandValue << std::endl; + bool extendResult = memMap->Extend(expandValue); + if (!extendResult) { + LOG(ERROR) << "allocator: extending space failed." 
<< maple::endl; + return; + } + address_t oldEndAddr = reinterpret_cast(GetEndRelaxed()); + SetEnd(reinterpret_cast(memMap->GetCurrEnd())); + LOG2FILE(kLogTypeAllocator) << " setting new end address 0x" << + std::hex << reinterpret_cast(GetEndRelaxed()) << std::endl; + // we add the new chunk to the page manager so that it can coalesce with + // any existing chunk at the end of the space + pageManager.AddRegion(oldEndAddr, expandValue); +} + +address_t Space::Alloc(size_t reqSize, bool allowExtension) { + address_t retAddress = 0U; + if (GetSize() - allocatedPageSize >= reqSize) { + retAddress = GetChunk(reqSize); + if (retAddress != 0) { + return retAddress; + } + } + + if (allowExtension) { + Extend(reqSize); + retAddress = GetChunk(reqSize); + } + return retAddress; +} + +// release the physical memory of free pages, using madvise() +size_t Space::ReleaseFreePages(bool aggressive) { + // don't release when gc running, too slow + if (Collector::Instance().InStartupPhase() || + Collector::Instance().IsGcRunning() || WorldStopped()) { + return 0; + } + size_t free_bytes = GetFreePageSize(); + // if aggressive, it ignores the threshold and the timer + if (aggressive || (IsTrimAllowedAtTheMoment() && free_bytes > HeapMaxFreeByUtilization())) { + pageManager.ReleaseAllFreePages(free_bytes, *memMap, aggressive); + lastTrimTime = timeutils::MilliSeconds(); + } + // if any page was released, the size must have decreased + return free_bytes - GetFreePageSize(); +} + +void Space::FreeRegion(address_t addr, size_t pgCnt) { + pageManager.AddPages(addr, static_cast(pgCnt)); + size_t memSize = ALLOCUTIL_PAGE_CNT2BYTE(pgCnt); +#if DEBUG_RUNLOCK_OWNER + if (UNLIKELY(!memMap->ProtectMem(addr, memSize, PROT_NONE))) { + LOG(FATAL) << "failed to mprotect " << std::hex << addr << " of size " << std::dec << memSize; + } +#endif + allocatedPageSize -= memSize; + if (Space::kReleasePageAtFree) { + static_cast(ReleaseFreePages(false)); + } +} + +Space::~Space() { + LOG2FILE(kLogTypeAllocator) << "Destructing RoSSpace" << std::endl; + if (memMap != nullptr) { + delete(memMap); + memMap = nullptr; + } + begin = nullptr; + LOG2FILE(kLogTypeAllocator) << "Done Destructing RoSSpace" << std::endl; +} +} // namesapce maplert diff --git a/src/mrt/compiler-rt/src/arch/arm64/cc_native_method_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/cc_native_method_stub_arm64.S new file mode 100644 index 0000000000..c0ae29dc86 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/cc_native_method_stub_arm64.S @@ -0,0 +1,437 @@ +// This file defines stub to support calling java native methods. +// "cc_" means "compiled code" by maple. + +//------------------------------------------------------------------------ +// native wrapper num of args enter saferegion copy stack args +//------------------------------------------------------------------------ +// MCC_CallFastNative any no no +// MCC_CallFastNativeExt any no no +// MCC_CallSlowNative <=8 yes no +// MCC_CallSlowNativeExt any yes yes +//------------------------------------------------------------------------ + +//--------------------------------------------------------------- +// MCC_CallFastNative() calls a fast native function from +// managed code. We use x9 register to pass the native function pointer. +// +// MCC_CallFastNative(...) 
{ +// br x9; // call the_native_function; +// } +//--------------------------------------------------------------- + + .text + .align 2 + .global MCC_CallFastNative + .global MCC_CallFastNativeExt + .type MCC_CallFastNative, %function + .type MCC_CallFastNativeExt, %function +MCC_CallFastNative: +MCC_CallFastNativeExt: + .cfi_startproc + // call the native function. + br x9 + .cfi_endproc + .size MCC_CallFastNative, .-MCC_CallFastNative + +//--------------------------------------------------------------- +// MCC_CallSlowNative() calls a slow or blocking native +// function from managed code. The number of arguments +// of the native function should less than or equal to 8. +// +// We use x9 register to pass the native function pointer. +// +// This stub will save callee-saved registers to stack and +// enter saferegion before calling the native function: +// +// MCC_CallSlowNative(the_native_function, ...) { +// save_callee_saved_registers(); // r19..r28 +// stateChanged = MRT_EnterSaferegion(); +// blr x9; // call the_native_function; +// if (stateChanged) MRT_LeaveSaferegion(); +// restore_callee_saved_registers(); +// return ret; +// } +// +// caller fp,sp -> -------- 0 +// | x28 | +// | x27 | [sp, 0x50] +// | ... | +// | x19 | [sp, 0x10] +// -------- +// | x30 | lr +// | x29 | fp +// fp, sp -> -------- -0x60 +// +//--------------------------------------------------------------- + .macro MCC_CallSlowNative, funcName, argSize + .align 2 + .global \funcName + .type \funcName, %function +\funcName: + .cfi_startproc + stp x29, x30, [sp, -0x60]! + .cfi_def_cfa_offset 0x60 + .cfi_offset 29, -0x60 + .cfi_offset 30, -0x58 + add x29, sp, 0 + .cfi_def_cfa_register 29 + + // save callee-saved registers. + stp x19, x20, [x29, 0x10] + .cfi_rel_offset x19, 0x10 + .cfi_rel_offset x20, 0x18 + + stp x21, x22, [x29, 0x20] + .cfi_rel_offset x21, 0x20 + .cfi_rel_offset x22, 0x28 + + stp x23, x24, [x29, 0x30] + .cfi_rel_offset x23, 0x30 + .cfi_rel_offset x24, 0x38 + + stp x25, x26, [x29, 0x40] + .cfi_rel_offset x25, 0x40 + .cfi_rel_offset x26, 0x48 + + stp x27, x28, [x29, 0x50] + .cfi_rel_offset x27, 0x50 + .cfi_rel_offset x28, 0x58 + + // save args to callee-saved reigsters. + mov x19, x9 // func ptr + .if \argSize == 1 + mov x20, x0 // arg0 + .endif + .if \argSize == 2 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + .endif + .if \argSize == 3 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + .endif + .if \argSize == 4 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + mov x23, x3 // arg3 + .endif + .if \argSize == 5 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + mov x23, x3 // arg3 + mov x24, x4 // arg4 + .endif + .if \argSize == 6 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + mov x23, x3 // arg3 + mov x24, x4 // arg4 + mov x25, x5 // arg5 + .endif + .if \argSize == 7 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + mov x23, x3 // arg3 + mov x24, x4 // arg4 + mov x25, x5 // arg5 + mov x26, x6 // arg6 + .endif + .if \argSize == 8 + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + mov x23, x3 // arg3 + mov x24, x4 // arg4 + mov x25, x5 // arg5 + mov x26, x6 // arg6 + mov x27, x7 // arg7 + .endif + +#if CONFIG_ASAN + stp d0, d1, [sp,-16]! + stp d2, d3, [sp,-16]! + stp d4, d5, [sp,-16]! + stp d6, d7, [sp,-16]! +#endif + + // save last java frame + ldr x1, [x29] + mov x0, x30 + bl MRT_SetRiskyUnwindContext + + // enter saferegion. + mov x0, #0 + bl MRT_EnterSaferegion + + // save result. 
+ mov x28, x0 +#if CONFIG_ASAN + ldp d6, d7, [sp], 16 + ldp d4, d5, [sp], 16 + ldp d2, d3, [sp], 16 + ldp d0, d1, [sp], 16 +#endif + + // restore args. + .if \argSize == 1 + mov x0, x20 + .endif + .if \argSize == 2 + mov x0, x20 + mov x1, x21 + .endif + .if \argSize == 3 + mov x0, x20 + mov x1, x21 + mov x2, x22 + .endif + .if \argSize == 4 + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + .endif + .if \argSize == 5 + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + mov x4, x24 + .endif + .if \argSize == 6 + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + mov x4, x24 + mov x5, x25 + .endif + .if \argSize == 7 + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + mov x4, x24 + mov x5, x25 + mov x6, x26 + .endif + .if \argSize == 8 + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + mov x4, x24 + mov x5, x25 + mov x6, x26 + mov x7, x27 + .endif + + // call native function. + blr x19 + + // do not call MRT_LeaveSaferegion + // if MRT_EnterSaferegion returns 0 + cbz w28, .AFTER_CALL_SLOW_NATIVE_\argSize + + // save return value. + mov x28, x0 + fmov x27, d0 + + // leave saferegion + bl MRT_LeaveSaferegion + + // restore return value + mov x0, x28 + fmov d0, x27 + +.AFTER_CALL_SLOW_NATIVE_\argSize: + // restore callee-saved registers. + ldp x19, x20, [x29, 0x10] + .cfi_restore 19 + .cfi_restore 20 + ldp x21, x22, [x29, 0x20] + .cfi_restore 21 + .cfi_restore 22 + ldp x23, x24, [x29, 0x30] + .cfi_restore 23 + .cfi_restore 24 + ldp x25, x26, [x29, 0x40] + .cfi_restore 25 + .cfi_restore 26 + ldp x27, x28, [x29, 0x50] + .cfi_restore 27 + .cfi_restore 28 + + // restore fp, lr, sp + ldp x29, x30, [sp], 0x60 + .cfi_restore 29 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc + .size \funcName, .-\funcName + .endm + +MCC_CallSlowNative MCC_CallSlowNative0, 0 +MCC_CallSlowNative MCC_CallSlowNative1, 1 +MCC_CallSlowNative MCC_CallSlowNative2, 2 +MCC_CallSlowNative MCC_CallSlowNative3, 3 +MCC_CallSlowNative MCC_CallSlowNative4, 4 +MCC_CallSlowNative MCC_CallSlowNative5, 5 +MCC_CallSlowNative MCC_CallSlowNative6, 6 +MCC_CallSlowNative MCC_CallSlowNative7, 7 +MCC_CallSlowNative MCC_CallSlowNative8, 8 +//--------------------------------------------------------------- +// MCC_CallSlowNativeExt() calls a slow or blocking native +// function from managed code. this function accept arbitrary number +// of arguments of the native function. +// +// We use x9 register to pass the native function pointer. +// +// MCC_CallSlowNativeExt(...) { +// save_callee_saved_registers(); // x19..x28 +// copy_stack_args(); // copy stack args if found +// stateChanged = MRT_EnterSaferegion(); +// blr x9; // call the_native_function; +// if (stateChanged) MRT_LeaveSaferegion(); +// restore_callee_saved_registers(); +// return ret; +// } +// +// caller fp ---> -------- +// | stack| +// | args | +// caller sp ---> -------- 0 +// | x28 | +// | x27 | +// | ... | +// | x19 | +// -------- +// | x30 | lr +// | x29 | fp +// fp --> -------- -0x60 +// | stack| +// | args | copy from caller +// sp ---> -------- +// +//--------------------------------------------------------------- + .align 2 + .global MCC_CallSlowNativeExt + .type MCC_CallSlowNativeExt, %function +MCC_CallSlowNativeExt: + .cfi_startproc + stp x29, x30, [sp, -0x60]! + .cfi_def_cfa_offset 0x60 + .cfi_offset 29, -0x60 + .cfi_offset 30, -0x58 + add x29, sp, 0 + .cfi_def_cfa_register 29 + + // save callee-saved registers. 
+ stp x19, x20, [x29, 0x10] + stp x21, x22, [x29, 0x20] + stp x23, x24, [x29, 0x30] + stp x25, x26, [x29, 0x40] + stp x27, x28, [x29, 0x50] + + // copy stack args. + ldr x19, [x29] // x19 = caller FP + add x20, sp, 0x60 // x20 = caller SP +.LOOP_COPY_ARG2: // for(;;) { + cmp x19, x20 // if (x19 == x20) + beq .COPY_ARG_END2 // break; + ldp x27, x28, [x19, -0x10]! // x19-=0x10; load [x19] into x27, x28; + stp x27, x28, [sp, -0x10]! // sp-=0x10; store x27,x28 to [sp]; + b .LOOP_COPY_ARG2 // } +.COPY_ARG_END2: + + // save args to callee-saved reigsters. + mov x19, x9 // func ptr + mov x20, x0 // arg0 + mov x21, x1 // arg1 + mov x22, x2 // arg2 + mov x23, x3 // arg3 + mov x24, x4 // arg4 + mov x25, x5 // arg5 + mov x26, x6 // arg6 + mov x27, x7 // arg7 + +#if CONFIG_ASAN + stp d0, d1, [sp,-16]! + stp d2, d3, [sp,-16]! + stp d4, d5, [sp,-16]! + stp d6, d7, [sp,-16]! +#endif + + // save last java frame + ldr x1, [x29] + mov x0, x30 + bl MRT_SetRiskyUnwindContext + + // enter saferegioni. + mov x0, #0 + bl MRT_EnterSaferegion + + // save result. + mov x28, x0 + + // restore args. + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + mov x4, x24 + mov x5, x25 + mov x6, x26 + mov x7, x27 + + +#if CONFIG_ASAN + ldp d6, d7, [sp], 16 + ldp d4, d5, [sp], 16 + ldp d2, d3, [sp], 16 + ldp d0, d1, [sp], 16 +#endif + + // call native function. + blr x19 + + // do not call MRT_LeaveSaferegion + // if MRT_EnterSaferegion returns 0 + cbz w28, .AFTER_CALL_NATIVE2 + + // save return value. + mov x28, x0 + fmov x27, d0 + + // leave saferegion + bl MRT_LeaveSaferegion + + // restore return value + mov x0, x28 + fmov d0, x27 + +.AFTER_CALL_NATIVE2: + // restore callee-saved registers. + ldp x19, x20, [x29, 0x10] + ldp x21, x22, [x29, 0x20] + ldp x23, x24, [x29, 0x30] + ldp x25, x26, [x29, 0x40] + ldp x27, x28, [x29, 0x50] + + // set sp to fp. + mov sp, x29 + + // restore fp, lr, sp + ldp x29, x30, [sp], 0x60 + .cfi_restore 30 + .cfi_restore 29 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc + .size MCC_CallSlowNativeExt, .-MCC_CallSlowNativeExt diff --git a/src/mrt/compiler-rt/src/arch/arm64/dump_register_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/dump_register_stub_arm64.S new file mode 100644 index 0000000000..b79bf661b5 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/dump_register_stub_arm64.S @@ -0,0 +1,61 @@ +#define RegSaveAreaSize (8 * 32) +#define DumpFrameSize (RegSaveAreaSize + 8 * 2) + + .text + .align 2 + .global MRT_DumpRegisters + .type MRT_DumpRegisters, %function +MRT_DumpRegisters: + .cfi_startproc + stp x0, x1, [sp, #-RegSaveAreaSize + 16*0] + stp x2, x3, [sp, #-RegSaveAreaSize + 16*1] + stp x4, x5, [sp, #-RegSaveAreaSize + 16*2] + stp x6, x7, [sp, #-RegSaveAreaSize + 16*3] + stp x8, x9, [sp, #-RegSaveAreaSize + 16*4] + stp x10, x11, [sp, #-RegSaveAreaSize + 16*5] + stp x12, x13, [sp, #-RegSaveAreaSize + 16*6] + stp x14, x15, [sp, #-RegSaveAreaSize + 16*7] + stp x16, x17, [sp, #-RegSaveAreaSize + 16*8] + stp x18, x19, [sp, #-RegSaveAreaSize + 16*9] + stp x20, x21, [sp, #-RegSaveAreaSize + 16*10] + stp x22, x23, [sp, #-RegSaveAreaSize + 16*11] + stp x24, x25, [sp, #-RegSaveAreaSize + 16*12] + stp x26, x27, [sp, #-RegSaveAreaSize + 16*13] + str x28, [sp, #-RegSaveAreaSize + 16*14] + + str x29, [sp, #-RegSaveAreaSize + 16*14+8] + str x30, [sp, #-RegSaveAreaSize + 16*15] + mov x0, sp + str x0, [sp, #-RegSaveAreaSize + 16*15 + 8] + + // parameter for PrintAArch64RegSet + sub x0, sp, #RegSaveAreaSize + + // prologue + stp x29, x30, [sp, #-DumpFrameSize]! 
+ mov x29, sp + + bl PrintAArch64RegSet + + // epilogue + ldp x29, x30, [sp], #DumpFrameSize + + ldp x0, x1, [sp, #-RegSaveAreaSize + 16*0] + ldp x2, x3, [sp, #-RegSaveAreaSize + 16*1] + ldp x4, x5, [sp, #-RegSaveAreaSize + 16*2] + ldp x6, x7, [sp, #-RegSaveAreaSize + 16*3] + ldp x8, x9, [sp, #-RegSaveAreaSize + 16*4] + ldp x10, x11, [sp, #-RegSaveAreaSize + 16*5] + ldp x12, x13, [sp, #-RegSaveAreaSize + 16*6] + ldp x14, x15, [sp, #-RegSaveAreaSize + 16*7] + ldp x16, x17, [sp, #-RegSaveAreaSize + 16*8] + ldp x18, x19, [sp, #-RegSaveAreaSize + 16*9] + ldp x20, x21, [sp, #-RegSaveAreaSize + 16*10] + ldp x22, x23, [sp, #-RegSaveAreaSize + 16*11] + ldp x24, x25, [sp, #-RegSaveAreaSize + 16*12] + ldp x26, x27, [sp, #-RegSaveAreaSize + 16*13] + ldr x28, [sp, #-RegSaveAreaSize + 16*14] + + ret + .cfi_endproc + .size MRT_DumpRegisters, .-MRT_DumpRegisters diff --git a/src/mrt/compiler-rt/src/arch/arm64/fastFuncs.S b/src/mrt/compiler-rt/src/arch/arm64/fastFuncs.S new file mode 100644 index 0000000000..1d721a7887 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/fastFuncs.S @@ -0,0 +1,1372 @@ +# ++++++++WARNING++++++++WARNING++++++++WARNING++++++++ +# + +# + NEED TO BE CAREFUL WHEN THIS FILE IS MODIFIED. +# + +# + This file has been hand modified to preserve x11 whenever it can. +# + It is accomplished by observing x29 == sp, and x11 is saved +# + in the same stack location as x29 instead of x29. +# + Further all .cfi directive for 29 is changed to 11 +# + In addition, there is no need to perform 'mv x29, sp' and the +# + corresponding .cfe for 29. +# + +# + If any of the string functions is used by a MRT* function then +# + internal to the string function the usage of x11 is prohibited. +# + See strcmp as example. +# + +# ++++++++WARNING++++++++WARNING++++++++WARNING++++++++ + +#ifndef ENABLE_ASSERT_RC_NZ +#define ENABLE_ASSERT_RC_NZ 1 +#endif +// following constant should be same with size.h +#define RC_COLOR_CLEAR_MASK 0x3fffffff +#define RC_CYCLE_COLOR_BROWN 0xc0000000 +#define STRONG_RC_BITS_MASK 0x0000ffff +#define RC_BITS_MSB_INDEX 15 +#define WEAK_COLLECTED_INDEX 29 + +#define WEAK_RC_ONE 0x00400000 +#define WEAK_RC_BITS_MASK 0x1fc00000 +#define WEAK_COLLECT 0x20000000 +#define RESURRECT_STRONG_MASK 0x003fffff + + .text + .align 2 + .p2align 3,,7 +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_Array_Boundary_Check,"ax",@progbits + .global MCC_Array_Boundary_Check +#else + .local MCC_Array_Boundary_Check +#endif + .type MCC_Array_Boundary_Check, %function +MCC_Array_Boundary_Check: +.LFB9064: + .cfi_startproc + stp x11, x30, [sp, -32]! 
+ .cfi_def_cfa_offset 32 + .cfi_offset 11, -32 + .cfi_offset 30, -24 +# add x29, sp, 0 +# .cfi_def_cfa_register 29 + cbz x0, .L840 + ldr w2, [x0, 12] + tbnz w1, #31, .L838 + cmp w1, w2 + bge .L838 + ldp x11, x30, [sp], 32 + .cfi_remember_state + .cfi_restore 30 + .cfi_restore 11 + .cfi_def_cfa 31, 0 + ret + .p2align 3 +.L838: + .cfi_restore_state +# WARNING The value stored at [sp+0] should be x29 +# which has not been changed by this func +# ldr x1, [sp] +# store index and length + stp x1, x2, [sp, 16] + mov x1, x29 + mov x0, x30 + bl MRT_SetRiskyUnwindContext +# restore index and length + ldp x11, x30, [sp], 16 + ldp x1, x0, [sp], 16 + .cfi_remember_state + .cfi_restore 30 + .cfi_restore 11 + .cfi_def_cfa 31, 0 + b MRT_ThrowArrayIndexOutOfBoundsException + .p2align 3 +.L840: + .cfi_restore_state +# WARNING The value stored at [sp+0] should be x29 +# which has not been changed by this func +# ldr x1, [sp] + mov x1, x29 + mov x0, x30 + bl MRT_SetRiskyUnwindContext + ldp x11, x30, [sp], 32 + .cfi_restore 30 + .cfi_restore 11 + .cfi_def_cfa 31, 0 + b MRT_ThrowNullPointerExceptionUnw + .cfi_endproc +.LFE9064: + .size MCC_Array_Boundary_Check, .-MCC_Array_Boundary_Check + + .section .rodata, "a",%progbits + .local .L.str.interface.itab + .p2align 3 + .type .L.str.interface.itab,@object +.L.str.interface.itab: + .asciz "java/lang/AbstractMethodError" + .size .L.str.interface.itab, 30 + + .text +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_getFuncPtrFromItab // -- Begin function MCC_getFuncPtrFromItab,"ax",@progbits + .global MCC_getFuncPtrFromItab // -- Begin function MCC_getFuncPtrFromItab +#else + .local MCC_getFuncPtrFromItab // -- Begin function MCC_getFuncPtrFromItab +#endif + .p2align 3 + .type MCC_getFuncPtrFromItab,@function +MCC_getFuncPtrFromItab: // @MCC_getFuncPtrFromItab + .cfi_startproc +// %bb.0: + sub sp, sp, #64 // =64 + stp x22, x21, [sp, #16] // 16-byte Folded Spill + stp x20, x19, [sp, #32] // 16-byte Folded Spill + stp x29, x30, [sp, #48] // 16-byte Folded Spill + add x29, sp, #48 // =48 + .cfi_def_cfa w29, 16 + .cfi_offset w30, -8 + .cfi_offset w29, -16 + .cfi_offset w19, -24 + .cfi_offset w20, -32 + .cfi_offset w21, -40 + .cfi_offset w22, -48 + ldr w8, [x0, #92] + cbz w8, .LBB131_4 +// %bb.1: + cmp w1, #203 // =203 + add x10, x8, #8 // =8 + b.hi .LBB131_7 +.LBB131_2: // =>This Inner Loop Header: Depth=1 + ldr w8, [x10], #8 + cmp w8, w1 + b.ne .LBB131_2 +// %bb.3: + ldur w0, [x10, #-4] + b .LBB131_6 +.LBB131_4: + adrp x0, .L.str.interface.itab + add x0, x0, :lo12:.L.str.interface.itab + mov x1, x2 +.LBB131_5: + bl MRT_ThrowNewExceptionUnw + mov x0, xzr +.LBB131_6: + ldp x29, x30, [sp, #48] // 16-byte Folded Reload + ldp x20, x19, [sp, #32] // 16-byte Folded Reload + ldp x22, x21, [sp, #16] // 16-byte Folded Reload + add sp, sp, #64 // =64 + ret +.LBB131_7: + ldr w9, [x8] + mov w11, wzr + and w12, w9, #0xffff +.LBB131_8: // =>This Inner Loop Header: Depth=1 + add w13, w11, w12 + and w14, w13, #0xfffffffe + ldr w14, [x10, w14, uxtw #2] + cmp w14, w1 + b.eq .LBB131_10 +// %bb.9: // in Loop: Header=BB131_8 Depth=1 + lsr w15, w13, #1 + cmp w14, w1 + sub w14, w15, #1 // =1 + csel w12, w12, w14, lo + csinc w11, w11, w15, hs + cmp w11, w12 + b.ls .LBB131_8 +.LBB131_10: + orr w11, w13, #0x1 + ldr w0, [x10, w11, uxtw #2] + cmp w0, #1 // =1 + b.ne .LBB131_6 +// %bb.11: + stp x8, x2, [sp] // 16-byte Folded Spill + ubfx x8, x9, #15, #17 + cmp w9, #0 // =0 + and w8, w8, #0xfffe + orr w10, wzr, #0xfffffffe + csel w8, w8, w10, lt + cbz w8, .LBB131_15 +// %bb.12: + ubfiz x9, x9, 
#1, #16 + add x20, x9, #2 // =2 + ldr x9, [sp] // 8-byte Folded Reload + mov x19, xzr + mov w21, w8 + add x9, x9, x20, lsl #2 + sub x22, x9, #8 // =8 +.LBB131_13: // =>This Inner Loop Header: Depth=1 + add x8, x22, x19, lsl #2 + ldr w1, [x8, #8] + ldr x0, [sp, #8] // 8-byte Folded Reload + bl strcmp + cbz w0, .LBB131_16 +// %bb.14: // in Loop: Header=BB131_13 Depth=1 + add x19, x19, #2 // =2 + cmp x19, x21 + b.lo .LBB131_13 +.LBB131_15: + ldr x1, [sp, #8] // 8-byte Folded Reload + adrp x0, .L.str.interface.itab + add x0, x0, :lo12:.L.str.interface.itab + b .LBB131_5 +.LBB131_16: + ldr x8, [sp] // 8-byte Folded Reload + add x8, x8, x20, lsl #2 + add x8, x8, x19, lsl #2 + ldr w0, [x8, #4] + b .LBB131_6 +.Lfunc_end131: + .size MCC_getFuncPtrFromItab, .Lfunc_end131-MCC_getFuncPtrFromItab + .cfi_endproc + + +#define STRING_LENGTH_OFFSET 8 +#define STRING_HASH_OFFSET 12 + + .section .rodata + .align 2 +.Lmethod_desc.string_equals: + .word __methods_compact__string_equals - . + .short 16 + .short 0 + .section .java_text,"ax" +#ifndef ENABLE_LOCAL_FAST_FUNCS + .global __string_equals +#else + .local __string_equals +#endif + .type __string_equals, %function + .align 2 + .word .Lmethod_desc.string_equals - . +__string_equals: + .cfi_startproc + .cfi_personality 155, DW.ref.__mpl_personality_v0 + ldp x4, x2, [x0], #16 // read class shadow and count of this, replace assertnonnull + cbz x1, .Label.string_equals_false // if the other is null + cmp x0, x1 + beq .Label.string_equals_true // same address, return true + ldp x5, x3, [x1], #16 // read class shadow and count of the other + cmp w4, w5 + bne .Label.string_equals_false // if not the same class + cmp w2, w3 // Length and compression flag are different + bne .Label.string_equals_morecheck + and w3, w2, #1 + lsr w2, w2, w3 // get the number of bytes + cbz w2, .Label.string_equals_true // zero length + add w2, w2, #7 + lsr w5, w2, 4 + cbz w5, .Label.string_equals_tail2 // size is shorter than 8 + add x5, x0, x5, lsl 4 +.Label.string_equals_loophead: + ldp x3, x6, [x0], #16 + ldp x4, x7, [x1], #16 + cmp x3, x4 + bne .Label.string_equals_false + cmp x6, x7 + bne .Label.string_equals_false + cmp x0, x5 + bne .Label.string_equals_loophead +.Label.string_equals_tail: + tbz w2, #3, .Label.string_equals_true +.Label.string_equals_tail2: + ldr x3, [x0] + ldr x2, [x1] + cmp x3, x2 + bne .Label.string_equals_false +.Label.string_equals_true: + mov w0, 1 + ret +.Label.string_equals_false: + mov w0, 0 + ret +.Label.string_equals_morecheck: // check for compressed bits are diff byte size are equal + eor w2, w2, w3 + cmp w2, #1 + bne .Label.string_equals_false + tbnz w3, #0, .Label.string_equals_loop2 + mov x2, x0 + mov x0, x1 + mov x1, x2 // swap x0 and x1 to make sure that x0 is normal and x1 is compressed +.Label.string_equals_loop2: + lsr w3, w3, #1 + // x0: normal, x1; compressed, w3: length +.Label.string_equals_loop2_head: + subs w3, w3, #1 + tbnz w3, #31, .Label.string_equals_true + ldrh w4, [x0,w3,SXTW #1] // normal string + ldrb w5, [x1,w3,SXTW #0] // compressed string + cmp w4, w5 + beq .Label.string_equals_loop2_head + b .Label.string_equals_false + .cfi_endproc +.Label.end.__string_equals: + .size __string_equals, .-__string_equals + .word 0x55555555 + + .data + .align 3 + .type __ref__string_equals, %object + .local __ref__string_equals +__ref_string_equals: + .quad __string_equals + .size __ref_string_equals, . 
- __ref_string_equals + + .data + .align 3 + .type __methods_compact__string_equals, %object + .local __methods_compact__string_equals +__methods_compact__string_equals: + .long 0 // methodInVtabIndex + .long 0 // declaringClass + .long 0 // addr + .long 0 // mod + .long __namestr__string_equals - . // name + .long 0 // signature + .long 0 // annotation + .short 0 // flag + .short 2 // argsize + .data + .type __namestr__string_equals, %object + .local __namestr__string_equals +__namestr__string_equals: + .string "__string_equals" + + .section .rodata + .align 2 +.Lmethod_desc.string_hashCode: + .word __methods_compact_string_hashCode - . + .short 16 + .short 0 + .section .java_text,"ax" + .align 2 +#ifndef ENABLE_LOCAL_FAST_FUNCS + .global __string_hashCode +#else + .local __string_hashCode +#endif + .type __string_hashCode, %function + .word .Lmethod_desc.string_hashCode - . +__string_hashCode: +.Label.__string_hashCode9: + .cfi_startproc + .cfi_personality 155, DW.ref.__mpl_personality_v0 + ldr w2, [x0,#STRING_HASH_OFFSET] // Ljava/lang/String;.hash + cbnz w2, .Label.__string_hashCode1 + ldr w1, [x0,#STRING_LENGTH_OFFSET] // length + flag for compressed string + lsr w4, w1, #1 + cbz w4, .Label.__string_hashCode1 + mov w5, #0 // start offset + add x3, x0, #16 // start address + tbz w1, #0, .Label.__string_hashCode4 +.Label.__string_hashCode3: + ldrb w6, [x3,w5,SXTW #0] // compressed string + lsl w1, w2, #5 + add w5, w5, #1 + sub w1, w1, w2 + cmp w5, w4 + add w2, w1, w6 + blt .Label.__string_hashCode3 + b .Label.__string_hashCode6 +.Label.__string_hashCode4: + ldrh w6, [x3,w5,SXTW #1] // normal string + lsl w1, w2, #5 + add w5, w5, #1 + sub w1, w1, w2 + cmp w5, w4 + add w2, w1, w6 + blt .Label.__string_hashCode4 +.Label.__string_hashCode6: + str w2, [x0,#12] // hash = h +.Label.__string_hashCode1: + mov w0, w2 +.Label.__string_hashCode12: + ret +.Label.__string_hashCode11: + b .Label.__string_hashCode12 +.Label.__string_hashCode10: + .cfi_endproc +.Label.end.__string_hashCode: + .size __string_hashCode, .-__string_hashCode + .word 0x55555555 + + .data + .align 3 + .type __ref_string_hashCode, %object + .local __ref_string_hashCode +__ref_string_hashCode: + .quad __string_hashCode + .size __ref_string_hashCode, . - __ref_string_hashCode + + .data + .align 3 + .type __methods_compact_string_hashCode, %object + .local __methods_compact_string_hashCode +__methods_compact_string_hashCode: + .long 0 // methodInVtabIndex + .long 0 // declaringClass + .long 0 // addr + .long 0 // mod + .long __namestr__string_hashCode - . // name + .long 0 // signature + .long 0 // annotation + .short 0 // flag + .short 1 // argsize + + .data + .type __namestr__string_hashCode, %object + .local __namestr__string_hashCode +__namestr__string_hashCode: + .string "__string_hashCode" + + // The following implementation is android specific. 
+ // ARM64-linux will call into libs-fast directly + // (with the cost of an extra plt) +#ifdef __ANDROID__ + .text + .align 2 + .p2align 3,,7 +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_PreNativeCall,"ax",@progbits + .global MCC_PreNativeCall +#else + .local MCC_PreNativeCall +#endif + .type MCC_PreNativeCall, %function +MCC_PreNativeCall: +.Lfunc_begin2: + .cfi_startproc + mrs x8, TPIDR_EL0 + ldr x8, [x8, #56] + ldr x8, [x8, #56] + ldr x9, [x8] + mov x0, x8 + ldr x1, [x9, #8] + br x1 +.Lfunc_end2: + .size MCC_PreNativeCall, .Lfunc_end2-MCC_PreNativeCall + .cfi_endproc + + .text + .align 2 + .p2align 3,,7 +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_PostNativeCall,"ax",@progbits + .global MCC_PostNativeCall +#else + .local MCC_PostNativeCall +#endif + .type MCC_PostNativeCall, %function +MCC_PostNativeCall: // @MCC_PostNativeCall +.Lfunc_begin3: + .cfi_startproc + mrs x8, TPIDR_EL0 + ldr x8, [x8, #56] + mov x1, x0 + ldr x8, [x8, #56] + ldr x9, [x8] + mov x0, x8 + ldr x2, [x9, #16] + br x2 +.Lfunc_end3: + .size MCC_PostNativeCall, .Lfunc_end3-MCC_PostNativeCall + .cfi_endproc + +#endif // __ANDROID__ + +#ifdef __ANDROID__ + .text +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_SetRiskyUnwindContext // -- Begin function MCC_SetRiskyUnwindContext,"ax",@progbits + .global MCC_SetRiskyUnwindContext // -- Begin function MCC_SetRiskyUnwindContext +#else + .local MCC_SetRiskyUnwindContext // -- Begin function MCC_SetRiskyUnwindContext +#endif + .p2align 2 + .type MCC_SetRiskyUnwindContext,@function +MCC_SetRiskyUnwindContext: +.Lfunc_begin18: + .cfi_startproc + mrs x8, TPIDR_EL0 + ldr x8, [x8, #56] + cbz x8, .LBB18_2 + ldr x8, [x8] +.LBB18_2: + orr w9, wzr, #0x2 + stp x1, x0, [x8, #96] + str xzr, [x8, #136] + stp xzr, xzr, [x8, #112] + str w9, [x8, #80] + ret +.Lfunc_end18: + .size MCC_SetRiskyUnwindContext, .Lfunc_end18-MCC_SetRiskyUnwindContext + .cfi_endproc + + .text +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_SetReliableUnwindContext // -- Begin function MCC_SetReliableUnwindContext,"ax",@progbits + .global MCC_SetReliableUnwindContext // -- Begin function MCC_SetReliableUnwindContext +#else + .local MCC_SetReliableUnwindContext // -- Begin function MCC_SetReliableUnwindContext +#endif + .p2align 2 + .type MCC_SetReliableUnwindContext,@function +MCC_SetReliableUnwindContext: // @MCC_SetReliableUnwindContext +.Lfunc_begin19: + .cfi_startproc + mrs x8, TPIDR_EL0 + ldr x8, [x8, #56] + orr w9, wzr, #0x1 + ldr x8, [x8] + str w9, [x8, #80] + ret +.Lfunc_end19: + .size MCC_SetReliableUnwindContext, .Lfunc_end19-MCC_SetReliableUnwindContext + .cfi_endproc + +//libmrtbase/include/thread-offsets.h + + .text + .align 2 + .p2align 3,,7 +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_SyncExitFast,"ax",@progbits + .global MCC_SyncExitFast +#else + .local MCC_SyncExitFast +#endif + .type MCC_SyncExitFast, %function +MCC_SyncExitFast: +.Lfunc_begin4: + .cfi_startproc + adrp x8, :got:_ZN5maple7IThread11is_started_E + ldr x8, [x8, :got_lo12:_ZN5maple7IThread11is_started_E] + ldr x8, [x8] + ldrb w8, [x8] + cmp w8, #1 + b.ne .LBB4_7 + mrs x8, TPIDR_EL0 + ldr x8, [x8, #56] + cbz x8, .LBB4_7 + cbz x0, .LBB4_7 + ldr x8, [x8, #56] + cbz x8, .LBB4_7 + add x9, x0, #4 + ldar w10, [x9] + ldr w11, [x8, #8] + and w12, w10, #0xc000ffff + cmp w11, w12 + b.ne .LBB4_7 + ldaxr w12, [x9] + and w11, w10, #0x10000000 + sub w13, w10, #16, lsl #12 // =65536 + tst w10, #0xfff0000 + csel w11, w11, w13, eq + cmp w12, w10 + b.ne .LBB4_8 + stlxr w10, w11, [x9] + cbz w10, .LBB4_9 +.LBB4_7: + b 
MRT_BuiltinSyncExit +.LBB4_8: + clrex + b MRT_BuiltinSyncExit +.LBB4_9: + ldr x9, [x8] + mov x0, x8 + ldr x1, [x9, #304] + br x1 +.Lfunc_end4: + .size MCC_SyncExitFast, .Lfunc_end4-MCC_SyncExitFast + .cfi_endproc + + .text + .align 2 + .p2align 3,,7 +#ifndef ENABLE_LOCAL_FAST_FUNCS + .section .text.MCC_SyncEnterFast2,"ax",@progbits + .global MCC_SyncEnterFast2 +#else + .local MCC_SyncEnterFast2 +#endif + .type MCC_SyncEnterFast2, %function +MCC_SyncEnterFast2: +.Lfunc_begin0: + .cfi_startproc + adrp x8, :got:_ZN5maple7IThread11is_started_E + ldr x8, [x8, :got_lo12:_ZN5maple7IThread11is_started_E] + mov x1, x0 + ldr x8, [x8] + ldrb w8, [x8] + cmp w8, #1 + b.ne .LBB0_13 + mrs x8, TPIDR_EL0 + ldr x8, [x8, #56] + cbz x8, .LBB0_13 + cbz x1, .LBB0_13 + ldr x0, [x8, #56] + cbz x0, .LBB0_13 + add x8, x1, #4 + ldar w9, [x8] + ldr w10, [x0, #8] + tst w9, #0xefffffff + b.ne .LBB0_8 + ldaxr w11, [x8] + cmp w11, w9 + b.ne .LBB0_12 + orr x9, x10, x9 + stlxr w10, w9, [x8] + cbnz w10, .LBB0_13 +.LBB0_7: + ldr x8, [x0] + orr w3, wzr, #0x1 + mov x2, x30 + ldr x4, [x8, #288] + br x4 +.LBB0_8: + + and w11, w9, #0xc000ffff + cmp w10, w11 + b.ne .LBB0_13 + and w10, w9, #0xfff0000 + orr w11, wzr, #0xfff0000 + cmp w10, w11 + b.eq .LBB0_13 + + ldaxr w10, [x8] + cmp w10, w9 + b.ne .LBB0_12 + add w9, w9, #16, lsl #12 // =65536 + stlxr w10, w9, [x8] + cbnz w10, .LBB0_13 + b .LBB0_7 +.LBB0_12: + clrex +.LBB0_13: + mov x0, x1 + b MRT_BuiltinSyncEnter +.Lfunc_end0: + .size MCC_SyncEnterFast2, .Lfunc_end0-MCC_SyncEnterFast2 + .cfi_endproc +#endif // __ANDROID__ + +#ifdef ENABLE_LOCAL_FAST_FUNCS +#if !DISABLE_RC_DUPLICATE + .text + .align 2 + .p2align 3,,7 + .local MCC_DecRefResetPair + .type MCC_DecRefResetPair, %function +MCC_DecRefResetPair: + .cfi_startproc + adrp x2, gcIsGCOnly + ldr x2, [x2, :lo12:gcIsGCOnly] + cbz x2, .L_RC_DECREF_RESET_PAIR + str xzr, [x0] + str xzr, [x1] + ret +.L_RC_DECREF_RESET_PAIR: +.LResetPairSTART: + stp x20, x30, [sp, -16]! + .cfi_def_cfa_offset 16 + .cfi_offset 20, -16 + .cfi_offset 30, -8 + ldr x2, [x0] + mov x20, x1 + str xzr, [x0] + mov x0, x2 + bl MCC_DecRef_NaiveRCFast + ldr x0, [x20] + str xzr, [x20] + bl MCC_DecRef_NaiveRCFast + ldp x20, x30, [sp], 16 + .cfi_remember_state + .cfi_restore 30 + .cfi_restore 20 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc + .size MCC_DecRefResetPair, .-MCC_DecRefResetPair + + .text + .align 2 + .p2align 3,,7 + .local MCC_IncDecRefReset + .type MCC_IncDecRefReset, %function +MCC_IncDecRefReset: + .cfi_startproc + adrp x2, gcIsGCOnly + ldr x2, [x2, :lo12:gcIsGCOnly] + cbz x2, .L_RC_IncDec_Reset + str xzr, [x1] + ret +.L_RC_IncDec_Reset: + ldr x2, [x1] + str xzr, [x1] + mov x1, x2 + b MCC_IncDecRef_NaiveRCFast + .cfi_endproc + .size MCC_IncDecRefReset, .-MCC_IncDecRefReset + + .text + .align 2 + .p2align 3,,7 + .local MCC_IncRef_NaiveRCFast + .type MCC_IncRef_NaiveRCFast, %function +MCC_IncRef_NaiveRCFast: +.LFB7178: + .cfi_startproc + # WARNING - presersing x11 + # - replacing x29 with sp + # - there should be no usage of x29 + adrp x1, gcIsGCOnly + ldr x1, [x1, :lo12:gcIsGCOnly] + cbz x1, .L_RC_INC + ret +.L_RC_INC: + sub x1, x0, #16, lsl #12 // x1 = x0 - 65536 + mov w2, #2147418111 // x2 = 2^31 - 65537 + cmp x1, x2 // if (x1 >= x2) + bhi .L81 // goto .L81 + stp x11, x30, [sp, -32]! 
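+// For reference: the range check above only takes the fast path for addresses in
+// roughly [65536, 2^31), i.e. plausible RC-managed heap objects; everything else
+// returns via .L81. The retry loop below is, in C-like pseudocode (helper names
+// illustrative only, not real runtime calls):
+//   do {
+//     old = load_acquire(obj - 4);                          // RC header word
+//     if (old has bit RC_BITS_MSB_INDEX set) break;         // not RC-tracked, skip
+//     new = (old & RC_COLOR_CLEAR_MASK) + 1;                // bump strong RC
+//   } while (!compare_and_swap_release(obj - 4, old, new)); // ldaxr/stlxr retry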
+ .cfi_def_cfa_offset 32 + .cfi_offset 11, -32 + .cfi_offset 30, -24 + sub x2, x0, #4 +# add x29, sp, 0 +# .cfi_def_cfa_register 29 + .p2align 2 +.L73: + ldar w4, [x2] + tbnz w4, #RC_BITS_MSB_INDEX, .L80 + and w1, w4, RC_COLOR_CLEAR_MASK + add w1, w1, 1 + ldaxr w3, [x2] + cmp w3, w4 + bne .L73 + stlxr w5, w1, [x2] + cbnz w5, .L73 +#if ENABLE_ASSERT_RC_NZ + tst x4, STRONG_RC_BITS_MASK // check inc from 0 + beq .L83 +#endif +.L80: + ldp x11, x30, [sp], 32 + .p2align 2 +.L81: + .cfi_remember_state + .cfi_restore 30 + .cfi_restore 11 + .cfi_def_cfa 31, 0 + ret + .p2align 3 +.L83: + .cfi_restore_state + bl MRT_BuiltinAbortSaferegister + .cfi_endproc +.LFE7178: + .size MCC_IncRef_NaiveRCFast, .-MCC_IncRef_NaiveRCFast + + .text + .align 2 + .p2align 3,,7 + .local MCC_ClearLocalStackRef + .type MCC_ClearLocalStackRef, %function +MCC_ClearLocalStackRef: + .cfi_startproc + adrp x1, gcIsGCOnly + ldr x1, [x1, :lo12:gcIsGCOnly] + cbz x1, .L_RC_CLEAR_LOCAL + str xzr, [x0] + ret +.L_RC_CLEAR_LOCAL: + ldr x1, [x0] + str xzr, [x0] + mov x0, x1 + b MCC_DecRef_NaiveRCFast + .cfi_endproc + .size MCC_ClearLocalStackRef, .-MCC_ClearLocalStackRef + + .text + .align 2 + .p2align 3,,7 + .local MCC_LoadRefField_NaiveRCFast + .type MCC_LoadRefField_NaiveRCFast, %function +MCC_LoadRefField_NaiveRCFast: +.LFB7197: + .cfi_startproc + # WARNING - presersing x11 + # - replacing x29 with sp + # - there should be no usage of x29 + cbz x0, .L_RC_LOAD_REF + adrp x3, gcIsGCOnly + ldr x3, [x3, :lo12:gcIsGCOnly] + cbz x3, .L_RC_LOAD_REF +#ifdef USE_32BIT_REF + ldr w0, [x1] // load 32bit-ref-field +#else + ldr x0, [x1] // load 64bit-ref-field +#endif //USE_32BIT_REF + ret +.L_RC_LOAD_REF: + stp x11, x30, [sp, -64]! + .cfi_def_cfa_offset 64 + .cfi_offset 11, -64 + .cfi_offset 30, -56 +# add x29, sp, 0 +# .cfi_def_cfa_register 29 +.LEHB15: + stp x19, x20, [sp, 16] + .cfi_offset 19, -48 + .cfi_offset 20, -40 + mov x19, x0 + str x21, [sp, 32] + .cfi_offset 21, -32 + mov x20, x1 + mov x21, x30 + cbz x0, .L185 +.L174: +#ifdef USE_32BIT_REF + ldr w1, [x20] // load 32bit-ref-field +#else + ldr x1, [x20] // load 64bit-ref-field +#endif //USE_32BIT_REF + tbnz x1, #0, .L178 + mov x0, x1 + sub x2, x1, #16, lsl #12 // x2 = x1 - 65536 + mov w3, #2147418111 // x3 = 2^31 - 65537 + cmp x2, x3 // if (x2 < x3) + bls .L186 // goto .L186 + .p2align 2 +.L175: + ldp x19, x20, [sp, 16] + ldr x21, [sp, 32] +.LEHE15: + ldp x11, x30, [sp], 64 + .cfi_remember_state + .cfi_restore 30 + .cfi_restore 11 + .cfi_restore 21 + .cfi_restore 19 + .cfi_restore 20 + .cfi_def_cfa 31, 0 + ret + .p2align 3 +.L186: + .cfi_restore_state + sub x1, x1, #4 + ldar w2, [x1] + tbnz w2, #RC_BITS_MSB_INDEX, .L175 + ands w3, w2, STRONG_RC_BITS_MASK + str w2, [sp, 60] + beq .L178 + ands w3, w2, RC_COLOR_CLEAR_MASK + add w3, w3, 1 + ldaxr w4, [x1] + cmp w4, w2 + bne .L183 + stlxr w5, w3, [x1] + cmp w5, 0 +.L183: + beq .L175 + str w4, [sp, 60] +.L178: +# WARNING The value stored at [sp+0] should be x29 +# which has not been changed by this func +# ldr x1, [sp] + mov x1, x29 + mov x0, x21 +.LEHB16: + bl MRT_SetRiskyUnwindContext + mov x1, x20 + mov x0, x19 + bl MRT_LoadRefField + + mov x19, x0 + bl MRT_SetReliableUnwindContextStatus + mov x0, x19 + + ldp x19, x20, [sp, 16] + ldr x21, [sp, 32] + ldp x11, x30, [sp], 64 + .cfi_remember_state + .cfi_restore 20 + .cfi_restore 19 + .cfi_restore 21 + .cfi_restore 11 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .p2align 3 +.L185: + .cfi_restore_state +# WARNING The value stored at [sp+0] should be x29 +# which has not been changed by this 
func +# ldr x1, [sp] + mov x1, x29 + mov x0, x30 + bl MRT_SetRiskyUnwindContext + bl MRT_ThrowNullPointerExceptionUnw +.LEHE16: + b .L174 + .cfi_endproc +.LFE7197: + .size MCC_LoadRefField_NaiveRCFast, .-MCC_LoadRefField_NaiveRCFast + + .text + .align 2 + .p2align 3,,7 + .local MCC_CleanupLocalStackRef_NaiveRCFast + .type MCC_CleanupLocalStackRef_NaiveRCFast, %function +MCC_CleanupLocalStackRef_NaiveRCFast: + .cfi_startproc + adrp x2, gcIsGCOnly + ldr x2, [x2, :lo12:gcIsGCOnly] + cbz x2, .L_RC_CLEANUP + ret +.L_RC_CLEANUP: +#if CONFIG_JSAN + stp x29, x30, [sp, -32]! + .cfi_def_cfa_offset 32 + .cfi_offset 29, -32 + .cfi_offset 30, -24 + mov x29, sp + str x11, [x29, 16] + .cfi_offset 11, -16 + bl MCC_CleanupLocalStackRef + ldr x11, [x29, 16] + ldp x29, x30, [sp], 32 + .cfi_restore 11 + .cfi_restore 29 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc +#else +.LFB7168: + # WARNING - presersing x11 + # - replacing x29 with sp + # - there should be no usage of x29 + cbz x1, .L150 + stp x11, x30, [sp, -48]! + .cfi_def_cfa_offset 48 + .cfi_offset 11, -48 + .cfi_offset 30, -40 +# add x29, sp, 0 +# .cfi_def_cfa_register 29 + stp x19, x20, [sp, 16] + .cfi_offset 19, -32 + .cfi_offset 20, -24 + mov x20, x1 + str x21, [sp, 32] + .cfi_offset 21, -16 + mov x19, 0 + mov x21, x0 + b .L120 + .p2align 3 +.L101: + cmp x20, x19 + beq .L110 +.L120: + ldr x0, [x21, x19, lsl 3] + add x19, x19, 1 + cbz x0, .L101 + bl MCC_DecRef_NaiveRCFast + cmp x20, x19 + bne .L120 +.L110: + ldp x19, x20, [sp, 16] + ldr x21, [sp, 32] + ldp x11, x30, [sp], 48 + .cfi_restore 30 + .cfi_restore 11 + .cfi_restore 21 + .cfi_restore 19 + .cfi_restore 20 + .cfi_def_cfa 31, 0 + ret +.L150: + ret + .cfi_endproc +.LFE7168: +#endif + .size MCC_CleanupLocalStackRef_NaiveRCFast, .-MCC_CleanupLocalStackRef_NaiveRCFast + + .text + .align 2 + .p2align 3,,7 + .local MCC_CleanupLocalStackRefSkip_NaiveRCFast + .type MCC_CleanupLocalStackRefSkip_NaiveRCFast, %function +MCC_CleanupLocalStackRefSkip_NaiveRCFast: + .cfi_startproc + adrp x3, gcIsGCOnly + ldr x3, [x3, :lo12:gcIsGCOnly] + cbz x3, .L_RC_CLEANUP_SKIP + ret +.L_RC_CLEANUP_SKIP: +#if CONFIG_JSAN + stp x29, x30, [sp, -32]! + .cfi_def_cfa_offset 32 + .cfi_offset 29, -32 + .cfi_offset 30, -24 + mov x29, sp + str x11, [x29, 16] + bl MCC_CleanupLocalStackRefSkip + .cfi_offset 11, -16 + ldr x11, [x29, 16] + ldp x29, x30, [sp], 32 + .cfi_restore 11 + .cfi_restore 29 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc +#else +.LFB7169: + # WARNING - presersing x11 + # - replacing x29 with sp + # - there should be no usage of x29 + cbz x1, .L121 + stp x11, x30, [sp, -48]! 
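+// For reference, the loop below is equivalent to (C-like pseudocode, names illustrative):
+//   for (i = 0; i < count; ++i) if (slots[i]) MCC_DecRef_NaiveRCFast(slots[i]);
+// where x21 holds the slot base (incoming x0) and x20 the slot count (incoming x1).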
+ .cfi_def_cfa_offset 48 + .cfi_offset 11, -48 + .cfi_offset 30, -40 +# add x29, sp, 0 +# .cfi_def_cfa_register 29 + stp x19, x20, [sp, 16] + .cfi_offset 19, -32 + .cfi_offset 20, -24 + mov x20, x1 + stp x21, x22, [sp, 32] + .cfi_offset 21, -16 + .cfi_offset 22, -8 + mov x19, 0 + mov x22, x0 + mov x21, x2 + b .L118 + .p2align 3 +.L113: + add x19, x19, 1 + cmp x20, x19 + beq .L122 +.L118: + cmp x21, x19 + beq .L113 + ldr x0, [x22, x19, lsl 3] + cbz x0, .L113 + bl MCC_DecRef_NaiveRCFast + add x19, x19, 1 + cmp x20, x19 + bne .L118 +.L122: + ldp x19, x20, [sp, 16] + ldp x21, x22, [sp, 32] + ldp x11, x30, [sp], 48 + .cfi_restore 30 + .cfi_restore 11 + .cfi_restore 21 + .cfi_restore 22 + .cfi_restore 19 + .cfi_restore 20 + .cfi_def_cfa 31, 0 + ret +.L121: + ret + .cfi_endproc +.LFE7169: +#endif + .size MCC_CleanupLocalStackRefSkip_NaiveRCFast, .-MCC_CleanupLocalStackRefSkip_NaiveRCFast + + .text + .align 2 + .p2align 3,,7 + .local MCC_DecRef_NaiveRCFast + .type MCC_DecRef_NaiveRCFast, %function +MCC_DecRef_NaiveRCFast: + .cfi_startproc + adrp x1, gcIsGCOnly + ldr x1, [x1, :lo12:gcIsGCOnly] + cbz x1, .L_RC_DEC_REF + ret +.L_RC_DEC_REF: +#if CONFIG_JSAN + stp x29, x30, [sp, -32]! + .cfi_def_cfa_offset 32 + .cfi_offset 29, -32 + .cfi_offset 30, -24 + mov x29, sp + str x11, [x29, 16] + .cfi_offset 11, -16 + bl MRT_DecRef + ldr x11, [x29, 16] + ldp x29, x30, [sp], 32 + .cfi_restore 11 + .cfi_restore 29 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc +#else +.LFB7179: + sub x1, x0, #16, lsl #12 // x1 = x0 - 65536 + mov w2, #2147418111 // x2 = 2^31 - 65537 + cmp x1, x2 // if (x1 >= x2) + bhi .L76 // goto .L76 + ldr w2, [x0, -8] + tbnz w2, #31, .L79 + sub x1, x0, #4 +.L91: + ldxr w2, [x1] + tbnz w2, #RC_BITS_MSB_INDEX, .L76 + sub w3, w2, #1 + orr w3, w3, #RC_CYCLE_COLOR_BROWN + // start check if weak collected bit can set + // 1. weak collected bit not set + tbnz w3, #WEAK_COLLECTED_INDEX, .L95 + // 2. newHeader's weak rc > 1 + and w4, w3, WEAK_RC_BITS_MASK + cmp w4, WEAK_RC_ONE + ble .L95 + // 3. newHeader's strong and resurrect rc is zero + and w4, w3, RESURRECT_STRONG_MASK + cbnz w4, .L95 + // 4. set weak collected bit + orr w3, w3, #WEAK_COLLECT + // end weak collected bit check +.L95: + stxr w4, w3, [x1] + cbnz w4, .L91 + // old rcheader in w2 and new rcheader in w3 + // as imm is too long, split release operation + // check strong rc first if old strong rc is 1, if not return + and w1, w2, STRONG_RC_BITS_MASK + cmp w1, 1 + bne .L94 + // check if release object + and w1, w3, RC_COLOR_CLEAR_MASK + cmp w1, WEAK_RC_ONE + beq .L92 + eor w1, w1, #WEAK_COLLECT + cbz w1, .L92 + // check if weak collected + tbz w3, #WEAK_COLLECTED_INDEX, .L94 + tbz w2, #WEAK_COLLECTED_INDEX, .L93 +.L94: +#if ENABLE_ASSERT_RC_NZ + and w1, w2, STRONG_RC_BITS_MASK + cbz w1, .L309 +#endif +.L76: + ret + .p2align 3 +.L79: + b MRT_DecRef +.L309: + stp x29, x30, [sp, -16]! 
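+// .L309 is only reachable when ENABLE_ASSERT_RC_NZ is set: the old strong RC was
+// already zero before this decrement, so build a minimal frame and abort in a
+// saferegister rather than underflow the count.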
+ .cfi_def_cfa_offset 16 + .cfi_offset 29, -16 + .cfi_offset 30, -8 + add x29, sp, 0 + .cfi_def_cfa_register 29 + bl MRT_BuiltinAbortSaferegister + .p2align 3 +.L93: + .cfi_def_cfa 31, 0 + .cfi_restore 29 + .cfi_restore 30 + b MRT_CollectWeakObj +.L92: + .cfi_def_cfa 31, 0 + .cfi_restore 29 + .cfi_restore 30 + b MRT_ReleaseObj + .cfi_endproc +.LFE7179: +#endif + .size MCC_DecRef_NaiveRCFast, .-MCC_DecRef_NaiveRCFast + .text + .align 2 + .p2align 3,,7 + .local MCC_IncDecRef_NaiveRCFast + .type MCC_IncDecRef_NaiveRCFast, %function +MCC_IncDecRef_NaiveRCFast: + .cfi_startproc + adrp x2, gcIsGCOnly + ldr x2, [x2, :lo12:gcIsGCOnly] + cbz x2, .L_RC_INC_DEC + ret +.L_RC_INC_DEC: +#if CONFIG_JSAN + stp x29, x30, [sp, -32]! + .cfi_def_cfa_offset 32 + .cfi_offset 29, -32 + .cfi_offset 30, -24 + mov x29, sp + str x11, [x29, 16] + .cfi_offset 11, -16 + bl MRT_IncDecRef + ldr x11, [x29, 16] + ldp x29, x30, [sp], 32 + .cfi_restore 11 + .cfi_restore 29 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc +#else +.LFB7170: + # WARNING - presersing x11 + # - replacing x29 with sp + # - there should be no usage of x29 + cmp x0, x1 + beq .L142 + stp x11, x30, [sp, -32]! + .cfi_def_cfa_offset 32 + .cfi_offset 11, -32 + .cfi_offset 30, -24 + mov x2, x0 + mov x0, x1 + mov w7, #2147418111 // x7 = 2^31 - 65537 + sub x6, x2, #16, lsl #12 // x6 = x2 - 65536 + cmp x6, x7 // if (x6 < x7) + bls .L123 // goto .L123 + .p2align 2 +.L130: + sub x6, x0, #16, lsl #12 // x6 = x0 - 65536 + cmp x6, x7 // if (x6 >= x7) + bhi .L107 // goto .L107 + ldr w1, [x0, -8] + tbnz w1, #31, .L114 + sub x2, x0, #4 +.L119: + ldxr w1, [x2] + tbnz w1, #RC_BITS_MSB_INDEX, .L107 + sub w3, w1, #1 + orr w3, w3, #RC_CYCLE_COLOR_BROWN + // start check if weak collected bit can set + // 1. weak collected bit not set + tbnz w3, #WEAK_COLLECTED_INDEX, .L128 + // 2. newHeader's weak rc > 1 + and w4, w3, WEAK_RC_BITS_MASK + cmp w4, WEAK_RC_ONE + ble .L128 + // 3. newHeader's strong and resurrect rc is zero + and w4, w3, RESURRECT_STRONG_MASK + cbnz w4, .L128 + // 4. set weak collected bit + orr w3, w3, #WEAK_COLLECT + // end weak collected bit check +.L128: + stxr w4, w3, [x2] + cbnz w4, .L119 + // old rcheader in w1 and new rcheader in w3 + // as imm is too long, split release operation + // check strong rc first if old strong rc is 1, if not return + and w4, w1, STRONG_RC_BITS_MASK + cmp w4, 1 + bne .L127 + // check if release object + and w4, w3, RC_COLOR_CLEAR_MASK + cmp w4, WEAK_RC_ONE + beq .L124 + eor w4, w4, #WEAK_COLLECT + cbz w4, .L124 + // check if weak collected + tbz w3, #WEAK_COLLECTED_INDEX, .L127 + tbz w1, #WEAK_COLLECTED_INDEX, .L126 +.L127: +#if ENABLE_ASSERT_RC_NZ + and w1, w1, STRONG_RC_BITS_MASK // check old strong is not zero + cbz w1, .L343 +#endif +.L107: + ldp x11, x30, [sp], 32 + .cfi_remember_state + .cfi_restore 30 + .cfi_restore 11 + .cfi_def_cfa 31, 0 + ret +.L114: + .cfi_restore_state + bl MRT_DecRef + ldp x11, x30, [sp], 32 + .cfi_remember_state + .cfi_restore 11 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret +.L123: + .cfi_restore_state + add x5, sp, 32 + sub x3, x2, #4 + str wzr, [x5, -4]! 
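+// Increment half of IncDecRef: .L112 below retries an ldaxr/stlxr update of the
+// first object's RC header (x3 = obj - 4), keeping the last observed header at
+// [sp, 28]; if that header's strong RC was zero the code aborts at .L343,
+// otherwise it continues with the decrement half at .L130.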
+ .p2align 2 +.L112: + ldar w1, [x3] + tbnz w1, #RC_BITS_MSB_INDEX, .L130 + and w2, w1, RC_COLOR_CLEAR_MASK + add w2, w2, 1 + str w1, [sp, 28] + ldaxr w4, [x3] + cmp w4, w1 + bne .L140 + stlxr w8, w2, [x3] + cmp w8, 0 +.L140: + bne .L125 + ldr w1, [sp, 28] + tst x1, STRONG_RC_BITS_MASK + bne .L130 +.L343: + bl MRT_BuiltinAbortSaferegister + .p2align 3 +.L125: + str w4, [x5] + b .L112 +.L142: + .cfi_def_cfa 31, 0 + .cfi_restore 11 + .cfi_restore 30 + ret +.L124: + .cfi_def_cfa 29, 32 + .cfi_offset 11, -32 + .cfi_offset 30, -24 + bl MRT_ReleaseObj + ldp x11, x30, [sp], 32 + .cfi_restore 11 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret +.L126: + .cfi_def_cfa 29, 32 + .cfi_offset 11, -32 + .cfi_offset 30, -24 + bl MRT_CollectWeakObj + ldp x11, x30, [sp], 32 + .cfi_restore 11 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc +.LFE7170: +#endif // CONFIG_JSAN + .size MCC_IncDecRef_NaiveRCFast, .-MCC_IncDecRef_NaiveRCFast + +.section .data.maple.gcInfo,"aw",%progbits + .type gcIsGCOnly, @object + .p2align 3 + .local gcIsGCOnly +gcIsGCOnly: + .quad 0 + .quad 0x4d634734 // maigc + .size gcIsGCOnly,.-gcIsGCOnly +// Add global symbol REF_gcIsGCOnly for indirect reference to gcIsGCOnly. +// Add this indirect because of lld relocation limitation + .type REF_gcIsGCOnly, @object + .p2align 3 + .global REF_gcIsGCOnly +REF_gcIsGCOnly: + .quad gcIsGCOnly - . + .size REF_gcIsGCOnly,.-REF_gcIsGCOnly +#endif // !DISABLE_RC_DUPLICATE +#endif diff --git a/src/mrt/compiler-rt/src/arch/arm64/i2r_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/i2r_stub_arm64.S new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/mrt/compiler-rt/src/arch/arm64/interp_native_method_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/interp_native_method_stub_arm64.S new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/mrt/compiler-rt/src/arch/arm64/memset.S b/src/mrt/compiler-rt/src/arch/arm64/memset.S new file mode 100644 index 0000000000..a5748d9fa8 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/memset.S @@ -0,0 +1,114 @@ +// this file implements memset or similar functionalities + + .text + .align 2 + .global ROSAllocZero + .hidden ROSAllocZero // remove plt within so + .type ROSAllocZero, %function +ROSAllocZero: + .cfi_startproc + +// this works on sizes >= 8, since rosalloc doesn't need any size +// smaller than that + +// based on profiling, the most frequent size should be [16, 32], +// followed by (32, 96], followed by the others, i.e., [8, 16) and (96, +inf); +// notice that size [8, 16) is not frequent at all + +// x0: dst, x1: size, x2: dstend, x3: dstmid + + dup v0.16B, wzr + add x2, x0, x1 + cmp x1, 32 + b.hi .Lhi32 + cmp x1, 16 + b.lo .Llo16 + + str q0, [x0] + str q0, [x2, -16] + ret + +.Lhi32: // size > 32 + stp q0, q0, [x0] // head 32 + stp q0, q0, [x2, -32] // tail 32 + cmp x1, 256 + b.hs .Lhs256 // optimisation for pages? 
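+// 32 < size < 256: rely on overlapping stores instead of an exact-length loop.
+// The 32-byte head and tail were zeroed above; the lines below zero 32 bytes
+// around the midpoint (x3 = dst + size/2), and the >96 case adds fixed stores
+// from both ends. Overlapping writes are harmless because every byte is zero.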
+ add x3, x0, x1, lsr #1 + stp q0, q0, [x3, -16] // mid 32 + cmp x1, 96 + b.hi .Lhi96 + ret + +.Lhi96: // size > 96 + // stp q0, q0, [x0] // head 32, done in .Lhi32 + stp q0, q0, [x0, 32] + stp q0, q0, [x0, 64] + stp q0, q0, [x3, -48] + // stp q0, q0, [x3, -16] // mid 32, done in .Lhi32 + stp q0, q0, [x3, 16] + stp q0, q0, [x2, -96] + stp q0, q0, [x2, -64] + // stp q0, q0, [x2, -32] // tail 32, done in .Lhi32 + ret + +.Llo16: // size < 16 + str xzr, [x0] + str xzr, [x2, -8] + ret + +.Lhs256: // size >= 256 + mrs x4, dczid_el0 + tbnz w4, 4, .Lnozva + and w4, w4, 15 + cmp w4, 4 + b.ne .Lzva128 + +.Lzva64: + // stp q0, q0, [x0] // head 32, done in .Lhi32 + stp q0, q0, [x0, 32] + bic x0, x0, 63 + sub x5, x2, 128 +1: + add x0, x0, 64 + dc zva, x0 + cmp x5, x0 + b.hi 1b + stp q0, q0, [x2, -64] + // stp q0, q0, [x2, -32] // tail 32, done in .Lhi32 + ret + +.Lzva128: + cmp w4, 5 + b.ne .Lnozva + + // stp q0, q0, [x0] // head 32, done in .Lhi32 + stp q0, q0, [x0, 32] + stp q0, q0, [x0, 64] + stp q0, q0, [x0, 96] + bic x0, x0, 127 + sub x5, x2, 128 + sub x5, x5, 128 +1: + add x0, x0, 128 + dc zva, x0 + cmp x5, x0 + b.hi 1b + stp q0, q0, [x2, -128] + stp q0, q0, [x2, -96] + stp q0, q0, [x2, -64] + // stp q0, q0, [x2, -32] // tail 32, done in .Lhi32 + ret + +.Lnozva: + // stp q0, q0, [x0] // head 32, done in .Lhi32 + sub x5, x2, 64 +1: + add x0, x0, 32 + stp q0, q0, [x0] + cmp x5, x0 + b.hi 1b + // stp q0, q0, [x2, -32] // tail 32, done in .Lhi32 + ret + + .cfi_endproc + .size ROSAllocZero, .-ROSAllocZero diff --git a/src/mrt/compiler-rt/src/arch/arm64/prepare_args_for_exception_catcher_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/prepare_args_for_exception_catcher_arm64.S new file mode 100644 index 0000000000..20dd113ad7 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/prepare_args_for_exception_catcher_arm64.S @@ -0,0 +1,38 @@ +#define SavedRegisterSize (8 * 2) +#define PrepareArgsStructSize (8 * 4) + + .text + .align 2 + .global PrepareArgsForExceptionCatcher + .hidden PrepareArgsForExceptionCatcher + .type PrepareArgsForExceptionCatcher, %function +PrepareArgsForExceptionCatcher: + .cfi_startproc + stp x29, x30, [sp, #-SavedRegisterSize]! + .cfi_adjust_cfa_offset SavedRegisterSize + .cfi_rel_offset x29, 0 + .cfi_rel_offset x30, 8 + + // fp points to caller fp + mov x29, sp + .cfi_def_cfa_register x29 + + sub sp, sp, #PrepareArgsStructSize + + mov x0, sp + bl MRT_GetHandlerCatcherArgs + + ldr x0, [sp] + ldr x1, [sp, #16] + str x1, [x29, #8] + ldr x1, [sp, #8] + + mov sp, x29 + .cfi_def_cfa_register sp + ldp x29, x30, [sp], #SavedRegisterSize + .cfi_adjust_cfa_offset -SavedRegisterSize + .cfi_restore x29 + .cfi_restore x30 + ret + .cfi_endproc + .size PrepareArgsForExceptionCatcher, .-PrepareArgsForExceptionCatcher diff --git a/src/mrt/compiler-rt/src/arch/arm64/proxy_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/proxy_stub_arm64.S new file mode 100644 index 0000000000..c4149e8c0c --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/proxy_stub_arm64.S @@ -0,0 +1,853 @@ +// ProxyStubXX used by proxy method entrypoint, the func address fill in vtab/itab for proxy class. 
+// this stub builds a stub frame, copy caller arguments to stack frame +// and we can restructure arguments according to MethodMeta +// On execution of "bl StubFuncwithReturnTypeArm32", the top frame layout of stack(growing downwards) looks like: +// r0~r3: hold the first 4 possible arguments arg0~arg3 if existed +// lr: return address of "bl ProxyStubXX" +// all on-stack arguments are addressable by SP as the frame layout shows. +// | ... | +// | lr(r14) | lr for the caller of ProxyStubXX +// caller fp --> | fp(r11) | +// | ... | +// | arg7 | +// | arg6 | +// | arg5 | +// caller sp --> | arg4 | + +// the frame layout of stack(growing downwards) after ProxyStubXX frame is built looks like: +// | ... | +// | lr(r14) | lr for the caller of ProxyStubXX +// caller fp --> | fp(r11) | +// | ... | +// | arg7 | +// | arg6 | +// | arg5 | +// caller sp --> | arg4 | +// | caller lr | <== ProxyStubXX frame starts from here +// stub fp --> | caller fp | fp for the caller of ProxyStubXX +// callee saved | r28 | +// | r27 | +// | r26 | +// | r25 | +// | r24 | +// | r23 | +// | r22 | +// | r21 | +// | r20 | +// callee saved | r19 | +// | ... | +// | arg7 | +// | arg6 | +// | arg5 | +// | arg4 | +// | d7 | +// | ... | +// | d0 | +// | x7 | +// | ... | +// stub sp --> | x0 | <== ProxyStubXX frame ends at here +#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +#define cfi_restore(reg) .cfi_restore reg +#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg + +#define StubFrameSize (8 * 18) +#define StubCalleeSaveArea (8 * 8) + .text + .align 2 + .global MCC_DeferredInvoke + .type MCC_DeferredInvoke , %function +MCC_DeferredInvoke: + .cfi_startproc + stp x29, x30, [sp, #-StubFrameSize]! + cfi_adjust_cfa_offset (StubFrameSize) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + // save all used callee-saved registers. + stp x19, x20, [sp, #StubCalleeSaveArea] + cfi_rel_offset (x19, StubCalleeSaveArea) + cfi_rel_offset (x20, StubCalleeSaveArea+8) + + stp x21, x22, [sp, #StubCalleeSaveArea+0x10] + cfi_rel_offset (x21, StubCalleeSaveArea+0x10) + cfi_rel_offset (x22, StubCalleeSaveArea+0x18) + + stp x23, x24, [sp, #StubCalleeSaveArea+0x20] + cfi_rel_offset (x23, StubCalleeSaveArea+0x20) + cfi_rel_offset (x24, StubCalleeSaveArea+0x28) + + stp x25, x26, [sp, #StubCalleeSaveArea+0x30] + cfi_rel_offset (x25, StubCalleeSaveArea+0x30) + cfi_rel_offset (x26, StubCalleeSaveArea+0x38) + + stp x27, x28, [sp, #StubCalleeSaveArea+0x40] + cfi_rel_offset (x27, StubCalleeSaveArea+0x40) + cfi_rel_offset (x28, StubCalleeSaveArea+0x48) + + // back up caller sp and fp for copying caller's outgoing arguments + mov x19, x29 + add x20, sp, #StubFrameSize + + mov x29, sp + cfi_def_cfa_register (x29) + + // buffer for caller's arguments. + sub x21, x19, x20 + add x21, x21, #(8 + 8) * 8 // size for arguments register + sub sp, sp, x21 + mov x21, sp + + stp x0, x1, [x21], #16 + stp x2, x3, [x21], #16 + stp x4, x5, [x21], #16 + stp x6, x7, [x21], #16 + + stp d0, d1, [x21], #16 + stp d2, d3, [x21], #16 + stp d4, d5, [x21], #16 + stp d6, d7, [x21], #16 + + // copy on-stack arguments if existed +.LCopy: + cmp x20, x19 + bge .LCopyEnd + ldp x4, x5, [x20], #16 + stp x4, x5, [x21], #16 + b .LCopy +.LCopyEnd: + + mov x0, sp + bl EnterDeferredInvoke + /* set potential return value, float/double save in x0 */ + fmov d0, x0 + + mov sp, x29 + cfi_def_cfa_register (sp) + + // restore all used callee-saved registers. 
+ ldp x19, x20, [sp, #StubCalleeSaveArea] + cfi_restore (x19) + cfi_restore (x20) + ldp x21, x22, [sp, #StubCalleeSaveArea+0x10] + cfi_restore (x21) + cfi_restore (x22) + ldp x23, x24, [sp, #StubCalleeSaveArea+0x20] + cfi_restore (x23) + cfi_restore (x24) + ldp x25, x26, [sp, #StubCalleeSaveArea+0x30] + cfi_restore (x25) + cfi_restore (x26) + ldp x27, x28, [sp, #StubCalleeSaveArea+0x40] + cfi_restore (x27) + cfi_restore (x28) + + ldp x29, x30, [sp], #StubFrameSize + cfi_adjust_cfa_offset (-StubFrameSize) + cfi_restore (x29) + cfi_restore (x30) + ret + .cfi_endproc + .size MCC_DeferredInvoke, .-MCC_DeferredInvoke + +.macro PROXYFUNCENTER, funcName, number + .text + .align 2 + .global \funcName\number + .hidden \funcName\number + .type \funcName\number , %function +\funcName\number: + .cfi_startproc + stp x29, x30, [sp, #-StubFrameSize]! + cfi_adjust_cfa_offset (StubFrameSize) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + // save all used callee-saved registers. + stp x19, x20, [sp, #StubCalleeSaveArea] + cfi_rel_offset (x19, StubCalleeSaveArea) + cfi_rel_offset (x20, StubCalleeSaveArea+8) + + stp x21, x22, [sp, #StubCalleeSaveArea+0x10] + cfi_rel_offset (x21, StubCalleeSaveArea+0x10) + cfi_rel_offset (x22, StubCalleeSaveArea+0x18) + + stp x23, x24, [sp, #StubCalleeSaveArea+0x20] + cfi_rel_offset (x23, StubCalleeSaveArea+0x20) + cfi_rel_offset (x24, StubCalleeSaveArea+0x28) + + stp x25, x26, [sp, #StubCalleeSaveArea+0x30] + cfi_rel_offset (x25, StubCalleeSaveArea+0x30) + cfi_rel_offset (x26, StubCalleeSaveArea+0x38) + + stp x27, x28, [sp, #StubCalleeSaveArea+0x40] + cfi_rel_offset (x27, StubCalleeSaveArea+0x40) + cfi_rel_offset (x28, StubCalleeSaveArea+0x48) + + // back up caller sp and fp for copying caller's outgoing arguments + mov x19, x29 + add x20, sp, #StubFrameSize + + mov x29, sp + cfi_def_cfa_register (x29) + + // buffer for caller's arguments. + sub x21, x19, x20 + add x21, x21, #(8 + 8) * 8 // size for arguments register + sub sp, sp, x21 + mov x21, sp + + stp x0, x1, [x21], #16 + stp x2, x3, [x21], #16 + stp x4, x5, [x21], #16 + stp x6, x7, [x21], #16 + + stp d0, d1, [x21], #16 + stp d2, d3, [x21], #16 + stp d4, d5, [x21], #16 + stp d6, d7, [x21], #16 + + // copy on-stack arguments if existed +.L_copy\number: + cmp x20, x19 + bge .L_copy_end\number + ldp x4, x5, [x20], #16 + stp x4, x5, [x21], #16 + b .L_copy\number +.L_copy_end\number: + + mov x0, sp + mov x1, #\number + bl EnterProxyMethodInvoke + /* set potential return value, float/double save in x0 */ + fmov d0, x0 + + mov sp, x29 + cfi_def_cfa_register (sp) + + // restore all used callee-saved registers. 
+ ldp x19, x20, [sp, #StubCalleeSaveArea] + cfi_restore (x19) + cfi_restore (x20) + ldp x21, x22, [sp, #StubCalleeSaveArea+0x10] + cfi_restore (x21) + cfi_restore (x22) + ldp x23, x24, [sp, #StubCalleeSaveArea+0x20] + cfi_restore (x23) + cfi_restore (x24) + ldp x25, x26, [sp, #StubCalleeSaveArea+0x30] + cfi_restore (x25) + cfi_restore (x26) + ldp x27, x28, [sp, #StubCalleeSaveArea+0x40] + cfi_restore (x27) + cfi_restore (x28) + + ldp x29, x30, [sp], #StubFrameSize + cfi_adjust_cfa_offset (-StubFrameSize) + cfi_restore (x29) + cfi_restore (x30) + ret + .cfi_endproc + .size \funcName\number, .-\funcName\number + .endm + +PROXYFUNCENTER sfp_jlong_, 0 +PROXYFUNCENTER sfp_jlong_, 1 +PROXYFUNCENTER sfp_jlong_, 2 +PROXYFUNCENTER sfp_jlong_, 3 +PROXYFUNCENTER sfp_jlong_, 4 +PROXYFUNCENTER sfp_jlong_, 5 +PROXYFUNCENTER sfp_jlong_, 6 +PROXYFUNCENTER sfp_jlong_, 7 +PROXYFUNCENTER sfp_jlong_, 8 +PROXYFUNCENTER sfp_jlong_, 9 +PROXYFUNCENTER sfp_jlong_, 10 +PROXYFUNCENTER sfp_jlong_, 11 +PROXYFUNCENTER sfp_jlong_, 12 +PROXYFUNCENTER sfp_jlong_, 13 +PROXYFUNCENTER sfp_jlong_, 14 +PROXYFUNCENTER sfp_jlong_, 15 +PROXYFUNCENTER sfp_jlong_, 16 +PROXYFUNCENTER sfp_jlong_, 17 +PROXYFUNCENTER sfp_jlong_, 18 +PROXYFUNCENTER sfp_jlong_, 19 +PROXYFUNCENTER sfp_jlong_, 20 +PROXYFUNCENTER sfp_jlong_, 21 +PROXYFUNCENTER sfp_jlong_, 22 +PROXYFUNCENTER sfp_jlong_, 23 +PROXYFUNCENTER sfp_jlong_, 24 +PROXYFUNCENTER sfp_jlong_, 25 +PROXYFUNCENTER sfp_jlong_, 26 +PROXYFUNCENTER sfp_jlong_, 27 +PROXYFUNCENTER sfp_jlong_, 28 +PROXYFUNCENTER sfp_jlong_, 29 +PROXYFUNCENTER sfp_jlong_, 30 +PROXYFUNCENTER sfp_jlong_, 31 +PROXYFUNCENTER sfp_jlong_, 32 +PROXYFUNCENTER sfp_jlong_, 33 +PROXYFUNCENTER sfp_jlong_, 34 +PROXYFUNCENTER sfp_jlong_, 35 +PROXYFUNCENTER sfp_jlong_, 36 +PROXYFUNCENTER sfp_jlong_, 37 +PROXYFUNCENTER sfp_jlong_, 38 +PROXYFUNCENTER sfp_jlong_, 39 +PROXYFUNCENTER sfp_jlong_, 40 +PROXYFUNCENTER sfp_jlong_, 41 +PROXYFUNCENTER sfp_jlong_, 42 +PROXYFUNCENTER sfp_jlong_, 43 +PROXYFUNCENTER sfp_jlong_, 44 +PROXYFUNCENTER sfp_jlong_, 45 +PROXYFUNCENTER sfp_jlong_, 46 +PROXYFUNCENTER sfp_jlong_, 47 +PROXYFUNCENTER sfp_jlong_, 48 +PROXYFUNCENTER sfp_jlong_, 49 +PROXYFUNCENTER sfp_jlong_, 50 +PROXYFUNCENTER sfp_jlong_, 51 +PROXYFUNCENTER sfp_jlong_, 52 +PROXYFUNCENTER sfp_jlong_, 53 +PROXYFUNCENTER sfp_jlong_, 54 +PROXYFUNCENTER sfp_jlong_, 55 +PROXYFUNCENTER sfp_jlong_, 56 +PROXYFUNCENTER sfp_jlong_, 57 +PROXYFUNCENTER sfp_jlong_, 58 +PROXYFUNCENTER sfp_jlong_, 59 +PROXYFUNCENTER sfp_jlong_, 60 +PROXYFUNCENTER sfp_jlong_, 61 +PROXYFUNCENTER sfp_jlong_, 62 +PROXYFUNCENTER sfp_jlong_, 63 +PROXYFUNCENTER sfp_jlong_, 64 +PROXYFUNCENTER sfp_jlong_, 65 +PROXYFUNCENTER sfp_jlong_, 66 +PROXYFUNCENTER sfp_jlong_, 67 +PROXYFUNCENTER sfp_jlong_, 68 +PROXYFUNCENTER sfp_jlong_, 69 +PROXYFUNCENTER sfp_jlong_, 70 +PROXYFUNCENTER sfp_jlong_, 71 +PROXYFUNCENTER sfp_jlong_, 72 +PROXYFUNCENTER sfp_jlong_, 73 +PROXYFUNCENTER sfp_jlong_, 74 +PROXYFUNCENTER sfp_jlong_, 75 +PROXYFUNCENTER sfp_jlong_, 76 +PROXYFUNCENTER sfp_jlong_, 77 +PROXYFUNCENTER sfp_jlong_, 78 +PROXYFUNCENTER sfp_jlong_, 79 +PROXYFUNCENTER sfp_jlong_, 80 +PROXYFUNCENTER sfp_jlong_, 81 +PROXYFUNCENTER sfp_jlong_, 82 +PROXYFUNCENTER sfp_jlong_, 83 +PROXYFUNCENTER sfp_jlong_, 84 +PROXYFUNCENTER sfp_jlong_, 85 +PROXYFUNCENTER sfp_jlong_, 86 +PROXYFUNCENTER sfp_jlong_, 87 +PROXYFUNCENTER sfp_jlong_, 88 +PROXYFUNCENTER sfp_jlong_, 89 +PROXYFUNCENTER sfp_jlong_, 90 +PROXYFUNCENTER sfp_jlong_, 91 +PROXYFUNCENTER sfp_jlong_, 92 +PROXYFUNCENTER sfp_jlong_, 93 +PROXYFUNCENTER 
sfp_jlong_, 94 +PROXYFUNCENTER sfp_jlong_, 95 +PROXYFUNCENTER sfp_jlong_, 96 +PROXYFUNCENTER sfp_jlong_, 97 +PROXYFUNCENTER sfp_jlong_, 98 +PROXYFUNCENTER sfp_jlong_, 99 +PROXYFUNCENTER sfp_jlong_, 100 +PROXYFUNCENTER sfp_jlong_, 101 +PROXYFUNCENTER sfp_jlong_, 102 +PROXYFUNCENTER sfp_jlong_, 103 +PROXYFUNCENTER sfp_jlong_, 104 +PROXYFUNCENTER sfp_jlong_, 105 +PROXYFUNCENTER sfp_jlong_, 106 +PROXYFUNCENTER sfp_jlong_, 107 +PROXYFUNCENTER sfp_jlong_, 108 +PROXYFUNCENTER sfp_jlong_, 109 +PROXYFUNCENTER sfp_jlong_, 110 +PROXYFUNCENTER sfp_jlong_, 111 +PROXYFUNCENTER sfp_jlong_, 112 +PROXYFUNCENTER sfp_jlong_, 113 +PROXYFUNCENTER sfp_jlong_, 114 +PROXYFUNCENTER sfp_jlong_, 115 +PROXYFUNCENTER sfp_jlong_, 116 +PROXYFUNCENTER sfp_jlong_, 117 +PROXYFUNCENTER sfp_jlong_, 118 +PROXYFUNCENTER sfp_jlong_, 119 +PROXYFUNCENTER sfp_jlong_, 120 +PROXYFUNCENTER sfp_jlong_, 121 +PROXYFUNCENTER sfp_jlong_, 122 +PROXYFUNCENTER sfp_jlong_, 123 +PROXYFUNCENTER sfp_jlong_, 124 +PROXYFUNCENTER sfp_jlong_, 125 +PROXYFUNCENTER sfp_jlong_, 126 +PROXYFUNCENTER sfp_jlong_, 127 +PROXYFUNCENTER sfp_jlong_, 128 +PROXYFUNCENTER sfp_jlong_, 129 +PROXYFUNCENTER sfp_jlong_, 130 +PROXYFUNCENTER sfp_jlong_, 131 +PROXYFUNCENTER sfp_jlong_, 132 +PROXYFUNCENTER sfp_jlong_, 133 +PROXYFUNCENTER sfp_jlong_, 134 +PROXYFUNCENTER sfp_jlong_, 135 +PROXYFUNCENTER sfp_jlong_, 136 +PROXYFUNCENTER sfp_jlong_, 137 +PROXYFUNCENTER sfp_jlong_, 138 +PROXYFUNCENTER sfp_jlong_, 139 +PROXYFUNCENTER sfp_jlong_, 140 +PROXYFUNCENTER sfp_jlong_, 141 +PROXYFUNCENTER sfp_jlong_, 142 +PROXYFUNCENTER sfp_jlong_, 143 +PROXYFUNCENTER sfp_jlong_, 144 +PROXYFUNCENTER sfp_jlong_, 145 +PROXYFUNCENTER sfp_jlong_, 146 +PROXYFUNCENTER sfp_jlong_, 147 +PROXYFUNCENTER sfp_jlong_, 148 +PROXYFUNCENTER sfp_jlong_, 149 +PROXYFUNCENTER sfp_jlong_, 150 +PROXYFUNCENTER sfp_jlong_, 151 +PROXYFUNCENTER sfp_jlong_, 152 +PROXYFUNCENTER sfp_jlong_, 153 +PROXYFUNCENTER sfp_jlong_, 154 +PROXYFUNCENTER sfp_jlong_, 155 +PROXYFUNCENTER sfp_jlong_, 156 +PROXYFUNCENTER sfp_jlong_, 157 +PROXYFUNCENTER sfp_jlong_, 158 +PROXYFUNCENTER sfp_jlong_, 159 +PROXYFUNCENTER sfp_jlong_, 160 +PROXYFUNCENTER sfp_jlong_, 161 +PROXYFUNCENTER sfp_jlong_, 162 +PROXYFUNCENTER sfp_jlong_, 163 +PROXYFUNCENTER sfp_jlong_, 164 +PROXYFUNCENTER sfp_jlong_, 165 +PROXYFUNCENTER sfp_jlong_, 166 +PROXYFUNCENTER sfp_jlong_, 167 +PROXYFUNCENTER sfp_jlong_, 168 +PROXYFUNCENTER sfp_jlong_, 169 +PROXYFUNCENTER sfp_jlong_, 170 +PROXYFUNCENTER sfp_jlong_, 171 +PROXYFUNCENTER sfp_jlong_, 172 +PROXYFUNCENTER sfp_jlong_, 173 +PROXYFUNCENTER sfp_jlong_, 174 +PROXYFUNCENTER sfp_jlong_, 175 +PROXYFUNCENTER sfp_jlong_, 176 +PROXYFUNCENTER sfp_jlong_, 177 +PROXYFUNCENTER sfp_jlong_, 178 +PROXYFUNCENTER sfp_jlong_, 179 +PROXYFUNCENTER sfp_jlong_, 180 +PROXYFUNCENTER sfp_jlong_, 181 +PROXYFUNCENTER sfp_jlong_, 182 +PROXYFUNCENTER sfp_jlong_, 183 +PROXYFUNCENTER sfp_jlong_, 184 +PROXYFUNCENTER sfp_jlong_, 185 +PROXYFUNCENTER sfp_jlong_, 186 +PROXYFUNCENTER sfp_jlong_, 187 +PROXYFUNCENTER sfp_jlong_, 188 +PROXYFUNCENTER sfp_jlong_, 189 +PROXYFUNCENTER sfp_jlong_, 190 +PROXYFUNCENTER sfp_jlong_, 191 +PROXYFUNCENTER sfp_jlong_, 192 +PROXYFUNCENTER sfp_jlong_, 193 +PROXYFUNCENTER sfp_jlong_, 194 +PROXYFUNCENTER sfp_jlong_, 195 +PROXYFUNCENTER sfp_jlong_, 196 +PROXYFUNCENTER sfp_jlong_, 197 +PROXYFUNCENTER sfp_jlong_, 198 +PROXYFUNCENTER sfp_jlong_, 199 +PROXYFUNCENTER sfp_jlong_, 200 +PROXYFUNCENTER sfp_jlong_, 201 +PROXYFUNCENTER sfp_jlong_, 202 +PROXYFUNCENTER sfp_jlong_, 203 +PROXYFUNCENTER sfp_jlong_, 204 +PROXYFUNCENTER 
sfp_jlong_, 205 +PROXYFUNCENTER sfp_jlong_, 206 +PROXYFUNCENTER sfp_jlong_, 207 +PROXYFUNCENTER sfp_jlong_, 208 +PROXYFUNCENTER sfp_jlong_, 209 +PROXYFUNCENTER sfp_jlong_, 210 +PROXYFUNCENTER sfp_jlong_, 211 +PROXYFUNCENTER sfp_jlong_, 212 +PROXYFUNCENTER sfp_jlong_, 213 +PROXYFUNCENTER sfp_jlong_, 214 +PROXYFUNCENTER sfp_jlong_, 215 +PROXYFUNCENTER sfp_jlong_, 216 +PROXYFUNCENTER sfp_jlong_, 217 +PROXYFUNCENTER sfp_jlong_, 218 +PROXYFUNCENTER sfp_jlong_, 219 +PROXYFUNCENTER sfp_jlong_, 220 +PROXYFUNCENTER sfp_jlong_, 221 +PROXYFUNCENTER sfp_jlong_, 222 +PROXYFUNCENTER sfp_jlong_, 223 +PROXYFUNCENTER sfp_jlong_, 224 +PROXYFUNCENTER sfp_jlong_, 225 +PROXYFUNCENTER sfp_jlong_, 226 +PROXYFUNCENTER sfp_jlong_, 227 +PROXYFUNCENTER sfp_jlong_, 228 +PROXYFUNCENTER sfp_jlong_, 229 +PROXYFUNCENTER sfp_jlong_, 230 +PROXYFUNCENTER sfp_jlong_, 231 +PROXYFUNCENTER sfp_jlong_, 232 +PROXYFUNCENTER sfp_jlong_, 233 +PROXYFUNCENTER sfp_jlong_, 234 +PROXYFUNCENTER sfp_jlong_, 235 +PROXYFUNCENTER sfp_jlong_, 236 +PROXYFUNCENTER sfp_jlong_, 237 +PROXYFUNCENTER sfp_jlong_, 238 +PROXYFUNCENTER sfp_jlong_, 239 +PROXYFUNCENTER sfp_jlong_, 240 +PROXYFUNCENTER sfp_jlong_, 241 +PROXYFUNCENTER sfp_jlong_, 242 +PROXYFUNCENTER sfp_jlong_, 243 +PROXYFUNCENTER sfp_jlong_, 244 +PROXYFUNCENTER sfp_jlong_, 245 +PROXYFUNCENTER sfp_jlong_, 246 +PROXYFUNCENTER sfp_jlong_, 247 +PROXYFUNCENTER sfp_jlong_, 248 +PROXYFUNCENTER sfp_jlong_, 249 +PROXYFUNCENTER sfp_jlong_, 250 +PROXYFUNCENTER sfp_jlong_, 251 +PROXYFUNCENTER sfp_jlong_, 252 +PROXYFUNCENTER sfp_jlong_, 253 +PROXYFUNCENTER sfp_jlong_, 254 +PROXYFUNCENTER sfp_jlong_, 255 +PROXYFUNCENTER sfp_jlong_, 256 +PROXYFUNCENTER sfp_jlong_, 257 +PROXYFUNCENTER sfp_jlong_, 258 +PROXYFUNCENTER sfp_jlong_, 259 +PROXYFUNCENTER sfp_jlong_, 260 +PROXYFUNCENTER sfp_jlong_, 261 +PROXYFUNCENTER sfp_jlong_, 262 +PROXYFUNCENTER sfp_jlong_, 263 +PROXYFUNCENTER sfp_jlong_, 264 +PROXYFUNCENTER sfp_jlong_, 265 +PROXYFUNCENTER sfp_jlong_, 266 +PROXYFUNCENTER sfp_jlong_, 267 +PROXYFUNCENTER sfp_jlong_, 268 +PROXYFUNCENTER sfp_jlong_, 269 +PROXYFUNCENTER sfp_jlong_, 270 +PROXYFUNCENTER sfp_jlong_, 271 +PROXYFUNCENTER sfp_jlong_, 272 +PROXYFUNCENTER sfp_jlong_, 273 +PROXYFUNCENTER sfp_jlong_, 274 +PROXYFUNCENTER sfp_jlong_, 275 +PROXYFUNCENTER sfp_jlong_, 276 +PROXYFUNCENTER sfp_jlong_, 277 +PROXYFUNCENTER sfp_jlong_, 278 +PROXYFUNCENTER sfp_jlong_, 279 +PROXYFUNCENTER sfp_jlong_, 280 +PROXYFUNCENTER sfp_jlong_, 281 +PROXYFUNCENTER sfp_jlong_, 282 +PROXYFUNCENTER sfp_jlong_, 283 +PROXYFUNCENTER sfp_jlong_, 284 +PROXYFUNCENTER sfp_jlong_, 285 +PROXYFUNCENTER sfp_jlong_, 286 +PROXYFUNCENTER sfp_jlong_, 287 +PROXYFUNCENTER sfp_jlong_, 288 +PROXYFUNCENTER sfp_jlong_, 289 +PROXYFUNCENTER sfp_jlong_, 290 +PROXYFUNCENTER sfp_jlong_, 291 +PROXYFUNCENTER sfp_jlong_, 292 +PROXYFUNCENTER sfp_jlong_, 293 +PROXYFUNCENTER sfp_jlong_, 294 +PROXYFUNCENTER sfp_jlong_, 295 +PROXYFUNCENTER sfp_jlong_, 296 +PROXYFUNCENTER sfp_jlong_, 297 +PROXYFUNCENTER sfp_jlong_, 298 +PROXYFUNCENTER sfp_jlong_, 299 +PROXYFUNCENTER sfp_jlong_, 300 +PROXYFUNCENTER sfp_jlong_, 301 +PROXYFUNCENTER sfp_jlong_, 302 +PROXYFUNCENTER sfp_jlong_, 303 +PROXYFUNCENTER sfp_jlong_, 304 +PROXYFUNCENTER sfp_jlong_, 305 +PROXYFUNCENTER sfp_jlong_, 306 +PROXYFUNCENTER sfp_jlong_, 307 +PROXYFUNCENTER sfp_jlong_, 308 +PROXYFUNCENTER sfp_jlong_, 309 +PROXYFUNCENTER sfp_jlong_, 310 +PROXYFUNCENTER sfp_jlong_, 311 +PROXYFUNCENTER sfp_jlong_, 312 +PROXYFUNCENTER sfp_jlong_, 313 +PROXYFUNCENTER sfp_jlong_, 314 +PROXYFUNCENTER sfp_jlong_, 315 +PROXYFUNCENTER 
sfp_jlong_, 316 +PROXYFUNCENTER sfp_jlong_, 317 +PROXYFUNCENTER sfp_jlong_, 318 +PROXYFUNCENTER sfp_jlong_, 319 +PROXYFUNCENTER sfp_jlong_, 320 +PROXYFUNCENTER sfp_jlong_, 321 +PROXYFUNCENTER sfp_jlong_, 322 +PROXYFUNCENTER sfp_jlong_, 323 +PROXYFUNCENTER sfp_jlong_, 324 +PROXYFUNCENTER sfp_jlong_, 325 +PROXYFUNCENTER sfp_jlong_, 326 +PROXYFUNCENTER sfp_jlong_, 327 +PROXYFUNCENTER sfp_jlong_, 328 +PROXYFUNCENTER sfp_jlong_, 329 +PROXYFUNCENTER sfp_jlong_, 330 +PROXYFUNCENTER sfp_jlong_, 331 +PROXYFUNCENTER sfp_jlong_, 332 +PROXYFUNCENTER sfp_jlong_, 333 +PROXYFUNCENTER sfp_jlong_, 334 +PROXYFUNCENTER sfp_jlong_, 335 +PROXYFUNCENTER sfp_jlong_, 336 +PROXYFUNCENTER sfp_jlong_, 337 +PROXYFUNCENTER sfp_jlong_, 338 +PROXYFUNCENTER sfp_jlong_, 339 +PROXYFUNCENTER sfp_jlong_, 340 +PROXYFUNCENTER sfp_jlong_, 341 +PROXYFUNCENTER sfp_jlong_, 342 +PROXYFUNCENTER sfp_jlong_, 343 +PROXYFUNCENTER sfp_jlong_, 344 +PROXYFUNCENTER sfp_jlong_, 345 +PROXYFUNCENTER sfp_jlong_, 346 +PROXYFUNCENTER sfp_jlong_, 347 +PROXYFUNCENTER sfp_jlong_, 348 +PROXYFUNCENTER sfp_jlong_, 349 +PROXYFUNCENTER sfp_jlong_, 350 +PROXYFUNCENTER sfp_jlong_, 351 +PROXYFUNCENTER sfp_jlong_, 352 +PROXYFUNCENTER sfp_jlong_, 353 +PROXYFUNCENTER sfp_jlong_, 354 +PROXYFUNCENTER sfp_jlong_, 355 +PROXYFUNCENTER sfp_jlong_, 356 +PROXYFUNCENTER sfp_jlong_, 357 +PROXYFUNCENTER sfp_jlong_, 358 +PROXYFUNCENTER sfp_jlong_, 359 +PROXYFUNCENTER sfp_jlong_, 360 +PROXYFUNCENTER sfp_jlong_, 361 +PROXYFUNCENTER sfp_jlong_, 362 +PROXYFUNCENTER sfp_jlong_, 363 +PROXYFUNCENTER sfp_jlong_, 364 +PROXYFUNCENTER sfp_jlong_, 365 +PROXYFUNCENTER sfp_jlong_, 366 +PROXYFUNCENTER sfp_jlong_, 367 +PROXYFUNCENTER sfp_jlong_, 368 +PROXYFUNCENTER sfp_jlong_, 369 +PROXYFUNCENTER sfp_jlong_, 370 +PROXYFUNCENTER sfp_jlong_, 371 +PROXYFUNCENTER sfp_jlong_, 372 +PROXYFUNCENTER sfp_jlong_, 373 +PROXYFUNCENTER sfp_jlong_, 374 +PROXYFUNCENTER sfp_jlong_, 375 +PROXYFUNCENTER sfp_jlong_, 376 +PROXYFUNCENTER sfp_jlong_, 377 +PROXYFUNCENTER sfp_jlong_, 378 +PROXYFUNCENTER sfp_jlong_, 379 +PROXYFUNCENTER sfp_jlong_, 380 +PROXYFUNCENTER sfp_jlong_, 381 +PROXYFUNCENTER sfp_jlong_, 382 +PROXYFUNCENTER sfp_jlong_, 383 +PROXYFUNCENTER sfp_jlong_, 384 +PROXYFUNCENTER sfp_jlong_, 385 +PROXYFUNCENTER sfp_jlong_, 386 +PROXYFUNCENTER sfp_jlong_, 387 +PROXYFUNCENTER sfp_jlong_, 388 +PROXYFUNCENTER sfp_jlong_, 389 +PROXYFUNCENTER sfp_jlong_, 390 +PROXYFUNCENTER sfp_jlong_, 391 +PROXYFUNCENTER sfp_jlong_, 392 +PROXYFUNCENTER sfp_jlong_, 393 +PROXYFUNCENTER sfp_jlong_, 394 +PROXYFUNCENTER sfp_jlong_, 395 +PROXYFUNCENTER sfp_jlong_, 396 +PROXYFUNCENTER sfp_jlong_, 397 +PROXYFUNCENTER sfp_jlong_, 398 +PROXYFUNCENTER sfp_jlong_, 399 +PROXYFUNCENTER sfp_jlong_, 400 +PROXYFUNCENTER sfp_jlong_, 401 +PROXYFUNCENTER sfp_jlong_, 402 +PROXYFUNCENTER sfp_jlong_, 403 +PROXYFUNCENTER sfp_jlong_, 404 +PROXYFUNCENTER sfp_jlong_, 405 +PROXYFUNCENTER sfp_jlong_, 406 +PROXYFUNCENTER sfp_jlong_, 407 +PROXYFUNCENTER sfp_jlong_, 408 +PROXYFUNCENTER sfp_jlong_, 409 +PROXYFUNCENTER sfp_jlong_, 410 +PROXYFUNCENTER sfp_jlong_, 411 +PROXYFUNCENTER sfp_jlong_, 412 +PROXYFUNCENTER sfp_jlong_, 413 +PROXYFUNCENTER sfp_jlong_, 414 +PROXYFUNCENTER sfp_jlong_, 415 +PROXYFUNCENTER sfp_jlong_, 416 +PROXYFUNCENTER sfp_jlong_, 417 +PROXYFUNCENTER sfp_jlong_, 418 +PROXYFUNCENTER sfp_jlong_, 419 +PROXYFUNCENTER sfp_jlong_, 420 +PROXYFUNCENTER sfp_jlong_, 421 +PROXYFUNCENTER sfp_jlong_, 422 +PROXYFUNCENTER sfp_jlong_, 423 +PROXYFUNCENTER sfp_jlong_, 424 +PROXYFUNCENTER sfp_jlong_, 425 +PROXYFUNCENTER sfp_jlong_, 426 +PROXYFUNCENTER 
sfp_jlong_, 427 +PROXYFUNCENTER sfp_jlong_, 428 +PROXYFUNCENTER sfp_jlong_, 429 +PROXYFUNCENTER sfp_jlong_, 430 +PROXYFUNCENTER sfp_jlong_, 431 +PROXYFUNCENTER sfp_jlong_, 432 +PROXYFUNCENTER sfp_jlong_, 433 +PROXYFUNCENTER sfp_jlong_, 434 +PROXYFUNCENTER sfp_jlong_, 435 +PROXYFUNCENTER sfp_jlong_, 436 +PROXYFUNCENTER sfp_jlong_, 437 +PROXYFUNCENTER sfp_jlong_, 438 +PROXYFUNCENTER sfp_jlong_, 439 +PROXYFUNCENTER sfp_jlong_, 440 +PROXYFUNCENTER sfp_jlong_, 441 +PROXYFUNCENTER sfp_jlong_, 442 +PROXYFUNCENTER sfp_jlong_, 443 +PROXYFUNCENTER sfp_jlong_, 444 +PROXYFUNCENTER sfp_jlong_, 445 +PROXYFUNCENTER sfp_jlong_, 446 +PROXYFUNCENTER sfp_jlong_, 447 +PROXYFUNCENTER sfp_jlong_, 448 +PROXYFUNCENTER sfp_jlong_, 449 +PROXYFUNCENTER sfp_jlong_, 450 +PROXYFUNCENTER sfp_jlong_, 451 +PROXYFUNCENTER sfp_jlong_, 452 +PROXYFUNCENTER sfp_jlong_, 453 +PROXYFUNCENTER sfp_jlong_, 454 +PROXYFUNCENTER sfp_jlong_, 455 +PROXYFUNCENTER sfp_jlong_, 456 +PROXYFUNCENTER sfp_jlong_, 457 +PROXYFUNCENTER sfp_jlong_, 458 +PROXYFUNCENTER sfp_jlong_, 459 +PROXYFUNCENTER sfp_jlong_, 460 +PROXYFUNCENTER sfp_jlong_, 461 +PROXYFUNCENTER sfp_jlong_, 462 +PROXYFUNCENTER sfp_jlong_, 463 +PROXYFUNCENTER sfp_jlong_, 464 +PROXYFUNCENTER sfp_jlong_, 465 +PROXYFUNCENTER sfp_jlong_, 466 +PROXYFUNCENTER sfp_jlong_, 467 +PROXYFUNCENTER sfp_jlong_, 468 +PROXYFUNCENTER sfp_jlong_, 469 +PROXYFUNCENTER sfp_jlong_, 470 +PROXYFUNCENTER sfp_jlong_, 471 +PROXYFUNCENTER sfp_jlong_, 472 +PROXYFUNCENTER sfp_jlong_, 473 +PROXYFUNCENTER sfp_jlong_, 474 +PROXYFUNCENTER sfp_jlong_, 475 +PROXYFUNCENTER sfp_jlong_, 476 +PROXYFUNCENTER sfp_jlong_, 477 +PROXYFUNCENTER sfp_jlong_, 478 +PROXYFUNCENTER sfp_jlong_, 479 +PROXYFUNCENTER sfp_jlong_, 480 +PROXYFUNCENTER sfp_jlong_, 481 +PROXYFUNCENTER sfp_jlong_, 482 +PROXYFUNCENTER sfp_jlong_, 483 +PROXYFUNCENTER sfp_jlong_, 484 +PROXYFUNCENTER sfp_jlong_, 485 +PROXYFUNCENTER sfp_jlong_, 486 +PROXYFUNCENTER sfp_jlong_, 487 +PROXYFUNCENTER sfp_jlong_, 488 +PROXYFUNCENTER sfp_jlong_, 489 +PROXYFUNCENTER sfp_jlong_, 490 +PROXYFUNCENTER sfp_jlong_, 491 +PROXYFUNCENTER sfp_jlong_, 492 +PROXYFUNCENTER sfp_jlong_, 493 +PROXYFUNCENTER sfp_jlong_, 494 +PROXYFUNCENTER sfp_jlong_, 495 +PROXYFUNCENTER sfp_jlong_, 496 +PROXYFUNCENTER sfp_jlong_, 497 +PROXYFUNCENTER sfp_jlong_, 498 +PROXYFUNCENTER sfp_jlong_, 499 +PROXYFUNCENTER sfp_jlong_, 500 +PROXYFUNCENTER sfp_jlong_, 501 +PROXYFUNCENTER sfp_jlong_, 502 +PROXYFUNCENTER sfp_jlong_, 503 +PROXYFUNCENTER sfp_jlong_, 504 +PROXYFUNCENTER sfp_jlong_, 505 +PROXYFUNCENTER sfp_jlong_, 506 +PROXYFUNCENTER sfp_jlong_, 507 +PROXYFUNCENTER sfp_jlong_, 508 +PROXYFUNCENTER sfp_jlong_, 509 +PROXYFUNCENTER sfp_jlong_, 510 +PROXYFUNCENTER sfp_jlong_, 511 +PROXYFUNCENTER sfp_jlong_, 512 +PROXYFUNCENTER sfp_jlong_, 513 +PROXYFUNCENTER sfp_jlong_, 514 +PROXYFUNCENTER sfp_jlong_, 515 +PROXYFUNCENTER sfp_jlong_, 516 +PROXYFUNCENTER sfp_jlong_, 517 +PROXYFUNCENTER sfp_jlong_, 518 +PROXYFUNCENTER sfp_jlong_, 519 +PROXYFUNCENTER sfp_jlong_, 520 +PROXYFUNCENTER sfp_jlong_, 521 +PROXYFUNCENTER sfp_jlong_, 522 +PROXYFUNCENTER sfp_jlong_, 523 +PROXYFUNCENTER sfp_jlong_, 524 +PROXYFUNCENTER sfp_jlong_, 525 +PROXYFUNCENTER sfp_jlong_, 526 +PROXYFUNCENTER sfp_jlong_, 527 +PROXYFUNCENTER sfp_jlong_, 528 +PROXYFUNCENTER sfp_jlong_, 529 +PROXYFUNCENTER sfp_jlong_, 530 +PROXYFUNCENTER sfp_jlong_, 531 +PROXYFUNCENTER sfp_jlong_, 532 +PROXYFUNCENTER sfp_jlong_, 533 +PROXYFUNCENTER sfp_jlong_, 534 +PROXYFUNCENTER sfp_jlong_, 535 +PROXYFUNCENTER sfp_jlong_, 536 +PROXYFUNCENTER sfp_jlong_, 537 +PROXYFUNCENTER 
sfp_jlong_, 538 +PROXYFUNCENTER sfp_jlong_, 539 +PROXYFUNCENTER sfp_jlong_, 540 +PROXYFUNCENTER sfp_jlong_, 541 +PROXYFUNCENTER sfp_jlong_, 542 +PROXYFUNCENTER sfp_jlong_, 543 +PROXYFUNCENTER sfp_jlong_, 544 +PROXYFUNCENTER sfp_jlong_, 545 +PROXYFUNCENTER sfp_jlong_, 546 +PROXYFUNCENTER sfp_jlong_, 547 +PROXYFUNCENTER sfp_jlong_, 548 +PROXYFUNCENTER sfp_jlong_, 549 +PROXYFUNCENTER sfp_jlong_, 550 +PROXYFUNCENTER sfp_jlong_, 551 +PROXYFUNCENTER sfp_jlong_, 552 +PROXYFUNCENTER sfp_jlong_, 553 +PROXYFUNCENTER sfp_jlong_, 554 +PROXYFUNCENTER sfp_jlong_, 555 +PROXYFUNCENTER sfp_jlong_, 556 +PROXYFUNCENTER sfp_jlong_, 557 +PROXYFUNCENTER sfp_jlong_, 558 +PROXYFUNCENTER sfp_jlong_, 559 +PROXYFUNCENTER sfp_jlong_, 560 +PROXYFUNCENTER sfp_jlong_, 561 +PROXYFUNCENTER sfp_jlong_, 562 +PROXYFUNCENTER sfp_jlong_, 563 +PROXYFUNCENTER sfp_jlong_, 564 +PROXYFUNCENTER sfp_jlong_, 565 +PROXYFUNCENTER sfp_jlong_, 566 +PROXYFUNCENTER sfp_jlong_, 567 +PROXYFUNCENTER sfp_jlong_, 568 +PROXYFUNCENTER sfp_jlong_, 569 +PROXYFUNCENTER sfp_jlong_, 570 +PROXYFUNCENTER sfp_jlong_, 571 +PROXYFUNCENTER sfp_jlong_, 572 +PROXYFUNCENTER sfp_jlong_, 573 +PROXYFUNCENTER sfp_jlong_, 574 +PROXYFUNCENTER sfp_jlong_, 575 +PROXYFUNCENTER sfp_jlong_, 576 +PROXYFUNCENTER sfp_jlong_, 577 +PROXYFUNCENTER sfp_jlong_, 578 +PROXYFUNCENTER sfp_jlong_, 579 +PROXYFUNCENTER sfp_jlong_, 580 +PROXYFUNCENTER sfp_jlong_, 581 +PROXYFUNCENTER sfp_jlong_, 582 +PROXYFUNCENTER sfp_jlong_, 583 +PROXYFUNCENTER sfp_jlong_, 584 +PROXYFUNCENTER sfp_jlong_, 585 +PROXYFUNCENTER sfp_jlong_, 586 +PROXYFUNCENTER sfp_jlong_, 587 +PROXYFUNCENTER sfp_jlong_, 588 +PROXYFUNCENTER sfp_jlong_, 589 +PROXYFUNCENTER sfp_jlong_, 590 +PROXYFUNCENTER sfp_jlong_, 591 +PROXYFUNCENTER sfp_jlong_, 592 +PROXYFUNCENTER sfp_jlong_, 593 +PROXYFUNCENTER sfp_jlong_, 594 +PROXYFUNCENTER sfp_jlong_, 595 +PROXYFUNCENTER sfp_jlong_, 596 +PROXYFUNCENTER sfp_jlong_, 597 +PROXYFUNCENTER sfp_jlong_, 598 +PROXYFUNCENTER sfp_jlong_, 599 \ No newline at end of file diff --git a/src/mrt/compiler-rt/src/arch/arm64/r2c_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/r2c_stub_arm64.S new file mode 100644 index 0000000000..812195ebd1 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/r2c_stub_arm64.S @@ -0,0 +1,412 @@ +#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +#define cfi_restore(reg) .cfi_restore reg +#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg + +//////////////////////////////////////////////////////////////////////////////// +// there are 2 kinds of stubs to invoke a compiled java method: ForwardStub and BoxedStub. +// +// ForwardStub simply forwards arguments passed by runtime, i.e., arguments for compiled java method are passed +// according to C/C++ calling convention, which usually means efficiency. +// +// BoxedStub takes a general way: maple runtime first boxes all java arguments into an array composed of first integer +// arguments which are passed via general registers, then floating-point arguments which are passed via fp registers, +// and arguments which can not be passed by register (they will be passed on stack). +// BoxedStub unboxes all the arguments according to C/C++ calling convention. 
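+// Layout of the boxed argument buffer consumed by R2CBoxedStubXX below: bytes
+// [0, 64) carry the eight GPR arguments x0-x7, bytes [64, 128) the eight FPR
+// arguments d0-d7, and any on-stack arguments follow from offset 128.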
+//////////////////////////////////////////////////////////////////////////////// + +#define ForwardStubFrameSize (8 * 18) +#define ForwardStubCalleeSaveArea (8 * 8) + +// R2CForwardStubXX builds a stub frame to invoke the target java_method according to the previous frame which invokes +// R2CForwardStubXX(java_method, std::forward(args)...). +// R means runtime, while C means compiled java method. XX indicates the return type of this java method. + +// On execution of "bl R2CForwardStubXX", the frame layout of stack(growing downwards) looks like: +// x0: the entry point of Java method to be invoked +// x1~x7: hold the first 7 arguments arg0~arg6 if existed +// x30: return address of "bl R2CForwardStubXX" +// all on-stack arguments are addressable by SP as the frame layout shows. +// arg7 will be passed to x7 from caller's stack after R2CForwardStub is built. +// | ... | +// | x30 | lr for the caller of R2CForwardStubXX +// caller fp --> | x29 | +// | ... | +// | arg11 | +// | arg10 | +// | arg9 | +// | arg8 | +// caller sp --> | arg7 | + +// the frame layout of stack(growing downwards) after R2CForwardStub frame is built looks like: +// | ... | +// | x30 | lr for the caller of R2CForwardStubXX +// caller fp --> | x29 | +// | ... | +// | arg11 | +// | arg10 | +// | arg9 | +// | arg8 | +// caller sp --> | arg7 | +// callee saved | r28 | <== R2CForwardStub frame starts from here +// | r27 | +// | r26 | +// | r25 | +// | r24 | +// | r23 | +// | r22 | +// | r21 | +// | r20 | +// callee saved | r19 | +// unwind context | direct call | directly invoke callee java method +// | shadowframe | the information of caller frame which is interpreted +// | UC Status | unwind context status of caller frame +// | Context LR | LR of unwind context frame +// | Context FP | FP of unwind context frame +// unwind context | Context PC | PC of unwind context frame +// | x30 | +// stub fp --> | caller fp | +// | ... | +// | arg11 | +// | arg10 | +// | arg9 | +// stub sp --> | arg8 | <== R2CForwardStub frame ends at here + + .text + .align 2 + .global R2CForwardStubLong + .hidden R2CForwardStubLong + .global R2CForwardStubVoid + .hidden R2CForwardStubVoid + .global R2CForwardStubFloat + .hidden R2CForwardStubFloat + .global R2CForwardStubDouble + .hidden R2CForwardStubDouble + .type R2CForwardStubVoid , %function + .type R2CForwardStubLong , %function + .type R2CForwardStubFloat , %function + .type R2CForwardStubDouble, %function +R2CForwardStubLong: +R2CForwardStubVoid: +R2CForwardStubFloat: +R2CForwardStubDouble: + .cfi_startproc + .cfi_personality 155, DW.ref.__n2j_stub_personality + add x30, x30, #1 + stp x29, x30, [sp, #-ForwardStubFrameSize]! + cfi_adjust_cfa_offset (ForwardStubFrameSize) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + // save all used callee-saved registers. 
+ stp x19, x20, [sp, #ForwardStubCalleeSaveArea] + cfi_rel_offset (x19, ForwardStubCalleeSaveArea) + cfi_rel_offset (x20, ForwardStubCalleeSaveArea+8) + + stp x21, x22, [sp, #ForwardStubCalleeSaveArea+0x10] + cfi_rel_offset (x21, ForwardStubCalleeSaveArea+0x10) + cfi_rel_offset (x22, ForwardStubCalleeSaveArea+0x18) + + stp x23, x24, [sp, #ForwardStubCalleeSaveArea+0x20] + cfi_rel_offset (x23, ForwardStubCalleeSaveArea+0x20) + cfi_rel_offset (x24, ForwardStubCalleeSaveArea+0x28) + + stp x25, x26, [sp, #ForwardStubCalleeSaveArea+0x30] + cfi_rel_offset (x25, ForwardStubCalleeSaveArea+0x30) + cfi_rel_offset (x26, ForwardStubCalleeSaveArea+0x38) + + stp x27, x28, [sp, #ForwardStubCalleeSaveArea+0x40] + cfi_rel_offset (x27, ForwardStubCalleeSaveArea+0x40) + cfi_rel_offset (x28, ForwardStubCalleeSaveArea+0x48) + + // x19 <- previous fp + mov x19, x29 + + mov x20, x0 // the entry point of Java method + + mov x21, x1 + mov x22, x2 + mov x23, x3 + mov x24, x4 + mov x25, x5 + mov x26, x6 + mov x27, x7 + + // x28 <- previous sp + add x28, sp, #ForwardStubFrameSize + + mov x29, sp + cfi_def_cfa_register (x29) + + // frame info: tls -> stub + mov x0, x29 + bl MRT_SaveLastUnwindContext + + // java code is always reliable for stack unwinding + bl MRT_SetReliableUnwindContextStatus + + // prepare arguments for invoking target Java method + mov x0, x21 + mov x1, x22 + mov x2, x23 + mov x3, x24 + mov x4, x25 + mov x5, x26 + mov x6, x27 + + // passing on-stack arguments. start from arg8. + // put the 8th argument into register x7 from on-stack + ldr x7, [x28], #8 + add x19, x19, #8 + + // copy arg9, arg10, ... (if existed) +.L_copy: + cmp x19, x28 + ble .L_copy_end + ldp x25, x26, [x19, #-16]! + // SP is always 16 byte-aligned. + stp x25, x26, [sp, #-16]! + b .L_copy +.L_copy_end: + + // set x19 to the polling page address + adrp x19, REF_globalPollingPage + add x19, x19, :lo12:REF_globalPollingPage + ldr x19, [x19] + ldr x19, [x19] + + blr x20 + + /* keep potential return value */ + mov x28, x0 + fmov x27, d0 + + /* restore last_java_frame */ + mov x0, x29 + bl MRT_RestoreLastUnwindContext + + /* set potential return value */ + mov x0, x28 + fmov d0, x27 + + mov sp, x29 + cfi_def_cfa_register (sp) + + // restore all used callee-saved registers. + ldp x19, x20, [sp, #ForwardStubCalleeSaveArea] + cfi_restore (x19) + cfi_restore (x20) + ldp x21, x22, [sp, #ForwardStubCalleeSaveArea+0x10] + cfi_restore (x21) + cfi_restore (x22) + ldp x23, x24, [sp, #ForwardStubCalleeSaveArea+0x20] + cfi_restore (x23) + cfi_restore (x24) + ldp x25, x26, [sp, #ForwardStubCalleeSaveArea+0x30] + cfi_restore (x25) + cfi_restore (x26) + ldp x27, x28, [sp, #ForwardStubCalleeSaveArea+0x40] + cfi_restore (x27) + cfi_restore (x28) + + ldp x29, x30, [sp], #ForwardStubFrameSize + cfi_adjust_cfa_offset (-ForwardStubFrameSize) + cfi_restore (x29) + cfi_restore (x30) + sub x30, x30, #1 + ret + .cfi_endproc + .size R2CForwardStubLong, .-R2CForwardStubLong + +//////////////////////////////////////////////////////////////////////////////// + +#define BoxedStubFrameSize (8 * 16) +#define BoxedStubCalleeSaveArea (8 * 8) + +#define JVALUE_SIZE 8 + +// extern"C" void R2CBoxedStubXXX(void *func_ptr, x0 +// jvalue *argJvalue, x1 +// uint_32 stackSize, x2 +// uint_32 dregSize x3) + +// On execution of "bl R2CBoxedStub", the top frame of stack(growing downwards) looks like: +// x0: the entry point of Java method to be invoked +// x1: argJvalue +// x2: stackSize +// x3: dregSize +// | ... 
| +// | x30 | +// sp, fp --> | x29 | fp for the caller of R2CBoxedStub + +// this routine builds a stub frame to invoke java method +// x20: the entry point of Java method to be invoked +// x0~x7: hold the first 8 integer arguments if existed +// d0~d7: hold the first 8 float arguments if existed +// | ... | +// | x30 | +// caller fp --> | x29 | +// callee saved | --- | +// | --- | +// | r24 | +// | r23 | +// | r22 | +// | r21 | +// | r20 | +// callee saved | r19 | +// unwind context | direct call | directly invoke callee java method +// | shadowframe | the information of caller frame which is interpreted +// | UCStatus | unwind context status of caller frame +// | Caller LR | LR of caller frame +// | Caller FP | FP of caller frame +// unwind context | Caller PC | PC of caller frame +// | x30 | +// stub sp,fp --> | caller fp | + + .text + .align 2 + .global R2CBoxedStubLong + .hidden R2CBoxedStubLong + .global R2CBoxedStubVoid + .hidden R2CBoxedStubVoid + .global R2CBoxedStubFloat + .hidden R2CBoxedStubFloat + .global R2CBoxedStubDouble + .hidden R2CBoxedStubDouble + .type R2CBoxedStubLong, %function + .type R2CBoxedStubVoid, %function + .type R2CBoxedStubFloat, %function + .type R2CBoxedStubDouble, %function +R2CBoxedStubLong: +R2CBoxedStubVoid: +R2CBoxedStubFloat: +R2CBoxedStubDouble: + .cfi_startproc + .cfi_personality 155, DW.ref.__n2j_stub_personality + add x30, x30, #1 + stp x29, x30, [sp, #-BoxedStubFrameSize]! + cfi_adjust_cfa_offset (BoxedStubFrameSize) + cfi_rel_offset (x29, 0) + cfi_rel_offset (x30, 8) + + // save all used callee-saved registers. + stp x19, x20, [sp, #BoxedStubCalleeSaveArea] + cfi_rel_offset (x19, BoxedStubCalleeSaveArea) + cfi_rel_offset (x20, BoxedStubCalleeSaveArea+8) + + stp x21, x22, [sp, #BoxedStubCalleeSaveArea+0x10] + cfi_rel_offset (x21, BoxedStubCalleeSaveArea+0x10) + cfi_rel_offset (x22, BoxedStubCalleeSaveArea+0x18) + + stp x23, x24, [sp, #BoxedStubCalleeSaveArea+0x20] + cfi_rel_offset (x23, BoxedStubCalleeSaveArea+0x20) + cfi_rel_offset (x24, BoxedStubCalleeSaveArea+0x28) + + mov x20, x0 // the entry point of Java method + + mov x21, x1 // jvalue start + mov x22, x2 // stack size + mov x23, x3 // dreg size + + mov x29, sp + cfi_def_cfa_register (x29) + + // frame info: tls -> stub + mov x0, x29 + bl MRT_SaveLastUnwindContext + + // java code is always reliable for stack unwinding + bl MRT_SetReliableUnwindContextStatus + + cbz x22, .LskipStack + // copy parameter to stack + // size align to 16 byte. + add x22, x22, #(16 - 1) + and x22, x22, #0xFFFFFFFFFFFFFFF0; + sub sp, sp, x22 + mov x0, sp + add x24, x21, #128 // 16 * 8, jvalus offset 16 + add x22, x24, x22 +.LCopy: + cmp x24, x22 + bge .LCopyEnd + ldp x1, x2, [x24], #16 + stp x1, x2, [x0], #16 + b .LCopy +.LCopyEnd: +.LskipStack: + ldp x0, x1, [x21, #0] + ldp x2, x3, [x21, #16] + ldp x4, x5, [x21, #32] + ldp x6, x7, [x21, #48] + + cbz x23, .LcallFunction + ldp d0, d1, [x21, #64 + 0] + ldp d2, d3, [x21, #64 + 16] + ldp d4, d5, [x21, #64 + 32] + ldp d6, d7, [x21, #64 + 48] + +.LcallFunction: + // set x19 to the polling page address + adrp x19, REF_globalPollingPage + add x19, x19, :lo12:REF_globalPollingPage + ldr x19, [x19] + ldr x19, [x19] + + blr x20 + + /* keep potential return value */ + mov x20, x0 + fmov x19, d0 + + /* restore last_java_frame */ + mov x0, x29 + bl MRT_RestoreLastUnwindContext + + /* set potential return value */ + mov x0, x20 + fmov d0, x19 + + mov sp, x29 + cfi_def_cfa_register (sp) + + // restore all used callee-saved registers. 
+ ldp x19, x20, [sp, #BoxedStubCalleeSaveArea] + cfi_restore (x19) + cfi_restore (x20) + ldp x21, x22, [sp, #BoxedStubCalleeSaveArea+0x10] + cfi_restore (x21) + cfi_restore (x22) + ldp x23, x24, [sp, #BoxedStubCalleeSaveArea+0x20] + cfi_restore (x23) + cfi_restore (x24) + + ldp x29, x30, [sp], #BoxedStubFrameSize + cfi_adjust_cfa_offset (-BoxedStubFrameSize) + cfi_restore (x29) + cfi_restore (x30) + sub x30, x30, #1 + ret + .cfi_endproc + .size R2CBoxedStubLong, .-R2CBoxedStubLong + +//////////////////////////////////////////////////////////////////////////////// + + .hidden DW.ref.__n2j_stub_personality + .weak DW.ref.__n2j_stub_personality + .section .data.DW.ref.__n2j_stub_personality,"awG",%progbits,DW.ref.__n2j_stub_personality,comdat + .align 3 + .type DW.ref.__n2j_stub_personality, %object + .size DW.ref.__n2j_stub_personality,8 +DW.ref.__n2j_stub_personality: + .xword __n2j_stub_personality + +/////////////////////////////////////////////////////////////////////////////// + // Add this section for lld relocation limitation +.section .data.ref.lld_relocation_limitation,"aw",%progbits + .type REF_globalPollingPage,@object + .align 3 + .local REF_globalPollingPage +REF_globalPollingPage: + .xword globalPollingPage + .size REF_globalPollingPage,.-REF_globalPollingPage diff --git a/src/mrt/compiler-rt/src/arch/arm64/signal_handler_arm64.cpp b/src/mrt/compiler-rt/src/arch/arm64/signal_handler_arm64.cpp new file mode 100644 index 0000000000..f752e8c7b3 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/signal_handler_arm64.cpp @@ -0,0 +1,597 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libs.h" +#include "chelper.h" +#include "sizes.h" // runtime/include +#include "exception/mpl_exception.h" +#include "yieldpoint.h" +#include "exception_store.h" +#include "linker_api.h" +#include "thread_helper.h" +#include "mrt_common.h" +#include "exception/stack_unwinder.h" +#include "utils/time_utils.h" + +namespace maplert { +// Access a label defined in assembly from within C +// 1. Export the label using .globl in the assembly +// 1.1 For _ZTI37Ljava_2Flang_2FArithmeticException_3B and +// _ZTI38Ljava_2Flang_2FNullPointerException_3B, +// the labels are defined in the same compilation unit +// (see the below comment about a couple of dummy functions) +// we don't need the step. +// 2. Declare that the labels are external ones. +// 3. Don't for get to take the ADDRESS of the symbol. +using namespace maple; + +extern "C" ClassInitState MRT_TryInitClassOnDemand(const MClass &classinfo); +extern "C" bool MRT_ClassInitialized(jclass classinfo); + +// Exception handling through signals +// +// Throwing an exception from a signal handler will not automatically notify the +// kernel that we have finished signal handling. Therefore, the kernel still +// thinks we are handlign exceptions. 
The kernel keeps the same signal blocked +// when we are handling it, and it will prevent a second SIGSEGV to be thrown +// when a hardware trap is triggered by a subsequent NULL pointer access. +// +// This function is a workaround. It unblocks the SIGSEGV before throwing a +// Java exception. This allows repeated NULL pointer accesses to throw NPE, but +// the correct implementation needs to actually look at the PC to find out if +// the SIGSEGV is actually caused by a Java instruction. We also need to +// thoroughly investigate the side effects of rt_sigreturn (the function which +// is called after a signal handler returns) in addition to unblocking the +// signal and restoring the mcontext. +// +// PrepareToHandleJavaSignal implicitly does the job by RealHandler1 when signal +// stub frame is retreated. +using RealHandler2 = void (*)(void*, void*); +struct SignalHandler { + // pc is utilized to restore control flow after signal is handled + void *mPC; // the program point where this signal is raised + void *mReturnPC; // the return pc after this signal is handled + RealHandler1 mHandler; // the real signal handler + void *mArg1; // the first argument of mHandler + RealHandler1 mResetHandler; // the page reset handler called after mHandler done + void *mArgr1; // the first argument of mResetHandler +}; + +// this is unreliable because sh is deleted after real signal handler is excecuted. +// we can not use this in HandleJavaSignalStub after calling InvokeJavaSignalHandler +// since sh is already deleted. +extern "C" void *GetRaisedSignalPC() { + SignalHandler *sh = reinterpret_cast(maple::ExceptionVisitor::GetSignalHandler()); + if (sh == nullptr) { + LOG(FATAL) << "signale handler should not be nullptr" << maple::endl; + return nullptr; + } + return sh->mPC; +} + +extern "C" void MRT_InvokeResetHandler() { + SignalHandler *sh = reinterpret_cast(maple::ExceptionVisitor::GetSignalHandler()); + if (sh == nullptr) { + return; + } + maple::ExceptionVisitor::SetSignalHandler(nullptr); + RealHandler1 handler = sh->mResetHandler; + if (handler != nullptr) { + handler(sh->mArgr1); + } + delete sh; + sh = nullptr; +} + +extern "C" void *InvokeJavaSignalHandler() { + auto sh = reinterpret_cast(maple::ExceptionVisitor::GetSignalHandler()); + if (sh == nullptr) { + LOG(FATAL) << "signale handler should not be nullptr" << maple::endl; + return nullptr; + } + if (sh->mResetHandler == nullptr) { + maple::ExceptionVisitor::SetSignalHandler(nullptr); + } + RealHandler1 handler = sh->mHandler; + void *arg1 = sh->mArg1; + void *returnPC = sh->mReturnPC; + if (sh->mResetHandler == nullptr) { + delete sh; + sh = nullptr; + } + handler(arg1); + return returnPC; +} + +extern "C" void HandleJavaSignalStub(); + +// this is a stub only for handling signals raised in java methods +extern "C" bool MRT_PrepareToHandleJavaSignal(ucontext_t *ucontext, + RealHandler1 handler, void *arg1, + RealHandler1 rhandler, void *r1, + int32_t returnPCOffset) { + // For parameter returnPCOffset, its default valus is 1, that is pc + 4 , mean to issue the next instructions after + // signale handling. but signals like lazybinding or decouple would like to redo the instruction bundle + // after handling the signal. We can not set register x17 to the alctually return pc but pc + 4 because + // the value in x17 will set to the eh frame which is used to determin if the EH is inside a try block. So + // here we set the x17 as (pc + 4) and sh->mReturnPC as pc + 4 * returnPCOffset. 
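  // Worked example of the offset handling described above (illustrative comment, not original
  // code): with kAarch64InsnSize == 4,
  //   returnPCOffset ==  1  =>  sh->mReturnPC = pc + 4  (resume at the next instruction);
  //   returnPCOffset == -2  =>  sh->mReturnPC = pc - 8  (re-execute a two-instruction bundle,
  //                             as the lazy-binding handler below requests);
  // x17, in contrast, is always set to pc + 4 so that EH-table lookup and stack unwinding see
  // a conventional return address.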
+ mcontext_t *mcontext = &(ucontext->uc_mcontext); + + auto sh = new (std::nothrow) SignalHandler; + if (sh == nullptr) { + LOG(FATAL) << "new SignalHandler failed" << maple::endl; + } + sh->mHandler = handler; + sh->mArg1 = arg1; + sh->mPC = reinterpret_cast(mcontext->pc); + sh->mReturnPC = reinterpret_cast(mcontext->pc + kAarch64InsnSize * returnPCOffset); + sh->mResetHandler = rhandler; + sh->mArgr1 = r1; + ExceptionVisitor::SetSignalHandler(sh); + + { + // signal handler frame is specifically structured, and can not be uniwinded according to ABI, which is more or less + // like the frames constructed by third-party native code, thus we have to update this java context as *risky*. + MRT_UpdateLastUnwindContext(reinterpret_cast(mcontext->pc + kAarch64InsnSize), + reinterpret_cast(mcontext->regs[kFP]), UnwindContextIsRisky); + // make lr valid for java method without an frame. + if (JavaFrame::JavaMethodHasNoFrame(reinterpret_cast(mcontext->pc))) { + MRT_UpdateLastUnwindFrameLR(reinterpret_cast(mcontext->regs[kLR])); + } + } + + // backup the pc in the abnormal frame to x17. + // set x17 to "pc + 4", and mimic as if this is a bl instruction. + // Note _Unwind_GetIP relies on this value to get + // the context where an exception is raised. __mpl_personality_v0 should cooperate + // with this backup. otherwise, search exception table will fail. + // + // Furthermore, _Unwind APIs assume the value of pc points to the next instruction + // of the instruction which raises a signal. Otherwise stack unwinding fails when + // the first instruction of a method in an abnormal frame raises a signal. + // refer to uw_update_context in libgcc. + mcontext->regs[kX17] = mcontext->pc + kAarch64InsnSize; + + // mimic the return address for HandleJavaSignalStub, this is necessary for gdb + // as well as _Unwind_xx APIs, since control flow *jumps* to HandleJavaSignalStub, + // and does not save exceptioning instruction pointer automatically. + // We need a precise site where raises an exception for _Unwind_xx APIs to work. + // backup x30 is an alternative option. + mcontext->pc = reinterpret_cast(&HandleJavaSignalStub); // ready to perform signal handling + return true; +} + +static int UnblockSignal(int sig) { + sigset_t sigSet; + if (UNLIKELY(sigemptyset(&sigSet) == -1)) { + LOG(ERROR) << "sigemptyset() in UnblockSignal() return -1 for failed." << maple::endl; + } + if (UNLIKELY(sigaddset(&sigSet, sig) == -1)) { + LOG(ERROR) << "sigaddset() in UnblockSignal() return -1 for failed." 
<< maple::endl; + } + sigset_t oldSet; + int err = sigprocmask(SIG_UNBLOCK, &sigSet, &oldSet); + if (err != 0) { + PLOG(ERROR) << "UnblockSignal failed" << maple::endl; + } + return err; +} + +static void *AnalyzeLazyloadInstructionSequence(uint32_t *pc4, ucontext_t *ucontext) { + uint32_t *pc0 = pc4 - 1; + // 0: ldr wm [xn] // xm for 64bit + // 4: ldr wm [wm] +#ifdef USE_32BIT_REF + constexpr static uint32_t prefixCode0 = 0xb9400; +#else + constexpr static uint32_t prefixCode0 = 0xf9400; +#endif + constexpr static uint32_t prefixCode4 = 0xb9400; + constexpr static uint32_t regMask = 0b11111; + constexpr static uint32_t kMplOffsetFixFlag = 0x80000000; + constexpr static uint16_t regCodeOffset = 12; + constexpr static uint16_t regNumOffset = 5; + + uint32_t code0 = (*pc0 >> regCodeOffset); + uint32_t code4 = (*pc4 >> regCodeOffset); + uint32_t distRegNum0 = *pc0 & regMask; + uint32_t srcRegNum0 = (*pc0 >> regNumOffset) & regMask; + uint32_t srcRegNum4 = (*pc4 >> regNumOffset) & regMask; + + if (code0 == prefixCode0 && code4 == prefixCode4 && distRegNum0 != srcRegNum0 && + distRegNum0 == srcRegNum4 && ucontext != nullptr) { + // Further check the LazyLoad Sentry Number + LinkerOffsetValItemLazyLoad *offValTabAddr = + reinterpret_cast(ucontext->uc_mcontext.regs[srcRegNum0]); + uint32_t offsetValue = offValTabAddr->offset; + if (offsetValue & kMplOffsetFixFlag) { + uint32_t offsetIndex = (offsetValue & (~kMplOffsetFixFlag)); + if (*(reinterpret_cast(offValTabAddr - offsetIndex) - 1) != kMplLazyLoadSentryNumber) { + return nullptr; + } + } + return offValTabAddr; + } + return nullptr; +} + +const uint64_t kAdrpBinaryCode = 0b10010000; +const uint64_t kLdrBinaryCode = 0b1011100101; +const uint64_t kAdrBinaryCode = 0b00010000; +const uint64_t kAddBinaryCode = 0b00010001; +const uint64_t kShortCmdMask = 0b10011111; +const uint64_t kLongCmdMask = 0b1011111111; +const uint64_t kLdrXtMask = 0b11111; +const uint64_t kXzrReg = 31; +const uint32_t kLongCmdMaskOffset = 22; +const uint32_t kShortCmdMaskOffset = 24; + +uint64_t *PICAdrpCodeCheck(const uint32_t *pc) { + // case 1.1: PIC code + // 0: 0x90012c20 adrp x0, 0x555a844000 # (code >> 24) & 0b10011111 == 0b10010000 + // 4: 0xf9400000 ldr x0, [x0,#1024] # (code >> 22) & 0b1011111111 == 0b1011100101 + // 8: 0xf9400000 ldr x0, [x0,#112] # (code >> 22) & 0b1011111111 == 0b1011100101 + // 12: 0xf9400000 ldr x0, [x0] # (code >> 22) & 0b1011111111 == 0b1011100101 + if ((((*(pc + 0) >> kShortCmdMaskOffset) & kShortCmdMask) == kAdrpBinaryCode) && + (((*(pc + 1) >> kLongCmdMaskOffset) & kLongCmdMask) == kLdrBinaryCode)) { + { + // decode adrp (refer to ARM Architecture Reference Manual ARMv8 C6.2.10) + uintptr_t adrpCode = *pc; + // 19 + 2 + 12 bits + uintptr_t adrpOffset = ((((adrpCode >> 5) & 0x7FFFF) << 2) | ((adrpCode >> 29) & 0b11)) << 12; + // 19 + 2 + 12 bits sign-extend to 64 bits + adrpOffset = (adrpOffset << (64 - 33)) >> (64 - 33); + // relative to pc page address. + uintptr_t targetPage = ((reinterpret_cast(pc) >> 12) << 12) + adrpOffset; + + // decode add (refer to ARM Architecture Reference Manual ARMv8 C6.2.4) + uint32_t ldrCode = *(pc + 1); + uintptr_t ldrOffset = (ldrCode >> 10) & 0xFFF; // 12 bits + uint32_t ldrScale = (ldrCode >> 30) & 0b11; // 2 bits + return reinterpret_cast(targetPage + (ldrOffset << ldrScale)); + } + } + return nullptr; +} + +uint64_t *PICAdrCodeCheck(const uint32_t *pc, uint32_t secondLdrOffset) { + // case 1.2: PIC code. 
CG does not generate these code, but assembler will replace adrp for adr + // 0: 0x90012c20 adr x0, 0xffff7030d000 # (code >> 24) & 0b10011111 == 0b10010000 + // 4: 0xf9400000 ldr x0, [x0,#1024] # (code >> 22) & 0b1011111111 == 0b1011100101 + // 8: 0xf9400000 ldr x0, [x0,#112] # (code >> 22) & 0b1011111111 == 0b1011100101 + // 12: 0xf9400000 ldr x0, [x0] # (code >> 22) & 0b1011111111 == 0b1011100101 + if ((((*(pc + 0) >> kShortCmdMaskOffset) & kShortCmdMask) == kAdrBinaryCode) && + (((*(pc + 1) >> kLongCmdMaskOffset) & kLongCmdMask) == kLdrBinaryCode) && + (((*(pc + secondLdrOffset) >> kLongCmdMaskOffset) & kLongCmdMask) == kLdrBinaryCode)) { + { + // decode adrp (refer to ARM Architecture Reference Manual ARMv8 C6.2.10) + uintptr_t adrCode = *pc; + // 19 + 2 bits + uintptr_t adrOffset = ((((adrCode >> 5) & 0x7FFFF) << 2) | ((adrCode >> 29) & 0b11)); + // 19 + 2 bits sign-extend to 64 bits + adrOffset = (adrOffset << (64 - 21)) >> (64 - 21); + // relative to pc address. + uintptr_t targetPage = reinterpret_cast(pc) + adrOffset; + + // decode add (refer to ARM Architecture Reference Manual ARMv8 C6.2.4) + uint32_t ldrCode = *(pc + 1); + uintptr_t ldrOffset = (ldrCode >> 10) & 0xFFF; // 12 bits + uint32_t ldrScale = (ldrCode >> 30) & 0b11; // 2 bits + return reinterpret_cast(targetPage + (ldrOffset << ldrScale)); + } + } + return nullptr; +} + +uint64_t *NonPICCodeCheck(const uint32_t *pc) { + // case 2: non-PIC code + // 0: 0x90012c20 adrp x0, 0x555a844000 # (code >> 24) & 0b10011111 == 0b10010000 + // 4: 0x911e2000 add x0, x0, #0x788 # (code >> 24) & 0b01111111 == 0b00010001 + // 8: 0xf9400000 ldr x0, [x0,#112] # (code >> 22) & 0b1011111111 == 0b1011100101 + // 12: 0xf9400000 ldr x0, [x0] # (code >> 22) & 0b1011111111 == 0b1011100101 + if ((((*(pc + 0) >> kShortCmdMaskOffset) & kShortCmdMask) == kAdrpBinaryCode) && + (((*(pc + 1) >> kShortCmdMaskOffset) & kShortCmdMask) == kAddBinaryCode)) { + { + // decode adrp (refer to ARM Architecture Reference Manual ARMv8 C6.2.10) + uintptr_t adrpCode = *pc; + + // 19 + 2 + 12 bits + uintptr_t adrpOffset = ((((adrpCode >> 5) & 0x7FFFF) << 2) | ((adrpCode >> 29) & 0b11)) << 12; + // 19 + 2 + 12 bits sign-extend to 64 bits + adrpOffset = (adrpOffset << (64 - 33)) >> (64 - 33); + // relative to pc page address. 
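      // Numeric illustration (comment only; the pc value is hypothetical): if pc were
      // 0x555a7f3008 and the decoded adrpOffset were 0x51000, then
      //   targetPage = 0x555a7f3000 + 0x51000 = 0x555a844000,
      // matching the address in the example above; the add immediate (#0x788) is then applied
      // to that page address by the code below.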
+ uintptr_t targetPage = ((reinterpret_cast(pc) >> 12) << 12) + adrpOffset; + + // decode add (refer to ARM Architecture Reference Manual ARMv8 C6.2.4) + uint32_t addCode = *(pc + 1); + uintptr_t addOffset = (addCode >> 10) & 0xFFF; // 12 bits + uint32_t addShift = (addCode >> 22) & 0b11; // 2 bits + __MRT_ASSERT(addShift < 0b10, "AnalyzeClinitInstructionSequence addShift error"); + if (addShift == 0b01) { + addOffset <<= 12; + } + return reinterpret_cast(targetPage + addOffset); + } + } + return nullptr; +} + +// analyze following instructions: +static uint64_t *AnalyzeClinitInstructionSequence(const uint32_t *pc) { + uint32_t secondLdrOffset = 2; + // check the case with a lazybind instruction + // -4: 0xd0000080 adrp x0, 14000 <_GLOBAL_OFFSET_TABLE_+0xf0> + // 0: 0xf9425000 ldr x0, [x0,#1184] + // 4: 0xf940001f ldr xzr, [x0] // for lazy binnding + // 8: 0xf9401811 ldr x17, [x0,#48] + // c: 0xf940023f ldr xzr, [x17] + if ((((*(pc + 0) >> kLongCmdMaskOffset) & kLongCmdMask) == kLdrBinaryCode) && + (((*(pc + 1) >> kLongCmdMaskOffset) & kLongCmdMask) == kLdrBinaryCode) && + ((*(pc + 1) & kLdrXtMask) == kXzrReg)) { + --pc; + ++secondLdrOffset; + } + + uint64_t *result = nullptr; + + result = PICAdrpCodeCheck(pc); + if (result != nullptr) { + return result; + } + + result = PICAdrCodeCheck(pc, secondLdrOffset); + if (result != nullptr) { + return result; + } + + result = NonPICCodeCheck(pc); + if (result != nullptr) { + return result; + } + + return nullptr; +} + +static int64_t sigvCausedClinit = 0; + +static inline int64_t IncSigvCausedClinitCount(int64_t count) { + return __atomic_add_fetch(&sigvCausedClinit, count, __ATOMIC_ACQ_REL); +} + +namespace { +constexpr uint16_t kAdrpPcOffsetForLazy = 2; +} + +static inline uint32_t GetLdrSrcRegNum(uint32_t pc) { + constexpr static uint16_t srcRegNumOffset = 5; + constexpr static uint32_t regMask = 0b11111; + return (pc >> srcRegNumOffset) & regMask; +} + +static bool AnalyzeLoadArrayCacheInstructionSequence(uint32_t *pc, ucontext_t &ucontext) { + // adrp x1, 1798000 + // ldr w1, [x1,#2400] + // ldr wzr, [x1] + constexpr uint16_t adrpPCOffsetForArrayCache = 2; + uint32_t *adrpPC = pc - adrpPCOffsetForArrayCache; + uint64_t *addr = AnalyzeClinitInstructionSequence(adrpPC); + if (addr == nullptr) { + return MRT_PrepareToHandleJavaSignal(&ucontext, &MRT_ThrowImplicitNullPointerExceptionUnw, pc); + } + + SignalInfo *pSignalInfo = new (std::nothrow) SignalInfo(pc, addr); + MplCheck(pSignalInfo != nullptr, "pSignalInfo is nullptr"); + constexpr int32_t arrayCacheReturnPCOffset = -2; + return MRT_PrepareToHandleJavaSignal(&ucontext, reinterpret_cast(MRT_RequestInitArrayCache), + pSignalInfo, nullptr, nullptr, arrayCacheReturnPCOffset); +} + +static bool HandleStaticDecoupleSignal(ucontext_t &ucontext) { + uint32_t *pc = reinterpret_cast(ucontext.uc_mcontext.pc); + constexpr int32_t kDecoupleStaticReturnPCOffset = -2; + + auto *item = reinterpret_cast( + AnalyzeClinitInstructionSequence(pc - kAdrpPcOffsetForLazy)); + if (item == nullptr) { + return MRT_PrepareToHandleJavaSignal(&ucontext, &MRT_ThrowImplicitNullPointerExceptionUnw, pc); + } + MplStaticAddrTabHead *head = reinterpret_cast(item - item->index) - 1; + if (head->magic == kMplStaticLazyLoadSentryNumber) { + int32_t returnPCOffset = kDecoupleStaticReturnPCOffset; + if (item->dcpAddr != 0) { // kMplStaticItemUnResolved + ucontext.uc_mcontext.regs[GetLdrSrcRegNum(*pc)] = item->dcpAddr; // this solves the static call in + returnPCOffset = 1; + } + + if (item->address == 
kMplLazyStaticDecoupleMagicNumber) { + return MRT_PrepareToHandleJavaSignal(&ucontext, + reinterpret_cast(MRT_FixStaticAddrTableLazily), + item, nullptr, nullptr, returnPCOffset); + } else { + return MRT_PrepareToHandleJavaSignal(&ucontext, + reinterpret_cast(MRT_FixStaticAddrTable), + item, nullptr, nullptr, returnPCOffset); + } + } else { + return MRT_PrepareToHandleJavaSignal(&ucontext, &MRT_ThrowImplicitNullPointerExceptionUnw, pc); + } +} + +static bool HandleSigsegvFromJava(siginfo_t *info, ucontext_t *ucontext) { + constexpr uint16_t adrpPcOffsetForClinit = 3; + constexpr int32_t kLazyBindingReturnPcOffset = -2; + constexpr int32_t kDecoupleReturnPcOffset = -1; + + // must unblock SIGSEGV before calling + int tmpResult = UnblockSignal(SIGSEGV); + if (UNLIKELY(tmpResult != 0)) { + LOG(ERROR) << "UnblockSignal in HandleSigsegvFromJava() return " << tmpResult << " rather than 0." << + maple::endl; + } + if (ucontext == nullptr) { + LOG(FATAL) << "ucontext is nullptr, lead to crash." << maple::endl; + } + + mcontext_t *mcontext = &(ucontext->uc_mcontext); + uint32_t *pc = reinterpret_cast(mcontext->pc); + + if (reinterpret_cast(info->si_addr) == static_cast(kSEGVAddrForClassUninitialized) || + reinterpret_cast(info->si_addr) == static_cast(kSEGVAddrForClassInitializing) || + info->si_addr == &classInitProtectRegion[static_cast(kClassUninitialized) - 1]) { + // case 1: segv triggered by class init + uint32_t *adrpPc = pc - adrpPcOffsetForClinit; + int64_t sigvCausedClinitCount = IncSigvCausedClinitCount(1); + VLOG(classinit) << "clinit segv " << sigvCausedClinit << std::endl; + VLOG(classinit) << "sigvCausedClinitCount" << sigvCausedClinitCount << std::endl; +#ifdef USE_32BIT_REF + void *classinfo = MRT_GetAddress32ByAddress(AnalyzeClinitInstructionSequence(adrpPc)); +#else + void *classinfo = MRT_GetAddressByAddress(AnalyzeClinitInstructionSequence(adrpPc)); +#endif // USE_32BIT_REF + if (classinfo == nullptr) { + maplert::MRT_Panic(); + } + return MRT_PrepareToHandleJavaSignal(ucontext, (RealHandler1)&MRT_TryInitClassOnDemand, classinfo); + } else if (reinterpret_cast(info->si_addr) == static_cast(kSEGVAddrForClassInitFailed)) { + // case 2: segv triggered by class init failure + uint32_t *adrpPc = pc - adrpPcOffsetForClinit; + int64_t sigvCausedClinitCount = IncSigvCausedClinitCount(1); + VLOG(classinit) << "clinit segv " << sigvCausedClinit << std::endl; + VLOG(classinit) << "sigvCausedClinitCount" << sigvCausedClinitCount << std::endl; +#ifdef USE_32BIT_REF + void *classinfo = MRT_GetAddress32ByAddress(AnalyzeClinitInstructionSequence(adrpPc)); +#else + void *classinfo = MRT_GetAddressByAddress(AnalyzeClinitInstructionSequence(adrpPc)); +#endif // USE_32BIT_REF + char msg[kBufferSize] = { 0 }; + if (sprintf_s(msg, sizeof(msg), "unexpected class initialization failure for classinfo encoded at %p", + adrpPc) < 0) { + LOG(ERROR) << "HandleSigsegvFromJava sprintf_s fail" << maple::endl; + } + MplCheck(classinfo != nullptr, msg); + // this will actually invoke MRT_ThrowNoClassDefFoundErrorClassUnw(classinfo) + return MRT_PrepareToHandleJavaSignal(ucontext, + &MRT_ThrowNoClassDefFoundErrorClassUnw, + reinterpret_cast(classinfo)); +#ifdef LINKER_LAZY_BINDING + } else if (info->si_addr >= __BindingProtectRegion__ && + info->si_addr < &__BindingProtectRegion__[kBindingStateMax]) { + // SEGV triggered by Linker binding. 
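    // (Descriptive note, not original code): addresses inside __BindingProtectRegion__ act as
    // guard values for not-yet-bound symbols, so the first dereference traps here; the handler
    // resolves the real target and, because kLazyBindingReturnPcOffset is -2, the faulting
    // two-instruction sequence is re-executed with the fixed-up address. The producer of these
    // guard values is outside this patch, so this summary is an inference.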
+ VLOG(lazybinding) << "HandleSigsegvFromJava(), info->si_addr=" << info->si_addr << + ", between {" << reinterpret_cast(__BindingProtectRegion__) << + ", " << reinterpret_cast(&__BindingProtectRegion__[kBindingStateMax]) << "}" << maple::endl; + uint32_t *adrpPc = pc - kAdrpPcOffsetForLazy; + void *addr = reinterpret_cast(AnalyzeClinitInstructionSequence(adrpPc)); + SignalInfo *pSignalInfo = new (std::nothrow) SignalInfo(pc, addr); + if (pSignalInfo == nullptr) { + LOG(FATAL) << "new SignalInfo failed" << maple::endl; + } + return MRT_PrepareToHandleJavaSignal(ucontext, reinterpret_cast(MRT_RequestLazyBindingForSignal), + pSignalInfo, nullptr, nullptr, kLazyBindingReturnPcOffset); +#endif // LINKER_LAZY_BINDING + } else if (reinterpret_cast(info->si_addr) == kMplLazyLoadMagicNumber) { + // segv triggerd by lazy load + // 0: ldr wm [xn] // xm for 64bit + // 4: ldr wm [wm] + void *offsetEntry = AnalyzeLazyloadInstructionSequence(pc, ucontext); + if (offsetEntry != nullptr) { + return MRT_PrepareToHandleJavaSignal(ucontext, reinterpret_cast(MRT_FixOffsetTableLazily), + offsetEntry, nullptr, nullptr, kDecoupleReturnPcOffset); + } else { + return MRT_PrepareToHandleJavaSignal(ucontext, &MRT_ThrowImplicitNullPointerExceptionUnw, pc); + } + } else if (reinterpret_cast(info->si_addr) == kMplLazyStaticDecoupleMagicNumber || + reinterpret_cast(info->si_addr) == kMplStaticDecoupleMagicNumber) { + return HandleStaticDecoupleSignal(*ucontext); + } else if (reinterpret_cast(info->si_addr) == kMplArrayClassCacheMagicNumber) { + return AnalyzeLoadArrayCacheInstructionSequence(pc, *ucontext); + } else if (reinterpret_cast(info->si_addr) < static_cast(maple::kPageSize)) { + // case 3: segv triggered by NPE + // this will actually invoke MRT_ThrowImplicitNullPointerExceptionUnw(pc) + // if not null may have compiler + if (info->si_addr != nullptr) { + LOG(ERROR) << " info->si_addr not null: " << std::hex << info->si_addr << " PC: " << + std::hex << mcontext->pc << maple::endl; + } + return MRT_PrepareToHandleJavaSignal(ucontext, &MRT_ThrowImplicitNullPointerExceptionUnw, pc); + } + return false; +} + +// Signal handler For Android, return value matters. +extern "C" bool MRT_FaultHandler(int sig, siginfo_t *info, ucontext_t *ucontext, bool isFromJava) { + auto origErrno = errno; + + if (ucontext == nullptr) { + LOG(FATAL) << "ucontext is nullptr, lead to crash." << maple::endl; + } + if (isFromJava) { + // signal handler is some kind of java-to-runtime stub frame, so we need to update the unwind context + mcontext_t *mcontext = &(ucontext->uc_mcontext); + MRT_UpdateLastUnwindContextIfReliable(reinterpret_cast(mcontext->pc + kAarch64InsnSize), + reinterpret_cast(mcontext->regs[kFP] /* FP */)); + } + + if (YieldpointSignalHandler(sig, info, ucontext)) { + errno = origErrno; + return true; + } else if (UNLIKELY(ThreadHelper::SuspendHandler())) { + timeutils::SleepForever(); + } else if (isFromJava) { + bool ret = false; + switch (sig) { + case SIGSEGV: + ret = HandleSigsegvFromJava(info, ucontext); + break; + default: { + LOG(ERROR) << "maple runtime internal error: unexpected signal from java frame." << maple::endl; + } + } + return ret; + } + + errno = origErrno; + return false; +} + +// siginfo_t* info for future extention +extern "C" bool MRT_FaultDebugHandler(int sig, siginfo_t *info __attribute__((unused)), void *context) { + if (context == nullptr) { + LOG(FATAL) << "context is nullptr, lead to crash." 
<< maple::endl; + } + ucontext_t *ucontext = reinterpret_cast(context); + mcontext_t *mcontext = &(ucontext->uc_mcontext); + switch (sig) { + case SIGTRAP: { + // x0 is addr of obj + JsanliteError(mcontext->regs[0]); + break; + } + default: { + break; + } + } + return false; +} +} diff --git a/src/mrt/compiler-rt/src/arch/arm64/signal_handler_stub_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/signal_handler_stub_arm64.S new file mode 100644 index 0000000000..15e21ab524 --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/signal_handler_stub_arm64.S @@ -0,0 +1,167 @@ +#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off +#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off +#define cfi_restore(reg) .cfi_restore reg +#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg + +#define SignalStubFrameSize (0x2c0) + +// This signal handler stub relies on MRT_PrepareToHandleJavaSignal to put continuation pc in x17. +// Continuation pc describes the control flow after signal handling completes. + + .text + .align 2 + .global HandleJavaSignalStub + .hidden HandleJavaSignalStub + .type HandleJavaSignalStub, %function +HandleJavaSignalStub: + .cfi_startproc + .cfi_return_column x17 + sub sp, sp, # SignalStubFrameSize + cfi_adjust_cfa_offset (SignalStubFrameSize) + stp x29, x17, [sp] + cfi_rel_offset (x29, 0) + cfi_rel_offset (x17, 8) + + mov x29, sp + cfi_def_cfa_register (x29) + + // save all non callee-saved registers which may be contaminated by calling + // the real signal handler. + stp x0, x1, [sp, #0x10] + cfi_rel_offset (x0, 0x10) + cfi_rel_offset (x1, 0x18) + + stp x2, x3, [sp, #0x20] + cfi_rel_offset (x2, 0x20) + cfi_rel_offset (x3, 0x28) + + stp x4, x5, [sp, #0x30] + cfi_rel_offset (x4, 0x30) + cfi_rel_offset (x5, 0x38) + + stp x6, x7, [sp, #0x40] + cfi_rel_offset (x6, 0x40) + cfi_rel_offset (x7, 0x48) + + stp x8, x9, [sp, #0x50] + cfi_rel_offset (x8, 0x50) + cfi_rel_offset (x9, 0x58) + + stp x10, x11, [sp, #0x60] + cfi_rel_offset (x10, 0x60) + cfi_rel_offset (x11, 0x68) + + stp x12, x13, [sp, #0x70] + cfi_rel_offset (x12, 0x70) + cfi_rel_offset (x13, 0x78) + + stp x14, x15, [sp, #0x80] + cfi_rel_offset (x14, 0x80) + cfi_rel_offset (x15, 0x88) + + stp x16, x30, [sp, #0x90] + cfi_rel_offset (x16, 0x90) + cfi_rel_offset (x30, 0x98) + + mrs x17, nzcv + stp x18, x17, [sp, #0xa0] + cfi_rel_offset (x18, 0xa0) + cfi_rel_offset (nzcv, 0xa8) + + stp q0, q1, [sp, #0xc0] + stp q2, q3, [sp, #0xe0] + stp q4, q5, [sp, #0x100] + stp q6, q7, [sp, #0x120] + stp q8, q9, [sp, #0x140] + stp q10, q11, [sp, #0x160] + stp q12, q13, [sp, #0x180] + stp q14, q15, [sp, #0x1a0] + stp q16, q17, [sp, #0x1c0] + stp q18, q19, [sp, #0x1e0] + stp q20, q21, [sp, #0x200] + stp q22, q23, [sp, #0x220] + stp q24, q25, [sp, #0x240] + stp q26, q27, [sp, #0x260] + stp q28, q29, [sp, #0x280] + stp q30, q31, [sp, #0x2a0] + + bl InvokeJavaSignalHandler + cbz x0, .L_nochangex17 + str x0, [sp, #0x8] +.L_nochangex17: + + bl MRT_SetReliableUnwindContextStatus + + bl IsThrowingExceptionByRet + cbz x0, .L_noexception + str x0, [sp, #0x8] +.L_noexception: + ldp q0, q1, [sp, #0xc0] + ldp q2, q3, [sp, #0xe0] + ldp q4, q5, [sp, #0x100] + ldp q6, q7, [sp, #0x120] + ldp q8, q9, [sp, #0x140] + ldp q10, q11, [sp, #0x160] + ldp q12, q13, [sp, #0x180] + ldp q14, q15, [sp, #0x1a0] + ldp q16, q17, [sp, #0x1c0] + ldp q18, q19, [sp, #0x1e0] + ldp q20, q21, [sp, #0x200] + ldp q22, q23, [sp, #0x220] + ldp q24, q25, [sp, #0x240] + ldp q26, q27, [sp, #0x260] + ldp q28, q29, [sp, #0x280] + ldp q30, q31, [sp, #0x2a0] + + ldp x0, x1, [sp, 
#0x10] + cfi_restore (x0) + cfi_restore (x1) + + ldp x2, x3, [sp, #0x20] + cfi_restore (x2) + cfi_restore (x3) + + ldp x4, x5, [sp, #0x30] + cfi_restore (x4) + cfi_restore (x5) + + ldp x6, x7, [sp, #0x40] + cfi_restore (x6) + cfi_restore (x7) + + ldp x8, x9, [sp, #0x50] + cfi_restore (x8) + cfi_restore (x9) + + ldp x10, x11, [sp, #0x60] + cfi_restore (x10) + cfi_restore (x11) + + ldp x12, x13, [sp, #0x70] + cfi_restore (x12) + cfi_restore (x13) + + ldp x14, x15, [sp, #0x80] + cfi_restore (x14) + cfi_restore (x15) + + ldp x16, x30, [sp, #0x90] + cfi_restore (x16) + cfi_restore (x30) + + ldp x18, x17, [sp, #0xa0] + cfi_restore (x18) + msr nzcv, x17 + cfi_restore(nzcv) + + ldp x29, x17, [sp] + cfi_restore (x29) + cfi_restore (x17) + + add sp, sp, # SignalStubFrameSize + cfi_adjust_cfa_offset (-SignalStubFrameSize) + + // we backup PC with x17 in PrepareToHandleJavaSignal already + ret x17 + .cfi_endproc + .size HandleJavaSignalStub, .-HandleJavaSignalStub diff --git a/src/mrt/compiler-rt/src/arch/arm64/yieldpoint_arm64.S b/src/mrt/compiler-rt/src/arch/arm64/yieldpoint_arm64.S new file mode 100644 index 0000000000..a6e06eabfe --- /dev/null +++ b/src/mrt/compiler-rt/src/arch/arm64/yieldpoint_arm64.S @@ -0,0 +1,146 @@ +//--------------------------------------------------------------- +// This file defines assembly functions to support yieldpoints. +//--------------------------------------------------------------- + .text + +//--------------------------------------------------------------- +// MRT_YieldpointStub() will called when yieldpoint is taken. +// +// void MRT_YieldpointStub() { +// save_live_registers(); +// lr = MRT_GetThreadYieldpointPC(); +// MRT_YieldpointHandler(last_fp); +// restore_live_registers(); +// return to lr; +// } +// +// fp ---> -------- 0 +// | q31 | +// | ... | +// | q0 | +// -------- +// | x29 | +// | x28 | +// | ... | +// | x0 | +// -------- +// | x30 | lr +// | x29 | fp +// sp ---> -------- -0x300 +// +//--------------------------------------------------------------- + .align 2 + .global MRT_YieldpointStub + .type MRT_YieldpointStub, %function +MRT_YieldpointStub: + .cfi_startproc + sub sp, sp, 0x300 + .cfi_def_cfa_offset 0x300 + + // save fp,lr on stack, follow the calling convention. + stp x29, x30, [sp] + .cfi_offset 29, -0x300 + .cfi_offset 30, -0x2f8 + + // save integer registers except LR(x30) + stp x0, x1, [sp, 0x10] + stp x2, x3, [sp, 0x20] + stp x4, x5, [sp, 0x30] + stp x6, x7, [sp, 0x40] + stp x8, x9, [sp, 0x50] + stp x10, x11, [sp, 0x60] + stp x12, x13, [sp, 0x70] + stp x14, x15, [sp, 0x80] + stp x16, x17, [sp, 0x90] + stp x18, x19, [sp, 0xa0] + stp x20, x21, [sp, 0xb0] + stp x22, x23, [sp, 0xc0] + stp x24, x25, [sp, 0xd0] + stp x26, x27, [sp, 0xe0] + stp x28, x29, [sp, 0xf0] + + // save scalar registers + stp q0, q1, [sp, 0x100] + stp q2, q3, [sp, 0x120] + stp q4, q5, [sp, 0x140] + stp q6, q7, [sp, 0x160] + stp q8, q9, [sp, 0x180] + stp q10, q11, [sp, 0x1a0] + stp q12, q13, [sp, 0x1c0] + stp q14, q15, [sp, 0x1e0] + stp q16, q17, [sp, 0x200] + stp q18, q19, [sp, 0x220] + stp q20, q21, [sp, 0x240] + stp q22, q23, [sp, 0x260] + stp q24, q25, [sp, 0x280] + stp q26, q27, [sp, 0x2a0] + stp q28, q29, [sp, 0x2c0] + stp q30, q31, [sp, 0x2e0] + + // set current fp + add x29, sp, 0 + .cfi_def_cfa_register 29 + + // save condition flags + mrs x28, NZCV + + // set LR as the PC of yieldpoint, + // and save it to the proper slot of stack. 
+ bl MRT_GetThreadYieldpointPC + mov x30, x0 + str x30, [sp, 0x08] + + // call yieldpoint handler with current fp + mov x0, x29 + bl MRT_YieldpointHandler + + // restore condition flags + msr NZCV, x28 + + // restore integer registers except lr + ldp x0, x1, [sp, 0x10] + ldp x2, x3, [sp, 0x20] + ldp x4, x5, [sp, 0x30] + ldp x6, x7, [sp, 0x40] + ldp x8, x9, [sp, 0x50] + ldp x10, x11, [sp, 0x60] + ldp x12, x13, [sp, 0x70] + ldp x14, x15, [sp, 0x80] + ldp x16, x17, [sp, 0x90] + ldp x18, x19, [sp, 0xa0] + ldp x20, x21, [sp, 0xb0] + ldp x22, x23, [sp, 0xc0] + ldp x24, x25, [sp, 0xd0] + ldp x26, x27, [sp, 0xe0] + ldp x28, x29, [sp, 0xf0] + + // restore scalar registers + ldp q0, q1, [sp, 0x100] + ldp q2, q3, [sp, 0x120] + ldp q4, q5, [sp, 0x140] + ldp q6, q7, [sp, 0x160] + ldp q8, q9, [sp, 0x180] + ldp q10, q11, [sp, 0x1a0] + ldp q12, q13, [sp, 0x1c0] + ldp q14, q15, [sp, 0x1e0] + ldp q16, q17, [sp, 0x200] + ldp q18, q19, [sp, 0x220] + ldp q20, q21, [sp, 0x240] + ldp q22, q23, [sp, 0x260] + ldp q24, q25, [sp, 0x280] + ldp q26, q27, [sp, 0x2a0] + ldp q28, q29, [sp, 0x2c0] + ldp q30, q31, [sp, 0x2e0] + + // restore fp, lr + ldp x29, x30, [sp] + + // restore stack pointer. + add sp, sp, 0x300 + + .cfi_restore 29 + .cfi_restore 30 + .cfi_def_cfa 31, 0 + ret + .cfi_endproc + .size MRT_YieldpointStub, .-MRT_YieldpointStub diff --git a/src/mrt/compiler-rt/src/chelper.cpp b/src/mrt/compiler-rt/src/chelper.cpp new file mode 100644 index 0000000000..6ff4d9adb6 --- /dev/null +++ b/src/mrt/compiler-rt/src/chelper.cpp @@ -0,0 +1,550 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "chelper.h" +#include +#include +#include "sizes.h" +#include "chosen.h" +#include "fast_alloc_inline.h" +#include "libs.h" +namespace maplert { +extern "C" void *MRT_CLASSINFO(Ljava_2Flang_2Fref_2FWeakReference_3B); + +extern "C" inline void MRT_SetJavaClass(address_t objAddr, address_t klass) { + // set class. + StoreRefField(objAddr, kMrtKlassOffset, klass); + // initialize gc header from prototype. + MClass *cls = MObject::Cast(klass); + if (auto gcInfo = reinterpret_cast(cls->GetGctib())) { + GCHeaderLVal(objAddr) |= gcInfo->headerProto; + } + +#if RC_TRACE_OBJECT + if (IsTraceObj(objAddr)) { + void *callerPc = __builtin_return_address(0); + LOG2FILE(kLogtypeRcTrace) << "Obj " << std::hex << reinterpret_cast(objAddr) << std::dec << + " RC= " << RefCount(objAddr) << " New at " << std::endl; + util::PrintPCSymbolToLog(callerPc); + } +#endif + // mark finalizable object. + // check class flag. 
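  // (Descriptive note, not original code): when the class flag carries the finalizer bit, the
  // new object is marked finalizable and registered with the allocator below, presumably so
  // the collector can enqueue it for finalization instead of reclaiming it directly.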
+ uint32_t classFlag = cls->GetFlag(); + if (modifier::hasFinalizer(classFlag)) { + SetObjFinalizable(objAddr); + (*theAllocator).OnFinalizableObjCreated(objAddr); + } + if (cls->IsLazyBinding()) { + (void)LinkerAPI::Instance().LinkClassLazily(cls->AsJclass()); + } else if (modifier::IsColdClass(classFlag)) { + LinkerAPI::Instance().ResolveColdClassSymbol(cls->AsJclass()); + } +} + +extern "C" void MCC_SetJavaClass(address_t objAddr, address_t klass) __attribute__((alias("MRT_SetJavaClass"))); +extern "C" inline void MRT_SetJavaArrayClass(address_t objAddr, address_t klass) { + // set class. + StoreRefField(objAddr, kMrtKlassOffset, klass); + + // initialize gc header from prototype. + if (auto gcInfo = reinterpret_cast(MObject::Cast(klass)->GetGctib())) { + GCHeaderLVal(objAddr) |= gcInfo->headerProto; + } + + // array has no finalizer and it is not a reference, + // so we skip class flag check for array. + // make the store above visible to other thread + std::atomic_thread_fence(std::memory_order_release); +} + +address_t MCC_Reflect_ThrowInstantiationError() { + MRT_ThrowNewException("java/lang/InstantiationError", nullptr); + return 0; +} + +extern "C" void MRT_ReflectThrowNegtiveArraySizeException() { +#if UNITTEST + __MRT_ASSERT(0 && "Array element number is negtive."); +#else + MRT_ThrowNewExceptionUnw("java/lang/NegativeArraySizeException", nullptr); + __builtin_unreachable(); +#endif +} + +extern "C" void MRT_SetThreadPriority(pid_t tid, int32_t priority) { + errno = 0; + int ret = ::setpriority(static_cast(PRIO_PROCESS), static_cast(tid), priority); + if (UNLIKELY(ret != 0 && errno != 0)) { + char errMsg[maple::kMaxStrErrorBufLen]; + (void)strerror_r(errno, errMsg, sizeof(errMsg)); + LOG(ERROR) << "::setpriority() in MRT_SetThreadPriority() failed with errno " << + errno << ": " << errMsg << maple::endl; + } +} + +#if RC_HOT_OBJECT_DATA_COLLECT +const int kMaxClassNameLen = 256; +const int kMaxProfileClassCount = 5; +std::vector LoadTraceClassName() { +#ifdef __ANDROID__ + std::string fileName = "/etc/maple_rc_profile_classes_name.txt"; +#else + std::string fileName = "./maple_rc_profile_classes_name.txt"; +#endif + std::ifstream ifs(fileName, std::ifstream::in); + std::vector classNames; + if (!ifs.is_open()) { + LOG(INFO) << "LoadTraceClassName open file failed, no permission or not exist\n"; + return classNames; + } + char className[kMaxClassNameLen]; + int profileClassCount = 0; + // only read kMaxProfileClassCount line of file + while (ifs.getline(className, kMaxClassNameLen) && (profileClassCount++ < kMaxProfileClassCount)) { + std::string name(className); + if (name.size() <= 0) { + continue; + } + // class name should start with 'L' or '[' + if ((name[0] == 'L') || (name[0] == '[')) { + classNames.push_back(name); + } + } + LOG(INFO) << "LoadTraceClassName success, trace class size = " << classNames.size() << maple::endl; + ifs.close(); + return classNames; +} + +static std::vector traceClassesName = LoadTraceClassName(); + +static bool IsTraceClass(address_t klass) { + if (klass == 0) { + return false; + } + char *className = MObject::Cast(klass)->GetName(); + if (className == nullptr) { + return false; + } + for (auto name : traceClassesName) { + if (name.compare(0, kMaxClassNameLen, className) == 0) { + return true; + } + } + return false; +} + +static inline void setObjPc(address_t objAddr, unsigned long *pc) { + for (size_t i = 0; i < kTrackFrameNum; ++i) { + *reinterpret_cast(objAddr + kOffsetTrackPC + kDWordBytes * i) = pc[i]; + } +} + +static void 
DumpObjectBackTrace(address_t obj, address_t klass) { + if (!IsTraceClass(klass)) { + return; + } + size_t limit = 7; // the stack depth limit of recording new object in debug + std::vector stackArray; + MapleStack::FastRecordCurrentStackPCsByUnwind(stackArray, limit); + size_t size = stackArray.size(); + unsigned long pc[kTrackFrameNum] = { 0 }; + // only track last kTrackFrameNum frame + size_t skip = limit - kTrackFrameNum; + for (size_t i = skip; i < limit && i < size; ++i) { + pc[i - skip] = static_cast(stackArray[i]); + } + // set track PCs to object header + setObjPc(obj, pc); +} +#endif // RC_HOT_OBJECT_DATA_COLLECT + +#if ALLOC_USE_FAST_PATH +#define MCC_SLOW_NEW_OBJECT MRT_NewObject +#else +#define MCC_SLOW_NEW_OBJECT MCC_NewObj_fixed_class +#endif + +extern "C" address_t MCC_SLOW_NEW_OBJECT(address_t klass) { + MClass *cls = MObject::Cast(klass); + uint32_t mod = cls->GetModifier(); + if (UNLIKELY(modifier::IsAbstract(mod) || modifier::IsInterface(mod))) { + std::string clsName; + cls->GetBinaryName(clsName); + MRT_ThrowNewExceptionUnw("java/lang/InstantiationError", clsName.c_str()); + return 0; + } + address_t result = (*theAllocator).NewObj(cls->GetObjectSize()); + if (UNLIKELY(result == 0)) { + (*theAllocator).OutOfMemory(); + } + MRT_SetJavaClass(result, klass); +#if RC_HOT_OBJECT_DATA_COLLECT + DumpObjectBackTrace(result, klass); +#endif + return result; +} + +static inline address_t AllocatorNewArray(size_t elemSize, size_t nElems) { + if (UNLIKELY(nElems > kAllocArrayMaxSize / elemSize)) { + return 0; + } + size_t size = kJavaArrayContentOffset + elemSize * nElems; + return (*theAllocator).NewObj(size); +} + +extern "C" address_t MRT_ChelperNewobjFlexible(size_t elemSize, size_t len, address_t klass, bool isJNI) { + address_t result = AllocatorNewArray(elemSize, len); + if (UNLIKELY(result == 0)) { + (*theAllocator).OutOfMemory(isJNI); + return result; + } + ArrayLength(result) = static_cast(len); + MRT_SetJavaArrayClass(result, klass); + return result; +} + +const unsigned int kClassObjectFlag = 0xF0; + +static inline MClass *GetArrayClass(const char *classNameOrClassObj, address_t callerObj, unsigned long classFlag) { + if (classFlag & kClassObjectFlag) { + return reinterpret_cast(const_cast(classNameOrClassObj)); + } else if (classFlag & (~kClassObjectFlag)) { + return WellKnown::GetWellKnowClassWithFlag(static_cast(classFlag & (~kClassObjectFlag)), + *MObject::Cast(callerObj), + classNameOrClassObj); + } else { + return MClass::JniCast(MCC_GetClass(MObject::Cast(callerObj)->AsJclass(), classNameOrClassObj)); + } +} + +#define MCC_SLOW_NEW_ARRAY MCC_NewObj_flexible_cname +address_t MCC_NewObj_flexible_cname(size_t elemSize, + size_t nElems, + const char *classNameOrClassObj, + address_t callerObj, + unsigned long classFlag) { + // array length is set in NewArray + if (nElems > kMrtMaxArrayLength) { + MRT_ReflectThrowNegtiveArraySizeException(); + } + MClass *klass = GetArrayClass(classNameOrClassObj, callerObj, classFlag); + __MRT_ASSERT(klass != nullptr, "MCC_NewObj_flexible_cname klass nullptr"); + address_t result = AllocatorNewArray(elemSize, nElems); + if (UNLIKELY(result == 0)) { + (*theAllocator).OutOfMemory(); + } + ArrayLength(result) = static_cast(nElems); + MRT_SetJavaArrayClass(result, klass->AsUintptr()); + +#if RC_HOT_OBJECT_DATA_COLLECT + DumpObjectBackTrace(result, klass->AsUintptr()); +#endif + return result; +} + +#if ALLOC_USE_FAST_PATH +void MRT_SetFastAlloc(ClassMetadata *classMetadata) { + __MRT_ASSERT(classMetadata != nullptr, "classMetadata is 
nullptr in MRT_SetFastAlloc"); + uint16_t flag = classMetadata->flag; + MClass *cls = reinterpret_cast(classMetadata); + if (UNLIKELY(modifier::IsFastAllocClass(flag))) { + LOG(ERROR) << "fast path: init repeated or bad flag " << cls->GetName() << maple::endl; + classMetadata->flag &= ~modifier::kClassFastAlloc; + return; + } + if (UNLIKELY(modifier::hasFinalizer(flag) || + modifier::IsArrayClass(flag) || + // potentially, we can enable fast path for lazy/cold classes, + // but, watch out for race condition + modifier::IsLazyBindingClass(flag) || + modifier::IsColdClass(flag))) { + return; + } + uint32_t mod = cls->GetModifier(); + if (UNLIKELY(modifier::IsAbstract(mod) || modifier::IsInterface(mod))) { + return; + } + size_t objSize = cls->GetObjectSize(); + size_t alignedSize = AllocUtilRndUp(objSize, kAllocAlign); + if (LIKELY(ROSIMPL_IS_LOCAL_RUN_SIZE((alignedSize + ROSIMPL_HEADER_ALLOC_SIZE)))) { + classMetadata->sizeInfo.objSize = alignedSize; + classMetadata->flag |= modifier::kClassFastAlloc; + } +} +#endif + +// try to inline in this .cpp +__attribute__((always_inline)) +extern "C" address_t MCC_NewObject(address_t klass) { + address_t objAddr = MRT_TryNewObject(klass); + if (LIKELY(objAddr != 0)) { + return objAddr; + } + return MCC_SLOW_NEW_OBJECT(klass); +} + +#if ALLOC_USE_FAST_PATH +// change compiler invocation so that this redirection can be removed! +// inline helps when this function is used in this .cpp (e.g., new permanent) +inline address_t MCC_NewObj_fixed_class(address_t klass) { + return MCC_NewObject(klass); +} +#endif + +extern "C" address_t MCC_NewArray8(size_t nElems, address_t klass) { + address_t objAddr = MRT_TryNewArray(nElems, klass); + if (LIKELY(objAddr != 0)) { + return objAddr; + } + const char *param = reinterpret_cast(klass); + return MCC_SLOW_NEW_ARRAY(1, nElems, param, 0, kClassObjectFlag); // 1 is 8bit +} + +extern "C" address_t MCC_NewArray16(size_t nElems, address_t klass) { + address_t objAddr = MRT_TryNewArray(nElems, klass); + if (LIKELY(objAddr != 0)) { + return objAddr; + } + const char *param = reinterpret_cast(klass); + return MCC_SLOW_NEW_ARRAY(2, nElems, param, 0, kClassObjectFlag); // 2 is 16bit +} + +extern "C" address_t MCC_NewArray32(size_t nElems, address_t klass) { + address_t objAddr = MRT_TryNewArray(nElems, klass); + if (LIKELY(objAddr != 0)) { + return objAddr; + } + const char *param = reinterpret_cast(klass); + return MCC_SLOW_NEW_ARRAY(4, nElems, param, 0, kClassObjectFlag); // 4 is 32bit +} + +extern "C" address_t MCC_NewArray64(size_t nElems, address_t klass) { + address_t objAddr = MRT_TryNewArray(nElems, klass); + if (LIKELY(objAddr != 0)) { + return objAddr; + } + const char *param = reinterpret_cast(klass); + return MCC_SLOW_NEW_ARRAY(8, nElems, param, 0, kClassObjectFlag); // 8 is 64bit +} + +extern "C" address_t MCC_NewArray(size_t nElems, const char *descriptor, address_t callerObj) { + address_t objAddr = MRT_TryNewArray(nElems, reinterpret_cast( + MCC_GetClass(MObject::Cast(callerObj)->AsJclass(), descriptor))); + if (LIKELY(objAddr != 0)) { + return objAddr; + } + // this will go the get class path + return MCC_SLOW_NEW_ARRAY(4, nElems, descriptor, callerObj, 0); // 4 is 32bit +} + +#ifdef USE_32BIT_REF +#if !defined(__ANDROID__) && defined(__arm__) +constexpr address_t kKlassHighestBoundary = 0xffffffff; +#else +constexpr address_t kKlassHighestBoundary = 0xdfffffff; +#endif // __ANDROID__ +#if __MRT_DEBUG +constexpr address_t kKlassLowestBoundary = 0x80000000; +#endif +#endif //USE_32BIT_REF + +static inline 
void SetPermanentObjectClass(address_t objAddr, address_t klass) { +#ifdef USE_32BIT_REF +#if __MRT_DEBUG + if ((klass > kKlassHighestBoundary) || (klass < kKlassLowestBoundary)) { + LOG(FATAL) << "klass is not correct: " << std::hex << klass << std::dec << + MObject::Cast(klass)->GetName() << maple::endl; + } +#endif +#endif //USE_32BIT_REF + // set class. + StoreRefField(objAddr, kMrtKlassOffset, klass); + + // handle cold class. + if (MObject::Cast(klass)->IsLazyBinding()) { + (void)LinkerAPI::Instance().LinkClassLazily(reinterpret_cast(klass)); + } else if (MObject::Cast(klass)->IsColdClass()) { + LinkerAPI::Instance().ResolveColdClassSymbol(reinterpret_cast(klass)); + } +} + +static inline void SetPermanentArrayClass(address_t objAddr, address_t klass) { +#ifdef USE_32BIT_REF + if (UNLIKELY(klass > kKlassHighestBoundary)) { + LOG(FATAL) << "klass is not correct: " << std::hex << klass << std::dec << + MObject::Cast(klass)->GetName() << maple::endl; + } +#endif //USE_32BIT_REF + // set class. + StoreRefField(objAddr, kMrtKlassOffset, klass); +} + +static inline bool HasChildReference(const MClass *klass) { + __MRT_ASSERT(klass != nullptr, "kclass is nullptr in HasChildReference"); + const GCTibGCInfo *gcInfo = reinterpret_cast(klass->GetGctib()); + return (gcInfo == nullptr || (gcInfo->headerProto & kHasChildRef) != 0); +} + +address_t MCC_NewPermanentObject(address_t klass) { + // allocate permanent Reference or finalizable objects is not allowed, + // we fallback to allocate them in heap space. this means the @Permanent + // annotation for Reference or finalizable objects is ignored. + const MClass *javaClass = MObject::Cast(klass); + const uint32_t classFlag = javaClass->GetFlag(); + if (UNLIKELY(modifier::IsReferenceClass(classFlag) || modifier::hasFinalizer(classFlag))) { + return MCC_NewObj_fixed_class(klass); + } + + // if object has child reference, we allocate it in heap + // and set it as rc overflowed so that rc operations are ignored. + if (HasChildReference(javaClass)) { + address_t result = MCC_NewObj_fixed_class(klass); + SetRCOverflow(result); + return result; + } + + // allocate object in permanent space, without object header. + address_t result = MRT_AllocFromPerm(javaClass->GetObjectSize()); + if (UNLIKELY(result == 0)) { + return 0; + } + // set class. + SetPermanentObjectClass(result, klass); + + return result; +} + +extern "C" address_t MCC_NewPermObject(address_t klass) __attribute__((alias("MCC_NewPermanentObject"))); + +address_t MCC_NewPermanentArray(size_t elemSize, + size_t nElems, + const char *classNameOrClassObj, + address_t callerObj, + unsigned long classFlag) { + // determine array class. + MClass *klass = GetArrayClass(classNameOrClassObj, callerObj, classFlag); + __MRT_ASSERT(klass != nullptr, "MCC_NewPermanentArray klass nullptr"); + + // for object array and length > 0, we allocate it in heap + // and set it as rc overflowed so that rc operations are ignored. + if (HasChildReference(klass) && nElems != 0) { + // this still calls slow path, should switch to fast path + address_t result = MCC_NewObj_flexible_cname(elemSize, nElems, classNameOrClassObj, callerObj, classFlag); + SetRCOverflow(result); + return result; + } + + // check length. + if (UNLIKELY(nElems > kMrtMaxArrayLength)) { + MRT_ReflectThrowNegtiveArraySizeException(); + } + + // allocate array in permanent space, without object header. 
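  // Size illustration (comment only): for a primitive array with elemSize == 4 and nElems == 10,
  // totalSize is kJavaArrayContentOffset + 40 bytes; no RC/GC header is added because, as noted
  // above, permanent objects are allocated without one.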
+ const size_t totalSize = kJavaArrayContentOffset + elemSize * nElems; + address_t result = MRT_AllocFromPerm(totalSize); + if (UNLIKELY(result == 0)) { + return 0; + } + // set class. + SetPermanentArrayClass(result, reinterpret_cast(klass)); + + // set array length. + ArrayLength(result) = static_cast(nElems); + + return result; +} + +template +static address_t MRT_NewPermArray(size_t nElems, address_t klass) { + // objects in permanent space cannot have child references, use normal heap + if (HasChildReference(MObject::Cast(klass)) && nElems != 0) { + address_t objAddr = MRT_TryNewArray(nElems, klass); + if (UNLIKELY(objAddr == 0)) { + const char *param = reinterpret_cast(klass); + objAddr = MCC_SLOW_NEW_ARRAY(1 << elemSizeExp, nElems, param, 0, kClassObjectFlag); + } + SetRCOverflow(objAddr); + return objAddr; + } + + if (UNLIKELY(nElems > kMrtMaxArrayLength)) { + MRT_ReflectThrowNegtiveArraySizeException(); + } + size_t objSize = kJavaArrayContentOffset + (nElems << elemSizeExp); + address_t objAddr = MRT_AllocFromPerm(objSize); + if (UNLIKELY(objAddr == 0)) { + return 0; + } + // permanent allocator does not properly throw OOM, it will just abort + SetPermanentArrayClass(objAddr, klass); + ArrayLength(objAddr) = static_cast(nElems); + return objAddr; +} + +extern "C" address_t MCC_NewPermArray8(size_t nElems, address_t klass) { + return MRT_NewPermArray(nElems, klass); +} + +extern "C" address_t MCC_NewPermArray16(size_t nElems, address_t klass) { + return MRT_NewPermArray(nElems, klass); +} + +extern "C" address_t MCC_NewPermArray32(size_t nElems, address_t klass) { + return MRT_NewPermArray(nElems, klass); +} + +extern "C" address_t MCC_NewPermArray64(size_t nElems, address_t klass) { + return MRT_NewPermArray(nElems, klass); +} + +address_t MCC_NewPermArray(size_t nElems, const char *descriptor, address_t callerObj) { + return MRT_NewPermArray(nElems, reinterpret_cast( + MCC_GetClass(MObject::Cast(callerObj)->AsJclass(), descriptor))); +} + + // set object rc overflow to ignore rc operation +extern "C" void MRT_SetObjectPermanent(address_t objAddr) { + if (!IS_HEAP_OBJ(objAddr)) { + return; + } + SetRCOverflow(objAddr); +} + +extern "C" void MCC_SetObjectPermanent(address_t objAddr) __attribute__((alias("MRT_SetObjectPermanent"))); + +extern "C" void MRT_CheckRefCount(address_t objAddr, uint32_t index) { + if (!IS_HEAP_OBJ(objAddr)) { + return; + } + + uint32_t rc = RefCount(objAddr); + if (rc == 0) { +#ifdef __ANDROID__ + LOG(ERROR) << "Obj = 0x" << std::hex << objAddr << " RCHeader = 0x" << RCHeader(objAddr) << + " GCHeader = 0x" << GCHeader(objAddr) << + " LockWord = 0x" << *(reinterpret_cast(objAddr + kLockWordOffset)) << + " Index = " << std::dec << index << ", RC Fatal Error is detected." << std::endl; +#else + std::cout << "Obj = 0x" << std::hex << objAddr << " RCHeader = 0x" << RCHeader(objAddr) << + " GCHeader = 0x" << GCHeader(objAddr) << + " LockWord = 0x" << *(reinterpret_cast(objAddr + kLockWordOffset)) << + " Index = " << std::dec << index << ", RC Fatal Error is detected." << std::endl; +#endif + HandleRCError(objAddr); + } +} + +extern "C" void MCC_CheckRefCount(address_t objAddr, uint32_t index) __attribute__((alias("MRT_CheckRefCount"))); +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/chosen.cpp b/src/mrt/compiler-rt/src/chosen.cpp new file mode 100644 index 0000000000..e9b99e5975 --- /dev/null +++ b/src/mrt/compiler-rt/src/chosen.cpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "chosen.h" + +namespace maplert { +// Actual instances +ImmortalWrapper permAllocator("maple_alloc_perm", kPermMaxSpaceSize); +ImmortalWrapper zterpMetaAllocator("maple_alloc_zterp_meta", kZterpMaxSpaceSize); +ImmortalWrapper metaAllocator("maple_alloc_meta", kMetaMaxSpaceSize); +ImmortalWrapper decoupleAllocator("maple_alloc_decouple", kDecoupleMaxSpaceSize); +ImmortalWrapper zterpStaticRootAllocator("maple_alloc_zterp_static_root", + kZterpMaxSpaceSize); +ImmortalWrapper theAllocator; +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/cinterface.cpp b/src/mrt/compiler-rt/src/cinterface.cpp new file mode 100644 index 0000000000..e49e651eb1 --- /dev/null +++ b/src/mrt/compiler-rt/src/cinterface.cpp @@ -0,0 +1,1665 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "mm_config.h" +#include "mm_utils.h" +#include "sizes.h" +#include "collector/stats.h" +#include "chosen.h" +#include "yieldpoint.h" +#include "mutator_list.h" +#include "profile.h" +#include "collector/cp_generator.h" +#include "allocator/bp_allocator_inlined.h" +#include "mrt_poisonstack.h" +#include "collector/native_gc.h" +#include "java_primitive_ops.h" +#include "libs.h" + +#if SAFEREGION_CHECK && __MRT_DEBUG +static void LogSlow(const char *fmt, ...) { + const int bufSize = 128; + char buf[bufSize] = { '\0' }; + const char *toPrint = buf; + va_list ap; + va_start(ap, fmt); + int rv = vsnprintf_s(buf, bufSize, bufSize - 1, fmt, ap); + if (rv == -1) { + toPrint = "Error when calling vsnprintf_s."; + } + va_end(ap); + +#ifdef __ANDROID__ + LOG(ERROR) << toPrint << maple::endl; +#else + fprintf(stderr, "%s\n", toPrint); + fflush(stderr); +#endif +} +#endif + +#if SAFEREGION_CHECK && __MRT_DEBUG +#define CHECK_SAFEREGION(MSG, ...) do { \ + if (UNLIKELY(TLMutator().InSaferegion())) { \ + LogSlow("In saferegion: " MSG, ##__VA_ARGS__); \ + util::print_backtrace(); \ + abort(); \ + } \ +} while (0) +#else +#define CHECK_SAFEREGION(MSG, ...) 
((void)0) +#endif + +#if __MRT_DEBUG +#define MRT_ASSERT_RC_NZ(rc, msg, obj, rcHeader) do { \ + if (rc == 0) { \ + LOG(ERROR) << msg << "; obj: " << std::hex << reinterpret_cast(obj) << \ + ", rc header: 0x" << std::hex << rcHeader; \ + HandleRCError(obj); \ + } \ +} while (0) +// this is used to check rc updates +// the update function must return an rc header +#define MRT_CallAndCheckRC(fcall, obj) do { \ + uint32_t rcHeader = fcall; \ + MRT_ASSERT_RC_NZ(GetRCFromRCHeader(rcHeader), "rc == 0", obj, rcHeader); \ +} while (0) +#else +#define MRT_ASSERT_RC_NZ(rc, msg, obj, rcHeader) +// we still call the update function, but do not check the returned rc header +#define MRT_CallAndCheckRC(fcall, obj) (void)fcall +#endif +namespace maplert { +static inline void CheckObjAllocated(address_t obj) { +#ifdef DISABLE_RC_DUPLICATE + if (IS_HEAP_ADDR(obj) && !IsAllocatedByAllocator(obj)) { + HandleRCError(obj); + } +#else + (void)obj; +#endif +} + +void MCC_CheckObjAllocated(address_t obj) { + CheckObjAllocated(obj); +} + +template +static inline void GCWriteField(address_t obj, address_t *fieldAddr, address_t value) { + CheckObjAllocated(value); + CheckObjAllocated(obj); + TLMutator().SatbWriteBarrier(obj, *reinterpret_cast(fieldAddr)); + if (isVolatile) { + std::atomic &volatileFieldAddr = AddrToLValAtomic(reinterpret_cast(fieldAddr)); + volatileFieldAddr.store(AddressToRefField(value), std::memory_order_release); + } else { + StoreRefField(fieldAddr, value); + } +} + +template +static inline address_t GCLoadField(address_t *fieldAddr) { + address_t value; + if (isVolatile) { + std::atomic &volatileFieldAddr = AddrToLValAtomic(reinterpret_cast(fieldAddr)); + value = volatileFieldAddr.load(std::memory_order_acquire); + } else { + value = LoadRefField(fieldAddr); + } + CheckObjAllocated(value); + return value; +} + +extern "C" { + +// Initialize the global heap allocator. +bool MRT_GCInitGlobal(const VMHeapParam &vmHeapParam) { + Collector::Create(vmHeapParam.gcOnly); + GCLog().Init(vmHeapParam.enableGCLog); + (*theAllocator).Init(vmHeapParam); + Collector::Instance().Init(); + NativeGCStats::Instance().SetIsEpochBasedTrigger(vmHeapParam.isZygote); + return true; +} + +// finalize the global heap allocator +bool MRT_GCFiniGlobal() { + Collector::Instance().Fini(); + return true; +} + +void MRT_GCPreFork() { + // Close the GC log + GCLog().OnPreFork(); + + // stop threads before fork. + Collector::Instance().StopThread(); + MRT_StopProcessReferences(); + + // wait threads stopped. + Collector::Instance().JoinThread(); + MRT_WaitProcessReferencesStopped(); + + // confirm all threads stopped. 
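+  // (fork() only duplicates the calling thread, so the collector and
+  //  reference-processor threads stopped above must be fully joined before the
+  //  process forks; they are restarted after fork via MRT_GCPostForkCommon and
+  //  InitAfterFork.)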
+ util::WaitUntilAllThreadsStopped(); + CreateAppStringPool(); + Collector &collector = Collector::Instance(); + __MRT_ASSERT(collector.IsZygote(), "not zygote in perfork"); + if (collector.Type() == kNaiveRC && static_cast(collector).HasWeakRelease()) { + uint64_t startTime = timeutils::NanoSeconds(); + auto clearWeakField = [](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if (kind == kWeakRefBits && IS_HEAP_ADDR(ref) && IsWeakCollected(ref)) { + field = 0; + // maintian rc but not release, to avoid new weak collector object + // rc verify can pass but might leave some object and wait gc clear + // only thread here, no racing + (void)UpdateRC<0, -1, 0>(ref); + } + }; + (*theAllocator).ForEachObjUnsafe([&clearWeakField](address_t obj) { + if (!IsArray(obj)) { + ForEachRefField(obj, clearWeakField); + } + }, OnlyVisit::kVisitAll); + LOG(INFO) << "prefork clear weak takes " << (timeutils::NanoSeconds() - startTime) << "ns"; + static_cast(collector).SetHasWeakRelease(false); + } + (*theAllocator).OnPreFork(); +} + +void MRT_GCPostForkChild(bool isSystem) { + // Re-open the GC log + GCLog().OnPostFork(); + NativeGCStats::Instance().SetIsEpochBasedTrigger(isSystem); + maplert::stats::gcStats->InitialGCThreshold(isSystem); + maplert::stats::gcStats->InitialGCProcessName(); + // init collector after fork in child. + Collector::Instance().SetIsSystem(isSystem); + Collector::Instance().InitAfterFork(); + + if (!isSystem) { + MRT_SendBackgroundGcJob(true); + } + + // init yieldpoint after fork. + YieldpointInitAfterFork(); +} + +void MRT_ForkInGC(bool runInGC) { + if (runInGC && Collector::Instance().Type() == kNaiveRC) { + uint64_t startTime = timeutils::NanoSeconds(); + Collector::SwitchToGCOnFork(); + LOG(INFO) << "switch collector success " << (static_cast(Collector::Instance().Type())) << + " spent " << (timeutils::NanoSeconds() - startTime) << "ns" << maple::endl; + } +} + +void MRT_GCPostForkCommon(bool isZygote) { + if (isZygote) { + LinkerAPI::Instance().ReleaseBootPhaseMemory(true, false); + } + Collector::Instance().SetIsZygote(isZygote); + // start gc thread(s) after fork. + Collector::Instance().StartThread(isZygote); +} + +void MRT_RegisterNativeAllocation(size_t byte) { + NativeGCStats::Instance().RegisterNativeAllocation(byte); +} + +void MRT_RegisterNativeFree(size_t byte) { + NativeGCStats::Instance().RegisterNativeFree(byte); +} + +void MRT_NotifyNativeAllocation() { + NativeGCStats::Instance().NotifyNativeAllocation(); +} + +#if RC_TRACE_OBJECT +address_t tracingObject = 0; +void __attribute__((noinline)) TraceRefRC(address_t obj, uint32_t rc, const char *msg) { + if (Collector::Instance().Type() == kNaiveRC) { + if ((tracingObject != 0 && obj == tracingObject) || IsTraceObj(obj)) { + void *callerPc = __builtin_return_address(1); + LOG2FILE(kLogtypeRcTrace) << "Obj " << std::hex << reinterpret_cast(obj) << std::dec << + " RC=" << rc << (SkipRC(rc) ? " (inaccurate: rc operation skipped) " : " ") << msg << " "; + util::PrintPCSymbolToLog(callerPc); + } + } +} +#endif + +// Start GC-module for the process. It should always called from the "main" +// thread. +void MRT_GCStart() { + // main-thread doesn't have this created yet. + (*theAllocator).NewOOMException(); +} + +// Initialize the thread-local heap allocator. It should be called when a +// mutator thread is created. +// isMain: whether it's the "main" thread. +bool MRT_GCInitThreadLocal(bool isMain) { + // we should not call this function if current mutator already initialized. 
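+  // (both thread-local slots are populated via maple::tls::StoreTLS() below,
+  //  so on a freshly attached thread they must still be null here)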
+#if __MRT_DEBUG + if (UNLIKELY(TLMutatorPtr() != nullptr || TLAllocMutatorPtr() != nullptr)) { + LOG(FATAL) << "MRT_GCInitThreadLocal() called on active mutator: " << + TLMutatorPtr() << " " << TLAllocMutatorPtr() << maple::endl; + } +#endif + + // create thread local mutator. + Mutator *mutator = nullptr; + if (Collector::Instance().Type() == kNaiveRC) { + mutator = new (std::nothrow) NaiveRCMutator(Collector::Instance()); + } else { + mutator = new (std::nothrow) MarkSweepMutator(Collector::Instance()); + } + TheAllocMutator *allocMutator = new (std::nothrow) TheAllocMutator(); + + // ensure mutator successfully created. + __MRT_ASSERT(mutator != nullptr && allocMutator != nullptr, "Out of memory"); + + // add key thread local variable's handle to thread's local-storage-area to allow + // fast access. + maple::tls::StoreTLS(mutator, maple::tls::kSlotMutator); + maple::tls::StoreTLS(allocMutator, maple::tls::kSlotAllocMutator); + + // Set up the stack begin pointer. We assume the caller calls this function + // before calling any Java function. It is primarily used by conservative + // stack scanners, because an exact stack scanner can always identify the + // stack-bottom function. + pthread_attr_t attr; + void *stackAddr = nullptr; + size_t stackSize; + int tmpResult = pthread_getattr_np(pthread_self(), &attr); + if (tmpResult != 0) { + LOG(FATAL) << "pthread_getattr_np() in MRT_GCInitThreadLocal() return " << + tmpResult << " rather than 0" << maple::endl; + }; + tmpResult = pthread_attr_getstack(&attr, &stackAddr, &stackSize); + if (tmpResult != 0) { + LOG(FATAL) << "pthread_attr_getstack() in MRT_GCInitThreadLocal() return " << + tmpResult << " rather than 0" << maple::endl; + }; + void *stackBegin = reinterpret_cast(reinterpret_cast(stackAddr) + stackSize); + mutator->SaveStackBegin(stackBegin); + + // mutator->Init() will be called in InitYieldpoint(). + InitYieldpoint(*mutator); + + // allocMutator->Init() need to create an OOMError instance, + // it should be called after thread local GC mutator initialized. + allocMutator->Init(); + + if (!isMain) { + (*theAllocator).NewOOMException(); + } + tmpResult = pthread_attr_destroy(&attr); + if (tmpResult != 0) { + LOG(ERROR) << "pthread_attr_destroy() in MRT_GCInitThreadLocal() return " << + tmpResult << " rather than 0" << maple::endl; + return false; + } + return true; +} + +// Finalize the thread-local heap allocator. It should be called when a +// mutator thread exits. +bool MRT_GCFiniThreadLocal() { + // Get thread local mutator. + Mutator *mutator = TLMutatorPtr(); + TheAllocMutator *allocMutator = TLAllocMutatorPtr(); + + // we should not call this function if current mutator not initialized + // or has been removed from mutator list. +#if __MRT_DEBUG + if (UNLIKELY(mutator == nullptr || mutator->IsActive() == false || allocMutator == nullptr)) { + LOG(FATAL) << "MRT_GCFiniThreadLocal() called on inactive mutator: " << + mutator << " " << allocMutator << maple::endl; + } +#endif + + // check tid. +#if __MRT_DEBUG + uint32_t tid = static_cast(maple::GetTid()); + if (UNLIKELY(mutator->GetTid() != tid)) { + mutator->DumpRaw(); + LOG(FATAL) << "MRT_GCFiniThreadLocal() invalid mutator tid: " << + mutator->GetTid() << " != " << tid << maple::endl; + } +#endif + + // clean up tl_alloc_mutator. + allocMutator->Fini(); + + // mutator->Fini() will be called in FiniYieldpoint(). + FiniYieldpoint(*mutator); + + // release mutator objects. + delete allocMutator; + delete mutator; + + // clear TLS slots. 
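+  // (nulling both slots lets MRT_GCInitThreadLocal() be called again on this
+  //  thread without tripping the "active mutator" debug check above)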
+ maple::tls::StoreTLS(nullptr, maple::tls::kSlotMutator); + maple::tls::StoreTLS(nullptr, maple::tls::kSlotAllocMutator); + + return true; +} + +address_t MRT_AllocFromPerm(size_t size) { + return (*permAllocator).AllocThrowExp(size); +} + +address_t MRT_AllocFromMeta(size_t size, MetaTag metaTag) { + return (*metaAllocator).Alloc(size, metaTag); +} + +address_t MRT_AllocFromDecouple(size_t size, DecoupleTag tag) { + return (*decoupleAllocator).Alloc(size, tag); +} + +bool MRT_IsPermJavaObj(address_t obj){ + return (*permAllocator).Contains(obj); +} + +void MRT_VisitDecoupleObjects(maple::rootObjectFunc f) { + if (!(*decoupleAllocator).ForEachObj(f)) { + LOG(ERROR) << "(*decoupleAllocator).ForEachObj() in MRT_VisitDecoupleObjects() return false." << maple::endl; + } +} + +// deprecated +address_t MCC_NewObj(size_t size, size_t) { + address_t addr = (*theAllocator).NewObj(size); + if (UNLIKELY(addr == 0)) { + (*theAllocator).OutOfMemory(false); + } + return addr; +} + +void MRT_ClassInstanceNum(std::map &objNameCntMp){ + ScopedStopTheWorld sstw; + (*theAllocator).ClassInstanceNum(objNameCntMp); +} + +void MRT_FreeObj(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + (*theAllocator).FreeObj(obj); + } +} + +void MRT_PrintHeapStats() { + heapStats.PrintHeapStats(); +} + +void MRT_ResetHeapStats() { + heapStats.ResetHeapStats(); +} + +size_t MRT_TotalHeapObj() { + return (*theAllocator).AllocatedObjs(); +} + +size_t MRT_TotalMemory() { + return (*theAllocator).GetCurrentSpaceCapacity(); +} + +size_t MRT_MaxMemory() { + return (*theAllocator).GetMaxCapacity(); +} + +size_t MRT_FreeMemory() { + return (*theAllocator).GetCurrentFreeBytes(); +} + +void MRT_GetInstances(jclass klass, bool includeAssignable, size_t maxCount, std::vector &instances) { + if (klass != nullptr) { + (*theAllocator).GetInstances(MClass::JniCast(klass), includeAssignable, maxCount, instances); + } +} + + +void MRT_Trim(bool aggressive) { + if (!(*theAllocator).ReleaseFreePages(aggressive)) { + LOG(ERROR) << "(*theAllocator).ReleaseFreePages() in MRT_Trim() return false."; + } +} + +extern "C" void MRT_RequestTrim() { + if (!(*theAllocator).ReleaseFreePages(false)) { + LOG(ERROR) << "release requested but not effective"; + } +} + +size_t MRT_AllocSize() { + return heapStats.GetAllocSize(); +} + +size_t MRT_AllocCount() { + return heapStats.GetAllocCount(); +} + +size_t MRT_FreeSize() { + return heapStats.GetFreeSize(); +} + +size_t MRT_FreeCount() { + return heapStats.GetFreeCount(); +} + +size_t MRT_GetNativeAllocBytes() { + return heapStats.GetNativeAllocBytes(); +} + +void MRT_SetNativeAllocBytes(size_t size) { + heapStats.SetNativeAllocBytes(size); +} + +void MRT_PrintRCStats() { + RCCollector::PrintStats(); +} + +void MRT_ResetRCStats() { + RCCollector::ResetStats(); +} + +void MRT_GetGcCounts(size_t &gcCount, uint64_t &maxGcMs) { + gcCount = maplert::stats::gcStats->NumGCTriggered(); + maxGcMs = maplert::stats::gcStats->MaxSTWNanos(); + maplert::stats::gcStats->ResetNumGCTriggered(); + maplert::stats::gcStats->ResetMaxSTWNanos(); +} + +void MRT_GetMemLeak(size_t &avgLeak, size_t &peakLeak) { + avgLeak = maplert::stats::gcStats->AverageMemoryLeak(); + peakLeak = maplert::stats::gcStats->TotalMemoryLeak(); + maplert::stats::gcStats->ResetMemoryLeak(); +} + +void MRT_GetMemAlloc(float &util, size_t &abnormalCount) { + util = maplert::stats::gcStats->MemoryUtilization(); + abnormalCount = maplert::stats::gcStats->NumAllocAnomalies(); + maplert::stats::gcStats->ResetNumAllocAnomalies(); +} + +void 
MRT_GetRCParam(size_t &abnormalCount) { + abnormalCount = maplert::stats::gcStats->NumRCAnomalies(); + maplert::stats::gcStats->ResetNumRCAnomalies(); +} + +address_t MRT_LoadVolatileField(address_t obj, address_t *fieldAddr) { + CHECK_SAFEREGION("MRT_LoadVolatileField: fieldAddr %p", static_cast(fieldAddr)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadRefVolatile(fieldAddr); + } else { + auto atomicField = reinterpret_cast*>(fieldAddr); + return atomicField->load(std::memory_order_acquire); + } +} + +void MRT_WriteVolatileField(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MRT_WriteVolatileField: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatile(obj, fieldAddr, value); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +address_t MCC_LoadVolatileStaticField(address_t *fieldAddr) { + CHECK_SAFEREGION("MCC_LoadVolatileStaticField: fieldAddr %p", static_cast(fieldAddr)); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadRefVolatile(fieldAddr); + } else { + return GCLoadField(fieldAddr); + } +} + +void MRT_WriteVolatileStaticField(address_t *fieldAddr, address_t value) + __attribute__((alias("MCC_WriteVolatileStaticField"))); + +void MCC_WriteVolatileStaticField(address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileStaticField: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatile(kDummyAddress, fieldAddr, value); + } else { + GCWriteField(kDummyAddress, fieldAddr, value); + } +} + +uint32_t MRT_RefCount(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + if (obj == 0) { + return 0; + } + return RefCount(obj); + } else { + constexpr uint32_t gcOnlyRefCount = 1; + MRT_DummyUse(obj); + return gcOnlyRefCount; + } +} + +address_t MCC_LoadVolatileField(address_t obj, address_t *fieldAddr) { + CHECK_SAFEREGION("MCC_LoadVolatileField: fieldAddr %p", static_cast(fieldAddr)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadRefVolatile(fieldAddr); + } else { + return GCLoadField(fieldAddr); + } +} + +// write barrier +void MCC_WriteRefField(address_t obj, address_t *field, address_t value) { + CHECK_SAFEREGION("write ref field: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (UNLIKELY(obj == 0)) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefField(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +void MCC_WriteVolatileField(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileField: obj %p, fieldAddr %p, value %p", + static_cast(obj), reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatile(obj, fieldAddr, value); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +// write barrier +void MRT_WriteRefField(address_t obj, address_t *field, address_t value) { + 
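+  // Same barrier split as MCC_WriteRefField above: the naive RC path updates
+  // reference counts through the mutator, while the GC path records the old
+  // field value for SATB concurrent marking before storing the new one.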
CHECK_SAFEREGION("write ref field: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefField(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +void MRT_WriteRefFieldStatic(address_t *field, address_t value) __attribute__((alias("MCC_WriteRefFieldStatic"))); +void MCC_WriteRefFieldStatic(address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldStatic: field %p, value %p", + reinterpret_cast(field), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefField(kDummyAddress, field, value); + } else { + GCWriteField(kDummyAddress, field, value); + } +} + +void MRT_WeakRefGetBarrier(address_t referent) { + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WeakRefGetBarrier(referent); + } else { + // When concurrent marking is enabled, remember the referent and + // prevent it from being reclaimed in this GC cycle. + if (LIKELY(IS_HEAP_OBJ(referent))) { + TLMutator().SatbWriteBarrier(referent); + } + } +} + +address_t MRT_LoadRefField(address_t obj, address_t *fieldAddr) { + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CHECK_SAFEREGION("MRT_LoadRefField: fieldAddr %p", reinterpret_cast(fieldAddr)); + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadIncRef(fieldAddr); + } else { + return GCLoadField(fieldAddr); + } +} + +// extract first 4k and write to os. +// find single cycle boundary delmeter ";" +void MRT_GetCyclePattern(ostream &os) { + string &patterns = ClassCycleManager::GetPatternsCache(); + if (patterns.size() <= kMaxBigdataUploadSize) { + LOG2FILE(kLogtypeCycle) << "flush all patterns " << patterns << "end" << std::endl; + os << patterns; + patterns.clear(); + return; + } + LOG2FILE(kLogtypeCycle) << "loading_patterns current " << patterns << "end" << std::endl; + size_t pos = patterns.rfind(";", kMaxBigdataUploadSize); + // If first cycle pattern is too large, find its end and truncate to kMaxBigdataUploadSize + if (pos == string::npos) { + size_t cycleEndPos = patterns.find(";", kMaxBigdataUploadSize); + os << (patterns.substr(0, kMaxBigdataUploadSize)); + LOG2FILE(kLogtypeCycle) << "truncate loading_patterns " << (patterns.substr(0, kMaxBigdataUploadSize)) << + "end" << std::endl; + patterns = patterns.substr(cycleEndPos + 1); + LOG2FILE(kLogtypeCycle) << "partial loading_patterns remain " << patterns << "end" << std::endl; + } else { + os << (patterns.substr(0, pos + 1)); + LOG2FILE(kLogtypeCycle) << "partial loading_patterns " << (patterns.substr(0, pos + 1)) << "end" << std::endl; + patterns = patterns.substr(pos + 1); + LOG2FILE(kLogtypeCycle) << "partial loading_patterns remain " << patterns << "end" << std::endl; + } +} + +ostream *MRT_GetCycleLogFile() { + return &GCLog().Stream(kLogtypeCycle); +} + +void MRT_IncDecRef(address_t incAddr, address_t decAddr) { + if (Collector::Instance().Type() == kNaiveRC) { + if (incAddr == decAddr) { + return; + } + MRT_IncRef(incAddr); + MRT_DecRef(decAddr); + } +} + +void MCC_ClearLocalStackRef(address_t *var) { + if (Collector::Instance().Type() == kNaiveRC) { + address_t slot = *var; + *var = 0; + MRT_DecRef(slot); + } else { + *var = 0; + } +} + +void MCC_DecRefResetPair(address_t *incAddr, address_t *decAddr) { + if (Collector::Instance().Type() == kNaiveRC) { + address_t slot0 = *incAddr; 
+ *incAddr = 0; + MRT_DecRef(slot0); + address_t slot1 = *decAddr; + *decAddr = 0; + MRT_DecRef(slot1); + } else { + *incAddr = 0; + *decAddr = 0; + } +} + +void MCC_IncDecRefReset(address_t incAddr, address_t *decAddr) { + CheckObjAllocated(incAddr); + if (Collector::Instance().Type() == kNaiveRC) { + address_t slot = *decAddr; + *decAddr = 0; + if (incAddr == slot) { + return; + } + MRT_IncRef(incAddr); + MRT_DecRef(slot); + } else { + *decAddr = 0; + } +} + +void MRT_IncRef(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + JSAN_CHECK_OBJ(obj); + CHECK_SAFEREGION("inc %p", reinterpret_cast(obj)); + NRCMutator().IncRef(obj); + } +} + +address_t MCC_IncRef_NaiveRCFast(address_t obj) __attribute__((alias("MRT_IncRefNaiveRCFast"))); + +address_t MRT_IncRefNaiveRCFast(address_t obj) { + CheckObjAllocated(obj); + if (Collector::Instance().Type() != kNaiveRC) { + return obj; + } + // skip non-heap objects. + if (UNLIKELY(!IS_HEAP_ADDR(obj))) { + return obj; + } + + MRT_CallAndCheckRC((AtomicUpdateRC<1, 0, 0>(obj)), obj); + +#if RC_TRACE_OBJECT + TraceRefRC(obj, RefCount(obj), "After MCC_IncRef_NaiveRCFast"); +#endif + return obj; +} + +address_t MCC_LoadRefField_NaiveRCFast(address_t obj, address_t *fieldAddr) { + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + address_t fld = LoadRefField(fieldAddr); + if (Collector::Instance().Type() != kNaiveRC) { + return fld; + } + + // return if field is non-heap object. + if (UNLIKELY((fld & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK)) { + return NRCMutator().LoadRefVolatile(fieldAddr); + } else if (UNLIKELY(!IS_HEAP_ADDR(fld))) { + return fld; + } + + if (TryAtomicIncStrongRC(fld)) { +#if RC_TRACE_OBJECT + TraceRefRC(fld, RefCount(fld), "After MCC_LoadRefField_NaiveRCFast"); +#endif + return fld; + } + return MRT_LoadRefField(obj, fieldAddr); +} + +static inline void WriteVolatileFieldNoInc(address_t obj, address_t *fieldAddr, address_t value) { + NRCMutator().WriteRefFieldVolatileNoInc(obj, fieldAddr, value); + if (UNLIKELY(obj == value)) { + MRT_DecRef(obj); + } +} + +void MRT_WriteVolatileFieldNoInc(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MRT_WriteVolatileFieldNoInc: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteVolatileFieldNoInc(obj, fieldAddr, value); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +void MCC_WriteVolatileStaticFieldNoInc(address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileStaticFieldNoInc: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatileNoInc(kDummyAddress, fieldAddr, value); + } else { + GCWriteField(kDummyAddress, fieldAddr, value); + } +} + +void MCC_WriteVolatileStaticFieldNoDec(address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileStaticFieldNoDec: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatileNoDec(kDummyAddress, fieldAddr, value); + } else { + GCWriteField(kDummyAddress, fieldAddr, value); + } +} + +void MCC_WriteVolatileStaticFieldNoRC(address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileStaticFieldNoRC: fieldAddr %p, 
value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatileNoRC(kDummyAddress, fieldAddr, value); + } else { + GCWriteField(kDummyAddress, fieldAddr, value); + } +} + +static inline void WriteWeakField(address_t obj, address_t *field, address_t value, bool isVolatile) { + NRCMutator().WriteWeakField(obj, field, value, isVolatile); +} + +void MRT_WriteWeakField(address_t obj, address_t *field, address_t value, bool isVolatile) { + CHECK_SAFEREGION("MRT_WriteWeakField: obj_addr %p, value %p", + reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteWeakField(obj, field, value, isVolatile); + } else { + if (isVolatile) { + GCWriteField(obj, field, value); + } else { + GCWriteField(obj, field, value); + } + } +} + +address_t MRT_LoadWeakField(address_t obj, address_t *field, bool isVolatile) { + CHECK_SAFEREGION("MRT_LoadWeakField: fieldAddr %p", reinterpret_cast(field)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadWeakField(field, isVolatile); + } else { + if (isVolatile) { + return GCLoadField(field); + } else { + return GCLoadField(field); + } + } +} + +address_t MRT_LoadWeakFieldCommon(address_t obj, address_t *field) { + CHECK_SAFEREGION("MRT_LoadWeakFieldCommon: fieldAddr %p", reinterpret_cast(field)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadWeakRefCommon(field); + } else { + return GCLoadField(field); + } +} + +void MCC_WriteWeakField(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteWeakField: obj_addr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteWeakField(obj, fieldAddr, value, false); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +address_t MCC_LoadWeakField(address_t obj, address_t *fieldAddr) { + CHECK_SAFEREGION("MCC_LoadWeakField: fieldAddr %p", reinterpret_cast(fieldAddr)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadWeakField(fieldAddr, false); + } else { + return GCLoadField(fieldAddr); + } +} + +void MCC_WriteVolatileWeakField(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteWeakField: obj_addr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteWeakField(obj, fieldAddr, value, true); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +address_t MCC_LoadVolatileWeakField(address_t obj, address_t *fieldAddr) { + CHECK_SAFEREGION("MCC_LoadWeakField: fieldAddr %p", reinterpret_cast(fieldAddr)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadWeakField(fieldAddr, true); + } else { + return GCLoadField(fieldAddr); + } +} + +void MRT_CollectWeakObj(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + 
__MRT_ASSERT(IsWeakCollected(obj), "weak rc not collected"); + if (!RCReferenceProcessor::Instance().CheckAndAddFinalizable(obj)) { + NRCMutator().WeakReleaseObj(obj); + NRCMutator().DecWeak(obj); + } + } +} + +void MRT_ReleaseObj(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().ReleaseObj(obj); + } +} + +address_t MCC_DecRef_NaiveRCFast(address_t obj) __attribute__((alias("MRT_DecRefNaiveRCFast"))); +address_t MRT_DecRefNaiveRCFast(address_t obj) { + CheckObjAllocated(obj); + if (Collector::Instance().Type() != kNaiveRC) { + return obj; + } + // skip non-heap objects. + if (UNLIKELY(!IS_HEAP_ADDR(obj))) { + return obj; + } + + // Check Cycle before DecRef, avoid data racing release object in other thread while match + uint32_t rcFlags = GCHeader(obj); + if ((rcFlags & kCyclePatternBit) == 0) { + uint32_t releaseState = kNotRelease; + uint32_t oldHeader __MRT_UNUSED = AtomicDecRCAndCheckRelease<-1, 0, 0>(obj, releaseState); +#if RC_TRACE_OBJECT + TraceRefRC(obj, RefCount(obj), "After MCC_DecRef_NaiveRCFast"); +#endif + if (releaseState == kReleaseObject) { + MRT_ReleaseObj(obj); + } else if (releaseState == kCollectedWeak) { + if (!RCReferenceProcessor::Instance().CheckAndAddFinalizable(obj)) { + NRCMutator().WeakReleaseObj(obj); + NRCMutator().DecWeak(obj); + } + } + MRT_ASSERT_RC_NZ(GetRCFromRCHeader(oldHeader), "Dec from 0", obj, oldHeader); + } else { + MRT_DecRef(obj); + } + return obj; +} + +void MCC_IncDecRef_NaiveRCFast(address_t incAddr, address_t decAddr) + __attribute__((alias("MRT_IncDecRefNaiveRCFast"))); +void MRT_IncDecRefNaiveRCFast(address_t incAddr, address_t decAddr) { + if (Collector::Instance().Type() != kNaiveRC) { + return; + } + if (incAddr == decAddr) { + return; + } + + (void)MCC_IncRef_NaiveRCFast(incAddr); + (void)MCC_DecRef_NaiveRCFast(decAddr); +} + +void MCC_CleanupLocalStackRef_NaiveRCFast(address_t *localStart, size_t count) { + if (Collector::Instance().Type() != kNaiveRC) { + return; + } + for (size_t i = 0; i < count; ++i) { + address_t slot = localStart[i]; + if (slot != 0) { + (void)MRT_DecRefNaiveRCFast(slot); + } + } +} + +void MCC_CleanupLocalStackRefSkip_NaiveRCFast(address_t *localStart, size_t count, size_t skip) { + if (Collector::Instance().Type() != kNaiveRC) { + return; + } + for (size_t i = 0; i < count; ++i) { + if (i == skip) { + continue; + } + address_t slot = localStart[i]; + if (slot != 0) { + (void)MRT_DecRefNaiveRCFast(slot); + } + } +} + +void MRT_DecRef(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + JSAN_CHECK_OBJ(obj); + CHECK_SAFEREGION("dec %p", reinterpret_cast(obj)); + NRCMutator().DecRef(obj); + } +} + +void MRT_DecRefUnsync(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + (void)UpdateRC<-1, 0, 0>(obj); +#if RC_TRACE_OBJECT + TraceRefRC(obj, RefCount(obj), "After MRT_DecRefUnsync"); +#endif + } +} + +static inline void WriteRefFieldNoDec(address_t obj, address_t *field, address_t value) { + NRCMutator().WriteRefFieldNoDec(obj, field, value); +} + +void MCC_WriteRefFieldNoDec(address_t obj, address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldNoDec: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteRefFieldNoDec(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +static inline void WriteRefFieldNoInc(address_t obj, address_t *field, 
address_t value) { + NRCMutator().WriteRefFieldNoInc(obj, field, value); + if (UNLIKELY(obj == value)) { + MRT_DecRef(obj); + } +} + +void MCC_WriteRefFieldNoInc(address_t obj, address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldNoInc: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteRefFieldNoInc(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + + +static inline void WriteRefFieldNoRC(address_t obj, address_t *field, address_t value) { + NRCMutator().WriteRefFieldNoRC(obj, field, value); + if (UNLIKELY(obj == value)) { + MRT_DecRef(obj); + } +} + +void MCC_WriteRefFieldNoRC(address_t obj, address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldNoRC: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteRefFieldNoRC(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +void MCC_WriteVolatileFieldNoDec(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileFieldNoDec: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatileNoDec(obj, fieldAddr, value); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +void MCC_WriteVolatileFieldNoRC(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileFieldNoRC: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatileNoRC(obj, fieldAddr, value); + if (UNLIKELY(obj == value)) { + // compensate dec + MRT_DecRef(obj); + } + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +void MCC_WriteVolatileFieldNoInc(address_t obj, address_t *fieldAddr, address_t value) { + CHECK_SAFEREGION("MCC_WriteVolatileFieldNoInc: fieldAddr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteVolatileFieldNoInc(obj, fieldAddr, value); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +address_t MRT_LoadRefFieldCommon(address_t obj, address_t *fieldAddr) { + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CHECK_SAFEREGION("MRT_LoadRefFieldCommon: fieldAddr %p", reinterpret_cast(fieldAddr)); + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadIncRefCommon(fieldAddr); + } else { + return GCLoadField(fieldAddr); + } +} + +address_t MCC_LoadRefStatic(address_t *fieldAddr) { + CHECK_SAFEREGION("MCC_LoadRefStatic: fieldAddr %p", reinterpret_cast(fieldAddr)); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadIncRef(fieldAddr); + } else { + return GCLoadField(fieldAddr); + } +} + +void MRT_WriteRefFieldNoDec(address_t obj, 
address_t *field, address_t value) { + CHECK_SAFEREGION("MRT_WriteRefFieldNoDec: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteRefFieldNoDec(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +void MRT_WriteRefFieldNoInc(address_t obj, address_t *field, address_t value) { + CHECK_SAFEREGION("MRT_WriteRefFieldNoInc: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteRefFieldNoInc(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +void MRT_WriteRefFieldNoRC(address_t obj, address_t *field, address_t value) { + CHECK_SAFEREGION("MRT_WriteRefFieldNoRC: obj %p, field %p, value %p", + reinterpret_cast(obj), reinterpret_cast(field), reinterpret_cast(value)); + if (obj == 0) { + // compensate early inc + MRT_DecRef(value); + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + WriteRefFieldNoRC(obj, field, value); + } else { + GCWriteField(obj, field, value); + } +} + +void MCC_WriteRefFieldStaticNoInc(address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldStaticNoInc: field %p, value %p", + reinterpret_cast(field), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldNoInc(kDummyAddress, field, value); + } else { + GCWriteField(kDummyAddress, field, value); + } +} + +void MCC_WriteRefFieldStaticNoDec(address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldStaticNoDec: field %p, value %p", + reinterpret_cast(field), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldNoDec(kDummyAddress, field, value); + } else { + GCWriteField(kDummyAddress, field, value); + } +} + +void MCC_WriteRefFieldStaticNoRC(address_t *field, address_t value) { + CHECK_SAFEREGION("MCC_WriteRefFieldStaticNoRC: field %p, value %p", + reinterpret_cast(field), reinterpret_cast(value)); + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldNoRC(kDummyAddress, field, value); + } else { + GCWriteField(kDummyAddress, field, value); + } +} + +void MRT_WriteReferentField(address_t obj, address_t *fieldAddr, address_t value, bool isResurrectWeak) { + CHECK_SAFEREGION("MRT_WriteReferentField: obj_addr %p, value %p", + reinterpret_cast(fieldAddr), reinterpret_cast(value)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + if (Collector::Instance().Type() == kNaiveRC) { + NRCMutator().WriteRefFieldVolatile(obj, fieldAddr, value, true, isResurrectWeak); + } else { + GCWriteField(obj, fieldAddr, value); + } +} + +address_t MCC_LoadReferentField(address_t obj, address_t *fieldAddr) __attribute__((alias("MRT_LoadReferentField"))); +address_t MRT_LoadReferentField(address_t obj, address_t *fieldAddr) { + CHECK_SAFEREGION("MRT_LoadReferentField: obj_addr %p, value %p", reinterpret_cast(fieldAddr)); + if (obj == 0) { + MRT_ThrowNullPointerExceptionUnw(); + } + CheckObjAllocated(obj); + if (Collector::Instance().Type() == kNaiveRC) { + return NRCMutator().LoadRefVolatile(fieldAddr, true); + } else { + return GCLoadField(fieldAddr); + } +} + +void MRT_IncResurrectWeak(address_t obj) { + 
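+  // Note: unlike most wrappers here, this one assumes the naive RC collector is
+  // active (it calls NRCMutator() without checking Collector::Instance().Type()).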
JSAN_CHECK_OBJ(obj); + CHECK_SAFEREGION("inc resurrect weak %p", reinterpret_cast(obj)); + NRCMutator().IncResurrectWeak(obj); +} + +void MCC_WriteReferent(address_t obj, address_t value) { + if (Collector::Instance().Type() != kNaiveRC) { + SetObjReference(obj); + return; + } + // When referent is null, the collector always treats the referent as marked in art & hotspot. + // Not adding null referent reference to rp list will achieve the same effect, because rp + // thread will not collect the reference. + if (IS_HEAP_OBJ(value)) { + MClass *klass = reinterpret_cast(obj)->GetClass(); + uint32_t classFlag = klass->GetFlag(); + if (classFlag & (modifier::kClassCleaner | modifier::kClassPhantomReference)) { + (void)AtomicUpdateRC<-1, 1, 0>(value); + } else { + (void)AtomicUpdateRC<-1, 0, 1>(value); + } + AddNewReference(obj, classFlag); + } +} + +// pre-write barrier for concurrent marking. +void MRT_PreWriteRefField(address_t obj) { + CHECK_SAFEREGION("mrt pre-write barrier: obj %p", reinterpret_cast(obj)); + TLMutator().SatbWriteBarrier(obj); +} + +// pre-write barrier for compiled code. +void MCC_PreWriteRefField(address_t obj) { + CHECK_SAFEREGION("pre-write barrier: obj %p", reinterpret_cast(obj)); + if (UNLIKELY(obj == 0)) { + // skip pre-write barrier for null object. + return; + } + // pre-write barrier for concurrent marking. + TLMutator().SatbWriteBarrier(obj); +} + +void MRT_SetTracingObject(address_t obj) { + if (!IS_HEAP_ADDR(obj)) { + return; + } + + SetTraceBit(obj); +#if RC_TRACE_OBJECT + void *callerPc = __builtin_return_address(0); + LOG2FILE(kLogtypeRcTrace) << "Obj " << std::hex << reinterpret_cast(obj) << std::dec << " RC= " << + RefCount(obj) << " SetTrace "; + util::PrintPCSymbolToLog(callerPc); +#endif +} + +bool MRT_IsValidObjectAddress(address_t obj) { + if ((obj & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + return false; + } + return true; +} + +// not used now +void MCC_ReleaseRefVar(address_t obj) __attribute__((alias("MRT_ReleaseRefVar"))); +void MRT_ReleaseRefVar(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + CHECK_SAFEREGION("release var: %p", reinterpret_cast(obj)); + NRCMutator().ReleaseRefVar(obj); + } +} + +void MRT_TriggerGC(maplert::GCReason reason) { + if (reason == kGCReasonUser && VLOG_IS_ON(opengclog)) { + util::PrintBacktrace(kLogtypeGc); + } + + Collector::Instance().InvokeGC(reason); +} + +bool MRT_IsNaiveRCCollector() { + return Collector::Instance().Type() == kNaiveRC; +} + +bool MRT_IsGcRunning() { + return Collector::Instance().IsGcRunning(); +} + +bool MRT_IsGcThread() { + return static_cast(reinterpret_cast(maple::tls::GetTLS(maple::tls::kSlotIsGcThread))); +} + +void MRT_DebugCleanup() { +#if !defined(NDEBUG) + // In debug builds, main thread will skip vm->DetachCurrentThread() (see: mplsh.cc), + // so we should ensure MRT_GCFiniThreadLocal() is called before debug cleanup. 
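+  // (i.e. tear down the main thread's mutator here if it is still active,
+  //  mirroring what a normal DetachCurrentThread would have done)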
+ Mutator *mutator = TLMutatorPtr(); + if (mutator != nullptr && mutator->IsActive()) { + (void)MRT_GCFiniThreadLocal(); + } +#endif + Collector::Instance().DebugCleanup(); +} + +void MRT_RegisterGCRoots(address_t *gcroots[], size_t len) { + GCRegisteredRoots::Instance().Register(gcroots, len); +} + +void MRT_RegisterRCCheckAddr(uint64_t *addr) { + RegisteredCollectorTypeCheckAddrs::Instance().Register(addr); +} + +// only used in STW +bool MRT_IsValidObjAddr(address_t obj) { + __MRT_ASSERT(WorldStopped(), "invoke MRT_IsValidObjAddr in none STW"); + return (*theAllocator).AccurateIsValidObjAddr(obj); +} + +bool MRT_FastIsValidObjAddr(address_t obj) { + return (*theAllocator).FastIsValidObjAddr(obj); +} + +// deprecated, use the two above +bool MRT_CheckHeapObj(address_t obj) { + return IS_HEAP_ADDR(obj); +} + +bool MRT_IsGarbage(address_t obj) { + return Collector::Instance().IsGarbage(obj); +} + +ATTR_NO_SANITIZE_ADDRESS +void MCC_InitializeLocalStackRef(address_t *localStart, size_t count) { + for (size_t i = 0; i < count; ++i) { + localStart[i] = 0; + } +} + +static inline void CleanupLocalStackRef(const address_t &localStart) { + address_t slot = localStart; + if (slot != 0) { + MRT_DecRef(slot); + } +} + +void MCC_CleanupLocalStackRef(const address_t *localStart, size_t count) { + if (Collector::Instance().Type() == kNaiveRC) { + for (size_t i = 0; i < count; ++i) { + CleanupLocalStackRef(localStart[i]); + } + MRT_InitPoisonStack(reinterpret_cast(localStart)); + } +} + +void MCC_CleanupLocalStackRefSkip(const address_t *localStart, size_t count, size_t skip) { + if (Collector::Instance().Type() == kNaiveRC) { + for (size_t i = 0; i < count; ++i) { + if (i == skip) { + continue; + } + CleanupLocalStackRef(localStart[i]); + } +#if CONFIG_JSAN + MRT_InitPoisonStack(reinterpret_cast(localStart)); +#endif + } +} + +#if LOG_ALLOC_TIMESTAT +void MRT_ResetAllocTimers() { + (*theAllocator).ForEachMutator([](AllocMutator &mutator) { + mutator.ResetTimers(); + }); +} + +void MRT_PrintAllocTimers() { + TimeStat timers[kTimeMax]; + (*theAllocator).ForEachMutator([&timers](AllocMutator &mutator) { + for (int i = 0; i < kTimeMax; ++i) { + timers[i].tmMin = std::min(mutator.GetTimerMin(i), timers[i].GetMin()); + timers[i].tmMax = std::max(mutator.GetTimerMax(i), timers[i].GetMax()); + timers[i].tmSum += mutator.GetTimerSum(i); + timers[i].tmCnt += mutator.GetTimerCnt(i); + } + }); + int i; + i = kTimeAllocLocal; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] local min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } + i = kTimeAllocGlobal; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] global min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } + i = kTimeAllocLarge; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] large min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } + i = kTimeReleaseObj; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] release min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } + i = kTimeFreeLocal; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] free local min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + 
timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } + i = kTimeFreeGlobal; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] free global min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } + i = kTimeFreeLarge; + if (timers[i].HasStat()) { + LOG(INFO) << "[ATIME] free large min: " << timers[i].GetMin() << ", max: " << timers[i].GetMax() << ", avg: " << + timers[i].GetAvg() << "(" << timers[i].GetCnt() << ")" << maple::endl; + } +} +#endif + +void MCC_CleanupNonescapedVar(address_t obj) { + if (Collector::Instance().Type() == kNaiveRC) { + // obj is a stack address which can not be null, but the + // classinfo field may be null due to diverged control flow + if (LIKELY(reinterpret_cast(obj)->GetClass() != 0)) { + NRCMutator().DecChildrenRef(obj); + } + } +} + +void MRT_DebugShowCurrentMutators() { + MutatorList::Instance().DebugShowCurrentMutators(); +} + +void MRT_CheckSaferegion(bool expect, const char *msg) { + bool safeRegionState = TLMutator().InSaferegion(); + if (UNLIKELY(safeRegionState != expect)) { + util::PrintBacktrace(); + LOG(FATAL) << msg << " check saferegion failed! in saferegion: " << safeRegionState << maple::endl; + } +} + +// Dump Heap content at yield point +// index and msg give a unique log file name for dumped result +void MRT_DumpHeap(const std::string &tag) { + Collector::Instance().DumpHeap(tag); +} + +void MRT_DumpRCAndGCPerformanceInfo(std::ostream &os) { + DumpRCAndGCPerformanceInfo(os); +} + +void MRT_DumpRCAndGCPerformanceInfo_Stderr() { + DumpRCAndGCPerformanceInfo(cerr); +} + +void MRT_VisitAllocatedObjects(maple::rootObjectFunc func) { + if (!(*theAllocator).ForEachObj(func)) { + LOG(ERROR) << "(*theAllocator).ForEachObj() in MRT_VisitAllocatedObjects() return false." << maple::endl; + } +} + +address_t MRT_GetHeapLowerBound() { + return (*theAllocator).HeapLowerBound(); +} + +address_t MRT_GetHeapUpperBound() { + return (*theAllocator).HeapUpperBound(); +} + +void MRT_DumpDynamicCyclePatterns(std::ostream &os, size_t limit) { + if (Collector::Instance().Type() == kNaiveRC) { + ClassCycleManager::DumpDynamicCyclePatterns(os, limit, false); + } +} + +bool MRT_IsCyclePatternUpdated() { + if (Collector::Instance().Type() == kNaiveRC) { + return ClassCycleManager::IsCyclePatternUpdated(); + } + return true; +} + + +void MRT_UpdateProcessState(ProcessState processState, bool isSystemServer) { + Collector::Instance().UpdateProcessState(processState, isSystemServer); +} + +void MRT_WaitGCStopped() { + Collector::Instance().WaitGCStopped(); +} + +void MCC_RecordStaticField(address_t *field, const char *name) { + RecordStaticField(field, name); +} + +void MRT_DumpStaticField(std::ostream &os) { + DumpStaticField(os); +} + +void MRT_PreRenewObject(address_t obj) { + TLMutator().SatbWriteBarrier(obj); +} + +void MRT_GCLogPostFork() { + GCLog().OnPostFork(); +} + +void MRT_SetAllocRecordingCallback(std::function callback) { + (*theAllocator).SetAllocRecordingCallback(callback); +} + +// --- MCC read barriers for GCONLY --- // +// +// For GCONLY, compiler will optimize read barrier calls to inline codes. +// we keep read barriers here to support O0 build and debug. 
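+//
+// In gconly mode a reference load needs no RC bookkeeping, so these helpers
+// reduce to a null check plus a plain LoadRefField, and MCC_Dummy stands in for
+// MCC_IncRef as a no-op when the peephole optimization is disabled.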
+address_t MCC_LoadRefField(address_t obj, address_t *fieldAddr) { + if (UNLIKELY(obj == 0)) { + MRT_ThrowNullPointerExceptionUnw(); + } + CHECK_SAFEREGION("MCC_LoadRefField: fieldAddr %p", static_cast(fieldAddr)); + return LoadRefField(fieldAddr); +} + +// replace MCC_IncRef when gconly is enabled and peephole optimize disabled. +address_t MCC_Dummy(address_t obj) { + return obj; +} + +bool MRT_UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, address_t expectedValue, address_t newValue) { + CHECK_SAFEREGION("MRT_UnsafeCompareAndSwapObject: obj %p, offset %lld expectedValue %p, newValue %p", + reinterpret_cast(obj), offset, + reinterpret_cast(expectedValue), reinterpret_cast(newValue)); + return Collector::Instance().UnsafeCompareAndSwapObject(obj, offset, expectedValue, newValue); +} + +address_t MRT_UnsafeGetObjectVolatile(address_t obj, ssize_t offset) { + CHECK_SAFEREGION("MRT_UnsafeGetObjectVolatile: obj %p, offset %lld", + reinterpret_cast(obj), offset); + return Collector::Instance().UnsafeGetObjectVolatile(obj, offset); +} + +address_t MRT_UnsafeGetObject(address_t obj, ssize_t offset) { + CHECK_SAFEREGION("MRT_UnsafeGetObject: obj %p, offset %lld", + reinterpret_cast(obj), offset); + return Collector::Instance().UnsafeGetObject(obj, offset); +} + +void MRT_UnsafePutObject(address_t obj, ssize_t offset, address_t newValue) { + CHECK_SAFEREGION("MRT_UnsafePutObject: obj %p, offset %lld, newValue %p", + reinterpret_cast(obj), offset, reinterpret_cast(newValue)); + Collector::Instance().UnsafePutObject(obj, offset, newValue); +} + +void MRT_UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue) { + CHECK_SAFEREGION("MRT_UnsafePutObjectVolatile: obj %p, offset %lld, newValue %p", + reinterpret_cast(obj), offset, reinterpret_cast(newValue)); + Collector::Instance().UnsafePutObjectVolatile(obj, offset, newValue); +} + +void MRT_UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue) { + CHECK_SAFEREGION("MRT_UnsafePutObjectOrdered: obj %p, offset %lld, newValue %p", + reinterpret_cast(obj), offset, reinterpret_cast(newValue)); + Collector::Instance().UnsafePutObjectOrdered(obj, offset, newValue); +} + +int64_t MCC_JDouble2JLong(double num) { + return JavaFPToSInt(num); +} + +int64_t MCC_JFloat2JLong(float num) { + return JavaFPToSInt(num); +} + +} // extern "C" +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/arena.cpp b/src/mrt/compiler-rt/src/collector/arena.cpp new file mode 100644 index 0000000000..3e4eb75fac --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/arena.cpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "collector/arena.h" +#include "chosen.h" +namespace maplert { +void HandleArena::VisitGCRoots(const RefVisitor &visitor) { + VisitTopSnapShot(visitor, nullptr, nullptr); +} + +ScopedHandles::ScopedHandles() { + snapshot = TLMutator().GetHandleArena(); +} + +ScopedHandles::~ScopedHandles() { + if (Collector::Instance().Type() == kNaiveRC) { + RefVisitor decVisitor = [](address_t &obj) { + MRT_DecRef(obj); + }; + TLMutator().GetHandleArena().VisitTopSnapShot(decVisitor, snapshot); + } + TLMutator().GetHandleArena().PopBanks(snapshot); + snapshot.Clear(); // clear to avoid destructor handling again +} + +void HandleBase::Push(address_t ref) { + handle = TLMutator().GetHandleArena().AllocateSlot(); + if (handle == nullptr) { + LOG(FATAL) << "Get Arena handle failed" << maple::endl; + return; + } + *handle = ref; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/collector.cpp b/src/mrt/compiler-rt/src/collector/collector.cpp new file mode 100644 index 0000000000..c84f61965b --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/collector.cpp @@ -0,0 +1,211 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/collector.h" +#include "collector/rp_base.h" +#include "chosen.h" +#include "mutator_list.h" + +namespace maplert { +namespace { +const std::string kCollectorName[] = { + "NoCollector", "NaiveRC", "MarkSweep", "NaiveRCMarkSweep" +}; +} + +Collector *Collector::instance = nullptr; +RegisteredCollectorTypeCheckAddrs *RegisteredCollectorTypeCheckAddrs::instance = + new (std::nothrow) RegisteredCollectorTypeCheckAddrs(); + +void RegisteredCollectorTypeCheckAddrs::Register(uint64_t *addr) { + if (addr == nullptr) { + LOG(FATAL) << "Register get a null pointer as parameter" << maple::endl; + } + std::lock_guard lock(registerLock); + addrs.push_back(addr); + if (Collector::InstancePtr() != nullptr) { + bool isGCOnly = Collector::Instance().Type() != kNaiveRC; + if (isGCOnly) { + __MRT_ASSERT(*(addr + 1) == kRegistedMagicNumber, "Invalid number"); + *addr = 1; + } + } +} + +void RegisteredCollectorTypeCheckAddrs::PostCollectorCreate() { + std::lock_guard lock(registerLock); + bool isGCOnly = Collector::Instance().Type() != kNaiveRC; + if (isGCOnly) { + LOG(INFO) << "RegisterRCCheckAddr switch to GC " << addrs.size(); + for (uint64_t *addr : addrs) { + __MRT_ASSERT(*(addr + 1) == kRegistedMagicNumber, "Invalid number"); + *addr = 1; + } + } +} + +Collector::Collector() + : processState(kProcessStateJankPerceptible) {} + +void Collector::Create(bool gcOnly) { + bool createNaivRCCollector = true; +#if ((MRT_COLLECTOR == MRT_COLLECTOR_NONE) || (MRT_COLLECTOR == MRT_COLLECTOR_NAIVERC)) + createNaivRCCollector = !VLOG_IS_ON(gconly) && !gcOnly; +#elif (MRT_COLLECTOR == MRT_COLLECTOR_MS) + UNUSED(gcOnly); + createNaivRCCollector = false; +#else +#error "Invalid MRT_COLLECTOR" +#endif + if (createNaivRCCollector) { + instance = new (std::nothrow) NaiveRCCollector(); + FastAllocData::data.isGCOnly = false; + } else { + instance = 
new (std::nothrow) MarkSweepCollector(); + FastAllocData::data.isGCOnly = true; + } + if (UNLIKELY(instance == nullptr)) { + LOG(FATAL) << "Create Collector instance failed!" << maple::endl; + } + RegisteredCollectorTypeCheckAddrs::Instance().PostCollectorCreate(); +} + +void Collector::SwitchToGCOnFork() { + __MRT_ASSERT(instance != nullptr, "null collector"); + __MRT_ASSERT(instance->Type() == kNaiveRC, "not RC collector"); + // update all object's RC to overflow, avoid RC check + __MRT_ASSERT((static_cast(instance)->HasWeakRelease()) == false, "has weak field"); + // switch reference processor + ReferenceProcessor::SwitchToGCOnFork(); + // switch collector + __MRT_ASSERT(instance->IsSystem() == false, "switch to gc for SS"); + instance->Fini(); + delete instance; + instance = new (std::nothrow) MarkSweepCollector(); + FastAllocData::data.isGCOnly = true; + if (instance == nullptr) { + LOG(FATAL) << "create new collector fail" << maple::endl; + } + RegisteredCollectorTypeCheckAddrs::Instance().PostCollectorCreate(); + instance->SetIsSystem(false); + instance->InitAfterFork(); + MRT_SendBackgroundGcJob(true); + // switch mutator + __MRT_ASSERT(MutatorList::Instance().Size() == 1, "only main thread allowed"); + NaiveRCMutator &rcMutator = NRCMutator(); + Mutator *gcMutator = new (std::nothrow) MarkSweepMutator(*instance); + if (gcMutator == nullptr) { + LOG(FATAL) << "create new mutator fail" << maple::endl; + } + gcMutator->CopyStateOnFork(rcMutator); + gcMutator->InitAfterFork(); + // close rc mutator + MutatorList::Instance().RemoveMutator(rcMutator, [](Mutator *mut) { + if (mut == nullptr) { + LOG(FATAL) << "Wrong mutator pointer" << maple::endl; + } + mut->Fini(); + }); + delete &rcMutator; + // add new gc mutator + MutatorList::Instance().AddMutator(*gcMutator); + maple::tls::StoreTLS(gcMutator, maple::tls::kSlotMutator); + std::atomic_thread_fence(std::memory_order_release); +} + +void Collector::UpdateProcessState(ProcessState processStateVal, bool isSystemServer) { + // ensure the new state is different from the old state + if (processStateVal == processState) { + return; + } + if ((processStateVal == kProcessStateJankImperceptible) && !isSystemServer) { + LOG(INFO) << "UpdateProcessState to JankImperceptible triggers GC" << maple::endl; + MRT_SendBackgroundGcJob(false); + } + processState = processStateVal; +} + +const std::string &Collector::GetName() { + return kCollectorName[type]; +} + +void Collector::Init() { + ReferenceProcessor::Create(type); + stats::gcStats->OnCollectorInit(); +} + +void Mutator::InitTid() { + tid = static_cast(maple::GetTid()); + if (UNLIKELY(tid == 0)) { + LOG(FATAL) << "Mutator::InitTid(): invalid tid = 0" << maple::endl; + } +} + +void Mutator::DebugShow() const { + fprintf(stderr, "Mutator: %p", this); + fprintf(stderr, " tid = %" PRIu32, GetTid()); + fprintf(stderr, " active = %s", IsActive() ? "true" : "false"); + fprintf(stderr, " in_saferegion = %s", InSaferegion() ? 
"true" : "false"); + fprintf(stderr, " stack: %p to %p", reinterpret_cast(stackBegin), reinterpret_cast(stackEnd)); +} + +// Copy state: active, in safe region and stack begin +// Copy last java context +// Copy LocalValueArena and force current LocalValueArena skip desctructor +void Mutator::CopyStateOnFork(Mutator &orig) { + __MRT_ASSERT(orig.active == kMutatorDebugTrue, "Base not active"); + active = orig.active; + __MRT_ASSERT(orig.inSaferegion == kMutatorDebugTrue, "Base in saferegion"); + inSaferegion = orig.inSaferegion; + stackBegin = orig.stackBegin; + initialUnwindContext.CopyFrom(orig.initialUnwindContext); + arena = orig.arena; + orig.arena.Clear(); +} + +void Mutator::SatbWriteBarrier(address_t obj, const reffield_t &field) { + if (!concurrentMarking) { + return; + } + reffield_t ref = field; + if (UNLIKELY((ref & LOAD_INC_RC_MASK) != 0)) { + ref = Collector::Instance().RefFieldLoadBarrier(obj, field); + } + PushIntoSatbBuffer(ref); +} + +void Mutator::PushChildrenToSatbBuffer(address_t obj) { + if (!IS_HEAP_ADDR(obj)) { + return; + } + auto func = [&](const reffield_t &field, uint64_t) { + reffield_t ref = field; + if (UNLIKELY((ref & LOAD_INC_RC_MASK) != 0)) { + ref = Collector::Instance().RefFieldLoadBarrier(obj, field); + } + PushIntoSatbBuffer(ref); + }; + DoForEachRefField(obj, func); +} + +// satb buffer +SatbBuffer &SatbBuffer::Instance() { + static ImmortalWrapper instance; + return *instance; +} + +struct GCTibGCInfo MCC_GCTIB___EmptyObject = { .headerProto = 0, .nBitmapWords = 0 }; +struct GCTibGCInfo MCC_GCTIB___ArrayOfObject = { .headerProto = kHasChildRef | kArrayBit, .nBitmapWords = 0 }; +struct GCTibGCInfo MCC_GCTIB___ArrayOfPrimitive = { .headerProto = kArrayBit, .nBitmapWords = 0 }; +} // namespace maplert. diff --git a/src/mrt/compiler-rt/src/collector/collector_ms.cpp b/src/mrt/compiler-rt/src/collector/collector_ms.cpp new file mode 100644 index 0000000000..4bf993ff03 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/collector_ms.cpp @@ -0,0 +1,1762 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/collector_ms.h" + +#include +#include +#include +#include +#include +#include "mm_config.h" +#include "address.h" +#include "chosen.h" +#include "collector/mrt_bitmap.h" +#include "collector/stats.h" +#include "yieldpoint.h" +#include "profile.h" +#include "collector/native_gc.h" +#include "mutator_list.h" +#include "collie.h" +#include "mrt_class_api.h" + +namespace maplert { +// number of nanoseconds in a microsecond. +constexpr uint64_t kNsPerUs = 1000; + +#if BT_CLEANUP_PROFILE +size_t BTCleanupStats::rootSetSize; +size_t BTCleanupStats::totalRemain; +size_t BTCleanupStats::reachableRemain; +size_t BTCleanupStats::unreachableRemain; +#endif + +// Small queue implementation, for prefetching. 
+#define MRT_MAX_PREFETCH_QUEUE_SIZE_LOG 5UL
+#define MRT_MAX_PREFETCH_QUEUE_SIZE (1UL << MRT_MAX_PREFETCH_QUEUE_SIZE_LOG)
+#if MRT_MAX_PREFETCH_QUEUE_SIZE <= MARK_PREFETCH_DISTANCE
+#error Prefetch queue size must be strictly greater than prefetch distance.
+#endif
+class PrefetchQueue {
+ public:
+  explicit PrefetchQueue(size_t d) : elems {}, distance(d), tail(0), head(0) {}
+  ~PrefetchQueue() {}
+  inline void Add(address_t objaddr) {
+    size_t t = tail;
+    elems[t] = objaddr;
+    tail = (t + 1) & (MRT_MAX_PREFETCH_QUEUE_SIZE - 1UL);
+
+    __builtin_prefetch(reinterpret_cast<void*>(objaddr - kJavaObjAlignment), 1, kPrefetchLocality);
+    __builtin_prefetch(reinterpret_cast<void*>(objaddr), 0, kPrefetchLocality);
+  }
+
+  inline address_t Remove() {
+    size_t h = head;
+    address_t objaddr = elems[h];
+    head = (h + 1) & (MRT_MAX_PREFETCH_QUEUE_SIZE - 1UL);
+
+    return objaddr;
+  }
+
+  inline size_t Length() const {
+    return (tail - head) & (MRT_MAX_PREFETCH_QUEUE_SIZE - 1UL);
+  }
+
+  inline bool Empty() const {
+    return head == tail;
+  }
+
+  inline bool Full() const {
+    return Length() == distance;
+  }
+
+ private:
+  static constexpr int kPrefetchLocality = 3;
+  address_t elems[MRT_MAX_PREFETCH_QUEUE_SIZE];
+  size_t distance;
+  size_t tail;
+  size_t head;
+}; // End of small queue implementation
+
+class MarkTask : public MplTask {
+ public:
+  MarkTask(MarkSweepCollector &tc,
+           MplThreadPool *pool,
+           size_t workStackSize,
+           TracingCollector::WorkStack::iterator workStackData,
+           bool isFollowReferent)
+      : collector(tc), threadPool(pool), followReferent(isFollowReferent) {
+    workStack.reserve(workStackSize + kMaxMarkTaskSize);
+    workStack.insert(workStack.begin(), workStackData, workStackData + workStackSize);
+  }
+
+  // single work task without thread pool
+  MarkTask(MarkSweepCollector &tc, TracingCollector::WorkStack &stack, bool isFollowReferent)
+      : collector(tc), threadPool(nullptr), followReferent(isFollowReferent) {
+    workStack.reserve(stack.size());
+    workStack.insert(workStack.begin(), stack.begin(), stack.end());
+  }
+  virtual ~MarkTask() {
+    threadPool = nullptr;
+  }
+  void Execute(size_t workerID __attribute__((unused))) override {
+    size_t nNewlyMarked = 0;
+    const size_t prefetchDistance = kMarkPrefetchDistance;
+    PrefetchQueue pq(prefetchDistance);
+    for (;;) {
+      // Prefetch as much as possible.
+      while (!pq.Full() && !workStack.empty()) {
+        address_t objaddr = workStack.back();
+        pq.Add(objaddr);
+        workStack.pop_back();
+      }
+
+      // End if pq is empty. This implies that workStack is also empty.
+      if (pq.Empty()) {
+        break;
+      }
+
+      address_t objaddr = pq.Remove();
+      bool wasMarked = collector.MarkObject(objaddr);
+      if (!wasMarked) {
+        LOG2FILE(kLogTypeMix) << "Newly marked: 0x" << objaddr << std::endl;
+        ++nNewlyMarked;
+        // If we mark before enqueueing, we should have checked if it has children.
+        if (!HasChildRef(objaddr)) {
+          continue;
+        }
+
+        if (LIKELY(!IsObjReference(objaddr)) || UNLIKELY(followReferent)) {
+          collector.EnqueueNeighbors(objaddr, workStack);
+        } else {
+          collector.EnqueueNeighborsForRef(objaddr, workStack);
+        }
+
+        if (threadPool != nullptr && UNLIKELY(workStack.size() > kMaxMarkTaskSize)) {
+          // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
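+          // The spawned task takes the upper half of the stack; this task keeps the lower half.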
+ size_t newSize = workStack.size() >> 1; + threadPool->AddTask(new (std::nothrow) MarkTask(collector, + threadPool, + workStack.size() - newSize, + workStack.begin() + newSize, + followReferent)); + workStack.resize(newSize); + } + } else { + LOG2FILE(kLogTypeMix) << "Already marked: 0x" << objaddr << std::endl; + } + } // for loop + + // newly marked statistics. + (void)collector.newlyMarked.fetch_add(nNewlyMarked, std::memory_order_relaxed); + } + + private: + TracingCollector::WorkStack workStack; + MarkSweepCollector &collector; + MplThreadPool *threadPool; + bool followReferent; +}; + +class ConcurrentMarkTask : public MplTask { + public: + ConcurrentMarkTask(MarkSweepCollector &tc, + MplThreadPool *pool, + TracingCollector::WorkStack::iterator workStackStart, + size_t workStackSize) + : collector(tc), threadPool(pool) { + workStack.reserve(workStackSize + kMaxMarkTaskSize); + workStack.insert(workStack.begin(), workStackStart, workStackStart + workStackSize); + } + + // create concurrent mark task without thread pool. + ConcurrentMarkTask(MarkSweepCollector &tc, TracingCollector::WorkStack &&stack) + : collector(tc), threadPool(nullptr), workStack(std::move(stack)) {} + + virtual ~ConcurrentMarkTask() { + threadPool = nullptr; + } + + // when parallel is enabled, fork new task if work stack overflow. + inline void TryForkTask() { + if (threadPool != nullptr && UNLIKELY(workStack.size() > kMaxMarkTaskSize)) { + // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. + size_t newSize = workStack.size() >> 1; + threadPool->AddTask(new (std::nothrow) ConcurrentMarkTask(collector, threadPool, workStack.begin() + newSize, + workStack.size() - newSize)); + workStack.resize(newSize); + } + } + + // Mark object. + // return true if success set object from unmarked to marked. + inline bool Mark(address_t obj) { + // try to mark object and load old mark state. + bool wasMarked = collector.MarkObject(obj); + // return false if object already marked. + if (UNLIKELY(wasMarked)) { + return false; + } + // make success. + ++newlyMarked; + return true; + } + + // run concurrent marking task. + void Execute(size_t) override { + // loop until work stack empty. + for (;;) { + if (workStack.empty()) { + break; + } + // get next object from work stack. + address_t objaddr = workStack.back(); + workStack.pop_back(); + + // skip dangling reference (such as: object already released). + if (UNLIKELY(!IsAllocatedByAllocator(objaddr))) { + // kAllocatedBit not set, means the object address is a dangling reference. + LOG(ERROR) << "Mark encounter dangling reference: " << objaddr << maple::endl; + continue; + } + + bool wasMarked = collector.MarkObject(objaddr); + if (!wasMarked) { + if (!HasChildRef(objaddr)) { + continue; + } + + collector.CopyChildRefs(objaddr, workStack); + } + // try to fork new task if need. + TryForkTask(); + } // end of mark loop. + // newly marked statistics. 
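+    // Fold this task's count into the collector-wide counter; relaxed ordering suffices for statistics.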
+    (void)collector.newlyMarked.fetch_add(newlyMarked, std::memory_order_relaxed);
+  }
+
+ private:
+  MarkSweepCollector &collector;
+  MplThreadPool *threadPool;
+  TracingCollector::WorkStack workStack;
+  size_t newlyMarked = 0;
+};
+
+#if MRT_TEST_CONCURRENT_MARK
+struct RootInfo {
+  std::unordered_set<address_t> staticRoots;
+  std::unordered_set<address_t> extRoots;
+  std::unordered_set<address_t> stringRoots;
+  std::unordered_set<address_t> refRoots;
+  std::unordered_set<address_t> allocatorRoots;
+  std::unordered_set<address_t> classloaderRoots;
+  std::unordered_set<address_t> stackRoots;
+
+  std::string WhatRoot(address_t obj) const {
+    std::ostringstream oss;
+    if (staticRoots.count(obj) != 0) {
+      oss << ":static";
+    }
+    if (extRoots.count(obj) != 0) {
+      oss << ":ext";
+    }
+    if (stringRoots.count(obj) != 0) {
+      oss << ":string";
+    }
+    if (refRoots.count(obj) != 0) {
+      oss << ":ref";
+    }
+    if (allocatorRoots.count(obj) != 0) {
+      oss << ":alloc";
+    }
+    if (classloaderRoots.count(obj) != 0) {
+      oss << ":cloader";
+    }
+    if (stackRoots.count(obj) != 0) {
+      oss << ":stack";
+    }
+    return oss.str();
+  };
+};
+
+static void GCLogPrintObjects(const char *title, const std::vector<address_t> &objects, MrtBitmap &bitmap,
+                              bool findOwner = false, const RootInfo *rootInfo = nullptr) {
+  constexpr size_t kMaxPrintCount = 10;
+  LOG2FILE(kLogtypeGc) << title << '\n';
+  for (size_t i = 0; i < objects.size() && i < kMaxPrintCount; ++i) {
+    address_t obj = objects.at(i);
+    MClass *cls = reinterpret_cast<MObject*>(obj)->GetClass();
+    const char *cname = (cls == nullptr) ? "" : cls->GetName();
+    LOG2FILE(kLogtypeGc) << reinterpret_cast<void*>(obj) <<
+        std::hex <<
+        " rc:" << RefCountLVar(obj) <<
+        " hd:" << GCHeader(obj) <<
+        std::dec <<
+        " mark:" << bitmap.IsObjectMarked(obj) <<
+        " " << ((rootInfo != nullptr) ? rootInfo->WhatRoot(obj) : "") <<
+        " " << cname <<
+        '\n';
+    if (findOwner) {
+      (*theAllocator).ForEachObj([obj, rootInfo, &bitmap](address_t owner) {
+        auto refFunc = [rootInfo, &bitmap, owner, obj](reffield_t &field, uint64_t kind) {
+          if ((static_cast<address_t>(field) == obj) && (kind != kUnownedRefBits)) {
+            address_t fieldOffset = (address_t)(&field) - owner;
+            MClass *ownerCls = reinterpret_cast<MObject*>(owner)->GetClass();
+            const char *ownerCname = (ownerCls == nullptr) ? "" : ownerCls->GetName();
+            LOG2FILE(kLogtypeGc) << (kind == kNormalRefBits ? " owner: " : " weak owner: ") <<
+                reinterpret_cast<void*>(owner) <<
+                std::hex <<
+                " +0x" << fieldOffset <<
+                " rc:" << RefCount(owner) <<
+                " hd:" << GCHeader(owner) <<
+                std::dec <<
+                " mark:" << bitmap.IsObjectMarked(owner) <<
+                " " << ((rootInfo != nullptr) ? rootInfo->WhatRoot(owner) : "") <<
+                " " << ownerCname <<
+                '\n';
+          }
+        };
+        ForEachRefField(owner, refFunc);
+      });
+    }
+  }
+}
+
+// we need some Bitmap debug functions when testing concurrent mark.
+#if !(MRT_DEBUG_BITMAP)
+#error "Please enable MRT_DEBUG_BITMAP in bitmap.h when MRT_TEST_CONCURRENT_MARK is enabled."
+#endif
+
+static void CompareBitmap(const MrtBitmap &bitmap1,
+                          const MrtBitmap &bitmap2,
+                          std::vector<address_t> &unmarked1,
+                          std::vector<address_t> &unmarked2) {
+  const address_t heapStart = (*theAllocator).HeapLowerBound();
+
+  auto words1 = bitmap1.Data();
+  auto words2 = bitmap2.Data();
+  size_t nWords = bitmap1.Size() >> kLogBytesPerWord;
+  using WordType = decltype(*words1);
+
+  // compare word by word.
+  for (size_t i = 0; i < nWords; ++i) {
+    WordType w1 = words1[i];
+    WordType w2 = words2[i];
+
+    // continue if two words are equal.
+    if (w1 == w2) {
+      continue;
+    }
+    // compare bit by bit in the word.
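+    // (only reached when the two words differ, so at least one differing bit is found below)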
+ for (size_t nBit = 0; nBit < kBitsPerWord; ++nBit) { + WordType mask = ((WordType)1 << nBit); + bool bit1 = ((w1 & mask) != 0); + bool bit2 = ((w2 & mask) != 0); + + // continue if two bits are equal. + if (bit1 == bit2) { + continue; + } + // calculate object address by bit position. + address_t obj = heapStart + ((i * kBitsPerWord + (kBitsPerWord - 1 - nBit)) << kLogObjAlignment); + + if (bit1) { + // bitmap1 marked, but bitmap2 unmarked. + unmarked2.push_back(obj); + } else { + // bitmap2 marked, but bitmap1 unmakred. + unmarked1.push_back(obj); + } + } + } +} +#endif // MRT_TEST_CONCURRENT_MARK + +void MarkSweepCollector::ParallelMark(WorkStack &workStack, bool followReferent) { + LOG2FILE(kLogTypeMix) << "Parallel mark work stack size: " << workStack.size() << std::endl; + + newlyMarked.store(0, std::memory_order_relaxed); + if (workStack.size() > kMaxMarkTaskSize) { + MplThreadPool *threadPool = GetThreadPool(); + __MRT_ASSERT(threadPool != nullptr, "null thread pool"); + const int32_t threadCount = GetThreadCount(false); + __MRT_ASSERT(threadCount > 1, "incorrect thread count"); + const size_t kChunkSize = std::min(workStack.size() / threadCount + 1, kMaxMarkTaskSize); + // Split the current work stack into work tasks. + auto end = workStack.end(); + for (auto it = workStack.begin(); it < end;) { + const size_t delta = std::min(static_cast(end - it), kChunkSize); + threadPool->AddTask(new (std::nothrow) MarkTask(*this, threadPool, delta, it, followReferent)); + it += delta; + } + workStack.clear(); + threadPool->SetMaxActiveThreadNum(threadCount - 1); + threadPool->Start(); + threadPool->WaitFinish(true); + + LOG2FILE(kLogtypeGc) << "Parallel Newly Marked " << newlyMarked.load(std::memory_order_relaxed) << + " objects in this phase.\n"; + } else { + // serial marking with a single mark task. + MarkTask markTask(*this, workStack, followReferent); + markTask.Execute(0); + } +} + +void MarkSweepCollector::AddMarkTask(RootSet &rs) { + if (rs.size() == 0) { + return; + } + + MplThreadPool *threadPool = GetThreadPool(); + const size_t kChunkSize = kMaxMarkTaskSize; + bool followReferent = false; + auto end = rs.end(); + for (auto it = rs.begin(); it < end;) { + const size_t delta = std::min(static_cast(end - it), kChunkSize); + threadPool->AddTask(new (std::nothrow) MarkTask(*this, threadPool, delta, it, followReferent)); + it += delta; + } + rs.clear(); +} + +void MarkSweepCollector::ParallelScanMark(RootSet *rootSets, bool processWeak, bool rootString) { + MplThreadPool *threadPool = GetThreadPool(); + const size_t kThreadCount = threadPool->GetMaxThreadNum() + 1; + + // task to scan external roots. + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, processWeak, rootSets](size_t workerID) { + ScanExternalRoots(rootSets[workerID], processWeak); + AddMarkTask(rootSets[workerID]); + })); + + // task to scan reference, allocator and classloader roots. + // those scan are very fast, so we combine them into one single task. + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + ScanReferenceRoots(rootSets[workerID]); + ScanAllocatorRoots(rootSets[workerID]); + ScanClassLoaderRoots(rootSets[workerID]); + AddMarkTask(rootSets[workerID]); + })); + // task to scan zterp static field roots. + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + ScanZterpStaticRoots(rootSets[workerID]); + AddMarkTask(rootSets[workerID]); + })); + // task to scan static field roots. 
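+  // Registered static roots come in chunks; each worker claims the next chunk via an atomic index until none remain.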
+ staticRootsTaskIndex.store(0); + for (size_t t = 0; t < kThreadCount; ++t) { + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + while (true) { + size_t old = staticRootsTaskIndex.fetch_add(1, std::memory_order_release); + address_t **list = nullptr; + size_t len = 0; + { + // Even in STW, *.so loading is in safeRegion, we also need to add a lock to avoid racing. + bool success = GCRegisteredRoots::Instance().GetRootsLocked(old, list, len); + if (!success) { + break; + } + for (size_t i = 0; i < len; ++i) { + MaybeAddRoot(LoadStaticRoot(list[i]), rootSets[workerID], true); + } + } + } + + AddMarkTask(rootSets[workerID]); + })); + } + + // task to scan stack roots. + stackTaskIndex.store(0); + for (size_t t = 0; t < kThreadCount; ++t) { + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + RootSet &rs = rootSets[workerID]; + auto &mutatorList = MutatorList::Instance().List(); + const size_t mutatorLen = mutatorList.size(); + + while (true) { + size_t old = stackTaskIndex.fetch_add(1, std::memory_order_release); + if (old >= mutatorLen) { + break; + } + auto it = mutatorList.begin(); + size_t nop = 0; + while (nop < old) { + ++it; + ++nop; + } + if (doConservativeStackScan) { + ScanStackRoots(**it, rs); + } else { + // for nearly-precise stack scanning + (*it)->VisitJavaStackRoots([&rs](address_t ref) { + // currently only scan & collect the local var + if (LIKELY((*theAllocator).AccurateIsValidObjAddr(ref))) { + rs.push_back(ref); + } + }); + } + // both conservtive and accurate scan, need scan scoped local refs + (*it)->VisitNativeStackRoots([&rs](address_t &ref) { + if (LIKELY((*theAllocator).AccurateIsValidObjAddr(ref))) { + rs.push_back(ref); + } + }); + } + + AddMarkTask(rootSets[workerID]); + })); + } + + // task to scan string roots. 
+ if (rootString) { + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + ScanStringRoots(rootSets[workerID]); + AddMarkTask(rootSets[workerID]); + })); + } + + PostParallelScanMark(processWeak); +} + +void MarkSweepCollector::DoWeakGRT() { + size_t numIterated = 0; + size_t numCleared = 0; + + RefVisitor rootVisitor = [this, &numIterated, &numCleared](address_t &obj) { + ++numIterated; + if (InHeapBoundry(obj) && IsGarbage(obj)) { + __MRT_ASSERT(obj != DEADVALUE, "must be a valid obj"); + ++numCleared; + DecReferentUnsyncCheck(obj, true); + obj = DEADVALUE; + } + }; + + maple::GCRootsVisitor::VisitWeakGRT(rootVisitor); + + LOG2FILE(kLogtypeGc) << " iterated WeakGRT = " << numIterated << '\n'; + LOG2FILE(kLogtypeGc) << " cleared WeakGRT = " << numCleared << '\n'; +} + +void MarkSweepCollector::ConcurrentPrepareResurrection() { + const size_t vectorCapacity = 100; + resurrectCandidates.reserve(vectorCapacity); + function visitor = [this](address_t objAddr) { + if (UNLIKELY(IsUnmarkedResurrectable(objAddr))) { + resurrectCandidates.push_back(objAddr); + } + }; + + // an unsafe scan of the heap, only works because we are in concurrent mark + // this assumes no objs are actually released by the allocator + // assumes it won't crash when we scan any uninitialised memory (e.g., uninitialised obj) + // assumes all new objs are marked + // assumes the finalizable count always >= actual finalizable objs in page + // assumes all resurrected obj during cm must not unset enqueue flag + (*theAllocator).ForEachObjUnsafe(visitor, OnlyVisit::kVisitFinalizable); +} + +void MarkSweepCollector::ConcurrentMarkFinalizer() { + WorkStack workStack = resurrectCandidates; + + size_t markedObjs = 0; + // loop until work stack and prefetch queue empty. + for (;;) { + if (workStack.empty()) { + break; + } + + // get next object from prefetch queue. + address_t objAddr = workStack.back(); + workStack.pop_back(); + + // skip dangling reference (such as: object already released). + if (UNLIKELY(!IsAllocatedByAllocator(objAddr))) { + // kAllocatedBit not set, means the object address is a dangling reference. + LOG(ERROR) << "Mark encounter dangling reference: " << objAddr << maple::endl; + continue; + } + + // skip if the object already marked. + if (markBitmap.IsObjectMarked(objAddr) || finalBitmap.IsObjectMarked(objAddr)) { + continue; + } + + // if object has no child refs. + if (!HasChildRef(objAddr)) { + ++markedObjs; + // mark object and skip child refs. + (void)MarkObjectForFinalizer(objAddr); + continue; + } + + // handle child refs. + // remember work stack size before child refs added, + // so that we can discard newly added refs if needed. + const size_t kOldWorkStackSize = workStack.size(); + + // check dirty bit. + bool dirty = IsDirty(objAddr); + if (LIKELY(!dirty)) { + // if object was not modified, + // try to copy object child refs into work stack. + CopyChildRefs(objAddr, workStack, true); + std::atomic_thread_fence(std::memory_order_acq_rel); + dirty = IsDirty(objAddr); + } + + if (UNLIKELY(dirty)) { + // discard copied child refs in work stack. + workStack.resize(kOldWorkStackSize); + } else { + ++markedObjs; + (void)MarkObjectForFinalizer(objAddr); + } + } // end of mark loop. 
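+  // Report how many finalizer-reachable objects were marked in this pass.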
+ LOG2FILE(kLogtypeGc) << "\tmarked objects for finalizer: " << markedObjs << "\n"; +} + +void MarkSweepCollector::ConcurrentAddFinalizerToRP() { + size_t nResurrected = 0; + WorkStack deadFinalizers; + deadFinalizers.reserve(resurrectCandidates.size()); + for (auto addr : resurrectCandidates) { + if (IsGarbageBeforeResurrection(addr) && IsObjResurrectable(addr)) { + ++nResurrected; + deadFinalizers.push_back(addr); + } + } + ReferenceProcessor::Instance().AddFinalizables( + deadFinalizers.data(), static_cast(deadFinalizers.size()), true); + LOG2FILE(kLogtypeGc) << nResurrected << " objects resurrected in " << resurrectCandidates.size() << " candidates.\n"; +} + +void MarkSweepCollector::DoResurrection(WorkStack&) { + if (Type() == kNaiveRCMarkSweep) { + size_t nResurrected = 0; + for (auto addr : resurrectCandidates) { + if (IsUnmarkedResurrectable(addr)) { + ++nResurrected; + ReferenceProcessor::Instance().AddFinalizable(addr, false); + } + } + LOG2FILE(kLogtypeGc) << nResurrected << " objects resurrected in " << resurrectCandidates.size() << " candidates\n"; + } else { + // gc need discover Reference + for (auto ref : finalizerFindReferences) { + GCReferenceProcessor::Instance().DiscoverReference(ref); + } + } + useFinalBitmap = true; +} + +void MarkSweepCollector::ResurrectionCleanup() { + WorkStack().swap(resurrectCandidates); + WorkStack().swap(finalizerFindReferences); +} + +void MarkSweepCollector::ParallelResurrection(WorkStack &workStack) { + if (VLOG_IS_ON(dumpgarbage)) { + DumpFinalizeGarbage(); + } + // WARNING: Cannot use vector on the outer level. We don't know how many + // threads the thread pool has. Lists never invalidate references to its + // elements when adding new elements, while vectors may need to be + // re-allocated when resizing, causing all references to existing elements to + // be invalidated. + list> finalizablesFromEachTask; + // The factory is called when creating each task. + Allocator::VisitorFactory visitorFactory = [&finalizablesFromEachTask]() { + // NOTE: No locking here, because (1) tasks are created sequentially, and + // (2) even if new tasks can be created when old tasks are running, + // emplace_back on std::list adds new nodes, but does not modify or + // reallocate existing **elements**. + finalizablesFromEachTask.emplace_back(); + vector &myFinalizables = finalizablesFromEachTask.back(); + // NOTE: my_finalizables is an L-value of an element of the + // finalizablesFromEachTask vector. Each returned lambda function + // captures a different element of the finalizablesFromEachTask by + // reference (capturing the address), therefore different tasks (and + // threads) do not share any vectors. + return [&myFinalizables](address_t objaddr) { + if (IsUnmarkedResurrectable(objaddr)) { + myFinalizables.push_back(objaddr); // No need for locking because each task uses its own vector + } + }; + }; + if (UNLIKELY(!(*theAllocator).ParallelForEachObj(*GetThreadPool(), visitorFactory, + OnlyVisit::kVisitFinalizable))) { + LOG(ERROR) << "(*theAllocator).ParallelForEachObj() in TracingCollector::ParallelResurrection() return false."; + } + GCLog().Stream() << "Number of parallel tasks: " << finalizablesFromEachTask.size() << std::endl; + + size_t numResurrected = 0; + for (auto &finalizables : finalizablesFromEachTask) { + for (auto objaddr : finalizables) { +#if __MRT_DEBUG + if (!IsUnmarkedResurrectable(objaddr)) { + LOG(FATAL) << "Attempted to resurrect non-resurrectable object. 
" << + reinterpret_cast(reinterpret_cast(objaddr))->GetClass()->GetName() << maple::endl; + } +#endif + // Add to the finalization queue instead of freeing the object + // NOTE: __MRT_addFinalizableObj internally holds a mutex before adding + // the object to the queue, so it may be faster to create a batch version, + // such as __MRT_addManyFinalizableObj. But tests show that the current + // single-threaded performance is good enough for now because the mutex is + // never contended. + ReferenceProcessor::Instance().AddFinalizable(objaddr, true); + { + ++numResurrected; + + // Put it into the tracing work stack so that we continue marking from it. + Enqueue(objaddr, workStack); + } + } + } + + LOG2FILE(kLogtypeGc) << numResurrected << " objects resurrected.\n"; +} + +void MarkSweepCollector::EnqueueNeighbors(address_t objAddr, WorkStack &workStack) { + auto refFunc = [this, &workStack, objAddr](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if ((kind != kUnownedRefBits) && InHeapBoundry(ref)) { + LOG2FILE(kLogTypeMix) << "enqueueing neighbor: 0x" << std::hex << ref << std::endl; + // This might hurt cache + if (UNLIKELY(!IsAllocatedByAllocator(ref))) { + LOG(ERROR) << "EnqueueNeighbors Adding released object into work queue " << std::hex << + ref << " " << RCHeader(ref) << " " << GCHeader(ref) << + " " << reinterpret_cast(ref)->GetClass()->GetName() << + objAddr << " " << RCHeader(objAddr) << " " << GCHeader(objAddr) << + " " << reinterpret_cast(objAddr)->GetClass()->GetName() << + std::dec << maple::endl; + HandleRCError(ref); + } + Enqueue(ref, workStack); + } + }; + ForEachRefField(objAddr, refFunc); +} + +void MarkSweepCollector::EnqueueNeighborsForRef(address_t objAddr, WorkStack &workStack) { + address_t referentOffset = WellKnown::kReferenceReferentOffset; + MClass *klass = reinterpret_cast(objAddr)->GetClass(); + uint32_t classFlag = klass->GetFlag(); + if ((classFlag & modifier::kClassSoftReference) && !ReferenceProcessor::Instance().ShouldClearReferent(gcReason)) { + referentOffset = 0; + } + auto refFunc = [this, &workStack, objAddr, referentOffset](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if ((kind != kUnownedRefBits) && InHeapBoundry(ref)) { + LOG2FILE(kLogTypeMix) << " enqueueing neighbor for ref: 0x" << std::hex << ref << std::endl; + if (static_cast(reinterpret_cast(&field) - objAddr) != + static_cast(referentOffset)) { + if (UNLIKELY(!IsAllocatedByAllocator(ref))) { + LOG(ERROR) << "EnqueueNeighbors Adding released object into work queue " << std::hex << + ref << " " << RCHeader(ref) << " " << GCHeader(ref) << + " " << reinterpret_cast(ref)->GetClass()->GetName() << + objAddr << " " << RCHeader(objAddr) << " " << GCHeader(objAddr) << + " " << reinterpret_cast(objAddr)->GetClass()->GetName() << + std::dec << maple::endl; + HandleRCError(ref); + } + Enqueue(ref, workStack); + } else if (Type() != kNaiveRCMarkSweep) { + // visit referent and running with GC + if (ref != 0 && ReferenceGetPendingNext(objAddr) == 0 && IsGarbage(ref)) { + GCReferenceProcessor::Instance().DiscoverReference(objAddr); + } + } + } + }; + ForEachRefField(objAddr, refFunc); +} + +#if MRT_TEST_CONCURRENT_MARK +static MrtBitmap snapshotBitmap; +#endif + +void MarkSweepCollector::ConcurrentMSPreSTW1() { + SatbBuffer::Instance().Init(&markBitmap); +} + +void MarkSweepCollector::ConcurrentMarkPrepare() { +#if MRT_TEST_CONCURRENT_MARK + // To test the correctness of concurrent mark, we run a marking + // in stop-the-world-1 stage 
before concurrent mark started, after + // concurrent mark finished we compare their mark bitmaps to find + // out objects which are not correctlly marked. + { + LOG2FILE(kLogtypeGc) << "TestConcurrentMark: snapshot mark...\n"; + MRT_PHASE_TIMER("TestConcurrentMark: snapshot mark"); + WorkStack workStack; + ParallelScanRoots(workStack, true, false); + ParallelMark(workStack); + + // save bitmap to snapshotBitmap. + snapshotBitmap.CopyBitmap(markBitmap); + markBitmap.ResetBitmap(); + } +#endif + + // reset statistic. + newlyMarked.store(0, std::memory_order_relaxed); + newObjDuringMarking.store(0, std::memory_order_relaxed); + freedObjDuringMarking.store(0, std::memory_order_relaxed); + renewObjDuringMarking.store(0, std::memory_order_relaxed); + + // set flag to indicate that concurrent marking is started. + SetConcurrentMarkRunning(true); + for (Mutator *mutator : MutatorList::Instance().List()) { + mutator->SetConcurrentMarking(true); + } +} + +void MarkSweepCollector::ScanStackAndMark(Mutator &mutator) { + TracingCollector::WorkStack workStack; + if (doConservativeStackScan) { + mutator.VisitStackSlotsContent( + [&workStack](address_t ref) { + // most of stack slots are not java heap address. + if (UNLIKELY((*theAllocator).AccurateIsValidObjAddrConcurrent(ref))) { + workStack.push_back(ref); + } + } + ); + mutator.VisitJavaStackRoots([&workStack](address_t ref) { + // currently only scan & collect the local var + if (LIKELY((*theAllocator).AccurateIsValidObjAddrConcurrent(ref))) { + workStack.push_back(ref); + } + }); + } else { + mutator.VisitJavaStackRoots([&workStack](address_t ref) { + // currently only scan & collect the local var + if (LIKELY((*theAllocator).AccurateIsValidObjAddrConcurrent(ref))) { + workStack.push_back(ref); + } + }); + } + + // add a mark task + if (workStack.size() > 0) { + MplThreadPool *threadPool = GetThreadPool(); + threadPool->AddTask(new (std::nothrow) ConcurrentMarkTask(*this, threadPool, workStack.begin(), workStack.size())); + } + + // the stack is scanned, dec needScanMutators + if (numMutatorToBeScan.fetch_sub(1, std::memory_order_relaxed) == 1) { + // notify gc thread + concurrentPhaseBarrier.notify_all(); + } +} + +void MarkSweepCollector::StackScanBarrierInMutator() { + // check if need to scan + Mutator &mutator = TLMutator(); + if (mutator.TrySetScanState(true)) { + // help gc thread to finish + // scan the stack of the mutator + ScanStackAndMark(mutator); + mutator.FinishStackScan(false); + + std::lock_guard guard(snapshotMutex); + snapshotMutators.erase(&mutator); + } +} + +void MarkSweepCollector::ConcurrentStackScan() { + while (true) { + Mutator *mutator = nullptr; + bool needScan = false; + { + std::lock_guard guard(snapshotMutex); + if (snapshotMutators.size() != 0) { + mutator = *(snapshotMutators.begin()); + snapshotMutators.erase(mutator); + } else { + return; + } + needScan = mutator->TrySetScanState(false); + } + if (needScan) { + // scan the stack of the mutator + ScanStackAndMark(*mutator); + mutator->FinishStackScan(true); + } + } +} + +void MarkSweepCollector::ScanSingleStaticRoot(address_t *rootAddr, TracingCollector::WorkStack &workStack) { + const reffield_t ref = LoadStaticRoot(rootAddr); + // most of static fields are null or non-heap objects (e.g. literal strings). 
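+  // so the in-heap check below is expected to fail most of the time.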
+ if (UNLIKELY(InHeapBoundry(ref))) { + address_t value = RefFieldToAddress(ref); + if (LIKELY(!IsObjectMarked(value))) { + workStack.push_back(ref); + } + } +} + +void MarkSweepCollector::ConcurrentStaticRootsScan(bool parallel) { + TracingCollector::WorkStack workStack; + + // scan static field roots. + address_t **staticList; + size_t staticListLen; + size_t index = 0; + + while (GCRegisteredRoots::Instance().GetRootsLocked(index++, staticList, staticListLen)) { + for (size_t i = 0; i < staticListLen; ++i) { + ScanSingleStaticRoot(staticList[i], workStack); + } + } + // zterp static field roots + address_t spaceAddr = (*zterpStaticRootAllocator).GetStartAddr(); + for (size_t i = 0; i < (*zterpStaticRootAllocator).GetObjNum(); ++i) { + ScanSingleStaticRoot(*(reinterpret_cast(spaceAddr)), workStack); + spaceAddr += ZterpStaticRootAllocator::singleObjSize; + } + // add a concurrent mark task. + MplThreadPool *threadPool = GetThreadPool(); + if (parallel && threadPool != nullptr) { + AddConcurrentMarkTask(workStack); + } else { + // serial marking with a single mark task. + ConcurrentMarkTask markTask(*this, std::move(workStack)); + markTask.Execute(0); + } +} + +void MarkSweepCollector::ConcurrentMark(WorkStack &workStack, bool parallel, bool scanRoot) { + __MRT_ASSERT(IsConcurrentMarkRunning(), "running flag not set"); + + // enable parallel marking if we have thread pool. + MplThreadPool *threadPool = GetThreadPool(); + __MRT_ASSERT(threadPool != nullptr, "thread pool is null"); + if (parallel) { + // parallel marking. + AddConcurrentMarkTask(workStack); + workStack.clear(); + threadPool->Start(); + + if (scanRoot) { + // precise stack scan + ConcurrentStackScan(); + + // concurrent mark static roots. + ConcurrentStaticRootsScan(parallel); + } + + threadPool->WaitFinish(true); + } else { + if (scanRoot) { + // scan stack roots and add into task queue + ConcurrentStackScan(); + // mark static roots. + ConcurrentStaticRootsScan(false); + } + // serial marking with a single mark task. + ConcurrentMarkTask markTask(*this, std::move(workStack)); + markTask.Execute(0); + threadPool->DrainTaskQueue(); // drain stack roots task + } + + // wait if the mutator is scanning the stack + if (numMutatorToBeScan.load(std::memory_order_relaxed) != 0) { + std::unique_lock lk(snapshotMutex); + concurrentPhaseBarrier.wait(lk, [this]{ + return numMutatorToBeScan.load(std::memory_order_relaxed) == 0; + }); + } +} + +void MarkSweepCollector::ConcurrentReMark(bool parallel) { + constexpr int kReMarkRounds = 2; + for (int i = 0; i < kReMarkRounds; ++i) { + // find out unmarked dirty objects. + WorkStack remarkStack; + { + MRT_PHASE_TIMER("Get re-mark stack"); + SatbBuffer::Instance().GetRetiredObjects(remarkStack); + } + + MplThreadPool *threadPool = GetThreadPool(); + if (remarkStack.empty() && (threadPool->GetTaskNumber() == 0)) { + // stop re-mark if work stack is empty. + return; + } + + LOG2FILE(kLogtypeGc) << " re-mark stack: " << Pretty(remarkStack.size()) << '\n'; + + // run re-mark if remarkStack not empty. + { + MRT_PHASE_TIMER("Run re-mark"); + ConcurrentMark(remarkStack, parallel && (remarkStack.size() > kMaxMarkTaskSize), false); + } + } +} + +// should be called in the beginning of stop-the-world-2. +void MarkSweepCollector::ConcurrentMarkCleanup() { + __MRT_ASSERT(IsConcurrentMarkRunning(), "running flag not set"); + + // statistics. 
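+  // Snapshot the counters accumulated during concurrent marking for the GC log.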
+ const size_t markObjects = newlyMarked.load(std::memory_order_relaxed); + const size_t newObjects = newObjDuringMarking.load(std::memory_order_relaxed); + const size_t freeObjects = freedObjDuringMarking.load(std::memory_order_relaxed); + const size_t renewObjects = renewObjDuringMarking.load(std::memory_order_relaxed); + + LOG2FILE(kLogtypeGc) << " mark " << Pretty(markObjects) << '\n' << + " new " << Pretty(newObjects) << '\n' << + " free " << Pretty(freeObjects) << '\n' << + " renew " << Pretty(renewObjects) << '\n'; + +#if MRT_TEST_CONCURRENT_MARK + // compare concurrent mark bitmap with snapshot bitmap. + std::vector stwUnmarked; + std::vector cmUnmarked; + CompareBitmap(snapshotBitmap, markBitmap, stwUnmarked, cmUnmarked); + + LOG2FILE(kLogtypeGc) << "TestConcurrentMark:\n" << + " snapshot unmarked: " << Pretty(stwUnmarked.size()) << '\n' << + " concurrent unmarked: " << Pretty(cmUnmarked.size()) << '\n'; + + if (cmUnmarked.size() > 0) { + GCLogPrintObjects("=== concurrent unmarked ===", cmUnmarked, markBitmap, true); + LOG2FILE(kLogtypeGc) << "TestConcurrentMark failed!" << std::endl; // flush gclog. + LOG(FATAL) << "TestConcurrentMark failed! found unmarked alives: " << cmUnmarked.size() << maple::endl; + } + + stwUnmarked.clear(); + cmUnmarked.clear(); +#endif + + // find out unmarked dirty objects. + WorkStack remarkStack; + { + MRT_PHASE_TIMER("Find re-mark objects"); + SatbBuffer::Instance().GetRetiredObjects(remarkStack); + auto func = [&](Mutator *mutator) { + const SatbBuffer::Node *node = mutator->GetSatbBufferNode(); + mutator->SetConcurrentMarking(false); + if (node != nullptr) { + node->GetObjects(remarkStack); + mutator->ResetSatbBufferNode(); + } + }; + MutatorList::Instance().VisitMutators(func); + } + + LOG2FILE(kLogtypeGc) << " stw re-mark stack: " << Pretty(remarkStack.size()) << '\n'; + +#if MRT_TEST_CONCURRENT_MARK + // print remark stack if test is enabled. + if (remarkStack.size() > 0) { + GCLogPrintObjects("=== Need Re-mark ===", remarkStack, markBitmap); + } +#endif + + // start re-mark if needed, re-mark is marking on heap snapshot. + // we are more likely have an empty remark stack because of concurrent re-marking. + MplThreadPool *threadPool = GetThreadPool(); + if (UNLIKELY((remarkStack.size() > 0) || (threadPool->GetTaskNumber() > 0))) { + MRT_PHASE_TIMER("Re-mark"); + ConcurrentMark(remarkStack, (remarkStack.size() > kMaxMarkTaskSize) || (threadPool->GetTaskNumber() > 0), false); + } + + // set flag to indicate that concurrent marking is done. + SetConcurrentMarkRunning(false); + + // update heap bound again in the beginning of STW-2 stage. + InitTracing(); + +#if MRT_TEST_CONCURRENT_MARK + // To test the correctness of concurrent mark, we run a marking again + // after concurrent mark finished in stop-the-world-2 stage, and then + // compare their mark bitmaps to find out objects which are not + // correctlly marked. + LOG2FILE(kLogtypeGc) << "TestConcurrentMark...\n"; + MRT_PHASE_TIMER("TestConcurrentMark"); + + // before we start another marking, make a copy of current mark bitmap, + // and then reset current mark bitmap. + MrtBitmap bitmap; + bitmap.CopyBitmap(markBitmap); + markBitmap.ResetBitmap(); + + // scan roots and marking. + { + MRT_PHASE_TIMER("TestConcurrentMark: scan roots & mark"); + WorkStack workStack; + ParallelScanRoots(workStack, true, false); + ParallelMark(workStack); + } + + // compare two bitmaps to find out error. 
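+  // stwUnmarked: objects marked only by the concurrent pass (candidate floating garbage);
+  // cmUnmarked: live objects the concurrent pass missed, which would indicate a marking error.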
+ CompareBitmap(markBitmap, bitmap, stwUnmarked, cmUnmarked); + + // We treat objects which are marked by concurrent marking, but + // not marked by STW marking, and not released as floating garbages. + size_t nFloatingGarbages = 0; + for (address_t obj : stwUnmarked) { + if (!HasReleasedBit(obj)) { + ++nFloatingGarbages; + } + } + + LOG2FILE(kLogtypeGc) << "TestConcurrentMark:\n" << + " stw unmarked: " << stwUnmarked.size() << '\n' << + " floating garbages: " << nFloatingGarbages << '\n' << + " unmarked live objects: " << cmUnmarked.size() << '\n'; + + if (cmUnmarked.size() > 0) { + // scan root info. + RootInfo rootInfo; + std::vector rs; + + ScanStaticFieldRoots(rs); + rootInfo.staticRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + ScanExternalRoots(rs, true); + rootInfo.extRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + ScanStringRoots(rs); + rootInfo.stringRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + ScanReferenceRoots(rs); + rootInfo.refRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + ScanAllocatorRoots(rs); + rootInfo.allocatorRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + ScanClassLoaderRoots(rs); + rootInfo.classloaderRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + ScanAllStacks(rs); + rootInfo.stackRoots.insert(rs.begin(), rs.end()); + rs.clear(); + + GCLogPrintObjects("=== unmarked live objects ===", cmUnmarked, markBitmap, true, &rootInfo); + LOG2FILE(kLogtypeGc) << "WARNING: check unmarked objects!" << std::endl; // flush gclog. + + // we set rc to 0 for unmarked objects, so that 'inc/dec from 0' occurs if mutator access them. + for (address_t obj : cmUnmarked) { + RefCountLVal(obj) = 0; + } + } + + LOG2FILE(kLogtypeGc) << "TestConcurrentMark done." << std::endl; // flush gclog. + + // restore bitmap after test. + markBitmap.CopyBitmap(bitmap); +#endif // MRT_TEST_CONCURRENT_MARK +} + +void MarkSweepCollector::ConcurrentMSPostSTW2() { + SatbBuffer::Instance().Reset(); +} + +void MarkSweepCollector::RunFullCollection(uint64_t gcIndex) { + // prevent other threads stop-the-world during GC. + ScopedLockStopTheWorld lockStopTheWorld; + + PreMSHook(); + + // Run mark-and-sweep gc. + RunMarkAndSweep(gcIndex); + + PostMSHook(gcIndex); +} + +void MarkSweepCollector::RunMarkAndSweep(uint64_t gcIndex) { + // prepare thread pool. + MplThreadPool *threadPool = GetThreadPool(); + const int32_t threadCount = GetThreadCount(false); + __MRT_ASSERT(threadCount > 1, "unexpected thread count"); + threadPool->SetPriority(maple::kGCThreadStwPriority); + threadPool->SetMaxActiveThreadNum(threadCount); + + const uint64_t gcStartNs = timeutils::NanoSeconds(); + const GCReason reason = gcReason; + const bool concurrent = IsConcurrent(reason); + LOG(INFO) << "[GC] Start " << reasonCfgs[reason].name << " gcIndex= " << gcIndex << maple::endl; + + stats::gcStats->BeginGCRecord(); + stats::gcStats->CurrentGCRecord().reason = reason; + stats::gcStats->CurrentGCRecord().async = reasonCfgs[reason].IsNonBlockingGC(); + stats::gcStats->CurrentGCRecord().isConcurrentMark = concurrent; + + // run mark & sweep. + if (LIKELY(concurrent)) { + ConcurrentMarkAndSweep(); + } else { + ParallelMarkAndSweep(); + } + + // releaes mark bitmap memory after GC. 
+ ResetBitmap(); + + // it's the end of boot phase or it's a user_ni GC + GCReleaseSoType releaseSoType = reasonCfgs[reason].ShouldReleaseSo(); + if (releaseSoType != kReleaseNone) { + MRT_PHASE_TIMER("ReleaseBootMemory"); + MRT_ClearMetaProfile(); + LinkerAPI::Instance().ClearAllMplFuncProfile(); + LOG(INFO) << "Force GC finished" << maple::endl; + // release boot-phase maple*.so memory + LinkerAPI::Instance().ReleaseBootPhaseMemory(false, (releaseSoType == kReleaseAppSo) ? IsSystem() : true); + } + + // release pages in PagePool + PagePool::Instance().Trim(); + + // update NativeGCStats after gc finished. + NativeGCStats::Instance().OnGcFinished(); + + // total GC time. + uint64_t gcTimeNs = timeutils::NanoSeconds() - gcStartNs; + stats::gcStats->CurrentGCRecord().totalGcTime = gcTimeNs; + LOG2FILE(kLogtypeGc) << "Total GC time: " << Pretty(gcTimeNs / kNsPerUs) << "us" << "\n"; + + // trigger reference processor after GC finished. + ReferenceProcessor::Instance().Notify(true); +} + +void MarkSweepCollector::PreMSHook() { + // Notify that GC has started. We need to set the gc_running_ flag here + // because it is a guarantee that when TriggerGCAsync returns, the caller sees GC running. + SetGcRunning(true); +} + +void MarkSweepCollector::PreSweepHook() { +} + +void MarkSweepCollector::PostMSHook(uint64_t gcIndex) { + if (UNLIKELY(VLOG_IS_ON(allocatorfragmentlog))) { + if (IsConcurrent(gcReason)) { + ScopedStopTheWorld stopTheWorld; + std::stringstream ss; + (*theAllocator).PrintPageFragment(ss, "AfterGC"); + LOG2FILE(kLogTypeAllocFrag) << ss.str() << std::flush; + } + } + + GCReason reason = gcReason; + SetGcRunning(false); + NotifyGCFinished(gcIndex); + + // some jobs can be done after NotifyGCFinished(), if they don't block the waiting threads + { + MRT_PHASE_TIMER("Post-MS hook: release free pages"); + bool aggressive = reasonCfgs[reason].ShouldTrimHeap(); + // release the physical memory of free pages + if (!(*theAllocator).ReleaseFreePages(aggressive)) { + LOG(INFO) << "no page released"; + } + } + + // commit gc statistics. + stats::gcStats->CommitGCRecord(); + + // flush gclog. + GCLog().OnGCEnd(); +} + +// stop the world parallel mark & sweep. +void MarkSweepCollector::ParallelMarkAndSweep() { + ScopedStopTheWorld stw; + const uint64_t stwStartNs = timeutils::NanoSeconds(); + PrepareTracing(); + ParallelMarkPhase(); + ResurrectionPhase(false); + ParallelSweepPhase(); + FinishTracing(); + DumpAfterGC(); + const uint64_t stwTimeNs = timeutils::NanoSeconds() - stwStartNs; + stats::gcStats->CurrentGCRecord().stw1Time = stwTimeNs; + stats::gcStats->CurrentGCRecord().stw2Time = 0; + TheAllocMutator::gcIndex++; + LOG2FILE(kLogtypeGc) << "Stop-the-world time: " << Pretty(stwTimeNs / kNsPerUs) << "us\n"; +} + +// concurrent mark & sweep. 
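+// Phase order: STW1 (root snapshot) -> concurrent mark -> STW2 (re-mark clean-up, resurrection, sweep prepare) -> concurrent sweep.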
+void MarkSweepCollector::ConcurrentMarkAndSweep() { + ConcurrentMSPreSTW1(); + WorkStack workStack = NewWorkStack(); + WorkStack inaccurateRoots = NewWorkStack(); + { + ScopedStopTheWorld stw1; + const uint64_t stw1StartNs = timeutils::NanoSeconds(); + PrepareTracing(); + ConcurrentMarkPreparePhase(workStack, inaccurateRoots); + const uint64_t stw1TimeNs = timeutils::NanoSeconds() - stw1StartNs; + stats::gcStats->CurrentGCRecord().stw1Time = stw1TimeNs; + LOG2FILE(kLogtypeGc) << "Stop-the-world-1 time: " << Pretty(stw1TimeNs / kNsPerUs) << "us\n"; + } + ConcurrentMarkPhase(std::move(workStack), std::move(inaccurateRoots)); + { + ScopedStopTheWorld stw2; + const uint64_t stw2StartNs = timeutils::NanoSeconds(); + ConcurrentMarkCleanupPhase(); + ResurrectionPhase(true); + ConcurrentSweepPreparePhase(); + FinishTracing(); + const uint64_t stw2TimeNs = timeutils::NanoSeconds() - stw2StartNs; + stats::gcStats->CurrentGCRecord().stw2Time = stw2TimeNs; + TheAllocMutator::gcIndex++; + LOG2FILE(kLogtypeGc) << "Stop-the-world-2 time: " << Pretty(stw2TimeNs / kNsPerUs) << "us\n"; + } + ConcurrentMSPostSTW2(); + ConcurrentSweepPhase(); +} + +void MarkSweepCollector::PrepareTracing() { + GCLog().OnGCStart(); + LOG2FILE(kLogtypeGc) << "GCReason: " << reasonCfgs[gcReason].name << '\n'; + + if (VLOG_IS_ON(opengclog)) { + (*theAllocator).DumpContention(GCLog().Stream()); + } + + if (VLOG_IS_ON(allocatorfragmentlog)) { + std::stringstream ss; + (*theAllocator).PrintPageFragment(ss, "BeforeGC"); + LOG2FILE(kLogTypeAllocFrag) << ss.str() << std::flush; + } + + if (VLOG_IS_ON(dumpheapbeforegc)) { + DumpHeap("before_gc"); + } + +#if RC_HOT_OBJECT_DATA_COLLECT + DumpHotObj(); +#endif + + InitTracing(); +} + +void MarkSweepCollector::FinishTracing() { + EndTracing(); + + // Call the registerd callback before starting the world. + { + MRT_PHASE_TIMER("Calling GC-finish callback"); + GCFinishCallBack(); + } +} + +void MarkSweepCollector::DumpAfterGC() { + if (VLOG_IS_ON(dumpheapaftergc)) { + DumpHeap("after_gc"); + } + + if (VLOG_IS_ON(allocatorfragmentlog)) { + std::stringstream ss; + (*theAllocator).PrintPageFragment(ss, "AfterGC"); + LOG2FILE(kLogTypeAllocFrag) << ss.str() << std::flush; + } +} + +void MarkSweepCollector::ParallelMarkPhase() { + MplThreadPool *threadPool = GetThreadPool(); + const size_t threadCount = threadPool->GetMaxThreadNum() + 1; + clockid_t cid[threadCount]; + struct timespec workerStart[threadCount]; + struct timespec workerEnd[threadCount]; + uint64_t workerCpuTime[threadCount]; +#ifdef __ANDROID__ + // qemu does not support sys_call: sched_getcpu() + // do not support profile executing cpu of workers + const bool profileSchedCore = true; +#else + const bool profileSchedCore = false; +#endif + // record executing cpu of workers in each sampling point + std::vector schedCores[threadCount]; + + if (GCLog().IsWriteToFile(kLogTypeMix)) { + // debug functionality: set sched_core and cputime + size_t index = 1; + for (auto worker: threadPool->GetThreads()) { + pthread_t thread = worker->GetThread(); + worker->schedCores = profileSchedCore ? &schedCores[index] : nullptr; + pthread_getcpuclockid(thread, &cid[index]); + clock_gettime(cid[index], &workerStart[index]); + ++index; + } + pthread_getcpuclockid(pthread_self(), &cid[0]); + clock_gettime(cid[0], &workerStart[0]); + } + + { + MRT_PHASE_TIMER("Parallel Scan Roots & Mark"); + // prepare + threadPool->Start(); + RootSet rootSets[threadCount]; + + ParallelScanMark(rootSets, true, false); + + threadPool->WaitFinish(true, profileSchedCore ? 
&schedCores[0] : nullptr); + } + + if (GCLog().IsWriteToFile(kLogTypeMix)) { + // debug functionality: print cputime & sched core + for (size_t i = 0; i < threadCount; ++i) { + clock_gettime(cid[i], &workerEnd[i]); + workerCpuTime[i] = static_cast(workerEnd[i].tv_sec - workerStart[i].tv_sec) * + maple::kSecondToNanosecond + static_cast((workerEnd[i].tv_nsec - workerStart[i].tv_nsec)); + } + int cpus[threadCount][threadCount]; + errno_t ret = memset_s(cpus, sizeof(cpus), 0, sizeof(cpus)); + if (UNLIKELY(ret != EOK)) { + LOG(ERROR) << "memset_s(cpus, sizeof(cpus), 0, sizeof(cpus)) in ParallelMarkPhase return " << + ret << " rather than 0." << maple::endl; + } + for (size_t i = 0; i < threadCount; ++i) { + for (auto num: schedCores[i]) { + cpus[i][num % static_cast(threadCount)] += 1; + } + } + + for (size_t i = 0; i < threadCount; ++i) { + LOG2FILE(kLogtypeGc) << "worker " << i << " cputime:" << workerCpuTime[i] << ",\t"; + for (size_t j = 0; j < threadCount; ++j) { + LOG2FILE(kLogtypeGc) << cpus[i][j] << "\t"; + } + LOG2FILE(kLogtypeGc) << '\n'; + } + } +} + +void MarkSweepCollector::ResurrectionPhase(bool isConcurrent) { + if (Type() == kNaiveRCMarkSweep) { + MRT_PHASE_TIMER("Reference: Release Queue"); +#if __MRT_DEBUG + MrtVisitReferenceRoots([this](address_t obj) { + // As stack scan is conservative, it is poissble that released object on stack and marked + if (!(InHeapBoundry(obj) && (IsGarbage(obj) || IsRCCollectable(obj)))) { + LOG(FATAL) << "Live object in release queue. " << + std::hex << obj << std::dec << + " IsObjectMarked=" << IsObjectMarked(obj) << + " refCount=" << RefCount(obj) << + " weakRefCount=" << WeakRefCount(obj) << + " resurrect weakRefCount=" << ResurrectWeakRefCount(obj) << + (IsWeakCollected(obj) ? " weak collected " : " not weak collected ") << + reinterpret_cast(obj)->GetClass()->GetName() << maple::endl; + return; + } + }, RPMask(kRPReleaseQueue)); +#endif + RCReferenceProcessor::Instance().ClearAsyncReleaseObjs(); + } + + // Handle Soft/Weak Reference. + if (UNLIKELY(VLOG_IS_ON(dumpgarbage))) { + DumpWeakSoft(); + } + + if (Type() == kNaiveRCMarkSweep) { + if (LIKELY(isConcurrent)) { + MRT_PHASE_TIMER("Soft/Weak Refinement"); + ReferenceRefinement(RPMask(kRPSoftRef) | RPMask(kRPWeakRef)); + } else { + MRT_PHASE_TIMER("Parallel Reference: Soft/Weak"); + if (Type() == kNaiveRCMarkSweep) { + ParallelDoReference(RPMask(kRPSoftRef) | RPMask(kRPWeakRef)); + } + } + } else { + MRT_PHASE_TIMER("Soft/Weak Discover Processing"); + GCReferenceProcessor::Instance().ProcessDiscoveredReference(RPMask(kRPSoftRef) | RPMask(kRPWeakRef)); + } + + // Finalizable resurrection phase. + WorkStack workStack = NewWorkStack(); + { + MRT_PHASE_TIMER("Parallel Resurrection"); + if (isConcurrent) { + DoResurrection(workStack); + } else { + ParallelResurrection(workStack); + } + } + + LOG2FILE(kLogtypeGc) << "Mark 2:\n" << " workStack size = " << workStack.size() << '\n'; + if (!isConcurrent) { + MRT_PHASE_TIMER("Parallel Mark 2"); + ParallelMark(workStack); + } + + if (UNLIKELY(VLOG_IS_ON(dumpgarbage))) { + DumpCleaner(); + } + + // Handle Cleaner. 
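+  // Cleaner/phantom references are processed only after the finalizable resurrection step above.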
+ if (Type() == kNaiveRCMarkSweep) { + if (LIKELY(isConcurrent)) { + MRT_PHASE_TIMER("Cleaner Refinement"); + ReferenceRefinement(RPMask(kRPPhantomRef)); + } else { + MRT_PHASE_TIMER("Parallel Reference: Cleaner"); + ParallelDoReference(RPMask(kRPPhantomRef)); + } + } else { + MRT_PHASE_TIMER("Cleaner: Discover Processing"); + uint32_t rpFlag = RPMask(kRPSoftRef) | RPMask(kRPWeakRef) | RPMask(kRPPhantomRef); + GCReferenceProcessor::Instance().ProcessDiscoveredReference(rpFlag); + } + + { + MRT_PHASE_TIMER("Reference: WeakGRT"); + DoWeakGRT(); + } +} + +void MarkSweepCollector::ConcurrentMarkPreparePhase(WorkStack& workStack, WorkStack& inaccurateRoots) { + { + // use fast root scan for concurrent marking. + MRT_PHASE_TIMER("Fast Root scan"); + FastScanRoots(workStack, inaccurateRoots, true, false); + } + // prepare for concurrent marking. + ConcurrentMarkPrepare(); +} + +void MarkSweepCollector::ConcurrentMarkPhase(WorkStack&& workStack, WorkStack&& inaccurateRoots) { + // prepare root set before mark, filter out inaccurate roots. + { + MRT_PHASE_TIMER("Prepare root set"); + const size_t accurateSize = workStack.size(); + const size_t inaccurateSize = inaccurateRoots.size(); + PrepareRootSet(workStack, std::move(inaccurateRoots)); + LOG2FILE(kLogtypeGc) << "accurate: " << accurateSize << + " inaccurate: " << inaccurateSize << + " final roots: " << workStack.size() << + '\n'; + } + + // BT statistic for root size. + BTCleanupStats::rootSetSize = workStack.size(); + MplThreadPool *threadPool = GetThreadPool(); + __MRT_ASSERT(threadPool != nullptr, "null thread pool"); + + // use fewer threads and lower priority for concurrent mark. + const int32_t stwWorkers = threadPool->GetMaxActiveThreadNum(); + const int32_t maxWorkers = GetThreadCount(true) - 1; + if (maxWorkers > 0) { + threadPool->SetMaxActiveThreadNum(maxWorkers); + threadPool->SetPriority(maple::kGCThreadConcurrentPriority); + } + MRT_SetThreadPriority(maple::GetTid(), maple::kGCThreadConcurrentPriority); + LOG2FILE(kLogtypeGc) << "Concurrent mark with " << (maxWorkers + 1) << " threads" << + ", workStack: " << workStack.size() << '\n'; + + // run concurrent marking. + { + MRT_PHASE_TIMER("Concurrent marking"); + ConcurrentMark(workStack, maxWorkers > 0, true); + } + + // concurrent do references + if (Type() == kNaiveRCMarkSweep) { + InitReferenceWorkSet(); + { + MRT_PHASE_TIMER("Concurrent Do Reference: Soft/Weak"); + ConcurrentDoReference(RPMask(kRPSoftRef) | RPMask(kRPWeakRef)); + } + { + MRT_PHASE_TIMER("Concurrent Do Reference: Cleaner"); + ConcurrentDoReference(RPMask(kRPPhantomRef)); + } + } else { + MRT_PHASE_TIMER("Concurrent Do Reference"); + GCReferenceProcessor::Instance().ConcurrentProcessDisovered(); + } + + { + MRT_PHASE_TIMER("Concurrent prepare resurrection"); + ConcurrentPrepareResurrection(); + } + + { + MRT_PHASE_TIMER("Concurrent mark finalizer"); + ConcurrentMarkFinalizer(); + } + + // run concurrent re-marking. + { + MRT_PHASE_TIMER("Concurrent re-marking"); + ConcurrentReMark(maxWorkers > 0); + } + + // restore thread pool max workers and priority after concurrent marking. + if (maxWorkers > 0) { + threadPool->SetMaxActiveThreadNum(stwWorkers); + threadPool->SetPriority(maple::kGCThreadStwPriority); + } + MRT_SetThreadPriority(maple::GetTid(), maple::kGCThreadStwPriority); +} + +// concurrent marking clean-up, run in STW2. 
+void MarkSweepCollector::ConcurrentMarkCleanupPhase() { + MRT_PHASE_TIMER("Concurrent marking clean-up"); + ConcurrentMarkCleanup(); +} + +void MarkSweepCollector::ConcurrentSweepPreparePhase() { + if (VLOG_IS_ON(dumpgarbage)) { + DumpGarbage(); + } + PreSweepHook(); + { + MRT_PHASE_TIMER("Sweep: prepare concurrent for StringTable"); + StringPrepareConcurrentSweeping(); + } + + { + MRT_PHASE_TIMER("Sweep: prepare concurrent sweep"); + (*theAllocator).PrepareConcurrentSweep(); + } +} + +void MarkSweepCollector::ConcurrentSweepPhase() { + // reduce the number of thread in concurrent stage. + MplThreadPool *threadPool = GetThreadPool(); + const int32_t maxWorkers = GetThreadCount(true) - 1; + if (maxWorkers > 0) { + threadPool->SetMaxActiveThreadNum(maxWorkers); + threadPool->SetPriority(maple::kGCThreadConcurrentPriority); + } + MRT_SetThreadPriority(maple::GetTid(), maple::kGCThreadConcurrentPriority); + + if (Type() != kNaiveRCMarkSweep) { + // concurrent sweep only sweep unmarked objects in both bitmap. + // it's safe to concurrent add finalizer to reference processor. + MRT_PHASE_TIMER("Concurrent Add Finalizer"); + ConcurrentAddFinalizerToRP(); + } + + { + MRT_PHASE_TIMER("Resurrection cleanup"); + ResurrectionCleanup(); + } + + { + MRT_PHASE_TIMER("Concurrent sweep"); + (*theAllocator).ConcurrentSweep((maxWorkers > 0) ? threadPool : nullptr); + } + + { + MRT_PHASE_TIMER("Concurrent sweep StringTable"); + size_t deadStrings = ConcurrentSweepDeadStrings((maxWorkers > 0) ? threadPool : nullptr); + LOG2FILE(kLogtypeGc) << " Dead strings in StringTable: " << deadStrings << '\n'; + } + if (Type() == kNaiveRCMarkSweep) { + ResetReferenceWorkSet(); + } + MRT_SetThreadPriority(maple::GetTid(), maple::kGCThreadPriority); +} + +void MarkSweepCollector::ParallelSweepPhase() { + MRT_PHASE_TIMER("Parallel Sweep"); + + size_t oldLiveBytes = (*theAllocator).AllocatedMemory(); + size_t oldLiveObjBytes = (*theAllocator).RequestedMemory(); + size_t oldTotalObjects = (*theAllocator).AllocatedObjs(); + + if (VLOG_IS_ON(dumpgarbage)) { + DumpGarbage(); + } + + PreSweepHook(); + + size_t deadStrings; + { + MRT_PHASE_TIMER("Sweep: Removing String from StringTable"); + deadStrings = RemoveDeadStringFromPool(); + } + + MRT_PHASE_TIMER("Parallel Sweep: enumerating and sweeping"); + auto sweeper = [this](address_t addr) { + return CheckAndPrepareSweep(addr); + }; + +#if CONFIG_JSAN + auto checkAndSweep = [&sweeper](address_t addr) { + if (sweeper(addr)) { + (*theAllocator).FreeObj(addr); + } + }; + (*theAllocator).ParallelForEachObj(*GetThreadPool(), [&checkAndSweep] { + return checkAndSweep; + }, OnlyVisit::kVisitAll); +#else + if (UNLIKELY(!(*theAllocator).ParallelFreeAllIf(*GetThreadPool(), sweeper))) { + LOG(ERROR) << "(*theAllocator).ParallelFreeAllIf() in ParallelSweepPhase() return false." 
<< maple::endl; + } +#endif + + size_t newLiveBytes = (*theAllocator).AllocatedMemory(); + size_t newLiveObjBytes = (*theAllocator).RequestedMemory(); + size_t newTotalObjects = (*theAllocator).AllocatedObjs(); + + size_t totalObjBytesCollected = oldLiveObjBytes - newLiveObjBytes; + size_t totalBytesCollected = oldLiveBytes - newLiveBytes; + size_t totalBytesSurvived = newLiveBytes; + size_t totalGarbages = oldTotalObjects - newTotalObjects; + stats::gcStats->CurrentGCRecord().objectsCollected = totalGarbages; + stats::gcStats->CurrentGCRecord().bytesCollected = totalBytesCollected; + stats::gcStats->CurrentGCRecord().bytesSurvived = totalBytesSurvived; + + LOG2FILE(kLogtypeGc) << "End of parallel sweeping.\n" << + " Total objects: " << oldTotalObjects << '\n' << + " Live objects: " << newTotalObjects << '\n' << + " Total garbages: " << totalGarbages << '\n' << + " Object Survived(bytes) " << Pretty(newLiveObjBytes) << + " Object Collected(bytes) " << Pretty(totalObjBytesCollected) << '\n' << + " Total Survived(bytes) " << Pretty(totalBytesSurvived) << + " Total Collected(bytes) " << Pretty(totalBytesCollected) << '\n' << + " Dead strings in StringTable: " << deadStrings << '\n'; +} + +void MarkSweepCollector::Fini() { + TracingCollector::Fini(); +} + +void MarkSweepCollector::AddConcurrentMarkTask(RootSet &rs) { + if (rs.size() == 0) { + return; + } + MplThreadPool *threadPool = GetThreadPool(); + size_t threadCount = threadPool->GetMaxActiveThreadNum() + 1; + const size_t kChunkSize = std::min(rs.size() / threadCount + 1, kMaxMarkTaskSize); + // Split the current work stack into work tasks. + auto end = rs.end(); + for (auto it = rs.begin(); it < end;) { + const size_t delta = std::min(static_cast(end - it), kChunkSize); + threadPool->AddTask(new (std::nothrow) ConcurrentMarkTask(*this, threadPool, it, delta)); + it += delta; + } +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/collector_naiverc.cpp b/src/mrt/compiler-rt/src/collector/collector_naiverc.cpp new file mode 100644 index 0000000000..66e3402a78 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/collector_naiverc.cpp @@ -0,0 +1,1168 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/collector_naiverc.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mm_config.h" +#include "mm_utils.h" +#include "chosen.h" +#include "yieldpoint.h" +#include "mutator_list.h" +#include "collector/stats.h" +#include "mrt_array.h" +#if CONFIG_JSAN +#include "jsan.h" +#endif + +#ifdef __ANDROID__ +#include "collie.h" +#endif + +namespace maplert { +using namespace std; +namespace { +const int kMaxReleaseCount = 2000; +constexpr uint32_t kMaxWeakRelaseDepth = 20; +} +// Perfrom DecRef on strong/weak/resurrect weak rc. +// Before update RC, try match cycle pattern. +// +// Check Cycle before atomic update, avoid data racing. +// If other thread dec and release obj while current thread still matching cycle. 
+// It might have unexpected errors and obj already released. +// Current implemenation might cause some leak in rare case, for example +// Two thread perform dec at same time and see same oldRC and oldRC bigger +// than cycle pattern threshold. +template +static inline uint32_t AtomicDecRef(address_t obj, uint32_t &releaseState) { + static_assert(strongRCDelta + weakRCDelta + resurrectWeakRCDelta == -1, "only one with -1"); + static_assert(strongRCDelta * weakRCDelta * resurrectWeakRCDelta == 0, "only one with -1"); + + uint32_t oldHeader = RCHeader(obj); + bool released = false; + if (weakRCDelta == 0) { + // cycle pattern check condition: after dec, strong RC > 0 and all weak are zero + uint32_t oldRC = GetRCFromRCHeader(oldHeader); + if (oldRC > (-strongRCDelta)) { + if (GetResurrectWeakRCFromRCHeader(oldHeader) == (-resurrectWeakRCDelta)) { + uint32_t rcFlags = GCHeader(obj); + if (IsValidForCyclePatterMatch(rcFlags, oldRC + strongRCDelta)) { + released = CycleCollector::TryFreeCycleAtMutator(obj, -strongRCDelta, false); + } + } + } + } + if (!released) { + oldHeader = AtomicDecRCAndCheckRelease(obj, releaseState); + } else { + releaseState = kNotRelease; + } + return oldHeader; +} + +template +static inline void IncRefInline(address_t obj) { + static_assert(strongRCDelta + weakRCDelta + resurrectWeakRCDelta == 1, "only one with 1"); + static_assert(strongRCDelta * weakRCDelta * resurrectWeakRCDelta == 0, "only one with 1"); + if (UNLIKELY(!IS_HEAP_ADDR(obj))) { + return; + } + + uint32_t oldHeader = AtomicUpdateRC(obj); + uint32_t oldRC = GetRCFromRCHeader(oldHeader); + // check if inc from 0. + if (oldRC == 0) { + HandleRCError(obj); + } + +#if RC_TRACE_OBJECT + TraceRefRC(obj, RefCount(obj), "IncRefInline at"); +#endif +} + +#if RC_HOT_OBJECT_DATA_COLLECT +const uint32_t kRCOperationCountThreshold = 1000000; +static uint64_t hotReleseObjCount = 0; +static uint64_t noRCReleseObjCount = 0; +static uint64_t totalReleseObjCount = 0; +static uint64_t hotReleseRCOpCount = 0; +static uint64_t totalReleaseRCOpCount = 0; +std::atomic totalSkipedRCOpCount(0); +std::atomic totalRCOperationCount(0); + +void StatsFreeObject(address_t obj) { + uint32_t count = RCOperationCount(obj); + ++totalReleseObjCount; + totalReleaseRCOpCount += count; + if (count > kRCOperationCountThreshold) { + ++hotReleseObjCount; + hotReleseRCOpCount += count; + } else if (count == 0) { + ++noRCReleseObjCount; + } +} + +static void DumpObjectBT(address_t obj, std::ostream &ofs) { + // print object info + MObject *mObject = reinterpret_cast(obj); + MClass *classInfo = mObject->GetClass(); + ofs << "[obj]" << std::dec << " " << GCCount(obj) << " " << RCOperationCount(obj) << " " << RefCount(obj) << + " " << GetObjectDWordSize(*mObject) << std::hex << " " << obj << + " " << classInfo->GetName() << std::endl; + + // print traced class new obj callstack + void *pc[kTrackFrameNum] = { 0 }; + int trackCount = kTrackFrameNum; + for (size_t i = 0; i < kTrackFrameNum; ++i) { + // get track PCs from object header + pc[i] = reinterpret_cast(*reinterpret_cast(obj + kOffsetTrackPC + kDWordBytes * i)); + if (pc[i] == 0) { + --trackCount; + } + } + if (trackCount == 0) { + // all PC is zero, not track class + return; + } + + for (size_t i = 0; i < kTrackFrameNum; ++i) { + if (pc[i] == 0) { + continue; + } + util::PrintPCSymbolToLog(pc[i], ofs, false); + } +} + +void DumpHotObj() { + pid_t pid = getpid(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); + std::string filename = dirName + "/hot_obj_dump_" + 
std::to_string(pid) + "_" + + timeutils::GetDigitDate() + ".txt"; +#else + std::string filename = "./hot_obj_dump_" + std::to_string(pid) + "_" + + timeutils::GetDigitDate() + ".txt"; +#endif + std::ofstream ofs (filename, std::ofstream::out); + uint64_t noRCObjCount = 0; + uint64_t hotObjCount = 0; + uint64_t totalObjCount = 0; + + uint64_t hotRCOpCount = 0; + uint64_t totalRCOpCount = 0; + auto func = [&ofs, &noRCObjCount, &hotObjCount, &totalObjCount, &hotRCOpCount, &totalRCOpCount](address_t obj) { + // track object gc count + StatsGCCount(obj); + // print hot object backtrace + uint32_t count = RCOperationCount(obj); + if (count > kRCOperationCountThreshold) { + DumpObjectBT(obj, ofs); + ++hotObjCount; + hotRCOpCount += count; + } else if (count == 0) { + ++noRCObjCount; + } + ++totalObjCount; + totalRCOpCount += count; + } + bool tmpResult = (*theAllocator).ForEachObj(func); + // all retain objet total info + ofs << "[rc total retain]" << std::dec << + " totalObjCount " << totalObjCount << + " hotObjCount " << hotObjCount << + " noRCObjCount " << noRCObjCount << + " totalRCOpCount " << totalRCOpCount << + " hotRCOpCount " << hotRCOpCount << std::endl; + + // all release objet total info + ofs << "[rc total release]" << std::dec << + " totalReleseObjCount " << totalReleseObjCount << + " hotReleseObjCount " << hotReleseObjCount << + " noRCReleseObjCount " << noRCReleseObjCount << + " totalReleaseRCOpCount " << totalReleaseRCOpCount << + " hotReleseRCOpCount " << hotReleseRCOpCount << std::endl; + + // all objet total rc info + ofs << "[rc total ]" << std::dec << + " totalRCOperationCount " << totalRCOperationCount << + " totalSkipedRCOpCount " << totalSkipedRCOpCount << std::endl; + + if (UNLIKELY(!tmpResult)) { + LOG(ERROR) << "ForEachObj() in NaiveRCCollector::DumpHotObj() return false." << maple::endl; + } +} +#endif // RC_HOT_OBJECT_DATA_COLLECT + +// Free Object. +static inline void FreeObject(address_t obj) { + StatsFreeObject(obj); + // when concurrent mark is running, we are not directly free the object, + // instead we set flags on it, this is done by PreFreeObject(), + // which will return true if concurrent mark is running. + NaiveRCMutator &mutator = NRCMutator(); + if (LIKELY(!mutator.PreFreeObject(obj))) { + // directly free the object if concurrent mark not running. + (*theAllocator).FreeObj(obj); + } +} + +// Dec ref count for children before release the object. +static inline void ReleaseObjectWithChildren(address_t obj, std::deque &releaseQueue, + address_t firstobj __MRT_UNUSED, const MClass *firstObjCls __MRT_UNUSED, + uint32_t &releaseCount) { + auto refFunc = [&](reffield_t &field, uint64_t kind) { + address_t child = RefFieldToAddress(field); + if (UNLIKELY((!IS_HEAP_ADDR(child)) || child == obj || kind == kUnownedRefBits)) { + return; + } + + // dec ref. + uint32_t oldHeader; + uint32_t releaseState = kNotRelease; + if (kind == kNormalRefBits) { + oldHeader = AtomicDecRef<-1, 0, 0>(child, releaseState); + } else { + oldHeader = AtomicDecRef<0, -1, 0>(child, releaseState); + } + +#if RC_TRACE_OBJECT + TraceRefRC(child, RefCount(child), "Dec at Release Object, referer from"); +#endif + if (releaseState == kReleaseObject) { + // check and handle finalizable object before release it. + if (RCReferenceProcessor::Instance().CheckAndAddFinalizable(child)) { + return; + } + + // check HashChildRef bit. + if (HasChildRef(child)) { + // push object to release_queue if it has children to handle. 
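ReleaseObjectWithChildren pushes children whose count reaches zero onto releaseQueue instead of recursing, so a long chain of dead objects cannot overflow the native stack. A self-contained sketch of that queue-driven teardown on a toy node graph; the Node type is invented for illustration:

    #include <deque>
    #include <vector>
    #include <iostream>

    struct Node {
      int rc = 1;
      std::vector<Node*> children;
    };

    // Frees `root` (whose last external reference the caller has already dropped)
    // and every node whose reference count drops to zero, using an explicit deque
    // instead of recursion.
    void ReleaseGraph(Node *root) {
      std::deque<Node*> releaseQueue;
      releaseQueue.push_back(root);
      while (!releaseQueue.empty()) {
        Node *cur = releaseQueue.front();
        releaseQueue.pop_front();
        for (Node *child : cur->children) {
          if (--child->rc == 0) {
            releaseQueue.push_back(child);  // defer, do not recurse
          }
        }
        delete cur;
      }
    }

    int main() {
      // a -> b -> c : releasing a tears down the whole chain iteratively.
      Node *c = new Node;
      Node *b = new Node;
      b->children.push_back(c);
      Node *a = new Node;
      a->children.push_back(b);
      ReleaseGraph(a);
      std::cout << "chain released\n";
      return 0;
    }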
+ releaseQueue.push_back(child); + } else { + // directly release object if it has no children. + ++releaseCount; + FreeObject(child); + } + } else if (releaseState == kCollectedWeak) { + if (!RCReferenceProcessor::Instance().CheckAndAddFinalizable(child)) { + NRCMutator().WeakReleaseObj(child); + NRCMutator().DecWeak(child); + } + } else if (((kind == kNormalRefBits) && IsInvalidDec<-1, 0, 0>(oldHeader)) || + ((kind == kWeakRefBits) && IsInvalidDec<0, -1, 0>(oldHeader))) { +#if __MRT_DEBUG +#if CONFIG_JSAN + JsanUseAfterFreeDeath(child); +#endif + LOG(ERROR) << "Dec from 0, Invalid " << std::hex << child << + " " << RCHeader(child) << " " << oldHeader << " " << GCHeader(child) << + " parent: " << obj << " " << MObject::Cast(obj)->GetClass()->GetName() << + " firstobj: " << firstobj << " " << firstObjCls->GetName() << + " kind " << kind << + std::dec << maple::endl; + HandleRCError(child); +#endif + } + }; + DoForEachRefField(obj, refFunc); + // release object. + ++releaseCount; + FreeObject(obj); +} + +// Release Object that ref count became zero. +static inline void ReleaseObject(address_t obj, std::deque &releaseQueue) { +#if LOG_ALLOC_TIMESTAT + TheAllocMutator &mut = TLAllocMutator(); + mut.StartTimer(); +#endif + // check and handle finalizable object before release it. + if (RCReferenceProcessor::Instance().CheckAndAddFinalizable(obj)) { + return; + } + // simply release the object if it has no children. + if (!HasChildRef(obj)) { + // release object. + FreeObject(obj); + return; + } + address_t firstobj = obj; + MClass *firstObjCls = MObject::Cast(firstobj)->GetClass(); + address_t curObj = obj; + uint32_t releaseCount = 0; + +#if LOG_ALLOC_TIMESTAT + // Do not timestat individual FreeObjs when doing + // timestat on release obj with children to keep the + // overhead from skewing the result. + mut.SuspendFreeObjTimeStat(); +#endif + + // loop until all children handled. + for (;;) { + // dec ref children and then release the object. + ReleaseObjectWithChildren(curObj, releaseQueue, firstobj, firstObjCls, releaseCount); + + // exit loop when release_queue is empty. + if (releaseQueue.empty()) { + break; + } + +#if CONFIG_JSAN + if ((releaseCount > kMaxReleaseCount) && (!ReferenceProcessor::Instance().IsCurrentRPThread())) { + LOG(INFO) << "Skip Aysnc release for JSAN" << maple::endl; + } +#else + // async release lefted objects + if ((releaseCount > kMaxReleaseCount) && (!ReferenceProcessor::Instance().IsCurrentRPThread())) { + for (auto it = releaseQueue.begin(); it != releaseQueue.end(); ++it) { + RCReferenceProcessor::Instance().AddAsyncReleaseObj(*it, false); + } + releaseQueue.clear(); + break; + } +#endif + + // take an object from release queue. + // the object must be: valid heap object && rc == 0 && kHasChildRef == 1. 
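The loop above caps how much teardown a mutator does inline: once releaseCount exceeds kMaxReleaseCount, the remaining queued objects are handed to the reference-processor thread via AddAsyncReleaseObj. A small sketch of that "bounded inline work, then hand off" pattern; the Deferred sink and threshold below are made up for illustration:

    #include <deque>
    #include <vector>
    #include <cstddef>
    #include <iostream>

    constexpr std::size_t kMaxInlineWork = 4;  // stand-in for kMaxReleaseCount

    // Processes at most kMaxInlineWork items inline; the rest go to `deferred`,
    // which a background worker would drain later.
    void DrainWithHandoff(std::deque<int> &queue, std::vector<int> &deferred) {
      std::size_t done = 0;
      while (!queue.empty()) {
        if (done > kMaxInlineWork) {
          deferred.assign(queue.begin(), queue.end());  // hand the tail to the worker
          queue.clear();
          break;
        }
        int item = queue.front();
        queue.pop_front();
        ++done;
        (void)item;  // "release" the item inline
      }
    }

    int main() {
      std::deque<int> queue{1, 2, 3, 4, 5, 6, 7, 8};
      std::vector<int> deferred;
      DrainWithHandoff(queue, deferred);
      std::cout << "deferred " << deferred.size() << " items to the async worker\n";
      return 0;
    }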
+ curObj = releaseQueue.front(); + releaseQueue.pop_front(); + } + std::deque().swap(releaseQueue); +#if LOG_ALLOC_TIMESTAT + mut.StopTimer(kTimeReleaseObj); + mut.ResumeFreeObjTimeStat(); +#endif +} + +template +static inline void DecRefInline(address_t obj, std::deque &releaseQueue) { + static_assert(strongRCDelta + weakRCDelta + resurrectWeakRCDelta == -1, "only one with -1"); + static_assert(strongRCDelta * weakRCDelta * resurrectWeakRCDelta == 0, "only one with -1"); + if (UNLIKELY(!IS_HEAP_OBJ(obj))) { + return; + } + uint32_t releaseState = kNotRelease; + uint32_t oldHeader __MRT_UNUSED = AtomicDecRef(obj, releaseState); + + if (releaseState == kReleaseObject) { + ReleaseObject(obj, releaseQueue); + } else if (releaseState == kCollectedWeak) { + if (!RCReferenceProcessor::Instance().CheckAndAddFinalizable(obj)) { + NRCMutator().WeakReleaseObj(obj); + NRCMutator().DecWeak(obj); + } + } +#if __MRT_DEBUG + else if (IsInvalidDec(oldHeader)) { + LOG(ERROR) << "DecRefInline fail " << std::hex << obj << + " " << RCHeader(obj) << " " << oldHeader << " " << GCHeader(obj) << + (strongRCDelta != 0 ? " dec strong" : (weakRCDelta != 0 ? " dec weak" : " dec resurect weak")) << + std::dec << maple::endl; + HandleRCError(obj); + } +#endif +#if RC_TRACE_OBJECT + TraceRefRC(obj, RefCount(obj), "After DecRefInline at"); +#endif +} + +// Mutator implementation +void NaiveRCMutator::IncWeak(address_t obj) { + IncRefInline<0, 1, 0>(obj); +} + +void NaiveRCMutator::IncResurrectWeak(address_t obj) { + IncRefInline<0, 0, 1>(obj); +} + +void NaiveRCMutator::DecWeak(address_t obj) { + DecRefInline<0, -1, 0>(obj, *releaseQueue); +} + +void NaiveRCMutator::IncRef(address_t obj) { + IncRefInline<1, 0, 0>(obj); +} + +void NaiveRCMutator::DecRef(address_t obj) { + DecRefInline<-1, 0, 0>(obj, *releaseQueue); +} + +bool NaiveRCMutator::EnterWeakRelease() { + __MRT_ASSERT(weakReleaseDepth <= kMaxWeakRelaseDepth, "Invalid status"); + if (weakReleaseDepth == kMaxWeakRelaseDepth) { + LOG2FILE(kLogtypeGc) << "EnterWeakRelease return false" << std::endl; + return false; + } + ++weakReleaseDepth; + return true; +} + +void NaiveRCMutator::ExitWeakRelease() { + --weakReleaseDepth; + __MRT_ASSERT(weakReleaseDepth <= kMaxWeakRelaseDepth, "Invalid status"); +} + +// Weak release: release object external reference (dec and clear slot) +// Invoked when weak collected bit is set and object is not finalizable +void NaiveRCMutator::WeakReleaseObj(address_t obj) { +#if LOG_ALLOC_TIMESTAT + TheAllocMutator &mut = TLAllocMutator(); + mut.StartTimer(); +#endif + __MRT_ASSERT(IsWeakCollected(obj), "obj is not weak collected"); + __MRT_ASSERT(!IsEnqueuedObjFinalizable(obj) && !IsObjFinalizable(obj), "obj is finalizable"); + if (UNLIKELY(collector.IsZygote())) { + if (collector.HasWeakRelease() == false) { + collector.SetHasWeakRelease(true); + } + } + + if (!HasChildRef(obj)) { + return; + } + + NaiveRCMutator &mutator = NRCMutator(); + if (UNLIKELY(!mutator.EnterWeakRelease())) { + return; + } + +#if LOG_ALLOC_TIMESTAT + // Do not timestat individual FreeObjs when doing + // timestat on release obj with children to keep the + // overhead from skewing the result. 
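DecRefInline and IncRefInline above encode which of the three counters (strong, weak, resurrect-weak) changes as compile-time template arguments, and their static_asserts reject any instantiation that touches more than one counter. A standalone sketch of that constraint trick; the counter layout and names are illustrative only:

    #include <cstdint>
    #include <iostream>

    struct Counts {
      int32_t strong = 1;
      int32_t weak = 0;
      int32_t resurrectWeak = 0;
    };

    // Exactly one delta may be -1 and the other two must be 0; anything else fails to compile.
    template <int32_t strongDelta, int32_t weakDelta, int32_t resurrectDelta>
    void DecOne(Counts &c) {
      static_assert(strongDelta + weakDelta + resurrectDelta == -1, "only one delta may be -1");
      static_assert(strongDelta * weakDelta * resurrectDelta == 0, "only one delta may be -1");
      c.strong += strongDelta;
      c.weak += weakDelta;
      c.resurrectWeak += resurrectDelta;
    }

    int main() {
      Counts c;
      DecOne<-1, 0, 0>(c);      // dec the strong RC
      // DecOne<-1, -1, 0>(c);  // would not compile: the first static_assert fires
      std::cout << c.strong << '\n';  // prints 0
      return 0;
    }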
+ mut.SuspendFreeObjTimeStat(); +#endif + auto refFunc = [&, this](reffield_t& field, uint64_t kind) { + address_t child = RefFieldToAddress(field); + SatbWriteBarrier(child); + field = 0; + if (child == obj || kind == kUnownedRefBits) { + return; + } + if (kind == kNormalRefBits) { + DecRefInline<-1, 0, 0>(child, *releaseQueue); + } else { + DecRefInline<0, -1, 0>(child, *releaseQueue); + } + }; + DoForEachRefField(obj, refFunc); + mutator.ExitWeakRelease(); + +#if LOG_ALLOC_TIMESTAT + mut.StopTimer(kTimeReleaseObj); + mut.ResumeFreeObjTimeStat(); +#endif +} + +void NaiveRCMutator::ReleaseObj(address_t obj) { + ReleaseObject(obj, *releaseQueue); +} + +#if !STRICT_NAIVE_RC +static inline bool SpinIncRef(address_t obj, uint32_t &yieldCount) { + if (TryAtomicIncStrongRC(obj) == false) { + int tmpResult = sched_yield(); + if (UNLIKELY(tmpResult != 0)) { + LOG(INFO) << "sched_yield() fail " << tmpResult << maple::endl; + } + ++yieldCount; +#if CONFIG_JSAN + if (yieldCount == 1) { + LOG(ERROR) << "[JSAN] spin found condition" << maple::endl; + JsanUseAfterFreeDeath(obj); + } +#endif + if ((yieldCount & 0x7f) == 0) { +#if CONFIG_JSAN + JsanUseAfterFreeDeath(obj); +#endif + LOG(ERROR) << "LoadIncRef hang exceed limit " << std::hex << obj << " gcheader " << GCHeader(obj) << maple::endl; + HandleRCError(obj); + } + return false; + } +#if RC_TRACE_OBJECT + TraceRefRC(obj, RefCount(obj), "Inc at LoadIncRef"); +#endif + return true; +} +#endif + +address_t NaiveRCMutator::LoadIncRefCommon(address_t *fieldAddr) { +#if RC_PROFILE + ++RCCollector::numLoadIncRef; +#endif +#if !STRICT_NAIVE_RC + uint32_t yieldCount = 0; +#endif + do { + address_t obj = LoadRefField(fieldAddr); + if (obj == 0) { + return obj; + } else if ((obj & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + return LoadRefVolatile(fieldAddr, false); + } else if (!IS_HEAP_ADDR(obj)) { + return obj; + } +#if STRICT_NAIVE_RC + IncRefInline<1, 0, 0>(obj); + return obj; +#else + if (SpinIncRef(obj, yieldCount)) { + return obj; + } +#endif + } while (true); +} + +address_t NaiveRCMutator::LoadIncRef(address_t *fieldAddr) { +#if RC_PROFILE + ++RCCollector::numLoadIncRef; +#endif + +#if STRICT_NAIVE_RC + address_t obj = LoadRefField(fieldAddr); + IncRefInline<1, 0, 0>(obj); + return obj; +#else + // if yield exceed limit, force enter yield point to sync memory + // If still fail, then its a real early release + uint32_t yieldCount = 0; + do { + address_t obj = LoadRefField(fieldAddr); + if (UNLIKELY((obj & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK)) { + return LoadRefVolatile(fieldAddr, false); + } else if (!IS_HEAP_ADDR(obj)) { + return obj; + } + if (SpinIncRef(obj, yieldCount)) { + return obj; + } + } while (true); +#endif +} + +// load thread need lock this slot, by compare and swap it to muator_ptr|1 +// store thread, compare and swap old value to new_value +// loadReferent is true when load volatile is load referent in: +// 1. Referent.get() +// 2. 
WeakGlobal.get() +// Similar with LoadRefVolatile but if kWeakCollectedBit is set for referent, return null +const uint32_t kVolatileFieldTidShift = 3; +address_t NaiveRCMutator::LoadRefVolatile(address_t *objAddr, bool loadReferent) { + size_t count = 0xf; + while (true) { + atomic &volatileFieldAddr = AddrToLValAtomic(reinterpret_cast(objAddr)); + address_t obj = RefFieldToAddress(volatileFieldAddr.load(memory_order_acquire)); + if (obj == 0 || obj == GRT_DEADVALUE) { + return obj; + } else if ((obj & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + // lock by other thread load and inc + --count; + if (count == 0) { + count = 0xf; + LOG(ERROR) << "LoadRefVolatile " << (obj >> kVolatileFieldTidShift) << " " << objAddr << maple::endl; + } + int tmpResult = sched_yield(); + if (UNLIKELY(tmpResult != 0)) { + LOG(ERROR) << "sched_yield() in NaiveRCMutator::LoadRefVolatile() return " << tmpResult << "rather than 0." << + maple::endl; + } + continue; + } else if (!IS_HEAP_OBJ(obj)) { + return obj; + } else { + // obj is in heap, try swap it + // must be unique, better with thread address + reffield_t holder = ((static_cast(GetTid())) << kVolatileFieldTidShift) | LOAD_INC_RC_MASK; + reffield_t objRef = AddressToRefField(obj); + if (!volatileFieldAddr.compare_exchange_weak(objRef, holder, memory_order_release, memory_order_relaxed)) { + continue; + } + if (loadReferent) { + obj = AtomicIncLoadWeak(obj); + volatileFieldAddr.store(objRef, memory_order_release); + return obj; + } + // locked + IncRefInline<1, 0, 0>(obj); + // free and return + volatileFieldAddr.store(objRef, memory_order_release); + return obj; + } + } +} + +void NaiveRCMutator::WriteRefFieldVolatileNoInc(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent, bool isResurrectWeak) { + // we skip pre-write barrier for referent write, because we do not follow referent + // during marking, and sometime we need call this function on a non-heap address, + // for example: EntryElement::clearobjAndDecRef() in indirect_reference_table.h. + if (LIKELY(!writeReferent)) { + // pre-write barrier for concurrent marking. + SatbWriteBarrier(obj, *reinterpret_cast(fieldAddr)); + } + size_t count = 0xf; + while (true) { + atomic &volatileFieldAddr = AddrToLValAtomic(reinterpret_cast(fieldAddr)); + reffield_t oldValueRef = volatileFieldAddr.load(memory_order_acquire); + if ((oldValueRef & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + // lock by other thread load and inc + --count; + if (count == 0) { + count = 0xf; + LOG(ERROR) << "LoadRefVolatile " << (oldValueRef >> kVolatileFieldTidShift) << " " << fieldAddr << + maple::endl; + } + int tmpResult = sched_yield(); + if (UNLIKELY(tmpResult != 0)) { + LOG(ERROR) << "sched_yield() in NaiveRCMutator::WriteRefFieldVolatileNoInc() return " << tmpResult << + "rather than 0." 
<< maple::endl; + } + continue; + } else { + // valid object here + reffield_t valueRef = AddressToRefField(value); + if (!volatileFieldAddr.compare_exchange_weak(oldValueRef, valueRef, + memory_order_release, memory_order_relaxed)) { + continue; + } + // successful + address_t oldValue = RefFieldToAddress(oldValueRef); + if (oldValue != obj) { + if (writeReferent) { + if (isResurrectWeak) { + DecRefInline<0, 0, -1>(oldValue, *releaseQueue); + } else { + DecRefInline<0, -1, 0>(oldValue, *releaseQueue); + } + } else { + DecRefInline<-1, 0, 0>(oldValue, *releaseQueue); + } + } + return; + } + } +} + +void NaiveRCMutator::WriteRefFieldVolatileNoRC(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent) { + // we skip pre-write barrier for referent write, because we do not follow referent + // during marking, and sometime we need call this function on a non-heap address, + // for example: EntryElement::clearobjAndDecRef() in indirect_reference_table.h. + if (LIKELY(!writeReferent)) { + // pre-write barrier for concurrent marking. + SatbWriteBarrier(obj, *reinterpret_cast(fieldAddr)); + } + size_t count = 0xf; + while (true) { + atomic &volatileFieldAddr = AddrToLValAtomic(reinterpret_cast(fieldAddr)); + reffield_t oldValueRef = volatileFieldAddr.load(memory_order_acquire); + if ((oldValueRef & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + // lock by other thread load and inc + --count; + if (count == 0) { + count = 0xf; + LOG(ERROR) << "LoadRefVolatile " << (oldValueRef >> kVolatileFieldTidShift) << " " << fieldAddr << maple::endl; + } + int tmpResult = sched_yield(); + if (UNLIKELY(tmpResult != 0)) { + LOG(ERROR) << "sched_yield() in NaiveRCMutator::WriteRefFieldVolatileNoInc() return " << tmpResult << + "rather than 0." << maple::endl; + } + continue; + } else { + // valid object here + reffield_t valueRef = AddressToRefField(value); + if (!volatileFieldAddr.compare_exchange_weak(oldValueRef, valueRef, memory_order_release, memory_order_relaxed)) { + continue; + } + // successful + return; + } + } +} + +void NaiveRCMutator::WriteRefFieldVolatileNoDec(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent, bool isResurrectWeak) { + if (writeReferent) { + if (isResurrectWeak) { + IncRefInline<0, 0, 1>(value); + } else { + IncRefInline<0, 1, 0>(value); + } + } else { + if (value != obj) { + IncRefInline<1, 0, 0>(value); + } + } + WriteRefFieldVolatileNoRC(obj, fieldAddr, value, writeReferent); +} + +void NaiveRCMutator::WriteRefFieldVolatile(address_t obj, address_t *fieldAddr, address_t value, + bool writeReferent, bool isResurrectWeak) { + if (value != obj) { + if (writeReferent) { + if (isResurrectWeak) { + IncRefInline<0, 0, 1>(value); + } else { + IncRefInline<0, 1, 0>(value); + } + } else { + IncRefInline<1, 0, 0>(value); + } + } + WriteRefFieldVolatileNoInc(obj, fieldAddr, value, writeReferent, isResurrectWeak); +} + +// write barrier for local reference variable update. +void NaiveRCMutator::WriteRefVar(address_t *var, address_t value) { + // inc ref for the new value. + IncRefInline<1, 0, 0>(value); + WriteRefVarNoInc(var, value); +} + +void NaiveRCMutator::WriteRefVarNoInc(address_t *var, address_t value) { +#if RC_PROFILE + ++RCCollector::numWriteRefVar; +#endif + // dec ref count for the old one. + DecRefInline<-1, 0, 0>(*var, *releaseQueue); + + // no need atomic, since var is on the stack, + // only current thread can access it. + *var = value; +} + +// writer barrier for object reference field update. 
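LoadRefVolatile and the WriteRefFieldVolatile* variants above cooperate through a lock word: the loader briefly parks a sentinel (the caller's tid shifted left, with the low LOAD_INC_RC_MASK bits set) in the field so the load plus IncRef appears atomic, while writers spin with sched_yield until the sentinel disappears. A self-contained sketch of that protocol on a plain std::atomic slot; the mask value and helper names are illustrative, not the runtime's:

    #include <atomic>
    #include <cstdint>
    #include <iostream>
    #include <thread>

    constexpr uint64_t kLockMask = 0x3;  // low bits never set in an aligned pointer value

    // Atomically reads the slot while holding writers off with a parked sentinel.
    uint64_t LockedLoad(std::atomic<uint64_t> &slot) {
      for (;;) {
        uint64_t value = slot.load(std::memory_order_acquire);
        if ((value & kLockMask) == kLockMask) {        // another reader holds the sentinel
          std::this_thread::yield();
          continue;
        }
        if (!slot.compare_exchange_weak(value, kLockMask,
                                        std::memory_order_acq_rel,
                                        std::memory_order_relaxed)) {
          continue;                                    // raced with a writer, retry
        }
        // ... the real code bumps the object's reference count here ...
        slot.store(value, std::memory_order_release);  // unpark: restore the original value
        return value;
      }
    }

    // A writer must wait while the sentinel is parked, as WriteRefFieldVolatileNoInc does.
    void LockedStore(std::atomic<uint64_t> &slot, uint64_t newValue) {
      for (;;) {
        uint64_t old = slot.load(std::memory_order_acquire);
        if ((old & kLockMask) == kLockMask) {
          std::this_thread::yield();
          continue;
        }
        if (slot.compare_exchange_weak(old, newValue,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)) {
          return;
        }
      }
    }

    int main() {
      std::atomic<uint64_t> slot{0x1000};  // pretend this is an aligned object address
      std::cout << std::hex << LockedLoad(slot) << '\n';
      LockedStore(slot, 0x2000);
      std::cout << std::hex << slot.load() << '\n';
      return 0;
    }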
+void NaiveRCMutator::WriteRefField(address_t obj, address_t *field, address_t value) { + if (obj != value) { + IncRefInline<1, 0, 0>(value); + } + WriteRefFieldNoInc(obj, field, value); +} + +void NaiveRCMutator::WriteRefFieldNoDec(address_t obj, address_t *field, address_t value) { + if (obj != value) { + IncRefInline<1, 0, 0>(value); + } + + WriteRefFieldNoRC(obj, field, value); +} + +void NaiveRCMutator::WriteRefFieldNoRC(address_t obj, address_t *field, address_t value) { +#if RC_PROFILE + ++RCCollector::numWriteRefField; +#endif + + SatbWriteBarrier(obj, *reinterpret_cast(field)); + +#ifdef USE_32BIT_REF + *reinterpret_cast(field) = static_cast(value); +#else + *field = value; +#endif // USE_32BIT_REF +} + +void NaiveRCMutator::WriteRefFieldNoInc(address_t obj, address_t *field, address_t value) { +#if RC_PROFILE + ++RCCollector::numWriteRefField; +#endif + + SatbWriteBarrier(obj, *reinterpret_cast(field)); + + // use atomic exchange to prevent race condition. + // In case of field is loaded as volatile and compare exchange might get unexpected object + atomic &fieldAddr = AddrToLValAtomic(reinterpret_cast(field)); + while (true) { + reffield_t oldValue = fieldAddr.load(memory_order_acquire); + if (UNLIKELY((oldValue & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK)) { + WriteRefFieldVolatileNoInc(obj, field, value); + return; + } else { + reffield_t valueRef = AddressToRefField(value); + if (!fieldAddr.compare_exchange_weak(oldValue, valueRef, memory_order_release, memory_order_relaxed)) { + continue; + } + // dec ref count for old one. + if (oldValue != obj) { + DecRefInline<-1, 0, 0>(oldValue, *releaseQueue); + } + return; + } + } +} + +void NaiveRCMutator::WriteWeakField(address_t obj, address_t *field, address_t value, + bool isVolatile __attribute__((unused))) { + return WriteRefFieldVolatile(obj, field, value, true, false); +} + +address_t NaiveRCMutator::LoadWeakField(address_t *fieldAddr, bool isVolatile __attribute__((unused))) { + return LoadRefVolatile(fieldAddr, true); +} + +// Load a ref field object and increase its weak rc count. +// There could be racing here if other thread is writting field +// 1. If field is volatile, load volatile field: dec strong and inc weak rc +// 2. If object a)has no weak reference 2) weak collected bit set return 0 +// 3. Otherwise, try inc weak rc and return obj +address_t NaiveRCMutator::LoadWeakRefCommon(address_t *field) { + address_t obj = LoadRefField(field); + if (obj == 0) { + return obj; + } else if ((obj & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + address_t result = LoadRefVolatile(field, true); + if (IS_HEAP_ADDR(result)) { + IncRefInline<0, 1, 0>(result); + DecRefInline<-1, 0, 0>(result, *releaseQueue); + } + return result; + } else if (!IS_HEAP_ADDR(obj)) { + return obj; + } + return AtomicIncLoadWeak(obj); +} + +// release local reference variable. +void NaiveRCMutator::ReleaseRefVar(address_t obj) { +#if RC_PROFILE + ++RCCollector::numReleaseRefVar; +#endif + + // dec ref for released object. + DecRefInline<-1, 0, 0>(obj, *releaseQueue); +} + +void NaiveRCCollector::HandleNeighboursForSweep(address_t obj, std::vector &deads) { + // if the object was set as released during concurrent marking, + // we should skip dec neighbours because mutator already did this in MRT_ReleaseObj(). 
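WriteRefField and WriteRefFieldNoInc above increment the new value's count before it is published and only decrement the old value after the swap succeeds, so neither object can be observed at count zero mid-update. A toy sketch of that ordering with plain atomics; ToyObj and its counter are invented for illustration and the delete stands in for FreeObject:

    #include <atomic>
    #include <cassert>
    #include <iostream>

    struct ToyObj {
      std::atomic<int> rc{1};
    };

    // Publishes `newObj` into `field`, keeping both objects' counts non-zero throughout.
    void ToyWriteRefField(std::atomic<ToyObj*> &field, ToyObj *newObj) {
      newObj->rc.fetch_add(1, std::memory_order_relaxed);               // 1. inc the new value first
      ToyObj *old = field.exchange(newObj, std::memory_order_acq_rel);  // 2. publish
      if (old != nullptr &&
          old->rc.fetch_sub(1, std::memory_order_acq_rel) == 1) {       // 3. dec the old value last
        delete old;                                                     // last reference gone
      }
    }

    int main() {
      ToyObj *a = new ToyObj;   // rc == 1, held by the field below
      ToyObj *b = new ToyObj;   // rc == 1, a local reference
      std::atomic<ToyObj*> field{a};
      ToyWriteRefField(field, b);  // b->rc becomes 2, a->rc drops to 0 and a is freed
      assert(field.load() == b);
      std::cout << "old value released, new value rc=" << b->rc.load() << '\n';
      delete b;  // demo cleanup: drop the remaining local + field references together
      return 0;
    }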
+ if (HasReleasedBit(obj)) { + return; + } + auto refFunc = [obj, &deads](reffield_t field, uint64_t kind) { + address_t neighbour = RefFieldToAddress(field); + if ((kind != kUnownedRefBits) && + IS_HEAP_OBJ(neighbour) && + !Collector::Instance().IsGarbage(neighbour) && + (neighbour != obj)) { + uint32_t releaseState = kNotRelease; + if (kind == kNormalRefBits) { + uint32_t oldHeader __MRT_UNUSED = AtomicDecRCAndCheckRelease<-1, 0, 0, false>(neighbour, releaseState); + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetRCFromRCHeader(oldHeader) != 0), "unexpected dec from 0"); + } else { + uint32_t oldHeader __MRT_UNUSED = AtomicDecRCAndCheckRelease<0, -1, 0, false>(neighbour, releaseState); + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetWeakRCFromRCHeader(oldHeader) != 0), "unexpected dec weak from 0"); + } + if (releaseState == kReleaseObject) { + // if rc became zero, we found a dead neighbour. +#if LOGGING_DEAD_NEIGHBOURS + MClass *nebCls = reinterpret_cast(neighbour)->GetClass(); + MClass *objCls = reinterpret_cast(obj)->GetClass(); + LOG2FILE(LOGTYPE_GC) << "Dead neighbour: " << + std::hex << + neighbour << + " rc: " << RefCount(neighbour) << + " hd: " << GCHeader(neighbour) << + std::dec << + " cls: " << + nebCls == nullptr ? "" : nebCls->GetName() << + " <-- " << reinterpret_cast(obj) << " " << + objCls->GetName() << + '\n'; +#else + MRT_DummyUse(obj); // avoid unused warning. +#endif + // save dead neighbour for later release. + deads.push_back(neighbour); + } else if (releaseState == kCollectedWeak) { + __MRT_ASSERT(false, "can not collect weak in concurrent sweep"); + } + } + }; + ForEachRefField(obj, refFunc); +} + +// Set to 1 to try to reduce inc/dec operations during arraycopy at the cost of +// load+inc atomicity. +#define FAST_UNSAFE_ARRAYCOPY 1 + +static void ModifyElemRef(address_t javaSrc, int32_t srcPos, int32_t dstPos, int32_t length) { + int32_t minPos = min(srcPos, dstPos); + int32_t maxPos = max(srcPos, dstPos); + int32_t offset = maxPos - minPos; + + int32_t startPos = minPos; + NaiveRCMutator &mutator = NRCMutator(); + MArray *mArray = reinterpret_cast(javaSrc); + if (srcPos < dstPos) { + for (int32_t i = 0; i < offset; ++i) { + address_t frontElem = reinterpret_cast(mArray->GetObjectElementNoRc(startPos + i)); + address_t backElem = reinterpret_cast(mArray->GetObjectElementNoRc(startPos + length + i)); + if ((frontElem != javaSrc) && (backElem != javaSrc)) { + mutator.IncRef(frontElem); + mutator.DecRef(backElem); + } else if ((frontElem == javaSrc) && (backElem != javaSrc)) { + mutator.DecRef(backElem); + } else if ((frontElem != javaSrc) && (backElem == javaSrc)) { + mutator.IncRef(frontElem); + } + } + } else { + for (int32_t i = 0; i < offset; ++i) { + address_t backElem = reinterpret_cast(mArray->GetObjectElementNoRc(startPos + i)); + address_t frontElem = reinterpret_cast(mArray->GetObjectElementNoRc(startPos + length + i)); + if ((frontElem != javaSrc) && (backElem != javaSrc)) { + mutator.IncRef(frontElem); + mutator.DecRef(backElem); + } else if ((frontElem == javaSrc) && (backElem != javaSrc)) { + mutator.DecRef(backElem); + } else if ((frontElem != javaSrc) && (backElem == javaSrc)) { + mutator.IncRef(frontElem); + } + } + } +} + +void NaiveRCCollector::ObjectArrayCopy(address_t javaSrc, address_t javaDst, int32_t srcPos, + int32_t dstPos, int32_t length, bool check) { + size_t elemSize = sizeof(reffield_t); + MArray *srcMarray = reinterpret_cast(javaSrc); + MArray *dstMarray = reinterpret_cast(javaDst); + char *srcCarray = 
reinterpret_cast(srcMarray->ConvertToCArray()); + char *dstCarray = reinterpret_cast(dstMarray->ConvertToCArray()); + reffield_t *src = reinterpret_cast(srcCarray + elemSize * srcPos); + reffield_t *dst = reinterpret_cast(dstCarray + elemSize * dstPos); + + TLMutator().SatbWriteBarrier(javaDst); + + if ((javaSrc == javaDst) && (abs(srcPos - dstPos) < length)) { + ModifyElemRef(javaSrc, srcPos, dstPos, length); + // most of the copy here are for small length. inline it here + // assumption: length > 0; aligned to 8bytes + if (length < kLargArraySize) { + if (srcPos > dstPos) { // copy to front + for (int i = 0; i < length; ++i) { + dst[i] = src[i]; + } + } else { // copy to back + for (int i = length - 1; i >= 0; --i) { + dst[i] = src[i]; + } + } + } else { + if (memmove_s(dst, elemSize * length, src, elemSize * length) != EOK) { + LOG(FATAL) << "Function memmove_s() failed." << maple::endl; + } + } + } else { + NaiveRCMutator &mutator = NRCMutator(); +#if FAST_UNSAFE_ARRAYCOPY + MClass *dstClass = dstMarray->GetClass(); + MClass *dstComponentType = dstClass->GetComponentClass(); + MClass *lastAssignableComponentType = dstComponentType; + + for (int32_t i = 0; i < length; ++i) { + reffield_t srcelem; + reffield_t dstelem; + srcelem = src[i]; + dstelem = dst[i]; + address_t se = RefFieldToAddress(srcelem); + address_t de = RefFieldToAddress(dstelem); + MObject *srcComponent = reinterpret_cast(se); + if (!check || AssignableCheckingObjectCopy(*dstComponentType, lastAssignableComponentType, srcComponent)) { + dst[i] = srcelem; + } else { + ThrowArrayStoreException(*srcComponent, i, *dstComponentType); + return; + } + if ((se != javaDst) && (de != javaDst)) { + mutator.IncRef(RefFieldToAddress(srcelem)); + mutator.DecRef(RefFieldToAddress(dstelem)); + } else if ((se == javaDst) && (de != javaDst)) { + mutator.DecRef(de); + } else if ((se != javaDst) && (de == javaDst)) { + mutator.IncRef(se); + } + } +#else + for (int32_t i = 0; i < length; ++i) { + MObject *srcComponent = srcMarray->GetObjectElement(srcPosition + i); + dstMarray->SetObjectArrayElement(dstPos + i, srcComponent); + mutator.DecRef(reinterpret_cast(srcComponent)); + } +#endif + } +} + +void NaiveRCCollector::PostObjectClone(address_t src, address_t dst) { + // no need to traverse field of offheap object + if (!IS_HEAP_ADDR(src)) { + return; + } + // to avoid unsafe clone, object is clone and being modified be other thread + // thread1 clone + // 1. memcpy jobj, newobj + // 2. incref jObj.f old value + // + // thread 2 modify + // 1. jObj.f = new field decref and release + // + // if release happens before thread1 step2, then incref from 0 + // issues: what happen if clone Reference? 
need update weakrc + auto refFunc = [src, dst](reffield_t &field, uint64_t kind) { + if (kind == kUnownedRefBits) { + return; + } + address_t offset = (reinterpret_cast(&field)) - src; + // field can be volatile, can not simply use MRT_LoadRefField + address_t ref; + if (kind == kNormalRefBits) { + ref = MRT_LoadRefFieldCommon(src, reinterpret_cast(&field)); + } else { + // kind is kWeakRefBits + ref = MRT_LoadWeakFieldCommon(src, reinterpret_cast(&field)); + } + StoreRefField(dst, offset, ref); + }; + ForEachRefField(src, refFunc); +} + +bool NaiveRCCollector::UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, + address_t expectedValue, address_t newValue) { + JSAN_CHECK_OBJ(obj); + TLMutator().SatbWriteBarrier(obj, *reinterpret_cast(obj + offset)); + + NaiveRCMutator &mutator = NRCMutator(); + mutator.IncRef(newValue); + bool result = false; + for (;;) { + reffield_t expected = AddressToRefField(expectedValue); + result = __atomic_compare_exchange_n(reinterpret_cast(obj + offset), + &expected, + AddressToRefField(newValue), + false, + __ATOMIC_SEQ_CST, + __ATOMIC_SEQ_CST); + if (LIKELY(result || (expected & LOAD_INC_RC_MASK) == 0)) { + break; + } + // try again if CAS on a locked volatile object. + (void)(::sched_yield()); + } + if (result) { + // avoid self cycle inc dec + if (obj == newValue) { + mutator.DecRef(newValue); + } + if (obj != expectedValue) { + mutator.DecRef(expectedValue); + } + } else { + mutator.DecRef(newValue); + } + return result; +} + +address_t NaiveRCCollector::UnsafeGetObjectVolatile(address_t obj, ssize_t offset) { + JSAN_CHECK_OBJ(obj); + return NRCMutator().LoadRefVolatile(reinterpret_cast(obj + offset)); +} + +address_t NaiveRCCollector::UnsafeGetObject(address_t obj, ssize_t offset) { + JSAN_CHECK_OBJ(obj); + return NRCMutator().LoadIncRef(reinterpret_cast(obj + offset)); +} + +void NaiveRCCollector::UnsafePutObject(address_t obj, ssize_t offset, address_t newValue) { + JSAN_CHECK_OBJ(obj); + NRCMutator().WriteRefField(obj, reinterpret_cast(obj + offset), newValue); +} + +void NaiveRCCollector::UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue) { + JSAN_CHECK_OBJ(obj); + NRCMutator().WriteRefFieldVolatile(obj, reinterpret_cast(obj + offset), newValue); +} + +void NaiveRCCollector::UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue) { + JSAN_CHECK_OBJ(obj); + UnsafePutObjectVolatile(obj, offset, newValue); +} + +// (global) Collector implementation +void NaiveRCCollector::Init() { + RCCollector::Init(); + + LOG2FILE(kLogtypeGc) << "NaiveRCCollector::Init()" << std::endl; + if (VLOG_IS_ON(rcverify)) { + ms.SetRCVerify(true); + } + ms.Init(); +} + +void NaiveRCCollector::InitAfterFork() { + // post fork child + RCCollector::InitAfterFork(); + ms.InitAfterFork(); +} + +void NaiveRCCollector::StartThread(bool isZygote) { + ms.StartThread(isZygote); +} + +void NaiveRCCollector::StopThread() { + ms.StopThread(); +} + +void NaiveRCCollector::JoinThread() { + ms.JoinThread(); +} + +void NaiveRCCollector::Fini() { + LOG2FILE(kLogtypeGc) << "NaiveRCCollector::Fini()" << std::endl; + RCCollector::Fini(); + ms.Fini(); +} + +void NaiveRCCollector::InvokeGC(GCReason reason, bool unsafe) { + ms.InvokeGC(reason, unsafe); +} + +// This is invoked from release on stack allocated object only +void NaiveRCMutator::DecChildrenRef(address_t obj) { + // Dec ref for object's children. 
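UnsafeCompareAndSwapObject above bumps the new value's count before attempting the CAS and undoes that bump if the swap loses, so the count is never transiently too low. A compact sketch of that speculative inc / compensating dec pattern, ignoring the volatile lock-word retry and self-reference cases the real code handles; ToyObj is an illustrative stand-in:

    #include <atomic>
    #include <iostream>

    struct ToyObj {
      std::atomic<int> rc{1};
    };

    // Tries to swing `field` from `expected` to `desired`; counts are adjusted so a
    // successful swap transfers ownership and a failed swap is a no-op overall.
    bool CasRefField(std::atomic<ToyObj*> &field, ToyObj *expected, ToyObj *desired) {
      desired->rc.fetch_add(1, std::memory_order_relaxed);     // speculative inc for the new value
      ToyObj *seen = expected;
      bool ok = field.compare_exchange_strong(seen, desired, std::memory_order_acq_rel);
      if (ok) {
        if (expected != nullptr) {
          expected->rc.fetch_sub(1, std::memory_order_acq_rel);  // field no longer holds the old value
        }
      } else {
        desired->rc.fetch_sub(1, std::memory_order_acq_rel);     // undo the speculative inc
      }
      return ok;
    }

    int main() {
      ToyObj a, b;
      std::atomic<ToyObj*> field{&a};
      bool ok = CasRefField(field, &a, &b);
      std::cout << ok << ' ' << a.rc.load() << ' ' << b.rc.load() << '\n';  // 1 0 2
      return 0;
    }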
+ auto refFunc = [&, this](reffield_t &field, uint64_t kind) { + address_t child = RefFieldToAddress(field); + if (child == obj || kind == kUnownedRefBits) { + return; + } + if (kind == kNormalRefBits) { + DecRefInline<-1, 0, 0>(child, *releaseQueue); + } else { + DecRefInline<0, -1, 0>(child, *releaseQueue); + } + }; + DoForEachRefField(obj, refFunc); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/collector_naiverc_ms.cpp b/src/mrt/compiler-rt/src/collector/collector_naiverc_ms.cpp new file mode 100644 index 0000000000..ecf0a3cb17 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/collector_naiverc_ms.cpp @@ -0,0 +1,892 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/collector_naiverc_ms.h" + +#include "chosen.h" +#include "collie.h" +#include "collector/conn_comp.h" +#include "collector/cp_generator.h" +#include "yieldpoint.h" +#include "mutator_list.h" +#include "mstring_inline.h" + +namespace maplert { +namespace { + // Minimum time between cycle pattern learning + constexpr uint64_t kMinTimeBetweenCPLearningMS = 10LL * 60 * 1000; // 10 min +} + +NaiveRCMarkSweepCollector::NaiveRCMarkSweepCollector() : MarkSweepCollector() { + type = kNaiveRCMarkSweep; + alwaysCreateCyclePattern = MRT_ENVCONF(PATTERN_FROM_BACKUP_TRACING, PATTERN_FROM_BACKUP_TRACING_DEFAULT); + lastCyclePatternLearnMS = timeutils::MilliSeconds() - kMinTimeBetweenCPLearningMS; +} + +void NaiveRCMarkSweepCollector::PreMSHook() { + MarkSweepCollector::PreMSHook(); +} + +void NaiveRCMarkSweepCollector::PostMSHook(uint64_t gcIndex) { + MarkSweepCollector::PostMSHook(gcIndex); +} + +void NaiveRCMarkSweepCollector::PreSweepHook() { + MarkSweepCollector::PreSweepHook(); + if (VLOG_IS_ON(opencyclelog)) { + DumpCycleProfile(); + } + + uint64_t now = timeutils::MilliSeconds(); + if (alwaysCreateCyclePattern || + (reasonCfgs[gcReason].ShouldCollectCycle() && + (now - lastCyclePatternLearnMS) >= kMinTimeBetweenCPLearningMS)) { + MRT_PHASE_TIMER("Sweep: learn hot cycles"); + lastCyclePatternLearnMS = now; + DumpCycleLeak(*GetThreadPool()); + } +} + +address_t NaiveRCMarkSweepCollector::LoadStaticRoot(address_t *rootAddr) { + LinkerRef ref(rootAddr); + if (ref.IsIndex()) { + return 0; + } + + address_t obj = LoadRefField(rootAddr); + // The static field may be volatile, and other thread is loading. + // If read the spinlock, it needs wait for the volatile loading. + if ((obj & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK) { + size_t count = 0xf; + while (true) { + int ret = sched_yield(); + if (UNLIKELY(ret != 0)) { + LOG(ERROR) << "sched_yield() in LoadStaticRoot return " << ret << "rather than 0." 
<< maple::endl; + } + + std::atomic &volatileAddr = AddrToLValAtomic(reinterpret_cast(rootAddr)); + obj = RefFieldToAddress(volatileAddr.load(std::memory_order_relaxed)); + if ((obj & LOAD_INC_RC_MASK) != LOAD_INC_RC_MASK) { + return obj; + } else { + --count; + if (count == 0) { + count = 0xf; + LOG(ERROR) << "LoadStaticRoot current value: " << obj << " addr: " << rootAddr << maple::endl; + } + } + } + } + return obj; +} + +// Atomic decrement all live neighbors of a deadobject. +// Only called during parallel backup tracing. +void NaiveRCMarkSweepCollector::DecNeighborsAtomic(uintptr_t obj) { + __MRT_ASSERT(!HasReleasedBit(obj), "object is marked released in parallel sweep"); + auto refFunc = [obj, this](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if ((kind != kUnownedRefBits) && InHeapBoundry(ref) && (ref != obj) && !IsGarbage(ref) && !IsMygoteObj(ref)) { + uint32_t oldHeader, oldRC; + if (kind == kNormalRefBits) { + oldHeader = AtomicUpdateRC<-1, 0, 0>(ref); + oldRC = GetRCFromRCHeader(oldHeader); + } else { + oldHeader = AtomicUpdateRC<0, -1, 0>(ref); + oldRC = GetWeakRCFromRCHeader(oldHeader); + } + // in parallel gc, ref is not garbage, it must have reference and can not be released or weak released + if (UNLIKELY(oldRC == 0)) { + MClass *childClass = reinterpret_cast(ref)->GetClass(); + MClass *objClass = reinterpret_cast(obj)->GetClass(); + LOG2FILE(kLogtypeGc) << "DecNeighborsAtomic from zero " << std::hex << ref << " " << oldHeader << + " " << RCHeader(ref) << " " << GCHeader(ref) << " " << std::dec << + ((childClass == nullptr) ? "already freed" : childClass->GetName()) << + " parent is " << objClass->GetName() << '\n'; + HandleRCError(ref); + } +#if __MRT_DEBUG + uint32_t releaseState; + if (kind == kNormalRefBits) { + releaseState = CanReleaseObj<-1, 0, 0>(oldHeader); + } else { + releaseState = CanReleaseObj<0, -1, 0>(oldHeader); + } + if (releaseState == kReleaseObject) { + if (!doConservativeStackScan) { + LOG(FATAL) << "live object can be released after parall sweep " << std::hex << ref << " " << oldHeader << + " " << RCHeader(ref) << " " << GCHeader(ref) << std::dec << maple::endl; + } else { + // ref might live due to conservertive stack scan + // put object into release queue + RCReferenceProcessor::Instance().AddAsyncReleaseObj(ref, false); + } + } +#endif +#if RC_TRACE_OBJECT + TraceRefRC(ref, RefCount(ref), + (kind == kNormalRefBits ? "After DecNeighborsAtomic strong" : "After DecNeighborsAtomic weak")); +#endif + LOG2FILE(kLogTypeMix) << "Atomic DEC neighbor: 0x" << ref << " from " << oldHeader << std::endl; + } + }; + ForEachRefField(obj, refFunc); +} + +void NaiveRCMarkSweepCollector::ConcurrentDoReference(uint32_t flags) { + function visitRoot = [this, flags](address_t &reference) -> void { + // reference processor may modify the reference, using C++ Reference may cause racing problem. + // use a stack variable to avoid the racing. 
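LoadStaticRoot and DecNeighborsAtomic above spin with sched_yield while another thread holds the volatile-load sentinel, and emit a diagnostic only every 0xf failed rounds rather than on every iteration. A small sketch of that throttled-logging spin loop; the boolean predicate stands in for the LOAD_INC_RC_MASK test:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    // Spins until `locked` clears, yielding each round and logging only every 15 rounds
    // so a long wait does not flood the log.
    void SpinWithThrottledLog(const std::atomic<bool> &locked) {
      unsigned count = 0xf;
      while (locked.load(std::memory_order_acquire)) {
        std::this_thread::yield();
        if (--count == 0) {
          count = 0xf;
          std::fprintf(stderr, "still waiting for the slot to be unlocked\n");
        }
      }
    }

    int main() {
      std::atomic<bool> locked{true};
      std::thread t([&locked] { locked.store(false, std::memory_order_release); });
      SpinWithThrottledLog(locked);
      t.join();
      return 0;
    }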
+ address_t ref = reference; + if (ref == 0) { + return; + } + + bool referenceAlive = !IsGarbage(ref); + + // there will be refinement, it's ok to use the following unsafe get + address_t referent = ReferenceGetReferent(ref); + + if (!referenceAlive) { + if (!(flags & RPMask(kRPPhantomRef))) { + // soft & weak + deadSoftWeaks.push_back(&reference); + } else { + // cleaner & phantom + deadPhantoms.push_back(&reference); + } + } else { + // if mutator is accessing the referent, delay the decision to STW phase + bool racing = ((referent & LOAD_INC_RC_MASK) == LOAD_INC_RC_MASK); + + // reference is alive, none-heap referent should keep alive + if (racing || (referent && InHeapBoundry(referent) && IsGarbage(referent))) { + if (!(flags & RPMask(kRPPhantomRef))) { + // soft & weak + clearReferentSoftWeaks.push_back(ref); + } else { + // cleaner & phantom + clearReferentPhantoms.push_back(ref); + } + } + } + }; + + MRT_GCVisitReferenceRoots(visitRoot, flags); +} + +void NaiveRCMarkSweepCollector::ReferenceRefinement(uint32_t flags) { + std::vector *deadsPointer = nullptr; + std::vector *clearReferentsPointer = nullptr; + if (flags & RPMask(kRPPhantomRef)) { + deadsPointer = &deadPhantoms; + clearReferentsPointer = &clearReferentPhantoms; + } else { + deadsPointer = &deadSoftWeaks; + clearReferentsPointer = &clearReferentSoftWeaks; + } + + std::vector &deads = *deadsPointer; + std::vector &clearReferents = *clearReferentsPointer; + uint32_t trueDead = 0; + uint32_t trueNeedClear = 0; + + for (address_t *reference : deads) { + __MRT_ASSERT(reference != nullptr, "should be valid reference address"); + if (*reference == 0) { + continue; + } + if (IsGarbage(*reference)) { + address_t referent = ReferenceGetReferent(*reference); + if (referent) { + DecReferentSyncCheck(referent, !(flags & RPMask(kRPPhantomRef))); + ReferenceClearReferent(*reference); + } + (void)AtomicUpdateRC<-1, 0, 0>(*reference); + *reference = 0; + ++trueDead; + } else { + address_t referent = ReferenceGetReferent(*reference); + if (referent && InHeapBoundry(referent) && IsGarbage(referent)) { + DecReferentSyncCheck(referent, !(flags & RPMask(kRPPhantomRef))); + ReferenceClearReferent(*reference); + ++trueNeedClear; + } + } + } + + for (address_t reference : clearReferents) { + if (!IsGarbage(reference)) { + address_t referent = ReferenceGetReferent(reference); + if (referent && InHeapBoundry(referent) && IsGarbage(referent)) { + DecReferentSyncCheck(referent, !(flags & RPMask(kRPPhantomRef))); + ReferenceClearReferent(reference); + ++trueNeedClear; + } + } + } + + LOG2FILE(kLogtypeGc) << " truely dead references = " << trueDead << "\n"; + LOG2FILE(kLogtypeGc) << " truely need clear refernt references = " << trueNeedClear << "\n"; +} + +void NaiveRCMarkSweepCollector::ParallelDoReference(uint32_t flags) { + __MRT_ASSERT((flags & ~kRPAllFlags) == 0, "DoReference flag kMrtScanAll"); + + std::atomic numProcessed = { 0 }; + std::atomic numIterated = { 0 }; + std::atomic numDead = { 0 }; + std::atomic numRcOne = { 0 }; + + function visitRoot = + [this, &numIterated, &numProcessed, &numDead, &numRcOne, flags](address_t &reference) -> void { + numIterated.fetch_add(1, std::memory_order_relaxed); + if (reference == 0) { + return; + } + + uint32_t referenceRc = RefCount(reference); + bool referenceAlive = !IsGarbage(reference); + + if (referenceRc == 0) { + __MRT_ASSERT(!referenceAlive, "reference_alive"); + // RefCount(reference) == 0, IsGarbage(reference): processed, do nothing here. 
+ } + else { + // RefCount(reference) > 0; + address_t referent = ReferenceGetReferent(reference); + + if (!referenceAlive) { + // reference in dead cycle. + // in case finalize resurrect this reference, cleare referent early + numDead.fetch_add(1, std::memory_order_relaxed); + if (referenceRc == 1) { + numRcOne.fetch_add(1, std::memory_order_relaxed); + } + if (referent) { + DecReferentSyncCheck(referent, !(flags & RPMask(kRPPhantomRef))); + ReferenceClearReferent(reference); + } + MRT_DecRefUnsync(reference); + reference = 0; + } else { + // reference is alive. + // Clear referent and enqueue when referent is not alive. + // reference is alive, none-heap referent should keep alive + if (referent && InHeapBoundry(referent) && IsGarbage(referent)) { + DecReferentSyncCheck(referent, !(flags & RPMask(kRPPhantomRef))); + ReferenceClearReferent(reference); + numProcessed.fetch_add(1, std::memory_order_relaxed); + LOG2FILE(kLogTypeMix) << "Clear referent in reference[" << flags << "] " << reference << std::endl; + } + } + } + }; + + MRT_ParallelVisitReferenceRoots(*GetThreadPool(), visitRoot, flags); + LOG2FILE(kLogtypeGc) << numProcessed.load(std::memory_order_relaxed) << " references[" << + flags << "] processed" << std::endl; + LOG2FILE(kLogtypeGc) << " iterated references = " << numIterated.load(std::memory_order_relaxed) << "\n"; + LOG2FILE(kLogtypeGc) << " processed references = " << numProcessed.load(std::memory_order_relaxed) << "\n"; + LOG2FILE(kLogtypeGc) << " dead references = " << numDead.load(std::memory_order_relaxed) << "\n"; + LOG2FILE(kLogtypeGc) << " rc one references = " << numRcOne.load(std::memory_order_relaxed) << "\n"; + LOG2FILE(kLogtypeGc) << " uncollected references = " << (numIterated.load(std::memory_order_relaxed) - + numDead.load(std::memory_order_relaxed) - numProcessed.load(std::memory_order_relaxed)) << "\n"; +} + +static vector FindNeighbors(address_t objaddr) { + vector neighbors; + auto refFunc = [&neighbors, objaddr](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if (kind == kNormalRefBits && IS_HEAP_ADDR(ref) && (objaddr != ref)) { + neighbors.push_back(ref); + } + }; + ForEachRefField(objaddr, refFunc); + return neighbors; +} + +// Invoke only at STW before sweep, Collect garbage cycles in GC garbage, steps: +// 1. collect garbage objects +// exclude: object without child, garbage rc is 0 +// Can be collected in parallel with threadPool +// 2. Compute SCC in garbage set, initial garbage with WorkItem(ENTER, obj) +// exclude: self cycle, too big cycles(if big data is off) +// Output: CycleGarbage or CyclePattern +// 3. Merge and log for big data +// +// 1. in qemu test, collect all and study all +// 2. 
in andorid, limit STW time to 300ms total +// 2.1 if bigdata string is cached, only study, skip large/duplicate scc +// 2.2 if bigdata string is empty, study all +void NaiveRCMarkSweepCollector::DumpCycleLeak(MplThreadPool &threadPool __attribute__((unused))) { + LOG2FILE(kLogtypeCycle) << "Cycle Leak start" << std::endl; + + CyclePatternGenerator cpg; + { + ConnectedComponentFinder finder(FindNeighbors); + finder.RunInBackupTrace(*this, cpg); + } + + // print cycle pattern generated by backup tracing + { + MRT_PHASE_TIMER("DumpCycleLeak: Cycle pattern merge"); + ClassCycleManager::MergeCycles(cpg.Cycles()); + ClassCycleManager::RemoveDeadPatterns(); + } + // bigdata + LOG2FILE(kLogtypeCycle) << "Cycle Leak end" << std::endl; + MRT_SendSaveCpJob(); +} + +void NaiveRCMarkSweepCollector::DumpCycleProfile() { + constexpr size_t kDumpLimit = 100LL * maple::MB; + ClassCycleManager::DumpDynamicCyclePatterns(GCLog().Stream(kLogtypeCycle), kDumpLimit, true); +} + +void NaiveRCMarkSweepCollector::DebugCleanup() { + // GC is stoped in GC.Finalize() + const char *mapleReportRCLeak = getenv("MAPLE_REPORT_RC_LEAK"); + if (mapleReportRCLeak != nullptr) { + CheckLeakAndCycle(); + } + + const char *mapleVerifyRC = getenv("MAPLE_VERIFY_RC"); + if (mapleVerifyRC != nullptr) { + DebugVerifyRC(); + } +} + +void NaiveRCMarkSweepCollector::CheckLeakAndCycle() { + LOG(INFO) << "Start CheckLeakAndCycle" << maple::endl; + ScopedStopTheWorld pauseTheWorld; + LOG(INFO) << "World stopped" << maple::endl; + + InitTracing(); + RootSet rootSet; + ParallelScanRoots(rootSet, false, true); + MrtVisitReferenceRoots([&rootSet](address_t obj) { + if (obj != 0) { + rootSet.push_back(obj); + } + }, kRPAllFlags); + // skip RC overflowed object + (void)(*theAllocator).ForEachObj([&rootSet](address_t obj) { + if (IsRCSkiped(obj)) { + rootSet.push_back(obj); + } + }); + // trigger tracing mark which follows referent. 
if not follow referent here, qemu test will fail + ParallelMark(rootSet, true); + + // leak/cycle dection + DetectLeak(); + EndTracing(); + ResetBitmap(); + LOG(INFO) << "World restarted" << maple::endl; +} + +void NaiveRCMarkSweepCollector::StatReferentRootRC(RCHashMap referentRoots[], uint32_t rpTypeNum) { + RootSet rs; + function visitRefRoots = [&rs](address_t obj) { + if (obj != 0) { + rs.push_back(obj); + } + }; + + rs.clear(); + MrtVisitReferenceRoots(visitRefRoots, RPMask(kRPSoftRef)); + __MRT_ASSERT(kRPSoftRef < rpTypeNum, "Invalid index"); + CollectReferentRoot(rs, referentRoots[kRPSoftRef]); + + rs.clear(); + MrtVisitReferenceRoots(visitRefRoots, RPMask(kRPWeakRef)); + __MRT_ASSERT(kRPWeakRef < rpTypeNum, "Invalid index"); + CollectReferentRoot(rs, referentRoots[kRPWeakRef]); + + rs.clear(); + MrtVisitReferenceRoots(visitRefRoots, RPMask(kRPPhantomRef)); + __MRT_ASSERT(kRPPhantomRef < rpTypeNum, "Invalid index"); + CollectReferentRoot(rs, referentRoots[kRPPhantomRef]); +} + +void NaiveRCMarkSweepCollector::StatHeapRC() { + // stat heap reference + (void)(*theAllocator).ForEachObj([this](address_t obj) { + auto refFunc = [&](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if (InHeapBoundry(ref) && ref != obj && kind != kUnownedRefBits) { + if (IsMygoteObj(obj)) { + if (kind == kNormalRefBits) { + UpdateRCMap(mygoteObjs, ref); + } else { + UpdateRCMap(mygoteWeakObjs, ref); + } + } else { + if (kind == kNormalRefBits) { + UpdateRCMap(heapObjs, ref); + } else { + UpdateRCMap(heapWeakObjs, ref); + } + } + } + }; + ForEachRefField(obj, refFunc); + }); +} + +void NaiveRCMarkSweepCollector::VerifyRC() { + RCHashMap referentRoots[kRPTypeNum]; + StatReferentRootRC(referentRoots, kRPTypeNum); + StatHeapRC(); + + uint32_t potentialEarlyRelease = 0U; + uint32_t potentialLeak = 0U; + uint32_t wrongWeakRCObjs = 0U; + std::map weakRCDistribution; + + (void)(*theAllocator).ForEachObj([&, this](address_t obj) { + StatRC statRC = NewStatRC(referentRoots, kRPTypeNum, obj); + if (statRC.weakTotal > 0) { + if (weakRCDistribution.find(statRC.weakTotal) == weakRCDistribution.end()) { + weakRCDistribution[statRC.weakTotal] = 1; + } else { + weakRCDistribution[statRC.weakTotal] += 1; + } + } + uint32_t rc = RefCount(obj); + bool danger = IsRCOverflow(rc) ? false : (rc < (statRC.accurate + statRC.heaps) && (statRC.mygote == 0)); + bool leak = IsRCOverflow(rc) ? false : (rc > (statRC.accurate + statRC.heaps + statRC.stacks)); + uint32_t resurrectWeakRC = ResurrectWeakRefCount(obj); + uint32_t weakRC = (IsWeakCollected(obj) && (!IsEnqueuedObjFinalizable(obj))) ? WeakRefCount(obj) : + (WeakRefCount(obj) - 1); + bool weakWrong = IsRCOverflow(rc) ? false : + ((weakRC < statRC.weakCount) && (weakRC != kMaxWeakRC) && + ((weakRC > statRC.weakCount) && (statRC.mygoteWeak == 0))) || + ((resurrectWeakRC != statRC.resurrectWeakCount) && (resurrectWeakRC != kMaxResurrectWeakRC)); + if (danger || leak || weakWrong) { + if (danger) { + potentialEarlyRelease += 1U; + } else if (leak) { + potentialLeak += 1U; + } else { + wrongWeakRCObjs += 1U; + } + string errMsg = danger ? "DANGER" : weakWrong ? "WEAK_WRONG" : "LEAK"; +#if __MRT_DEBUG + PrintRCWrongDetails(obj, statRC, errMsg, rc, weakRC); +#endif + } + }); + +#if __MRT_DEBUG + PrintRCVerifyResult(weakRCDistribution, potentialEarlyRelease, potentialLeak, wrongWeakRCObjs); +#endif + + if ((potentialEarlyRelease + potentialLeak + wrongWeakRCObjs) != 0U) { + LOG(ERROR) << "===============>RC Verfiy Failed!" 
<< std::endl; + } +} + +void NaiveRCMarkSweepCollector::PrintRCVerifyResult(std::map &weakRCDistribution, + uint32_t potentialEarlyRelease, uint32_t potentialLeak, uint32_t wrongWeakRCObjs) { + LOG2FILE(kLogtypeGcOrStderr) << "[MS] [RC Verify] total " << potentialEarlyRelease << + " objects potential early release" << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] [RC Verify] total " << potentialLeak << " objects potential leak" << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] [RC Verify] total " << wrongWeakRCObjs << + " objects weak rc are wrong" << std::endl; + + for (auto item : weakRCDistribution) { + LOG2FILE(kLogtypeGcOrStderr) << "[MS] weakRC = " << item.first << ", ref_num = " << item.second << std::endl; + } +} + +void NaiveRCMarkSweepCollector::PrintRCWrongDetails(address_t obj, const StatRC &statRC, const string &errMsg, + uint32_t rc, uint32_t weakRC) { + // something bad has happened + MClass *classInfo = reinterpret_cast(obj)->GetClass(); + LOG2FILE(kLogtypeGcOrStderr) << "[MS] [RC Verify] " << errMsg << + " obj " << obj << " rc wrong: rc = " << rc << std::endl; + + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tsummary: accurate = " << statRC.accurate << + ", conservative = " << statRC.stacks << ", heap = " << statRC.heaps << ", weak_accuate = " << statRC.weakTotal << + ", weakRC = " << weakRC << ", resurrect weakRC = " << statRC.resurrectWeakCount << " " << + (IsWeakCollected(obj) ? "collected" : "not collectecd") << std::endl; + + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tclass: " << classInfo->GetName() << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tstatic & const string roots = " << statRC.staticFields << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\texternal roots = " << statRC.externals << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tstring roots = " << statRC.strings << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\treference roots = " << statRC.references << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tallocator roots = " << statRC.allocators << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tclassloader roots = " << statRC.classloaders << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tstack roots = " << statRC.stacks << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tweak global roots = " << statRC.weakGlobals << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tsoft = " << statRC.soft << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tweak = " << statRC.weak << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\tphantom and cleaner = " << statRC.phantom << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "[MS] \t\theap weak field = " << statRC.weakHeaps << std::endl; +} + +StatRC NaiveRCMarkSweepCollector::NewStatRC(RCHashMap referentRoots[], uint32_t rpTypeNum, address_t obj) { + StatRC statRC; + statRC.staticFields = GetRCFromMap(obj, staticFieldRoots); + statRC.weakGlobals = GetRCFromMap(obj, weakGlobalRoots); + statRC.externals = GetRCFromMap(obj, externalRoots); + statRC.strings = GetRCFromMap(obj, stringRoots); + statRC.references = GetRCFromMap(obj, referenceRoots); + statRC.allocators = GetRCFromMap(obj, allocatorRoots); + statRC.classloaders = GetRCFromMap(obj, classloaderRoots); + statRC.stacks = GetRCFromMap(obj, stackRoots); + statRC.heaps = GetRCFromMap(obj, heapObjs); + statRC.weakHeaps = GetRCFromMap(obj, heapWeakObjs); + statRC.mygote = GetRCFromMap(obj, mygoteObjs); + statRC.mygoteWeak = GetRCFromMap(obj, mygoteWeakObjs); + + __MRT_ASSERT(kRPSoftRef < rpTypeNum, 
"Invalid index"); + statRC.soft = GetRCFromMap(obj, referentRoots[kRPSoftRef]); + __MRT_ASSERT(kRPWeakRef < rpTypeNum, "Invalid index"); + statRC.weak = GetRCFromMap(obj, referentRoots[kRPWeakRef]); + __MRT_ASSERT(kRPPhantomRef < rpTypeNum, "Invalid index"); + statRC.phantom = GetRCFromMap(obj, referentRoots[kRPPhantomRef]); + + statRC.heaps = statRC.heaps - (statRC.soft + statRC.weak + statRC.phantom) + statRC.mygote; + statRC.weakCount = statRC.phantom + statRC.weakHeaps + statRC.mygoteWeak; + statRC.resurrectWeakCount = statRC.weakGlobals + statRC.soft + statRC.weak; + statRC.weakTotal = statRC.resurrectWeakCount + statRC.weakCount; + statRC.accurate = statRC.staticFields + statRC.externals + statRC.strings + statRC.references + statRC.allocators + + statRC.classloaders; + return statRC; +} + +void NaiveRCMarkSweepCollector::DebugVerifyRC() { + LOG(INFO) << "Start RC Verification " << maple::endl; + ScopedStopTheWorld pauseTheWorld; + LOG(INFO) << "World stopped" << maple::endl; +#ifdef __ANDROID__ + mplCollie.SetSTWPanic(false); +#endif + + InitTracing(); + RootSet rootSet; + SetRCVerify(true); + ParallelScanRoots(rootSet, true, false); + EndTracing(); + ResetBitmap(); + LOG(INFO) << "World restarted" << maple::endl; +} + +void NaiveRCMarkSweepCollector::DetectLeak() { + size_t totalObjCount = 0; + size_t totalSize = 0; + size_t totalLeakSize = 0; + set leakObjectSet; + bool tmpResult = (*theAllocator).ForEachObj([&, this](address_t obj) { + size_t objSize = reinterpret_cast(obj)->GetSize(); + totalSize += objSize; + if (IsGarbage(obj)) { + totalLeakSize += objSize; + if (UNLIKELY(!leakObjectSet.insert(obj).second)) { + LOG(ERROR) << "leakObjectSet.insert() in TracingCollector::DetectLeak() failed." << maple::endl; + } + } + ++totalObjCount; + }); + if (UNLIKELY(!tmpResult)) { + LOG(ERROR) << "(*theAllocator).ForEachObj() in TracingCollector::DetectLeak() return false." << maple::endl; + } + + LOG2FILE(kLogtypeGcOrStderr) << "Total Objects " << totalObjCount << ", Total Leak Count " << + leakObjectSet.size() << std::endl; + LOG2FILE(kLogtypeGcOrStderr) << "Total Objects size " << totalSize << ", Total Leak size " << + totalLeakSize << std::endl; + PrintLeakRootAndRetainCount(leakObjectSet); +} + +void NaiveRCMarkSweepCollector::PrintLeakRootAndRetainCount(set &garbages) { + // check if leak object is root, when + // 1. not referenced by other leak object + // 2. 
can be referenced by it self + set rootSet; + rootSet.insert(garbages.begin(), garbages.end()); + const char *mapleReportRCLeakDetail = getenv("MAPLE_REPORT_RC_LEAK_DETAIL"); + for (auto it = garbages.begin(); it != garbages.end(); ++it) { + address_t leakObj = *it; + auto refFunc = [&garbages, &rootSet](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if ((kind != kUnownedRefBits) && (garbages.find(ref) != garbages.end())) { + rootSet.erase(ref); + } + }; + ForEachRefField(leakObj, refFunc); + if (mapleReportRCLeakDetail != nullptr) { + MClass *classInfo = reinterpret_cast(leakObj)->GetClass(); + const char *className = classInfo->GetName(); + LOG2FILE(kLogtypeGcOrStderr) << "Leak Object "" 0x" << std::hex << leakObj << " RC=" << + RefCount(leakObj) << " " << className << std::endl; + } + } + LOG2FILE(kLogtypeGcOrStderr) << " Total none-cycle root objects " << rootSet.size() << std::endl; + + // find cycles + vector garbagesVec; + std::copy(garbages.begin(), garbages.end(), std::back_inserter(garbagesVec)); + + ConnectedComponentFinder finder(garbagesVec, FindNeighbors); + finder.Run(); + vector> components = finder.GetResults(); + PrintMultiNodeCycleCount(components); + + size_t componentIdx = 0; + for (auto &component : components) { + ++componentIdx; + // if component has root, calcuate its retain sizes + bool isRoot = (component.size() > 1) ? true : rootSet.find(component.at(0)) != rootSet.end(); + if (isRoot) { + set reachingSet; + for (auto objaddr : component) { + if (UNLIKELY(!reachingSet.insert(objaddr).second)) { + LOG(ERROR) << "reachingSet.insert() in PrintLeakRootAndRetainCount() failed." << maple::endl; + } + } + InsertSetForEachRefField(reachingSet, garbages); +#if __MRT_DEBUG + PrintLeakRoots(reachingSet, component, componentIdx); +#endif + } + } +} + +void NaiveRCMarkSweepCollector::PrintMultiNodeCycleCount(vector> &components) { + size_t multNodeCycleCount = 0; + for (auto &component : components) { + if (component.size() > 1) { + ++multNodeCycleCount; + } + } + LOG2FILE(kLogtypeGcOrStderr) << " Total multi-node cycle count " << multNodeCycleCount << std::endl; +} + +void NaiveRCMarkSweepCollector::InsertSetForEachRefField(set &reachingSet, set &garbages) { + set visitingSet; + visitingSet.insert(reachingSet.begin(), reachingSet.end()); + while (true) { + set newAddedSet; + for (auto it = visitingSet.begin(); it != visitingSet.end(); ++it) { + address_t leakObj = *it; + auto refFunc = [&garbages, &reachingSet, &newAddedSet](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + // garbage and not added into reaching set + if ((kind == kNormalRefBits) && + (garbages.find(ref) != garbages.end()) && + (reachingSet.find(ref) == reachingSet.end())) { + if (UNLIKELY(!reachingSet.insert(ref).second)) { + LOG(ERROR) << "reachingSet.insert() in InsertSetForEachRefField() failed." << maple::endl; + } + if (UNLIKELY(!newAddedSet.insert(ref).second)) { + LOG(ERROR) << "newAddedSet.insert() in InsertSetForEachRefField() failed." 
<< maple::endl; + } + } + }; + ForEachRefField(leakObj, refFunc); + } + if (newAddedSet.size() == 0) { + break; + } + visitingSet.clear(); + visitingSet.insert(newAddedSet.begin(), newAddedSet.end()); + newAddedSet.clear(); + } +} + +void NaiveRCMarkSweepCollector::PrintLeakRoots(set &reachingSet, vector &component, + size_t componentIdx) { + // print root, calculate reaching set object size + size_t reachingObjectsSize = 0; + for (auto objaddr : reachingSet) { + reachingObjectsSize += reinterpret_cast(objaddr)->GetSize(); + } + + if (component.size() > 1) { + LOG2FILE(kLogtypeGcOrStderr) << "[" << componentIdx << "] Cycle Leak: retain num " << reachingSet.size() << + ", retain sz " << reachingObjectsSize << ", cycle count " << component.size() << std::endl; + for (auto objaddr : component) { + uint32_t rc = RefCount(objaddr); + MClass *classInfo = reinterpret_cast(reinterpret_cast(objaddr))->GetClass(); + const char *className = classInfo->GetName(); + LOG2FILE(kLogtypeGcOrStderr) << "[" << componentIdx << "] 0x" << std::hex << objaddr << + " RC=" << rc << " " << className << std::endl; + } + } else { + __MRT_ASSERT(component.size() == 1, "Invalid component"); + auto objaddr = component.at(0); + MClass *classInfo = reinterpret_cast(reinterpret_cast(objaddr))->GetClass(); + const char *className = classInfo->GetName(); + LOG2FILE(kLogtypeGcOrStderr) << "[" << componentIdx << "] Leak: retain num " << reachingSet.size() << + ", retain sz " << reachingObjectsSize << ", "" 0x" << std::hex << objaddr << " RC=" << RefCount(objaddr) << + " " << WeakRefCount(objaddr) << " " << ResurrectWeakRefCount(objaddr) << " " << + (IsWeakCollected(objaddr) ? "weak collected" : "not weak collected") << " " << className << std::endl; + } +} + +void NaiveRCMarkSweepCollector::CollectReferentRoot(RootSet &rs, RCHashMap &map) { + for (auto it = rs.begin(); it != rs.end(); ++it) { + address_t reference = *it; + if (FastIsHeapObject(reference)) { + address_t referent = ReferenceGetReferent(reference); + if (FastIsHeapObject(referent)) { + UpdateRCMap(map, referent); + } + } + } +} + +void NaiveRCMarkSweepCollector::ClearRootsMap() { + if (UNLIKELY(rcVerification)) { + staticFieldRoots.clear(); + externalRoots.clear(); + weakGlobalRoots.clear(); + stringRoots.clear(); + referenceRoots.clear(); + allocatorRoots.clear(); + classloaderRoots.clear(); + stackRoots.clear(); + heapObjs.clear(); + heapWeakObjs.clear(); + } +} + +void NaiveRCMarkSweepCollector::PostParallelAddTask(bool processWeak) { + if (LIKELY(!rcVerification)) { + return; + } + + MplThreadPool *threadPool = GetThreadPool(); + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this](size_t workerID __attribute__((unused))) { + RootSet rs; + ScanStringRoots(rs); + CollectRootRC(rs, stringRoots); + })); + + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, processWeak](size_t workerID __attribute__((unused))) { + RootSet rs; + ScanExternalRoots(rs, processWeak); + CollectRootRC(rs, externalRoots); + })); + + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this](size_t workerID __attribute__((unused))) { + // static-fields root and const-string field root may overlap. deduplicate it to make RC-count correct. + std::unordered_set uniqStaticFields; + RefVisitor visitor = [&uniqStaticFields](address_t &field) { + if (UNLIKELY(!uniqStaticFields.insert(&field).second)) { + VLOG(gc) << "uniqStaticFields.insert() in TracingCollector::ParallelScanRoots() failed." 
<< maple::endl; + } + }; + GCRegisteredRoots::Instance().Visit(visitor); + RootSet rs; + for (auto field: uniqStaticFields) { + MaybeAddRoot(LoadRefField(field), rs, true); + } + CollectRootRC(rs, staticFieldRoots); + })); + + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this](size_t workerID __attribute__((unused))) { + RootSet rs; + ScanWeakGlobalRoots(rs); + CollectRootRC(rs, weakGlobalRoots); + })); + + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this](size_t workerID __attribute__((unused))) { + RootSet rs1; + RootSet rs2; + ScanAllocatorRoots(rs1); + ScanClassLoaderRoots(rs2); + CollectRootRC(rs1, allocatorRoots); + CollectRootRC(rs2, classloaderRoots); + })); + + // add remaining reference roots + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this](size_t workerID __attribute__((unused))) { + RootSet rs; + function visitRefRoots = [&rs](address_t obj) { + if (obj != 0) { + rs.push_back(obj); + } + }; + MrtVisitReferenceRoots(visitRefRoots, RPMask(kRPSoftRef) | RPMask(kRPWeakRef) | RPMask(kRPPhantomRef)); + // Some SoftRefs will be the Roots(e.g. Force will push all SoftRefs to roots), + // VisitReferenceRoots and ScanReferencesRoots will scan the soft twice, + // so create a dummy reason--OOM, to skip softrefs, when ScanReferencesRoots + GCReason oldReason = gcReason; + SetGCReason(kGCReasonOOM); + ScanReferenceRoots(rs); + SetGCReason(oldReason); + CollectRootRC(rs, referenceRoots); + })); + + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this](size_t workerID __attribute__((unused))) { + RootSet rs; + auto &list = MutatorList::Instance().List(); + for (auto it = list.begin(); it != list.end(); ++it) { + ScanStackRoots(**it, rs); + } + RefVisitor visitor = [&rs](address_t &obj) { + if (IS_HEAP_OBJ(obj)) { + rs.push_back(obj); + } + }; + for (auto it = list.begin(); it != list.end(); ++it) { + (*it)->VisitNativeStackRoots(visitor); + } + CollectRootRC(rs, stackRoots); + })); +} + +void NaiveRCMarkSweepCollector::PostParallelScanMark(bool processWeak) { + if (UNLIKELY(rcVerification)) { + MplThreadPool *threadPool = GetThreadPool(); + PostParallelAddTask(processWeak); + threadPool->WaitFinish(true); + VerifyRC(); + } +} + +void NaiveRCMarkSweepCollector::PostParallelScanRoots() { + if (UNLIKELY(rcVerification)) { + VerifyRC(); + } +} + +void NaiveRCMarkSweepCollector::ConcurrentMarkPreparePhase(WorkStack &workStack, WorkStack &inaccurateRoots) { + if (UNLIKELY(rcVerification)) { + // when rc verification is enabled, use parallel scan roots. + MRT_PHASE_TIMER("Parallel Root scan and verify rc"); + ParallelScanRoots(workStack, true, false); + } else { + // use fast root scan for concurrent marking. + MRT_PHASE_TIMER("Fast Root scan"); + FastScanRoots(workStack, inaccurateRoots, true, false); + } + // prepare for concurrent marking. + ConcurrentMarkPrepare(); +} + +void NaiveRCMarkSweepCollector::PostInitTracing() { + ClearRootsMap(); +} + +void NaiveRCMarkSweepCollector::PostEndTracing() { + ClearRootsMap(); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/collector_rc.cpp b/src/mrt/compiler-rt/src/collector/collector_rc.cpp new file mode 100644 index 0000000000..f2cf968612 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/collector_rc.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/collector_rc.h" + +#include "chosen.h" +#include "mutator_list.h" + +namespace maplert { +#if RC_PROFILE +std::atomic RCCollector::numLoadIncRef(0); +std::atomic RCCollector::numWriteRefVar(0); +std::atomic RCCollector::numWriteRefField(0); +std::atomic RCCollector::numReleaseRefVar(0); +std::atomic RCCollector::numNativeInc(0); +std::atomic RCCollector::numNativeDec(0); +std::atomic RCCollector::numIncRef(0); +std::atomic RCCollector::numDecRef(0); +std::atomic RCCollector::numIncNull(0); +std::atomic RCCollector::numDecNull(0); + +void RCCollector::PrintStats() { + LOG2FILE(kLogtypeGc) << "[RCSTATS] RC statistics:" << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # LoadIncRefVar : " << RCCollector::numLoadIncRef.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # WriteRefVar : " << RCCollector::numWriteRefVar.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # WriteRefField : " << RCCollector::numWriteRefField.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # ReleaseRefVar : " << RCCollector::numReleaseRefVar.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # IncRef : " << RCCollector::numIncRef.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # IncNull : " << RCCollector::numIncNull.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # DecRef : " << RCCollector::numDecRef.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # DecNull : " << RCCollector::numDecNull.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # NativeInc : " << RCCollector::numNativeInc.load() << std::endl; + LOG2FILE(kLogtypeGc) << "[RCSTATS] # NativeDec : " << RCCollector::numNativeDec.load() << std::endl; +} + +void RCCollector::ResetStats() { + RCCollector::numLoadIncRef = 0; + RCCollector::numWriteRefVar = 0; + RCCollector::numWriteRefField = 0; + RCCollector::numReleaseRefVar = 0; + RCCollector::numIncRef = 0; + RCCollector::numDecRef = 0; + RCCollector::numIncNull = 0; + RCCollector::numDecNull = 0; + RCCollector::numNativeInc = 0; + RCCollector::numNativeDec = 0; +} + +#else + +void RCCollector::PrintStats() { + LOG2FILE(kLogtypeGc) << "[RCSTATS] RC statistics is disabled." << std::endl; +} + +void RCCollector::ResetStats() {} + +#endif // RC_PROFILE + +void RCMutator::Fini() { + Mutator::Fini(); +} +} diff --git a/src/mrt/compiler-rt/src/collector/collector_tracing.cpp b/src/mrt/compiler-rt/src/collector/collector_tracing.cpp new file mode 100644 index 0000000000..2c89c8abcc --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/collector_tracing.cpp @@ -0,0 +1,1100 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "collector/collector_tracing.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "syscall.h" +#include "mm_config.h" +#include "mm_utils.h" +#include "address.h" +#include "chosen.h" +#include "imported.h" +#include "mutator_list.h" +// cycle pattern generator depend on the conn-comp +#include "collector/conn_comp.h" +#include "collector/cp_generator.h" +#include "collector/stats.h" +#include "collector/native_gc.h" +#include "collie.h" +#include "mstring_inline.h" +#include "mrt_array.h" + +namespace maplert { +namespace { +constexpr int kDefaultWaitGCLiteTimeoutMs = 500; // lite wait gc, timeout by 500ms +constexpr uint64_t kDefaultSystemAutoGcIntervalNs = 4ULL * 60 * 1000 * 1000 * 1000; // 4mins +constexpr uint32_t kStaticFieldRootsBatchSize = 256; // static roots batch size is 256 +} + +ImmortalWrapper GCRegisteredRoots::instance; + +void GCRegisteredRoots::Register(address_t **gcRootsList, size_t len) { + std::lock_guard lock(staticRootsLock); + address_t **list = gcRootsList; + size_t remain = len; + while (true) { + if (remain <= kStaticFieldRootsBatchSize) { + roots.push_back({remain, list}); + break; + } else { + roots.push_back({kStaticFieldRootsBatchSize, list}); + list += kStaticFieldRootsBatchSize; + remain -= kStaticFieldRootsBatchSize; + } + } + totalRootsCount += len; +} + +void GCRegisteredRoots::Visit(RefVisitor &visitor) { + { + std::lock_guard lock(staticRootsLock); + for (RegisteredRoots &item : roots) { + for (size_t i = 0; i < item.length; ++i) { + LinkerRef ref(item.roots[i]); + if (!ref.IsIndex()) { + visitor(*(item.roots[i])); + } + } + } + } + (*zterpStaticRootAllocator).VisitStaticRoots(visitor); +} + +bool GCRegisteredRoots::GetRootsLocked(size_t index, address_t **&list, size_t &len) { + std::lock_guard lock(staticRootsLock); + if (index >= roots.size()) { + return false; + } + list = roots[index].roots; + len = roots[index].length; + return true; +} + +void TracingCollector::Init() { + Collector::Init(); + StartThread(true); +} + +void TracingCollector::InitAfterFork() { + // post fork child + Collector::InitAfterFork(); +} + +void TracingCollector::Fini() { + Collector::Fini(); + StopThread(); + JoinThread(); +} + +// Start the GC thread(s). +void TracingCollector::StartThread(bool isZygote) { + bool expected = false; + if (gcThreadRunning.compare_exchange_strong(expected, true, std::memory_order_acquire) == false) { + return; + } + // starts the thread pool. + lastTriggerTime = timeutils::NanoSeconds(); + if (workerThreadPool == nullptr) { + // if it's the zygote thread, 1 thread is enough for GC + // in 8 cores, use 4 thread parallel with 3 GC worker thread and 1 GC thread + int32_t numThreads = isZygote ? 1 : ((std::thread::hardware_concurrency() / maple::kGCThreadNumAdjustFactor) - 1); + numThreads = (numThreads < 1) ? 1 : numThreads; + parallelThreadCount = numThreads + 1; + if (IsSystem()) { + concurrentThreadCount = (parallelThreadCount > kSystemServerConcurrentThreadCount) ? 
+ kSystemServerConcurrentThreadCount : parallelThreadCount; + } else { + concurrentThreadCount = 1; + } + LOG2FILE(kLogtypeGc) << "concurrent thread count " << concurrentThreadCount << + " parallel thread count " << parallelThreadCount << + " thread pool size " << numThreads << std::endl; + workerThreadPool = new (std::nothrow) MplThreadPool("gc", numThreads, maple::kGCThreadPriority); + if (workerThreadPool == nullptr) { + LOG(FATAL) << "new MplThreadPool failed" << maple::endl; + } + } + + // create the collector thread. + if (::pthread_create(&gcThread, nullptr, TracingCollector::CollectorThreadEntry, this) != 0) { + __MRT_ASSERT(0, "pthread_create failed!"); + } + // set thread name. + int ret = pthread_setname_np(gcThread, "GC"); + if (UNLIKELY(ret != 0)) { + LOG(ERROR) << "pthread_setname_np() in MarkSweepCollector::StartThread() return " << + ret << " rather than 0" << maple::endl; + } +#ifdef __ANDROID__ + mplCollie.Init(); +#endif +} + +void TracingCollector::StopThread() { + // tell the collector thread to exit. + if (gcThreadRunning.load(std::memory_order_acquire) == false) { + return; + } + + TaskQueue::TaskFilter filter = [](GCTask&, GCTask&) { + return false; + }; + GCTask task(ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeTerminate); + (void)taskQueue.Enqueue(task, filter); // enqueue to sync queue + if (workerThreadPool != nullptr) { + workerThreadPool->Exit(); + } +#ifdef __ANDROID__ + mplCollie.Fini(); +#endif +} + +void TracingCollector::JoinThread() { + // wait for collector thread to exit. + // Collector::Fini() usually called from main thread, + // if Collector::Fini() called before Mutator::Fini(), + // we should enter saferegion when blocking on pthread_join(). + if (gcThreadRunning.load(std::memory_order_acquire) == false) { + return; + } + ScopedEnterSaferegion enterSaferegion; + int ret = ::pthread_join(gcThread, nullptr); + if (UNLIKELY(ret != 0)) { + LOG(ERROR) << "::pthread_join() in MarkSweepCollector::JoinThread() return " << + ret << "rather than 0." << maple::endl; + } + // wait the thread pool stopped. + if (workerThreadPool != nullptr) { + delete workerThreadPool; + workerThreadPool = nullptr; + } + gcThreadRunning.store(false, std::memory_order_release); +#ifdef __ANDROID__ + mplCollie.JoinThread(); +#endif +} + +void *TracingCollector::CollectorThreadEntry(void *arg) { + // set current thread as a gc thread. + (void)maple::tls::CreateTLS(); + StoreTLS(reinterpret_cast(true), maple::tls::kSlotIsGcThread); + + LOG(INFO) << "[GC] Thread begin." << maple::endl; + __MRT_ASSERT(arg != nullptr, "CollectorThreadEntry arg=nullptr"); + + // set thread priority. + MRT_SetThreadPriority(maple::GetTid(), maple::kGCThreadPriority); + + // run event loop in this thread. + TracingCollector *self = reinterpret_cast(arg); + self->RunTaskLoop(); + + LOG(INFO) << "[GC] Thread end." 
<< maple::endl; + maple::tls::DestoryTLS(); + return nullptr; +} + +bool TracingCollector::GCTask::Execute(void *owner) { + __MRT_ASSERT(owner != nullptr, "task queue owner ptr should not be null!"); + TracingCollector *traceCollector = reinterpret_cast(owner); + switch (taskType) { + case ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeTerminate: { + return false; + } + case ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeTimeout: { + if (NativeEpochStats::Instance().isEnabled()) { + uint64_t curTime = timeutils::NanoSeconds(); + if ((curTime - traceCollector->lastTriggerTime) > kDefaultSystemAutoGcIntervalNs) { + traceCollector->gcReason = kGCReasonForceGC; + traceCollector->lastTriggerTime = timeutils::NanoSeconds(); + GCReasonConfig &cfg = reasonCfgs[gcReason]; + cfg.SetLastTriggerTime(static_cast(traceCollector->lastTriggerTime)); + traceCollector->isGcTriggered.store(true, std::memory_order_relaxed); + traceCollector->RunFullCollection(ScheduleTaskBase::kAsyncIndex); + } + } + break; + } + case ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeInvokeGC: { + traceCollector->gcReason = gcReason; + traceCollector->lastTriggerTime = timeutils::NanoSeconds(); + GCReasonConfig &cfg = reasonCfgs[gcReason]; + cfg.SetLastTriggerTime(static_cast(traceCollector->lastTriggerTime)); + traceCollector->isGcTriggered.store(true, std::memory_order_relaxed); + traceCollector->RunFullCollection(syncIndex); + break; + } + default: + LOG(ERROR) << "[GC] Error task type: " << static_cast(taskType) << " ignored!" << maple::endl; + break; + } + return true; +} + +void TracingCollector::RunTaskLoop() { + LOG(ERROR) << "[GC] RunTaskLoop start" << maple::endl; + finishedGcIndex = ScheduleTaskBase::kSyncIndexStartValue; + gcTid.store(maple::GetTid(), std::memory_order_release); + taskQueue.Init(); + taskQueue.LoopDrainTaskQueue(this); + LOG(INFO) << "[GC] GC thread exit!!!" 
<< maple::endl; + NotifyGCFinished(ScheduleTaskBase::kIndexExit); +} + +void TracingCollector::OnUnsuccessfulInvoke(GCReason reason) { + if (reason == kGCReasonHeu) { + return; + } + GCReasonConfig &cfg = reasonCfgs[reason]; + if (!cfg.IsNonBlockingGC() && isGcTriggered.load(std::memory_order_seq_cst)) { + ScopedEnterSaferegion safeRegion; + WaitGCStopped(); + } +} + +void TracingCollector::InvokeGC(GCReason reason, bool unsafe) { + // check if trigger GC thread holds thread list lock and mutator list lock + if (maple::GCRootsVisitor::IsThreadListLockLockBySelf()) { + LOG(ERROR) << "[GC] thread list is locked by self, skip GC" << maple::endl; + return; + } + if (MutatorList::Instance().IsLockedBySelf()) { + LOG(ERROR) << "[GC] mutator list is locked by self, skip GC" << maple::endl; + return; + } + + GCReasonConfig &cfg = reasonCfgs[reason]; + if (cfg.ShouldIgnore()) { + OnUnsuccessfulInvoke(reason); + } else if (unsafe) { + RequestGCUnsafe(reason); + } else { + RequestGCAndWait(reason); + } + + return; +} + +void TracingCollector::RequestGCUnsafe(GCReason reason) { + // this function is to trigger gc from an unsafe context, e.g., while holding + // a lock, or while in the middle of an allocation + // + // the difference with the following function is that the reason must be + // non-blocking, and that we don't enter safe region: + // non-blocking because mutator must finish what it's doing (releasing lock + // or finishing the allocation); + // not entering safe region because it will cause deadlocks + __MRT_ASSERT(reasonCfgs[reason].IsNonBlockingGC(), + "trigger from unsafe context must not be blocked"); + GCTask gcTask(ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeInvokeGC, reason); + // we use async enqueue because this doesn't have locks, lowering the risk + // of timeouts when entering safe region due to thread scheduling + taskQueue.EnqueueAsync(gcTask); +} + +void TracingCollector::RequestGCAndWait(GCReason reason) { + // Enter saferegion since current thread may blocked by locks. 
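+  // Blocking protocol (summary of the code below): a caller such as
+  // InvokeGC(kGCReasonForceGC, false) reaches this function when the reason is
+  // not ignored. The request is enqueued with a filter that matches queued
+  // tasks carrying the same GCReason, so duplicate requests can be coalesced.
+  // Non-blocking reasons return right after Enqueue(); lite-blocking reasons
+  // wait on gcFinishedCondVar with a timeout; fully blocking reasons wait until
+  // finishedGcIndex reaches the sync index returned by Enqueue() (or the GC
+  // thread exits with kIndexExit).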
+ ScopedEnterSaferegion enterSaferegion; + GCTask gcTask(ScheduleTaskBase::ScheduleTaskType::kScheduleTaskTypeInvokeGC, reason); + uint64_t curThreadSyncIndex = 0; + TaskQueue::TaskFilter filter = [](GCTask &oldTask, GCTask &newTask) { + return oldTask.GetGCReason() == newTask.GetGCReason(); + }; + + // adjust task order/priority by sync mode + GCReasonConfig &cfg = reasonCfgs[reason]; + if (cfg.IsNonBlockingGC()) { + (void)taskQueue.Enqueue(gcTask, filter); + return; + } else { + curThreadSyncIndex = taskQueue.Enqueue(gcTask, filter); + } + + // wait gc or not by sync mode + if (cfg.IsLiteBlockingGC()) { + WaitGCStoppedLite(); + return; + } + std::unique_lock lock(gcFinishedCondMutex); + // wait until GC finished + std::function pred = [this, curThreadSyncIndex] { + return ((finishedGcIndex >= curThreadSyncIndex) || (finishedGcIndex == ScheduleTaskBase::kIndexExit)); + }; + { +#ifdef __ANDROID__ + MplCollieScope mcs(kGCCollie, (MPLCOLLIE_FLAG_ABORT | MPLCOLLIE_FLAG_PROMOTE_PRIORITY), gcTid.load()); +#endif + gcFinishedCondVar.wait(lock, pred); + } +} + +void TracingCollector::NotifyGCFinished(uint64_t gcIndex) { + std::unique_lock lock(gcFinishedCondMutex); + isGcTriggered.store(false, std::memory_order_relaxed); + if (gcIndex != ScheduleTaskBase::kAsyncIndex) { // sync gc, need set syncIndex + finishedGcIndex.store(gcIndex); + } + gcFinishedCondVar.notify_all(); +} + +void TracingCollector::WaitGCStoppedLite() { + std::unique_lock lock(gcFinishedCondMutex); + std::chrono::milliseconds timeout(kDefaultWaitGCLiteTimeoutMs); + (void)gcFinishedCondVar.wait_for(lock, timeout); +} + +void TracingCollector::WaitGCStopped() { + std::unique_lock lock(gcFinishedCondMutex); + uint64_t curWaitGcIndex = finishedGcIndex.load(); + std::function pred = [this, curWaitGcIndex] { + return (!IsGcTriggered() || (curWaitGcIndex != finishedGcIndex) || + (finishedGcIndex == ScheduleTaskBase::kIndexExit)); + }; + { +#ifdef __ANDROID__ + MplCollieScope mcs(kGCCollie, (MPLCOLLIE_FLAG_ABORT | MPLCOLLIE_FLAG_PROMOTE_PRIORITY), gcTid.load()); +#endif + gcFinishedCondVar.wait(lock, pred); + } +} + +void TracingCollector::DumpHeap(const std::string &tag) { + pid_t pid = getpid(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); + std::string filename = dirName + "/rc_heap_dump_" + tag + "_" + + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#else + std::string filename = "./rc_heap_dump_" + tag + "_" + std::to_string(pid) + + timeutils::GetDigitDate() + ".txt"; +#endif + std::ofstream ofs(filename, std::ofstream::out); + if (!ofs.is_open()) { + LOG(ERROR) << "racingCollector::DumpHeap open file failed" << maple::endl; + return; + } + InitTracing(); + + // dump roots + DumpRoots(ofs); + // dump object contents + bool ret = (*theAllocator).ForEachObj([&ofs](address_t obj) { + util::DumpObject(obj, ofs); + }); + if (UNLIKELY(!ret)) { + LOG(ERROR) << "(*theAllocator).ForEachObj() in DumpHeap() return false." << maple::endl; + } + + // dump object types + ofs << "Print Type information" << std::endl; + set classinfoSet; + ret = (*theAllocator).ForEachObj([&classinfoSet](address_t obj) { + MClass *classInfo = reinterpret_cast(obj)->GetClass(); + // No need to check the result of insertion, because there're multiple-insertions. + (void)classinfoSet.insert(classInfo); + }); + if (UNLIKELY(!ret)) { + LOG(ERROR) << "(*theAllocator).ForEachObj()#2 in DumpHeap() return false." 
<< maple::endl; + } + + for (auto it = classinfoSet.begin(); it != classinfoSet.end(); it++) { + MClass *classInfo = *it; + ofs << std::hex << classInfo << " " << classInfo->GetName() << std::endl; + } + + ofs << "Dump Allocator" << std::endl; + (*theAllocator).PrintPageFragment(ofs, "DumpHeap"); + ofs.close(); + + EndTracing(); + ResetBitmap(); +} + +void TracingCollector::InitTracing() { + ReferenceProcessor::Instance().InitSoftRefPolicy(); + const char *oldStackScan = getenv("USE_OLD_STACK_SCAN"); + doConservativeStackScan = VLOG_IS_ON(conservativestackscan) || (oldStackScan != nullptr); + + // create bitmap for mark sweep + if (UNLIKELY(!markBitmap.Initialized())) { +#if ALLOC_USE_FAST_PATH + FastAllocData::data.bm = &markBitmap; +#endif + markBitmap.Initialize(); + finalBitmap.Initialize(); + } else { + markBitmap.ResetCurEnd(); + finalBitmap.ResetCurEnd(); + } + + PostInitTracing(); +} + +void TracingCollector::EndTracing() { + PostEndTracing(); +} + +// Add to root set if it actually points to an object. This does not mark the object. +void TracingCollector::MaybeAddRoot(address_t data, RootSet &rootSet, bool useFastCheck) { + bool isValid = useFastCheck ? FastIsHeapObject(data) : + ((*theAllocator).AccurateIsValidObjAddr(data) +#if CONFIG_JSAN + && (JSANGetObjStatus(data) != kObjStatusQuarantined) +#endif + ); + if (isValid) { + rootSet.push_back(data); + } +} + +void TracingCollector::ParallelScanRoots(RootSet &rootSet, bool processWeak, bool rootString) { + MplThreadPool *threadPool = GetThreadPool(); + const size_t threadCount = threadPool->GetMaxThreadNum() + 1; + RootSet rootSetsInstance[threadCount]; + RootSet *rootSets = rootSetsInstance; // work_around the crash of clang parser + + // task to scan external roots. + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, processWeak, rootSets](size_t workerID) { + ScanExternalRoots(rootSets[workerID], processWeak); + })); + + // task to scan reference, allocator and classloader roots. + // those scan are very fast, so we combine them into one single task. + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + ScanReferenceRoots(rootSets[workerID]); + ScanAllocatorRoots(rootSets[workerID]); + ScanClassLoaderRoots(rootSets[workerID]); + })); + + // task to scan static field roots. + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + ScanStaticFieldRoots(rootSets[workerID]); + })); + + // task to scan stack roots. 
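+  // Stack scanning is distributed dynamically: threadCount copies of the task
+  // below are posted, and each copy repeatedly claims the next mutator index
+  // with an atomic fetch_add on stackTaskIndex until all mutators are scanned.
+  // Depending on doConservativeStackScan, a mutator's Java stack is scanned
+  // conservatively (ScanStackRoots) or precisely (VisitJavaStackRoots); the
+  // native stack with its scoped local refs is visited in both modes.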
+ stackTaskIndex.store(0); + auto &mutatorList = MutatorList::Instance().List(); + const size_t mutatorLen = mutatorList.size(); + for (size_t t = 0; t < threadCount; ++t) { + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, &mutatorList, &mutatorLen, rootSets](size_t workerID) { + RootSet &rs = rootSets[workerID]; + while (true) { + size_t old = stackTaskIndex.fetch_add(1, std::memory_order_release); + if (old >= mutatorLen) { + break; + } + auto it = mutatorList.begin(); + size_t nop = 0; + while (nop < old) { + ++it; + ++nop; + } + if (doConservativeStackScan) { + ScanStackRoots(**it, rs); + } else { + // for nearly-precise stack scanning + (*it)->VisitJavaStackRoots([&rs](address_t ref) { + // currently only scan & collect the local var + if (LIKELY((*theAllocator).AccurateIsValidObjAddr(ref))) { + rs.push_back(ref); + } + }); + } + // both conservtive and accurate scan, need scan scoped local refs + (*it)->VisitNativeStackRoots([&rs](address_t &ref) { + if (LIKELY((*theAllocator).AccurateIsValidObjAddr(ref))) { + rs.push_back(ref); + } + }); + } + })); + } + + // task to scan string roots. + if (rootString) { + threadPool->AddTask(new (std::nothrow) MplLambdaTask([this, rootSets](size_t workerID) { + ScanStringRoots(rootSets[workerID]); + })); + } + + PostParallelAddTask(processWeak); + threadPool->Start(); + threadPool->WaitFinish(true); + + for (size_t i = 0; i < threadCount; ++i) { + rootSet.insert(rootSet.end(), rootSets[i].begin(), rootSets[i].end()); + } + + LOG2FILE(kLogtypeGc) << "Total roots: " << rootSet.size() << '\n'; + PostParallelScanRoots(); +} + +void TracingCollector::FastScanRoots(RootSet &rootSet, RootSet &inaccurateRoots, + bool processWeak, bool rootString) { + // fast scan stack roots. + const size_t totalStackSize = 0; + size_t oldSize = inaccurateRoots.size(); + snapshotMutators.reserve(MutatorList::Instance().Size()); + numMutatorToBeScan = static_cast(MutatorList::Instance().Size()); + for (Mutator *mutator : MutatorList::Instance().List()) { + mutator->SetScanState(Mutator::kNeedScan); + snapshotMutators.insert(mutator); + // both conservtive and accurate scan, need scan scoped local refs + mutator->VisitNativeStackRoots([this, &inaccurateRoots](address_t &ref) { + if (LIKELY(InHeapBoundry(ref))) { + inaccurateRoots.push_back(ref); + } + }); + } + + const size_t nStackRoots = inaccurateRoots.size() - oldSize; + + // scan external roots. + oldSize = rootSet.size(); + ScanExternalRoots(rootSet, processWeak); + const size_t nExtRoots = rootSet.size() - oldSize; + + // scan reference roots. + oldSize = rootSet.size(); + ScanReferenceRoots(rootSet); + const size_t nRefRoots = rootSet.size() - oldSize; + + // scan allocator roots. + oldSize = rootSet.size(); + ScanAllocatorRoots(rootSet); + const size_t nAllocRoots = rootSet.size() - oldSize; + + // scan class loader roots. + oldSize = rootSet.size(); + ScanClassLoaderRoots(rootSet); + const size_t nClassloaderRoots = rootSet.size() - oldSize; + + LOG2FILE(kLogtypeGc) << + " mutator: " << MutatorList::Instance().Size() << '\n' << + " stack slots: " << nStackRoots << '/' << (totalStackSize / sizeof(reffield_t)) << '\n' << + " ext roots: " << nExtRoots << '\n' << + " ref roots: " << nRefRoots << '\n' << + " alloc roots: " << nAllocRoots << '\n' << + " cl roots: " << nClassloaderRoots << '\n'; + + // scan string roots, disabled by default. 
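+  // Note: the stack slots pushed into inaccurateRoots above were only checked
+  // with InHeapBoundry(); PrepareRootSet() (defined below) is provided to
+  // filter such inaccurate roots with AccurateIsValidObjAddrConcurrent().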
+ if (UNLIKELY(rootString)) { + oldSize = rootSet.size(); + ScanStringRoots(rootSet); + const size_t nStringRoots = rootSet.size() - oldSize; + LOG2FILE(kLogtypeGc) << " string roots: " << nStringRoots << '\n'; + } +} + +void TracingCollector::PrepareRootSet(RootSet &rootSet, RootSet &&inaccurateRoots) { + for (address_t addr : inaccurateRoots) { + if ((*theAllocator).AccurateIsValidObjAddrConcurrent(addr)) { + rootSet.push_back(addr); + } + } +} + +ATTR_NO_SANITIZE_ADDRESS +void TracingCollector::ScanStackRoots(Mutator &mutator, RootSet &rootSet) { + mutator.VisitStackSlotsContent( + [this, &rootSet](address_t ref) { + MaybeAddRoot(ref, rootSet); + } + ); + mutator.VisitJavaStackRoots([&rootSet](address_t ref) { + // currently only scan & collect the local var + if (LIKELY((*theAllocator).AccurateIsValidObjAddr(ref))) { + rootSet.push_back(ref); + } + }); +} + +void TracingCollector::ScanAllStacks(RootSet &rootSet) { + // visit all mutators when the world is stopped. + MutatorList::Instance().VisitMutators([this, &rootSet](Mutator *mutator) { + ScanStackRoots(*mutator, rootSet); + }); +} + +void TracingCollector::ScanStaticFieldRoots(RootSet &rootSet) { + RefVisitor visitor = [this, &rootSet](address_t &obj) { + MaybeAddRoot(obj, rootSet, true); + }; + GCRegisteredRoots::Instance().Visit(visitor); +} + +void TracingCollector::ScanClassLoaderRoots(RootSet &rootSet) { + // Using MaybeAddRoot, because reference table may have meta object + RefVisitor addRootF = [&rootSet, this](const address_t &addr) { + MaybeAddRoot(addr, rootSet, true); + }; + LoaderAPI::Instance().VisitGCRoots(addRootF); +} + +void TracingCollector::ScanZterpStaticRoots(RootSet &rootSet) { + RefVisitor visitor = [this, &rootSet](address_t obj) { + MaybeAddRoot(obj, rootSet, true); + }; + (*zterpStaticRootAllocator).VisitStaticRoots(visitor); +} +void TracingCollector::ScanReferenceRoots(RootSet &rs) const { + RefVisitor visitor = [&rs](address_t &obj) { + if (!IS_HEAP_OBJ(obj)) { + MRT_BuiltinAbortSaferegister(obj, nullptr); + } + rs.push_back(obj); + }; + ReferenceProcessor::Instance().VisitGCRoots(visitor); +} + +void TracingCollector::ScanExternalRoots(RootSet &rootSet, bool processWeak) { + // Using MaybeAddRoot, because reference table may have meta object + RefVisitor addRootF = [&rootSet, this](address_t &addr) { + MaybeAddRoot(addr, rootSet, true); + }; + maple::GCRootsVisitor::Visit(addRootF); + if (!processWeak) { + maple::GCRootsVisitor::VisitWeakGRT(addRootF); + } +} + +void TracingCollector::ScanWeakGlobalRoots(RootSet &rootSet) { + RefVisitor addRootF = [&rootSet, this](address_t &addr) { + MaybeAddRoot(addr, rootSet, true); + }; + maple::GCRootsVisitor::VisitWeakGRT(addRootF); +} + +void TracingCollector::ScanLocalRefRoots(RootSet &rootSet) { + // Using MaybeAddRoot, because reference table may have meta object + RefVisitor addRootF = [&rootSet, this](address_t &addr) { + MaybeAddRoot(addr, rootSet, true); + }; + maple::GCRootsVisitor::VisitLocalRef(addRootF); +} + +void TracingCollector::ScanGlobalRefRoots(RootSet &rootSet) { + // Using MaybeAddRoot, because reference table may have meta object + RefVisitor addRootF = [&rootSet, this](address_t &addr) { + MaybeAddRoot(addr, rootSet, true); + }; + maple::GCRootsVisitor::VisitGlobalRef(addRootF); +} + +void TracingCollector::ScanThreadExceptionRoots(RootSet &rootSet) { + // Using MaybeAddRoot, because reference table may have meta object + maple::rootObjectFunc addRootF = [&rootSet, this](address_t addr) { + MaybeAddRoot(addr, rootSet, true); + }; + 
maple::GCRootsVisitor::VisitThreadException(addRootF); +} + +void TracingCollector::ScanAllocatorRoots(RootSet &rootSet) { + // Using MaybeAddRoot, because reference table may have meta object + RefVisitor addRootF = [&rootSet, this](const address_t &addr) { + MaybeAddRoot(addr, rootSet, true); + }; + (*theAllocator).VisitGCRoots(addRootF); +} + +void TracingCollector::ScanStringRoots(RootSet &rootSet) { + RefVisitor visitor = [this, &rootSet](const address_t &addr) { + if (IS_HEAP_OBJ(addr)) { + AddRoot(addr, rootSet); + } + }; + VisitStringPool(visitor); +} + +void TracingCollector::DumpFinalizeGarbage() { + pid_t pid = getpid(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); + std::string filename = dirName + "/final_garbage_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#else + std::string filename = "./rc_heap_dump_final_garbage_" + std::to_string(pid) + + "_" + timeutils::GetDigitDate() + ".txt"; +#endif + std::ofstream ofs(filename, std::ofstream::out); + if (!ofs.is_open()) { + LOG(ERROR) << "TracingCollector::DumpFinalizeGarbage open file failed" << maple::endl; + return; + } + bool tmpResult = (*theAllocator).ForEachObj([&ofs](address_t objAddr) { + if (IsUnmarkedResurrectable(objAddr)) { + MClass *classInfo = reinterpret_cast(objAddr)->GetClass(); + ofs << "[final] " << std::hex << objAddr << std::dec << " " << RefCount(objAddr) << " " << + namemangler::EncodeName(std::string(classInfo->GetName())) << std::endl; + util::DumpObject(objAddr, ofs); + } + }); + if (UNLIKELY(!tmpResult)) { + LOG(ERROR) << "(*theAllocator).ForEachObj() in TracingCollector::DumpFinalizeGarbage() return false." << + maple::endl; + } + ofs.close(); +} + +void TracingCollector::DumpWeakSoft() { + pid_t pid = getpid(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); + std::string filename = dirName + "/softweak_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#else + std::string filename = "./rc_heap_dump_softweak_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#endif + std::ofstream ofs(filename, std::ofstream::out); + if (!ofs.is_open()) { + LOG(ERROR) << "TracingCollector::DumpWeakSoft open file failed" << maple::endl; + return; + } + MrtVisitReferenceRoots([this, &ofs](address_t reference) { + if (reference == 0) { + return; + } + address_t referent = ReferenceGetReferent(reference); + if (referent && InHeapBoundry(referent) && IsGarbage(referent)) { + MClass *classInfo = reinterpret_cast(referent)->GetClass(); + ofs << "weak " << std::hex << referent << std::dec << " " << + namemangler::EncodeName(std::string(classInfo->GetName())) << " " << RefCount(referent) << " " << + WeakRefCount(referent) << " " << ResurrectWeakRefCount(referent) << std::endl; + } + }, RPMask(kRPWeakRef)); + MrtVisitReferenceRoots([this, &ofs](address_t reference) { + if (reference == 0) { + return; + } + address_t referent = ReferenceGetReferent(reference); + if (referent && InHeapBoundry(referent) && IsGarbage(referent)) { + MClass *classInfo = reinterpret_cast(referent)->GetClass(); + ofs << "soft " << std::hex << referent << std::dec << " " << + namemangler::EncodeName(std::string(classInfo->GetName())) << " " << RefCount(referent) << " " << + WeakRefCount(referent) << " " << ResurrectWeakRefCount(referent) << std::endl; + } + }, RPMask(kRPSoftRef)); + maple::irtVisitFunc visitor = [this, &ofs](uint32_t index, address_t obj) { + if (InHeapBoundry(obj) && IsGarbage(obj)) { + MClass *classInfo = reinterpret_cast(obj)->GetClass(); + 
ofs << "globalweak " << index << " " << std::hex << obj << std::dec << " " << + namemangler::EncodeName(std::string(classInfo->GetName())) << " " << RefCount(obj) << + " " << WeakRefCount(obj) << " " << ResurrectWeakRefCount(obj) << std::endl; + } + }; + maple::GCRootsVisitor::VisitWeakGRT(visitor); + ofs.close(); +} + +void TracingCollector::DumpCleaner() { + pid_t pid = getpid(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); + std::string filename = dirName + "/cleaner_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#else + std::string filename = "./rc_heap_dump_cleaner_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#endif + std::ofstream ofs(filename, std::ofstream::out); + if (!ofs.is_open()) { + LOG(ERROR) << "TracingCollector::DumpWeakSoft open file failed" << maple::endl; + return; + } + MrtVisitReferenceRoots([this, &ofs](address_t reference) { + if (reference == 0) { + return; + } + address_t referent = ReferenceGetReferent(reference); + if (referent && InHeapBoundry(referent) && IsGarbage(referent)) { + MClass *classInfo = reinterpret_cast(referent)->GetClass(); + ofs << "cleaner and phantom refs " << std::hex << referent << std::dec << + " " << namemangler::EncodeName(std::string(classInfo->GetName())) << + " " << RefCount(referent) << + " " << WeakRefCount(referent) << + " " << ResurrectWeakRefCount(referent) << + std::endl; + } + }, RPMask(kRPPhantomRef)); + ofs.close(); +} + +void TracingCollector::DumpGarbage() { + pid_t pid = getpid(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); + std::string filename = dirName + "/garbage_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#else + std::string filename = "./rc_heap_dump_garbage_" + std::to_string(pid) + "_" + timeutils::GetDigitDate() + ".txt"; +#endif + std::ofstream ofs(filename, std::ofstream::out); + if (!ofs.is_open()) { + LOG(ERROR) << "TracingCollector::DumpGarbage open file failed" << maple::endl; + return; + } + std::map metaToSoNameMap; + bool tmpResult = (*theAllocator).ForEachObj([&ofs, &metaToSoNameMap, this](address_t objAddr) { + if (IsGarbage(objAddr)) { + MObject *obj = reinterpret_cast(objAddr); + MClass *cls = obj->GetClass(); + if (metaToSoNameMap.find(cls) == metaToSoNameMap.end()) { + metaToSoNameMap[cls] = GetSoNameFromCls(cls); + } + ofs << "[garbage] " << std::hex << objAddr << " " << reinterpret_cast(cls) << std::dec << " " << + RefCount(objAddr) << " " << + namemangler::EncodeName(std::string(cls->GetName())) << " " << + obj->GetSize() << std::endl; + // dump ref child + auto refFunc = [objAddr, &ofs, this](const reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if (IS_HEAP_ADDR(ref) && IsGarbage(ref)) { + ofs << (reinterpret_cast(&field) - objAddr) << " " << std::hex << ref << + (kind == kWeakRefBits ? " w" : (kind == kUnownedRefBits ? " u" : "")) << + std::dec << std::endl; + } + }; + ForEachRefField(objAddr, refFunc); + ofs << std::endl; + } + }); + + for (auto &entry : metaToSoNameMap) { + ofs << "[class] " << std::hex << reinterpret_cast(entry.first) << " " << entry.second << std::endl; + } + if (UNLIKELY(!tmpResult)) { + LOG(ERROR) << "(*theAllocator).ForEachObj() in TracingCollector::DumpGarbage() return false." 
<< maple::endl; + } + ofs.close(); +} + +ATTR_NO_SANITIZE_ADDRESS +void TracingCollector::DumpRoots(std::ofstream &ofs) { + maple::rootObjectFunc maybePrintRootsF = [this, &ofs] (address_t rootObj) { + if (rootObj == 0) { + return; + } + if (VLOG_IS_ON(dumpheapsimple)) { + if (FastIsHeapObject(rootObj)) { + ofs << std::hex << rootObj << + " F " << FastIsHeapObject(rootObj) << + " A " << (*theAllocator).AccurateIsValidObjAddr(rootObj) << + std::hex << std::endl; + } + } else { + ofs << std::hex << rootObj << + " Fast Check " << FastIsHeapObject(rootObj) << + " Accurate Check " << (*theAllocator).AccurateIsValidObjAddr(rootObj) << + std::hex << std::endl; + } + }; + RefVisitor mayPrintVisitor = [&](const address_t &obj) { + maybePrintRootsF(obj); + }; + + ofs << "Thread stack roots" << std::endl; + MutatorList::Instance().VisitMutators([&maybePrintRootsF](Mutator *mutator) { + // do conservative stack scanning for the mutator. + mutator->VisitStackSlotsContent(maybePrintRootsF); + }); + + ofs << "static field roots" << std::endl; + VisitStaticFieldRoots(maybePrintRootsF); + + ofs << "allocator roots" << std::endl; + VisitAllocatorRoots(mayPrintVisitor); + + ofs << "string table roots" << std::endl; + VisitStringRoots(mayPrintVisitor); + + ofs << "reference processor roots" << std::endl; + VisitReferenceRoots(maybePrintRootsF); + + ofs << "GCRoots in threads/pending exception" << std::endl; + maple::GCRootsVisitor::VisitThreadException(maybePrintRootsF); + + ofs << "GCRoots in local reference table" << std::endl; + maple::GCRootsVisitor::VisitLocalRef(mayPrintVisitor); + + ofs << "GCRoots in global reference table" << std::endl; + maple::GCRootsVisitor::VisitGlobalRef(mayPrintVisitor); + + // visit weak roots + ofs << "GCRoots in weak global" << std::endl; + maple::irtVisitFunc weakFunc = [&maybePrintRootsF](uint32_t, address_t obj) { maybePrintRootsF(obj); }; + maple::GCRootsVisitor::VisitWeakGRT(weakFunc); + + ofs << "GCRoots in classloader" << std::endl; + LoaderAPI::Instance().VisitGCRoots(mayPrintVisitor); + ofs << "GCRoots end" << std::endl; +} + +void TracingCollector::VisitStaticFieldRoots(const maple::rootObjectFunc &func) { + RefVisitor visitor = [&func] (address_t obj) { + // final static fields of java.lang.String type may actually contain + // pointers to utf16-encoded raw strings. As we now allocate our heap at + // low addresses, some string patterns may be interpreted as addresses into + // the heap. + // + // We now add an "AccurateIsValidObjAddr" call to workaround this problem. + // This basically make static roots conservative. Please fix the issue and + // remove AccurateIsValidObjAddr check. 
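+    //
+    // As currently written, the filter below is the IS_HEAP_OBJ() range check:
+    // values outside the heap are rejected here, while heap-looking values are
+    // still reported conservatively to the visitor.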
+ if (obj != 0 && IS_HEAP_OBJ(obj)) { + func(obj); + } + }; + GCRegisteredRoots::Instance().Visit(visitor); +} + +void TracingCollector::VisitAllocatorRoots(const RefVisitor &func) const { + (*theAllocator).VisitGCRoots(func); +} + +void TracingCollector::VisitStringRoots(const RefVisitor &func) const { + VisitStringPool(func); +} + +void TracingCollector::VisitReferenceRoots(const maple::rootObjectFunc &func) const { + MrtVisitReferenceRoots(func, kRPAllFlags); +} + +int32_t TracingCollector::GetThreadCount(bool isConcurrent) { + if (GetThreadPool() == nullptr) { + return 1; + } + if (isConcurrent) { + return concurrentThreadCount; + } + if ((!IsSystem()) && Collector::Instance().InJankImperceptibleProcessState()) { + // allow STW, later + __MRT_ASSERT(parallelThreadCount >= 2, "invalid parallelThreadCount"); + LOG(INFO) << "GetThreadCount in InJankImperceptibleProcessState "; + return 2; + } else { + return parallelThreadCount; + } +} + +void TracingCollector::DefaultGCFinishCallback() { +#if __MRT_DEBUG + std::stringstream ss; + (*permAllocator).Dump(ss); + (*metaAllocator).Dump(ss); + (*decoupleAllocator).Dump(ss); + LOG2FILE(kLogtypeGc) << ss.str(); +#endif +} + +void TracingCollector::ObjectArrayCopy(address_t javaSrc, address_t javaDst, int32_t srcPos, + int32_t dstPos, int32_t length, bool check) { + size_t elemSize = sizeof(reffield_t); + char *srcCarray = reinterpret_cast(reinterpret_cast(javaSrc)->ConvertToCArray()); + char *dstCarray = reinterpret_cast(reinterpret_cast(javaDst)->ConvertToCArray()); + reffield_t *src = reinterpret_cast(srcCarray + elemSize * srcPos); + reffield_t *dst = reinterpret_cast(dstCarray + elemSize * dstPos); + + TLMutator().SatbWriteBarrier(javaDst); + + if ((javaSrc == javaDst) && (abs(srcPos - dstPos) < length)) { + // most of the copy here are for small length. inline it here + // assumption: length > 0; aligned to 8bytes + if (length < kLargArraySize) { + if (srcPos > dstPos) { // copy to front + for (int32_t i = 0; i < length; ++i) { + dst[i] = src[i]; + } + } else { // copy to back + for (int32_t i = length - 1; i >= 0; --i) { + dst[i] = src[i]; + } + } + } else { + if (memmove_s(dst, elemSize * length, src, elemSize * length) != EOK) { + LOG(FATAL) << "Function memmove_s() failed." 
<(javaDst)->GetClass(); + MClass *dstComponentType = dstClass->GetComponentClass(); + MClass *lastAssignableComponentType = dstComponentType; + for (int32_t i = 0; i < length; ++i) { + reffield_t srcelem = src[i]; + MObject *srcComponent = reinterpret_cast(RefFieldToAddress(srcelem)); + if (AssignableCheckingObjectCopy(*dstComponentType, lastAssignableComponentType, srcComponent)) { + dst[i] = srcelem; + } else { + ThrowArrayStoreException(*srcComponent, i, *dstComponentType); + return; + } + } + } + } +} + +void TracingCollector::PostObjectClone(address_t src, address_t dst) { + // nothing to do for tracing collector + (void)src; + (void)dst; +} + +bool TracingCollector::UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, + address_t expectedValue, address_t newValue) { + JSAN_CHECK_OBJ(obj); + TLMutator().SatbWriteBarrier(obj, *reinterpret_cast(obj + offset)); + reffield_t expectedRef = AddressToRefField(expectedValue); + reffield_t newRef = AddressToRefField(newValue); + auto atomicField = reinterpret_cast*>(obj + offset); + return atomicField->compare_exchange_strong(expectedRef, newRef, std::memory_order_seq_cst); +} + +address_t TracingCollector::UnsafeGetObjectVolatile(address_t obj, ssize_t offset) { + JSAN_CHECK_OBJ(obj); + auto atomicField = reinterpret_cast*>(obj + offset); + return atomicField->load(std::memory_order_acquire); +} + +address_t TracingCollector::UnsafeGetObject(address_t obj, ssize_t offset) { + JSAN_CHECK_OBJ(obj); + return LoadRefField(obj, offset); +} + +void TracingCollector::UnsafePutObject(address_t obj, ssize_t offset, address_t newValue) { + JSAN_CHECK_OBJ(obj); + TLMutator().SatbWriteBarrier(obj, *reinterpret_cast(obj + offset)); + StoreRefField(obj, offset, newValue); +} + +void TracingCollector::UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue) { + JSAN_CHECK_OBJ(obj); + TLMutator().SatbWriteBarrier(obj, *reinterpret_cast(obj + offset)); + auto atomicField = reinterpret_cast*>(obj + offset); + atomicField->store(static_cast(newValue), std::memory_order_release); +} + +void TracingCollector::UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue) { + JSAN_CHECK_OBJ(obj); + UnsafePutObjectVolatile(obj, offset, newValue); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/conn_comp.cpp b/src/mrt/compiler-rt/src/collector/conn_comp.cpp new file mode 100644 index 0000000000..6f22bfb75d --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/conn_comp.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "collector/conn_comp.h" + +#include +#include +#include + +#include "mm_config.h" +#include "mm_utils.h" +#include "chosen.h" +#include "collector/cp_generator.h" + +namespace maplert { +// default find scc run in milliseconds +const int kDefaultFindSccTime = 200000; +const int kInitalGarbageCount = 100000; +const uint32_t kActionEnter = 0; +const uint32_t kActionExit = 1; +bool inline IsInHeapObject(address_t obj) { + return IS_HEAP_ADDR(static_cast(obj)); +} +uint32_t inline GetAction(reffield_t ref) { + return ref & kActionExit; +} +reffield_t inline GetNodeAddr(reffield_t ref) { + return ref & ~kActionExit; +} + +void ConnectedComponentFinder::ProcessActionEnter(vector &workingList, + unordered_map &nodeInfoMap, uint32_t &curSeqNum, address_t node) { + NodeInfo &curNodeInfo = nodeInfoMap[static_cast(node)]; + if (curNodeInfo.seqNum == 0) { // Newly visited. + InitNode(curNodeInfo, curSeqNum); + PushCandidate(node); + workingList.push_back(static_cast(node | kActionExit)); + + auto refFunc = [node, &workingList, &nodeInfoMap](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if ((kind != kNormalRefBits) || (ref == node) || !IS_HEAP_ADDR(ref)) { + return; + } + if ((nodeInfoMap.find(field) != nodeInfoMap.end()) && + (nodeInfoMap[field].seqNum == 0)) { + workingList.push_back(ref); + } + }; + ForEachRefField(node, refFunc); + } // Otherwise ignore, because already visited. +} + +void ConnectedComponentFinder::ProcessActionExit(unordered_map &nodeInfoMap, address_t node, + CyclePatternGenerator &cpg) { + NodeInfo &curNodeInfo = nodeInfoMap[static_cast(node)]; + uint32_t lowestLink = curNodeInfo.lowLink; + auto refFunc = [this, &lowestLink, node, &nodeInfoMap](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if ((kind == kNormalRefBits) && (ref != node) && IsCandidate(ref)) { + uint32_t neighborLowLink = nodeInfoMap[static_cast(ref)].lowLink; + lowestLink = min(lowestLink, neighborLowLink); + } + }; + ForEachRefField(node, refFunc); + curNodeInfo.lowLink = lowestLink; + + uint32_t mySeqNum = curNodeInfo.seqNum; + if (lowestLink == mySeqNum) { + if (candidateStack.back() == node) { + (void)PopCandidate(); + } else { + // last poped and no duplicate type can be root in CycleGarbage + static vector foundComponent; + address_t componentNode; + bool skip = false; + do { + componentNode = PopCandidate(); + if (!skip) { + foundComponent.push_back(componentNode); + bool collectAll = kMergeAllPatterns || (ClassCycleManager::GetPatternsCache().length() == 0); + if ((LIKELY(!collectAll)) && (foundComponent.size() > kCyclepPatternMaxNodeNum)) { + skip = true; + } + } + } while (componentNode != node); + if ((!skip) && foundComponent.size() > 1) { + cpg.CollectCycleGarbage(foundComponent); + } + foundComponent.clear(); + } + } +} + +// Different with cycle leak check on qemu +// 1. faster speed, save intermedidate copy and setup +// Maybe combined later +// +// In worklist, LSB 1 means Exit +// Skip enter already visisted object +void ConnectedComponentFinder::RunInBackupTrace(TracingCollector &collector, CyclePatternGenerator &cpg) { + vector workingList; + unordered_map nodeInfoMap; + workingList.reserve(kInitalGarbageCount); + bool collectAll = kMergeAllPatterns || (ClassCycleManager::GetPatternsCache().length() == 0); + // skip complex pattern + // when collect garbages: + // 1. skip object rc > cycle_pattern_node_max + // when finding SCC + // 2. 
skip scc node number > cycle_pattern_node_max + // bool skip_complex_pattern = (ClassCycleManager::GetPatternsCache().length() != 0) + // + // if kMergeAllPatterns is true, no limit on processing time 200ms + { + MRT_PHASE_TIMER("DumpCycleLeak: collect garbage nodes"); + (void)(*theAllocator).ForEachObj( + [&collector, &workingList, &nodeInfoMap, collectAll](address_t objaddr) { + if ((HasChildRef(objaddr)) && (RefCount(objaddr) > 0) && (collector.IsGarbage(objaddr))) { + if (UNLIKELY(collectAll) || RefCount(objaddr) <= kMaxRcInCyclePattern) { + workingList.push_back(static_cast(objaddr)); + nodeInfoMap[static_cast(objaddr)] = NewNode(); + } + } + } + ); + LOG2FILE(kLogtypeCycle) << "garbage objects count " << workingList.size() << std::endl; + } + + { + MRT_PHASE_TIMER("DumpCycleLeak: Find CycleGarbage"); + uint64_t startTime = timeutils::NanoSeconds(); + uint32_t curSeqNum = 1; + while (!workingList.empty()) { + reffield_t curRef = workingList.back(); + workingList.pop_back(); + uint32_t action = GetAction(curRef); + address_t node = static_cast(GetNodeAddr(curRef)); + if (action == kActionEnter) { + ProcessActionEnter(workingList, nodeInfoMap, curSeqNum, node); + } else { // action is kExit + ProcessActionExit(nodeInfoMap, node, cpg); + } + } + if (LIKELY(!collectAll)) { + uint64_t endTime = timeutils::NanoSeconds(); + uint64_t costTime = (endTime - startTime) / 1000UL; // 1nanoSeconds = 1000milliseconds + if (costTime > kDefaultFindSccTime) { + return; + } + } + } +} + +void ConnectedComponentFinder::ProcessEnterAction(Node node) { + LOG2FILE(kLogTypeMix) << "Visiting node " << node << ", action: kEnter" << std::endl; + if (!IsVisited(node)) { // Newly visited. + InitializeNodeInfo(node); // Mark as visited + PushCandidate(node); + + workList.push_back(WorkItem(kExit, node)); + + vector neighbors = nnf(node); + for (auto it = neighbors.rbegin(); it != neighbors.rend(); ++it) { + if (rootsOnly && !IsRoot(*it)) { + continue; + } + workList.push_back(WorkItem(kEnter, *it)); + } + } // Otherwise ignore, because already visited. +} + +void ConnectedComponentFinder::ProcessExitAction(Node node) { + LOG2FILE(kLogTypeMix) << "Visiting node " << node << ", action: kExit" << std::endl; + NodeInfoInClass &myInfo = nodeToInfo[node]; + SeqNum lowestLink = myInfo.lowLink; + + vector neighbors = nnf(node); + for (auto neighbor : neighbors) { + if (rootsOnly && !IsRoot(neighbor)) { + continue; + } + if (IsCandidate(neighbor)) { + SeqNum neighborLowLink = nodeToInfo[neighbor].lowLink; + lowestLink = min(lowestLink, neighborLowLink); + } + } + + myInfo.lowLink = lowestLink; + + SeqNum mySeqNum = myInfo.seqNum; + if (lowestLink == mySeqNum) { + LOG2FILE(kLogTypeMix) << " Creating new component from " << node << " " << mySeqNum << std::endl; + vector myComponent; + + Node componentNode; + do { + componentNode = PopCandidate(); + myComponent.push_back(componentNode); + } while (componentNode != node); + + results.push_back(move(myComponent)); + } +} + +void ConnectedComponentFinder::Run() { + // Initialize work list. + for (auto it = roots.rbegin(); it != roots.rend(); ++it) { + if (UNLIKELY(!rootsSet.insert(*it).second)) { + LOG(ERROR) << "rootsSet.insert() in ConnectedComponentFinder::Run() failed." 
<< maple::endl; + } + workList.push_back(WorkItem(kEnter, *it)); + } + + while (!workList.empty()) { + WorkItem item = workList.back(); + workList.pop_back(); + + Action action = item.first; + Node node = item.second; + + if (action == kEnter) { + ProcessEnterAction(node); + } else { // action is kExit + ProcessExitAction(node); + } + } +} + +void ConnectedComponentFinder::InitializeNodeInfo(Node node) { + SeqNum mySeqNum = nextSeqNum; + ++nextSeqNum; + + LOG2FILE(kLogTypeMix) << "seqNum[" << node << "] = " << mySeqNum << std::endl; + + nodeToInfo[node] = NodeInfoInClass { + .seqNum = mySeqNum, + .lowLink = mySeqNum, + }; +} + +void ConnectedComponentFinder::PushCandidate(Node node) { + candidateStack.push_back(node); + if (UNLIKELY(!candidateSet.insert(node).second)) { + LOG(ERROR) << "candidateSet.insert() in ConnectedComponentFinder::PushCandidate() failed." << maple::endl; + } +} + +ConnectedComponentFinder::Node ConnectedComponentFinder::PopCandidate() { + if (!candidateStack.empty()) { + Node node = candidateStack.back(); + candidateStack.pop_back(); + candidateSet.erase(node); + return node; + } + return static_cast(0); +} +} // namespace diff --git a/src/mrt/compiler-rt/src/collector/cp_generator.cpp b/src/mrt/compiler-rt/src/collector/cp_generator.cpp new file mode 100644 index 0000000000..d706bef622 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/cp_generator.cpp @@ -0,0 +1,1228 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/cp_generator.h" + +#include "chosen.h" + +namespace maplert { +namespace { +const char *kSkipRootType[] = { + "array", "set;", "list;", "map;", + "map$", "hashtable", "collections" +}; +} +unordered_map ClassCycleManager::dynamicLoadedCycleClasses; +string ClassCycleManager::patternsCache; +mutex ClassCycleManager::cycleMutex; +bool ClassCycleManager::cpUpdated = false; + +bool SortByObjMetaRC(address_t obj1, address_t obj2) { + uintptr_t classInfo1 = MObject::Cast(obj1)->GetClass()->AsUintptr(); + uintptr_t classInfo2 = MObject::Cast(obj2)->GetClass()->AsUintptr(); + if (classInfo1 != classInfo2) { + return classInfo1 < classInfo2; + } + uint32_t rc1 = RefCount(obj1); + uint32_t rc2 = RefCount(obj2); + return rc1 < rc2; +} + +// Output format: +// Class Name:soname +// Cycle information +// node1 +// node2 +// edge0 +// edge1 +// .. 
+// edgen +// +// class: Ljava_2Fnio_2Fchannels_2Fspi_2FAbstractInterruptibleChannel_241_3B:libmaplecore-all.so +// Cycle: 2, 2, 1 +// Node: 0, 16, Lsun_2Fnio_2Fch_2FFileChannelImpl_3B:libmaplecore-all.so, 2 +// Node: 1, 80, Ljava_2Fio_2FRandomAccessFile_3B:libmaplecore-all.so, 1 +// Edge: 1, 0, 32 +// Edge: 2, 1, 16 +void CyclePattern::Print(ostream &os) { + os << "#SCC count " << count << std::endl; + os << "class: " << namemangler::EncodeName(std::string(MObject::Cast(nodes[0].expectType)->GetName())) << + ":" << GetSoNameFromCls(MObject::Cast(nodes[0].expectType)) << std::endl; + os << "Cycle: " << (NumNodes() - 1) << ", " << NumEdges() << ", " << nodes[0].expectRc << std::endl; + for (int32_t i = 1; i < NumNodes(); ++i) { + os << "Node: " << nodes[i].loadIndex << + ", " << nodes[i].loadOffset << + ", " << namemangler::EncodeName(std::string(MObject::Cast(nodes[i].expectType)->GetName())) << + ":" << GetSoNameFromCls(MObject::Cast(nodes[i].expectType)) << + ", " << nodes[i].expectRc << std::endl; + } + for (int32_t i = 0; i < NumEdges(); ++i) { + os << "Edge: " << edges[i].srcNodeIndex << ", " << + edges[i].destNodeIndex << ", " << edges[i].loadOffset << std::endl; + } + os << std::endl; +} + +// Cycle Header: +// NodeInfo +// EdgeInfo +void CyclePattern::Emit(char *buffer, size_t limit) { + if (UNLIKELY(!IsValid(false))) { + LOG(ERROR) << "Emit pattern failed, node has invalid rc" << maple::endl; + Print(LOG_STREAM(ERROR)); + return; + } + if (UNLIKELY(headerInfo.expectRc != nodes[0].expectRc)) { + LOG(ERROR) << "Emit pattern failed, root's expectRc not equals header" << maple::endl; + return; + } + if (UNLIKELY(headerInfo.hasNextFlag != 0)) { + LOG(ERROR) << "Emit pattern failed, invalid next flag" << maple::endl; + return; + } + if (UNLIKELY(((reinterpret_cast(reinterpret_cast(buffer))) & 0x7) != 0)) { + LOG(ERROR) << "Emit pattern failed, invalid buffer's address" << maple::endl; + return; + } + if (UNLIKELY(limit < kDWordBytes)) { + LOG(ERROR) << "Emit pattern failed, invalid buffer's size" << maple::endl; + return; + } + CyclePatternInfo *cyclePatternInfo = reinterpret_cast((buffer)); + *cyclePatternInfo = headerInfo; + // subtract to match with current pattern + cyclePatternInfo->nNodes -= 1; + // cause of during constructing CyclePattern and clean stale cyclepattern + // use the match_counts, now need to reset + // + // Emit nodes, skip first one + char *curBuf = buffer + sizeof(CyclePatternInfo); + for (int32_t i = 1; i < NumNodes(); ++i) { + __MRT_ASSERT(static_cast(curBuf - reinterpret_cast(buffer)) < limit, "invalid"); + nodes[i].flags = 0; + *(reinterpret_cast(curBuf)) = nodes[i]; + curBuf += sizeof(CyclePatternNodeInfo); + } + + // Emit edges + for (int32_t i = 0; i < NumEdges(); ++i) { + __MRT_ASSERT(static_cast(curBuf - reinterpret_cast(buffer)) < limit, "invalid"); + *(reinterpret_cast(curBuf)) = edges[i]; + curBuf += sizeof(CyclePatternEdgeInfo); + } + __MRT_ASSERT(static_cast(curBuf - reinterpret_cast(buffer)) <= limit, "invalid"); + // the statistic of class's pattern has been updated + ClassCycleManager::SetCpUpdatedFlag(); +} + +void CyclePattern::ReindexNodesAndEdges(const CyclePattern &master, int8_t indexMap[], int32_t numTotalEdges, + const CyclePatternEdgeInfo edgesAll[], bool edgeUsed[]) { + int8_t currentNodeIndex = 1; + // search all nodes from startIndex with edgesAll + for (int32_t i = 0; i < NumNodes(); ++i) { + int8_t origSrcIndex = static_cast(nodes[i].flags); + if (origSrcIndex == -1) { + count = 0; + mergeCandiate = false; + return; + } + bool 
foundAll = false; + // search all edges from origSrcIndex + for (int32_t j = 0; j < numTotalEdges; ++j) { + if ((!edgeUsed[j]) && edgesAll[j].srcNodeIndex == origSrcIndex) { + int8_t origDestIndex = edgesAll[j].destNodeIndex; + if (indexMap[origDestIndex] == -1) { + // dest not added + indexMap[origDestIndex] = currentNodeIndex; + nodes[currentNodeIndex].expectType = master.nodes[origDestIndex].expectType; + nodes[currentNodeIndex].expectRc = master.nodes[origDestIndex].expectRc; + nodes[currentNodeIndex].loadIndex = i; + nodes[currentNodeIndex].loadOffset = edgesAll[j].loadOffset; + nodes[currentNodeIndex].flags = origDestIndex; // save index in master + edgeUsed[j] = true; + ++currentNodeIndex; + foundAll = (currentNodeIndex == NumNodes()); + } + } + } + if (foundAll) { + break; + } + } + + __MRT_ASSERT(currentNodeIndex == NumNodes(), "Invalid case"); + + // Setup other edges from new index and all edges + int32_t currentEdgeIndex = 0; + for (int32_t i = 0; i < numTotalEdges; ++i) { + if (edgeUsed[i]) { + continue; + } + edges[currentEdgeIndex].srcNodeIndex = indexMap[edgesAll[i].srcNodeIndex]; + edges[currentEdgeIndex].destNodeIndex = indexMap[edgesAll[i].destNodeIndex];; + edges[currentEdgeIndex].loadOffset = edgesAll[i].loadOffset; + currentEdgeIndex++; + } + __MRT_ASSERT(currentEdgeIndex == NumEdges(), "Invalid case"); +} + +// Used when iterate all cycles from different root node. +// +// 1. Find all edges start from root_index +// 2. Reindex nodes +// 3. List all implicit/explicit edges +// 4. Setup new cycle from new index and all edges +CyclePattern::CyclePattern(CyclePattern &master, int32_t startIndex) : objAddrs {} { + __MRT_ASSERT(startIndex != 0, "invalid"); + count = master.count; + headerInfo = master.headerInfo; + headerInfo.expectRc = master.nodes[startIndex].expectRc; + mergeCandiate = false; + + // find all edges + int32_t numTotalEdges = NumNodes() + NumEdges() - 1; + CyclePatternEdgeInfo edgesAll[numTotalEdges]; + bool edgeUsed[numTotalEdges]; + for (int32_t i = 0; i < numTotalEdges; ++i) { + edgeUsed[i] = false; + } + for (int32_t i = 1; i < NumNodes(); ++i) { + edgesAll[i - 1].srcNodeIndex = master.nodes[i].loadIndex; + edgesAll[i - 1].destNodeIndex = i; + edgesAll[i - 1].loadOffset = master.nodes[i].loadOffset; + } + for (int32_t i = 0; i < NumEdges(); ++i) { + edgesAll[NumNodes() + i - 1] = master.edges[i]; + } + + // reindex from startIndex, mapping from index in master to new index + // creating nodes at same time + int8_t indexMap[NumNodes()]; + for (int32_t i = 0; i < NumNodes(); ++i) { + indexMap[i] = -1; + nodes[0].flags = -1; + } + indexMap[startIndex] = 0; + nodes[0] = master.nodes[startIndex]; + nodes[0].loadIndex = -1; + nodes[0].loadOffset = -1; + nodes[0].flags = startIndex; // save index in master + ReindexNodesAndEdges(master, indexMap, numTotalEdges, edgesAll, edgeUsed); +} + +bool CyclePattern::FindEdge(int8_t srcIndex, int8_t destIndex, int16_t loadOffset) const { + __MRT_ASSERT(srcIndex >= 0 && srcIndex < NumNodes() && destIndex >= 0 && destIndex < NumNodes(), "unexpected"); + if (destIndex != 0 && + nodes[destIndex].loadIndex == srcIndex && + nodes[destIndex].loadOffset == loadOffset) { + return true; + } + for (int32_t i = 0; i < NumEdges(); ++i) { + if (edges[i].srcNodeIndex == srcIndex && + edges[i].destNodeIndex == destIndex && + edges[i].loadOffset == loadOffset) { + return true; + } + } + return false; +} + +// exclude add pattern on collection and collection node +// name contains Map, Set, List and their Node and Entry +// +// Use sub 
class check is not suitable, for example +// Ljava_2Flang_2FThreadLocal_24ThreadLocalMap_3B, 20, Ljava_2Flang_2FObject_3B +// Ljava_2Futil_2Fconcurrent_2FCopyOnWriteArrayList_3B, 16, Ljava_2Flang_2FObject_3B +// Landroid_2Futil_2FArrayMap_3B, 28, Ljava_2Flang_2FObject_3B +const size_t kSkipRootCount = sizeof(kSkipRootType) / sizeof(const char*); + +static inline bool IsCollectionType(const char *kClsName) { + for (size_t i = 0; i < kSkipRootCount; ++i) { + if (strcasestr(kClsName, kSkipRootType[i]) != nullptr) { + return true; + } + } + return false; +} + +bool CyclePattern::IsSamePattern(int8_t index, CyclePatternInfo &cyclePatternInfo, size_t &origGctibByteSize) const { + // accumulate size + origGctibByteSize += GetCyclePatternSize(cyclePatternInfo); + // check node edge num + if (cyclePatternInfo.nNodes + 1 != NumNodes() || cyclePatternInfo.nEdges != NumEdges()) { + return false; + } + // check type and rc, setup indexMap + // mapping node index in GCTIB cycle pattern to its index in current CyclePattern + int8_t indexMap[NumNodes()]; + for (int32_t i = 0; i < NumNodes(); ++i) { + indexMap[i] = -1; + } + indexMap[0] = index; + if (nodes[index].expectRc != cyclePatternInfo.expectRc) { + return false; + } + CyclePatternNodeInfo *curNodeInfo = GetCyclePatternNodeInfo(cyclePatternInfo); + // might need check same cls issue + for (int32_t i = 1; i < NumNodes(); ++i) { + void *curCls = curNodeInfo->expectType; + int32_t clsIdx = IndexofClass(curCls); + if (clsIdx == -1 || nodes[clsIdx].expectRc != curNodeInfo->expectRc) { + return false; + } + indexMap[i] = clsIdx; + curNodeInfo = reinterpret_cast( + ((reinterpret_cast(curNodeInfo)) + sizeof(CyclePatternNodeInfo))); + } + + // check edges, implicit edges in nodes, revisit GetNodeInfo agian + curNodeInfo = GetCyclePatternNodeInfo(cyclePatternInfo); + for (int32_t i = 1; i < NumNodes(); ++i) { + int8_t gctibLoadIndex = curNodeInfo->loadIndex; + int8_t loadIndex = indexMap[gctibLoadIndex]; + int16_t loadOffset = curNodeInfo->loadOffset; + if (!FindEdge(loadIndex, indexMap[i], loadOffset)) { + return false; + } + curNodeInfo = reinterpret_cast( + ((reinterpret_cast(curNodeInfo)) + sizeof(CyclePatternNodeInfo))); + } + + // check explicit edges + CyclePatternEdgeInfo *curEdgeInfo = GetCyclePatternEdgeInfo(cyclePatternInfo); + for (int32_t i = 0; i < NumEdges(); ++i) { + int8_t gctibSrcIndex = curEdgeInfo->srcNodeIndex; + int8_t gctibDestIndex = curEdgeInfo->destNodeIndex; + int16_t loadOffset = curEdgeInfo->loadOffset; + int8_t srcIndex = indexMap[gctibSrcIndex]; + int8_t destIndex = indexMap[gctibDestIndex]; + if (!FindEdge(srcIndex, destIndex, loadOffset)) { + return false; + } + curEdgeInfo = reinterpret_cast( + ((reinterpret_cast(curEdgeInfo)) + sizeof(CyclePatternEdgeInfo))); + } + return true; +} + +bool CyclePattern::AppendPattern(MClass *cls, int8_t index, size_t origGctibByteSize, + GCTibGCInfo *origGctibInfo, const CyclePatternInfo *lastCyclePattern) { + size_t newGctibByteSize = origGctibByteSize + EmitByteSize(); + GCTibGCInfo *newGctibInfo = reinterpret_cast(malloc(newGctibByteSize)); + if (newGctibInfo == nullptr) { + return false; + } + errno_t tmpResult = memcpy_s(reinterpret_cast(newGctibInfo), origGctibByteSize, + reinterpret_cast(origGctibInfo), origGctibByteSize); + if (UNLIKELY(tmpResult != EOK)) { + LOG(ERROR) << "memcpy_s() in CyclePattern::FindAndAppendPattern() return " << + tmpResult << " rather than 0." 
<< maple::endl; + free(newGctibInfo); + return false; + } + + // update header and next flag + uint32_t origHeaderProto = origGctibInfo->headerProto; + if ((origHeaderProto & kCyclePatternBit) != 0) { + int32_t origMaxRc = static_cast(CYCLE_MAX_RC(origHeaderProto)); + int32_t origMinRc = static_cast(CYCLE_MIN_RC(origHeaderProto)); + if (nodes[index].expectRc > origMaxRc) { + newGctibInfo->headerProto = SetCycleMaxRC(origHeaderProto, static_cast(nodes[index].expectRc)); + } else if (nodes[index].expectRc < origMinRc) { + newGctibInfo->headerProto = SetCycleMinRC(origHeaderProto, static_cast(nodes[index].expectRc)); + } + + __MRT_ASSERT(lastCyclePattern != nullptr && lastCyclePattern->hasNextFlag == 0, "unexpected"); + // set has next flag + uintptr_t lastCycleNextFlagOffset = + (const_cast(reinterpret_cast(&(lastCyclePattern->hasNextFlag)))) - + reinterpret_cast(origGctibInfo); + *reinterpret_cast((reinterpret_cast(newGctibInfo)) + lastCycleNextFlagOffset) = 1; + } else { + newGctibInfo->headerProto = SetCycleMaxRC(origHeaderProto, static_cast(nodes[index].expectRc)) | + SetCycleMinRC(origHeaderProto, static_cast(nodes[index].expectRc)) | + kCyclePatternBit; + } + + // emit new cycle into buffer + if (index == 0) { + Emit((reinterpret_cast(newGctibInfo)) + origGctibByteSize, newGctibByteSize - origGctibByteSize); + Print(GCLog().Stream(kLogtypeCycle)); + } else { + CyclePattern switchedPattern(*this, index); + switchedPattern.Emit((reinterpret_cast(newGctibInfo)) + origGctibByteSize, + newGctibByteSize - origGctibByteSize); + switchedPattern.Print(GCLog().Stream(kLogtypeCycle)); + } + + // replace + cls->SetGctib(reinterpret_cast(newGctibInfo)); + if (ClassCycleManager::HasDynamicLoadPattern(cls)) { + free(origGctibInfo); + } else { + ClassCycleManager::AddDynamicLoadPattern(cls, false); + } + return true; +} + +// Iterate current cycles and check if need append new cycle +// Append new cycle +// +// Check flow +// 1. jclass has cycle patterns +// 2. iterate each cycle in gctib +// 2.1 check num_node and num_edge +// 2.2 check node type and expect rc are same +// 2.3 check if each edge is same +// if found same, log warning +// +// Append flow +// 1. calcuate new gctib size, original size + current cycle size +// 2. copy original content into new space +// 3. update header: rc threshold, cyclePattern bit +// 4. change last cycle's has next bit +// 5. emit new cycle into remain buffers +// 6. replace GCTIB with new GCTIB (if original GCTIB is dynamic linked, free) +bool CyclePattern::FindAndAppendPattern(int8_t index) { + MClass *cls = MObject::Cast(nodes[index].expectType); + GCTibGCInfo *origGctibInfo = reinterpret_cast(cls->GetGctib()); + if (origGctibInfo == nullptr) { + return false; + } + + // reduce the pattern install time + // 1. if the class' pattern duplicated value > 0, just skip + // 2. 
if the class' pattern duplicated value == 0, continue + if (LIKELY(!kMergeAllPatterns) && !ClassCycleManager::DynamicPatternTryMerge(cls)) { + return false; + } + if (cls->IsArrayClass()) { + LOG2FILE(kLogtypeCycle) << "Skip Object array " << cls->GetName() << std::endl; + return false; + } + const char *kClsName = cls->GetName(); + if (IsCollectionType(kClsName)) { + LOG2FILE(kLogtypeCycle) << "Skip collection type " << kClsName << std::endl; + return false; + } + + size_t origGctibByteSize = sizeof(GCTibGCInfo) + (origGctibInfo->nBitmapWords * kDWordBytes); + CyclePatternInfo *lastCyclePattern = nullptr; + int32_t numCycles = 0; + // find if pattern exist + if ((origGctibInfo->headerProto & kCyclePatternBit) != 0) { + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*origGctibInfo); + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo), numCycles++) { + lastCyclePattern = cyclePatternInfo; + if (!IsSamePattern(index, *cyclePatternInfo, origGctibByteSize)) { + continue; + } + // find same cycle pattern + ClassCycleManager::findDuplicate(cls); + LOG2FILE(kLogtypeCycle) << "Find duplicated pattern " << + (cyclePatternInfo->invalidated == kCycleNotValid ? "invalid" : "valid") << std::endl; + Print(GCLog().Stream(kLogtypeCycle)); + return false; + } + } + + if (numCycles >= kCyclepMaxNum) { + GCLog().Stream() << "numCycles exceed limit for " << cls->GetName() << " " << numCycles << std::endl; + Print(GCLog().Stream()); + return false; + } + + if (!AppendPattern(cls, index, origGctibByteSize, origGctibInfo, lastCyclePattern)) { + return false; + } + LOG2FILE(kLogtypeCycle) << "Add pattern for class " << + cls->GetName() << " cycle num " << (numCycles + 1) << std::endl; + return true; +} + +void CyclePattern::Merge() { + for (int32_t i = 0; i < NumNodes(); ++i) { + (void)FindAndAppendPattern(i); + } + return; +} + +// Perform two tasks here +// 1. merge suitable cycle into runtine.(hot and fit size) +// 2. log cycle pattern into big data cache if necessary +void ClassCycleManager::MergeCycles(vector &cycles) { + CyclePattern pattern; + (void)memset_s(reinterpret_cast(&pattern), sizeof(CyclePattern), 0, sizeof(CyclePattern)); + ostringstream oss; + streampos startPos = oss.tellp(); + + lock_guard guard(cycleMutex); + bool needLogBigdata = (patternsCache.length() == 0); + for (CycleGarbage *cycle : cycles) { + if (cycle->ToCyclePattern(pattern) && pattern.NeedMerge()) { + pattern.Merge(); + } + if (needLogBigdata) { + streampos curPos = oss.tellp(); + if ((curPos - startPos) < kMaxBigDataCacheSize) { + cycle->AppendToString(oss); + } + } + (void)memset_s(reinterpret_cast(&pattern), sizeof(CyclePattern), 0, sizeof(CyclePattern)); + } + if (needLogBigdata) { + patternsCache.append(oss.str()); + LOG2FILE(kLogtypeCycle) << "pattern cache " << patternsCache << std::endl; + } +} + +bool UpdateClassGCTibInfo(GCTibGCInfo *curGCTibInfo, uint32_t minRC, uint32_t maxRC, + GCTibGCInfo *newGCTibInfo, size_t newGCTibSize) { + size_t origGCTibMapSize = sizeof(GCTibGCInfo) + (curGCTibInfo->nBitmapWords * kDWordBytes); + errno_t tmpResult = memcpy_s(reinterpret_cast(newGCTibInfo), origGCTibMapSize, + reinterpret_cast(curGCTibInfo), origGCTibMapSize); + if (UNLIKELY(tmpResult != EOK)) { + LOG(ERROR) << "memcpy_s() in RemoveDeadPatterns return " << tmpResult << " rather than 0." 
<< maple::endl; + free(newGCTibInfo); + return false; + } + + // update new min/max match rc + uint32_t curHeaderProto = curGCTibInfo->headerProto; + __MRT_ASSERT(minRC >= 1 && maxRC <= kMaxRcInCyclePattern && minRC <= maxRC, "unepxected rc"); + newGCTibInfo->headerProto = SetCycleMaxRC(curHeaderProto, maxRC) | SetCycleMinRC(curHeaderProto, minRC) | + kCyclePatternBit; + // copy valid pattern into new space + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*curGCTibInfo); + CyclePatternInfo *lastPatternInfo = nullptr; + char *curPos = (reinterpret_cast(newGCTibInfo)) + origGCTibMapSize; + bool copySuccess = true; + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo)) { + if (cyclePatternInfo->invalidated == kCycleNotValid) { + continue; + } + size_t patternSize = GetCyclePatternSize(*cyclePatternInfo); + errno_t tmpResult1 = memcpy_s(curPos, patternSize, reinterpret_cast(cyclePatternInfo), patternSize); + if (UNLIKELY(tmpResult1 != EOK)) { + LOG(ERROR) << "memcpy_s() in RemoveDeadPatterns return " << tmpResult << " rather than 0." << maple::endl; + copySuccess = false; + break; + } + lastPatternInfo = reinterpret_cast(curPos); + curPos += patternSize; + } + if (!copySuccess) { + free(newGCTibInfo); + return false; + } + __MRT_ASSERT((lastPatternInfo != nullptr) && + (static_cast(curPos - reinterpret_cast(newGCTibInfo))) == newGCTibSize, "not equal"); + lastPatternInfo->hasNextFlag = 0; + return true; +} + +bool CheckAndUpdateClassPatterns(MClass *cls, GCTibGCInfo *curGCTibInfo, + uint32_t &validPatternCount, uint32_t &deadPatternCount) { + uint32_t minRC = kMaxRcInCyclePattern; + uint32_t maxRC = 0; + size_t newGCTibSize = sizeof(GCTibGCInfo) + (curGCTibInfo->nBitmapWords * kDWordBytes); + size_t origGCTibMapSize = newGCTibSize; + + if ((curGCTibInfo->headerProto & kCyclePatternBit) != 0) { + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*curGCTibInfo); + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo)) { + if (cyclePatternInfo->invalidated == kCycleNotValid) { + ++deadPatternCount; + continue; + } + // calculate remaining size + ++validPatternCount; + newGCTibSize += GetCyclePatternSize(*cyclePatternInfo); + + // calculate min/max rc + uint32_t expectRc = static_cast(cyclePatternInfo->expectRc); + minRC = expectRc < minRC ? expectRc : minRC; + maxRC = expectRc > maxRC ? expectRc : maxRC; + } + } + + // remove dead pattern and construct new gctib + if (validPatternCount == 0) { + LOG2FILE(kLogtypeCycle) << cls->GetName() << " Remove Dead Class " << std::endl; + GCTibGCInfo *newGCTibInfo = reinterpret_cast(malloc(newGCTibSize)); + if (newGCTibInfo == nullptr) { + // leave unchanged + return false; + } + errno_t tmpResult = memcpy_s(reinterpret_cast(newGCTibInfo), + origGCTibMapSize, reinterpret_cast(curGCTibInfo), origGCTibMapSize); + if (UNLIKELY(tmpResult != EOK)) { + LOG(ERROR) << "memcpy_s() in RemoveDeadPatterns return " << tmpResult << " rather than 0." 
<< maple::endl; + free(newGCTibInfo); + return false; + } + newGCTibInfo->headerProto = newGCTibInfo->headerProto & (~kCyclePatternBit); + // delete original gctib space + (reinterpret_cast(cls))->gctib.SetGctibRef(newGCTibInfo); + free(curGCTibInfo); + } else if (deadPatternCount > 0) { + // remove dead patterns in valid class + LOG2FILE(kLogtypeCycle) << cls->GetName() << " Remove Dead Pattern " << + deadPatternCount << " Remain Valid pattern " << validPatternCount << std::endl; + // copy bitmap and valid pattern to new space + GCTibGCInfo *newGCTibInfo = reinterpret_cast(malloc(newGCTibSize)); + if (newGCTibInfo == nullptr) { + // leave unchanged + return false; + } + if (!UpdateClassGCTibInfo(curGCTibInfo, minRC, maxRC, newGCTibInfo, newGCTibSize)) { + return false; + } + __MRT_ASSERT(ClassCycleManager::CheckValidPattern(cls, reinterpret_cast(newGCTibInfo)), "verify fail"); + // delete original gctib space + (reinterpret_cast(cls))->gctib.SetGctibRef(newGCTibInfo); + free(curGCTibInfo); + } + return true; +} + +// Try remove dead patterns in dynamic loaded patterns +// 1. iterate all cycle patterns, remove dead patterns +// 2. remove class from dynamicLoadedCycleClasses, if no pattern exist +// +// Remove dead pattern before study +void ClassCycleManager::RemoveDeadPatterns() { + lock_guard guard(cycleMutex); + for (auto it = dynamicLoadedCycleClasses.begin(); it != dynamicLoadedCycleClasses.end();) { + MClass *cls = it->first; + GCTibGCInfo *curGCTibInfo = reinterpret_cast(cls->GetGctib()); + if (curGCTibInfo == nullptr) { + LOG(FATAL) << "The class has no gctibInfo:" << cls->GetName() << maple::endl; + continue; + } + uint32_t validPatternCount = 0; + uint32_t deadPatternCount = 0; + + if (!CheckAndUpdateClassPatterns(cls, curGCTibInfo, validPatternCount, deadPatternCount)) { + continue; + } + + if (validPatternCount == 0) { + LOG2FILE(kLogtypeCycle) << cls->GetName() << " Remove No Pattern Class " << std::endl; + it = dynamicLoadedCycleClasses.erase(it); + } else { + ++it; + } + + if (deadPatternCount > 0) { + ClassCycleManager::SetCpUpdatedFlag(); + } + } +} + +static string GetIndexOfSoName(vector &soSet, const MClass *cls) { + string rawName = GetSoNameFromCls(cls); + size_t pos = 0; + auto iElement = find(soSet.begin(), soSet.end(), rawName); + if (iElement != soSet.end()) { + pos = static_cast(distance(soSet.begin(), iElement)); + } else { + soSet.push_back(rawName); + pos = soSet.size() - 1; + } + return to_string(pos); +} + +static inline void DumpSOSet(std::ostream &os, bool dupThreshold, const vector &soSet) { + if (dupThreshold) { + return; + } + os << "....." << std::endl; + uint32_t index = 0; + for (auto soName : soSet) { + os << index++ << ":" << soName << std::endl; + } +} + +// class: Lsun_2Fnio_2Fcs_2FStreamDecoder_3B:libmaplecore-all.so +// Header: ROOT, 1 +// Cycle: 1, 1, 1 +// Node: 0, 16, Ljava_2Fio_2FInputStreamReader_3B:libmaplecore-all.so, 1 +// Edge: 1, 0, 32 +static void DumpClassNameSo(std::ostream &os, const MClass *cls, vector &soSet, bool dupThreshold) { + string soName = dupThreshold ? GetSoNameFromCls(cls) : GetIndexOfSoName(soSet, cls); + os << "class: " << namemangler::EncodeName(std::string(cls->GetName())) << + ":" << soName << std::endl; +} + +static void DumpCycleHeader(std::ostream &os, const CyclePatternInfo &cyclePatternInfo, bool dumpProfile) { + os << ((cyclePatternInfo.invalidated == kCyclePermernant) ? 
"C_P: " : "C: ") << + static_cast(cyclePatternInfo.nNodes) << ", " << + static_cast(cyclePatternInfo.nEdges) << ", " << + static_cast(cyclePatternInfo.expectRc) << std::endl; + if (dumpProfile) { + os << "check_count " << static_cast(cyclePatternInfo.matchProfiling) << " hit count " << + static_cast(cyclePatternInfo.matchProfiling >> kCyclepPatternProfileMatchCountShift) << + " prof " << static_cast(cyclePatternInfo.matchCount) << + " invalid " << static_cast(cyclePatternInfo.invalidated) << std::endl; + } +} + +static void DumpCycleNodeInfo(std::ostream &os, const CyclePatternNodeInfo &nodeInfo, vector &soSet, + bool dupThreshold) { + MClass *cls = MObject::Cast(nodeInfo.expectType); + string soName = dupThreshold ? GetSoNameFromCls(cls) : GetIndexOfSoName(soSet, cls); + os << ((static_cast(nodeInfo.flags) & kCycleNodeSubClass) ? "N_C_D: " : "N_D: ") << + static_cast(nodeInfo.loadIndex) << ", " << + static_cast(nodeInfo.loadOffset) << ", " << + namemangler::EncodeName(std::string(cls->GetName())) << + ":" << soName << ", " << + static_cast(nodeInfo.expectRc) << std::endl; +} + +static void DumpCycleEdgeInfo(std::ostream &os, const CyclePatternEdgeInfo &edgeInfo) { + os << ((static_cast(edgeInfo.flags) & kCycleEdgeSkipMatch) ? "E_S_D: " : "E_D: ") << + static_cast(edgeInfo.srcNodeIndex) << ", " << + static_cast(edgeInfo.destNodeIndex) << ", " << + static_cast(edgeInfo.loadOffset) << std::endl; +} + +static void DumpClassCyclePatterns(std::ostream &os, bool dumpProfile, const MClass *cls, vector &soSet) { + GCTibGCInfo *gctibInfo = reinterpret_cast(cls->GetGctib()); + if (gctibInfo == nullptr) { + LOG(FATAL) << "The class has no gctibInfo:" << cls->GetName() << maple::endl; + return; + } + if (((gctibInfo->headerProto & kCyclePatternBit) == 0) && !dumpProfile) { + LOG2FILE(kLogtypeCycle) << "skip dump class: " << cls->GetName() << std::endl; + return; + } + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*gctibInfo); + // dump class + DumpClassNameSo(os, cls, soSet, dumpProfile); + uint32_t pattern_index = 0; + for (; cyclePatternInfo != nullptr; + cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo), pattern_index++) { + if (cyclePatternInfo->invalidated == kCycleNotValid && !dumpProfile) { + LOG2FILE(kLogtypeCycle) << "skip dump pattern: " << cls->GetName() << + "index " << pattern_index << std::endl; + continue; + } + // dump header + DumpCycleHeader(os, *cyclePatternInfo, dumpProfile); + // dump cycles: cycle header, Nodes, Edges + CyclePatternNodeInfo *curNodeInfo = GetCyclePatternNodeInfo(*cyclePatternInfo); + for (int32_t i = 0; i < cyclePatternInfo->nNodes; ++i) { + DumpCycleNodeInfo(os, *curNodeInfo, soSet, dumpProfile); + curNodeInfo = reinterpret_cast( + ((reinterpret_cast(curNodeInfo)) + sizeof(CyclePatternNodeInfo))); + } + CyclePatternEdgeInfo *curEdgeInfo = GetCyclePatternEdgeInfo(*cyclePatternInfo); + for (int32_t i = 0; i < cyclePatternInfo->nEdges; ++i) { + DumpCycleEdgeInfo(os, *curEdgeInfo); + curEdgeInfo = reinterpret_cast( + ((reinterpret_cast(curEdgeInfo)) + sizeof(CyclePatternEdgeInfo))); + } + } + os << std::endl; +} + +void ClassCycleManager::DumpDynamicCyclePatterns(std::ostream &os, size_t limit, bool dupThreshold) { + // prefer to skip pattern not matched at runtime + streampos startSize = os.tellp(); + lock_guard guard(cycleMutex); + vector soSet; + for (auto it : dynamicLoadedCycleClasses) { + PatternAgeFlags &flag = it.second; + if (flag.preDefined) { + MClass *cls = it.first; + streampos curSize = os.tellp(); + if (curSize - startSize >= 
static_cast(limit)) { + break; + } + LOG2FILE(kLogtypeCycle) << "Dump Predefine " << cls->GetName() << std::endl; + DumpClassCyclePatterns(os, dupThreshold, cls, soSet); + } + } + for (auto it : dynamicLoadedCycleClasses) { + PatternAgeFlags &flag = it.second; + if (!flag.preDefined) { + streampos curSize = os.tellp(); + if (curSize - startSize >= static_cast(limit)) { + break; + } + MClass *cls = it.first; + LOG2FILE(kLogtypeCycle) << "Dump learned " << cls->GetName() << std::endl; + DumpClassCyclePatterns(os, dupThreshold, cls, soSet); + } + } + DumpSOSet(os, dupThreshold, soSet); + cpUpdated = false; +} + +static bool CheckGctibBitAtOffset(const MClass &cls, uint32_t offset) { + if ((offset % sizeof(reffield_t)) != 0) { + return false; + } + + if (cls.IsObjectArrayClass()) { + return true; + } + + struct GCTibGCInfo *gcinfo = reinterpret_cast(cls.GetGctib()); + __MRT_ASSERT(gcinfo != nullptr, "emtpy gctib"); + + uint32_t refWordIndex = offset / sizeof(reffield_t); + uint32_t bitmapWordIndex = refWordIndex / kRefWordPerMapWord; + if (bitmapWordIndex >= gcinfo->nBitmapWords) { + return false; + } + + // return true if bit mask is normal ref + uint32_t inBitmapWordOffset = (refWordIndex % kRefWordPerMapWord) * kBitsPerRefWord; + return (((gcinfo->bitmapWords[bitmapWordIndex]) >> inBitmapWordOffset) & kRefBitsMask) == kNormalRefBits; +} + +bool CheckPatternNodesAndEdges(CyclePattern &newPattern) { + // skip root node + for (int32_t i = 1; i < newPattern.NumNodes(); ++i) { + // check load offset + const CyclePatternNodeInfo *curNode = newPattern.GetNodeInfo(i); + if (curNode == nullptr) { + return false; + } + if (curNode->loadIndex >= newPattern.NumNodes()) { + LOG2FILE(kLogtypeCycle) << "cycle_check: load index excceed num nodes, node " << i << std::endl; + return false; + } + + if (curNode->loadIndex >= i) { + LOG2FILE(kLogtypeCycle) << "cycle_check: load index excceed current stack index, node " << i << std::endl; + return false; + } + + MClass *jcls = MObject::Cast(newPattern.GetNodeInfo(curNode->loadIndex)->expectType); + // check if reference + if (!CheckGctibBitAtOffset(*jcls, static_cast(curNode->loadOffset))) { + LOG2FILE(kLogtypeCycle) << "cycle_check: not valid reference offset, node " << i << std::endl; + return false; + } + } + + // check edge reference + for (int32_t i = 0; i < newPattern.NumEdges(); ++i) { + // check reference + const CyclePatternEdgeInfo *curEdge = newPattern.GetEdgeInfo(i); + if (curEdge == nullptr) { + return false; + } + // check src index and dest index + if (curEdge->srcNodeIndex >= newPattern.NumNodes() || curEdge->destNodeIndex >= newPattern.NumNodes()) { + LOG2FILE(kLogtypeCycle) << "cycle_check: not valid node index, edge " << i << std::endl; + return false; + } + + MClass *jcls = MObject::Cast(newPattern.GetNodeInfo(curEdge->srcNodeIndex)->expectType); + // check if reference + if (!CheckGctibBitAtOffset(*jcls, static_cast(curEdge->loadOffset))) { + LOG2FILE(kLogtypeCycle) << "cycle_check: not valid reference offset, edge " << i << std::endl; + return false; + } + } + return true; +} + +bool ClassCycleManager::CheckValidPattern(MClass *cls, char *buffer) { + // check if the jclass and it's buffer is valid + CyclePattern newPattern; + struct GCTibGCInfo *gctibInfo = reinterpret_cast(buffer); + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*gctibInfo); + uint32_t patternCount = 0; + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo)) { + patternCount++; + if (patternCount > kCyclepMaxNum) { + 
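+      // pattern chain longer than kCyclepMaxNum in one GCTib: treat the buffer as malformed and reject it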
LOG2FILE(kLogtypeCycle) << "cycle_check: too many patterns" << std::endl; + return false; + } + if (!newPattern.constructFromBinary(*cyclePatternInfo, cls)) { + LOG2FILE(kLogtypeCycle) << "cycle_check: construct fail" << std::endl; + return false; + } + + if (!newPattern.IsValid(false)) { + LOG2FILE(kLogtypeCycle) << "cycle_check: not valid inner cycle rc" << std::endl; + return false; + } + + if (!CheckPatternNodesAndEdges(newPattern)) { + return false; + } + } + LOG2FILE(kLogtypeCycle) << "cycle_check: pass for " << cls->GetName() << std::endl; + return true; +} + +bool ClassCycleManager::GetRCThreshold(uint32_t &rcMax, uint32_t &rcMin, char *cyclepatternBinary) { + CyclePatternInfo *cyclePatternInfo = reinterpret_cast(cyclepatternBinary); + rcMax = 0; + rcMin = kMaxRcInCyclePattern; + uint32_t patternCount = 0; + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo)) { + ++patternCount; + if (patternCount > kCyclepMaxNum) { + LOG2FILE(kLogtypeCycle) << "cycle_check: too many patterns" << std::endl; + return false; + } + + uint32_t expectRc = static_cast(cyclePatternInfo->expectRc); + if (expectRc > rcMax) { + rcMax = expectRc; + } + if (expectRc < rcMin) { + rcMin = expectRc; + } + } + return (rcMax != 0) && (rcMin != 0) && (rcMax <= kMaxRcInCyclePattern) && (rcMax >= rcMin); +} + +void CyclePatternGenerator::CollectCycleGarbage(vector &sccNodes) { + std::unique_ptr cycle(new CycleGarbage()); + if (!cycle->Construct(sccNodes)) { + return; + } + for (CycleGarbage *savedCycle : resultCycles) { + if (*savedCycle == *cycle) { + savedCycle->IncCount(); + // cout << "Find duplicated" << std::endl; + savedCycle->Print(cout); + return; + } + } + resultCycles.push_back(cycle.release()); +} + +// GarbageNode equal +// 1. type and rc equal +// 2. child index and offset equal +// Because all nodes are sorted with type and rc, same node likeley has same index +bool GarbageNode::operator==(const GarbageNode &other) const { + if (type != other.type || internalRc != other.internalRc) { + return false; + } + if (references.size() != other.references.size()) { + return false; + } + for (size_t i = 0; i < references.size(); ++i) { + GarbageNode *child = references.at(i).first; + GarbageNode *otherChild = other.references.at(i).first; + if (child->GetIndex() != otherChild->GetIndex()) { + return false; + } + } + return true; +} + +// CycleGarbage equal +// 1. hash equal +// 2. num node is equal +// 3. total edges equal +// 4. 
GarbageNode equal +// +// Same Cycle might have different represenation, this is fixed in construct +// class A-> class B1 16 +// class A-> class B2 20 +// class B1-> class A 8 +// class B2-> class A 8 +// +// Cycle1: +// A -> 16 B1 20 B2 +// B1 +// B2 +// +// A -> 16 B1 20 B2 +// B2 +// B1 +bool CycleGarbage::operator==(const CycleGarbage &other) const { + if (Hash() != other.Hash() || + nodesVec.size() != other.nodesVec.size() || + totalEdges != other.totalEdges) { + return false; + } + for (size_t i = 0; i < nodesVec.size(); ++i) { + GarbageNode *node = nodesVec.at(i); + GarbageNode *otherNode = other.nodesVec.at(i); + if (!(*node == *otherNode)) { + return false; + } + } + return true; +} + +void CycleGarbage::ConstructCycle(std::vector &sccNodeAddrs, address_t rootAddr) { + GarbageNode *firstNode = AddNode(rootAddr, MObject::Cast(rootAddr)->GetClass()); + deque workingDeque; + workingDeque.push_back(firstNode); + while (!workingDeque.empty()) { + GarbageNode *curNode = workingDeque.front(); + workingDeque.pop_front(); + reffield_t curAddr = curNode->GetAddr(); + auto refFunc = [this, curNode, curAddr, &sccNodeAddrs, &workingDeque](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + // skip self cycle and off heap object + if ((kind != kNormalRefBits) || (field == curAddr) || !IS_HEAP_ADDR(ref)) { + return; + } + if (std::find(sccNodeAddrs.begin(), sccNodeAddrs.end(), ref) == sccNodeAddrs.end()) { + return; + } + auto it = nodesMap.find(field); + GarbageNode *child = nullptr; + if (it == nodesMap.end()) { + child = AddNode(ref, MObject::Cast(ref)->GetClass()); + workingDeque.push_back(child); + } else { + child = it->second; + if (child == curNode) { + return; + } + } + int32_t offset = static_cast(reinterpret_cast(&field) - curAddr); + curNode->addChild(*child, offset); + ++totalEdges; + }; + ForEachRefField(curAddr, refFunc); + } +} + +// Construct Cycle from Node, to Construct a unique cycle +// 1. sort objects with type +// 2. search a unique type node as root +// 3. if all duplicated, Cycle is not valid and only compare with type +// 4. construct cycle +// 5. check validity +// 6. compuate hash +bool CycleGarbage::Construct(std::vector &sccNodeAddrs) { + // 1. sort objects with type + size_t totalObjs = sccNodeAddrs.size(); + if (totalObjs > 1) { + std::sort(sccNodeAddrs.begin(), sccNodeAddrs.end(), SortByObjMetaRC); + } + + // 2. find a unique root in sccNodeAddrs + address_t rootAddr = 0; + for (size_t i = 0; i < totalObjs; ++i) { + MClass *cls = reinterpret_cast(reinterpret_cast(sccNodeAddrs.at(i)))->GetClass(); + bool found = false; + for (size_t j = i + 1; j < totalObjs; ++j) { + MClass *clsOther = reinterpret_cast(reinterpret_cast(sccNodeAddrs.at(j)))->GetClass(); + if (cls == clsOther) { + found = true; + break; + } + } + if (!found) { + rootAddr = sccNodeAddrs.at(i); + break; + } + } + for (size_t i = 0; i < totalObjs - 1; ++i) { + MClass *cls = reinterpret_cast(reinterpret_cast(sccNodeAddrs.at(i)))->GetClass(); + MClass *clsNext = reinterpret_cast(reinterpret_cast(sccNodeAddrs.at(i + 1)))->GetClass(); + if (cls == clsNext) { + hasDuplicateType = true; + break; + } + } + + if (rootAddr == 0) { + // 3. 
if all duplicated, Cycle is not valid and only compare with type + valid = false; + for (address_t addr : sccNodeAddrs) { + (void)AddNode(addr, MObject::Cast(addr)->GetClass()); + } + } else { + ConstructCycle(sccNodeAddrs, rootAddr); + } + + if ((nodesVec.size() != totalObjs) || (CheckAndUpdate() == false)) { + return false; + } + ComputeHash(); + return true; +} + +// Check if Cycle garbage is valid to covert to cycle pattern +// return true if can convert to pattern +// return false if can not convert to pattern +// 1. nodes count +// 2. edges count +// 3. each node's max rc no exceed limit +// 4. each edge's offset is valid +// 5. all nodes has same type and node count exceed 1, this likely data structurs +// like doubly-linked list, tree structure etc +bool CycleGarbage::ToCyclePattern(CyclePattern &cyclePattern) { + size_t totalObjs = nodesVec.size(); + if (totalObjs > kCyclepPatternMaxNodeNum) { + return false; + } + if (totalEdges - (nodesVec.size() - 1) > kCyclepPatternMaxEdgeNum) { + return false; + } + if (hasDuplicateType) { + return false; + } + GarbageNode *node = nodesVec.at(0); + cyclePattern.count = count; + // add root node + if (!cyclePattern.AddNode(node->GetAddr(), node->GetType(), -1, -1, node->GetInternalRc())) { + return false; + } + node->SetVisited(); + for (size_t i = 0; i < totalObjs; ++i) { + GarbageNode *curNode = nodesVec.at(i); + // iterate edges and add node or edge + for (auto it: curNode->GetReferences()) { + GarbageNode *child = it.first; + int32_t offset = it.second; + if (offset >= kCyclepMaxOffset) { + return false; + } + if (child->IsVisited()) { + if (!cyclePattern.AddEdge(curNode->GetIndex(), child->GetIndex(), offset)) { + return false; + } + } else { + if (!cyclePattern.AddNode( + child->GetAddr(), child->GetType(), curNode->GetIndex(), offset, child->GetInternalRc())) { + return false; + } + child->SetVisited(); + } + } + } + + return cyclePattern.IsValid(false); +} + +// Update limit, single update can load exceed kMaxBigdataUploadSize +// Write content into cache with kMaxBigDataCacheSize +// When upload split string and upload as big as possible. 
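+// (Enforced at the end of AppendToString: once the appended text reaches
+// kMaxBigdataUploadSize - kMaxBigDataUploadStringEndSize bytes, the put position is moved back
+// with seekp so the terminating ";" still fits inside the upload limit.)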
+// +// Delimer is ; +// Example: +// class_a class_b 0 1 24 1 0 32; +// If has duplicated type: +// class_a class_a class_a 0 1 16 1 2 16 2 0 16 +// 0 class_a c0 c0 c0 0 1 16 1 2 16 2 0 16 +// +// If single pattern exceed kMaxBigdataUploadSize, +// then split it into multiple string, or use compact result +void CycleGarbage::AppendToString(ostringstream &oss) { + // single pattern string must smaller than kMaxBigdataUploadSize + int64_t startPos = oss.tellp(); + if (hasDuplicateType) { + static unordered_map classToIndexMap; + uint32_t i = 0; + for (auto node : nodesVec) { + MClass *cls = node->GetType(); + if (classToIndexMap.find(cls) != classToIndexMap.end()) { + oss << "n" << classToIndexMap[cls] << " "; + } else { + oss << namemangler::EncodeName(std::string(cls->GetName())) << + ":" << GetSoNameFromCls(cls) << " "; + classToIndexMap[cls] = i; + } + ++i; + } + classToIndexMap.clear(); + } else { + for (auto node : nodesVec) { + oss << namemangler::EncodeName(std::string(node->GetType()->GetName())) << + ":" << GetSoNameFromCls(node->GetType()) << " "; + } + } + int i = 0; + for (auto node : nodesVec) { + for (auto it : node->GetReferences()) { + GarbageNode *child = it.first; + oss << i << " " << child->GetIndex() << " " << it.second << " "; + } + ++i; + } + if (adjusted) { + oss << "adjust"; // indicate this cycle has external reference + } + int64_t curPos = oss.tellp(); + // output as more as possible for big cycles + if ((curPos - startPos) >= (kMaxBigdataUploadSize - kMaxBigDataUploadStringEndSize)) { + int64_t newPos = startPos + kMaxBigdataUploadSize - kMaxBigDataUploadStringEndSize; + (void)oss.seekp(newPos); + } + oss << ";" << std::endl; +} + +void CycleGarbage::Print(std::ostream &os) { + os << "count " << count << std::endl; + for (auto node : nodesVec) { + os << node->GetType()->GetName() << std::endl; + } + int i = 0; + for (auto node : nodesVec) { + for (auto it : node->GetReferences()) { + GarbageNode *child = it.first; + os << i << " " << child->GetIndex() << " " << it.second << std::endl; + } + ++i; + } +} + +// Return false whe +// 1. node doesn't reference any other node in cycle +// 2. node doesn't referenced by any other node in cycle +// 3. node's rc is less than internal rc +// If has external rc (internal rc < actual rc), set adjusted true +bool CycleGarbage::CheckAndUpdate() { + if (valid == false) { + return true; + } + if (totalEdges < nodesVec.size()) { + return false; + } + for (auto node : nodesVec) { + if (node->GetInternalRc() == 0) { + return false; + } + if (node->GetReferences().empty()) { + return false; + } + if (node->GetInternalRc() > RefCount(node->GetAddr())) { + return false; + } else if (node->GetInternalRc() < RefCount(node->GetAddr())) { + adjusted = true; + } + } + return true; +} + +// Compute hash for this Cycle, for fast compare in Cycle pattern aggragate +// foreach jclass, internal rc +const uint64_t kHashMagicMultiplier = 31; +void CycleGarbage::ComputeHash() { + uint64_t hash = 0; + for (auto node : nodesVec) { + hash = (hash * kHashMagicMultiplier) + node->GetType()->AsUintptr(); + } + hashValue = hash; +} +} // namespace diff --git a/src/mrt/compiler-rt/src/collector/cycle_collector.cpp b/src/mrt/compiler-rt/src/collector/cycle_collector.cpp new file mode 100644 index 0000000000..1485ee27af --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/cycle_collector.cpp @@ -0,0 +1,636 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/cycle_collector.h" + +#include +#include +#include + +#include "file_system.h" +#include "mm_config.h" +#include "sizes.h" +#include "chosen.h" +#include "collector/rc_reference_processor.h" +#include "collector/collector_naiverc.h" +#include "interp_support.h" + +namespace maplert { +namespace { +const string kCyclePatternValidityStrs[] = { "Valid", "InValid", "Permanent" }; +} +const bool kMergeAllPatterns = MRT_ENVCONF(PATTERN_FROM_BACKUP_TRACING, PATTERN_FROM_BACKUP_TRACING_DEFAULT); + +string MRT_CyclePatternValidityStr(int validStatus) { + if (validStatus < kCycleValid || validStatus > kCyclePermernant) { + return "error"; + } + return kCyclePatternValidityStrs[validStatus]; +} + +static inline bool CycleDestArrayCheck(uint32_t offset, address_t refObj) { + if (IsArray(refObj)) { + uint32_t arrayLen = ArrayLength(refObj); + if ((offset < kJavaArrayContentOffset) || + (offset >= (arrayLen * sizeof(reffield_t) + kJavaArrayContentOffset))) { + return false; + } + } + return true; +} + +static inline address_t CycleNodeLoadObject(const address_t stack[], const CyclePatternNodeInfo &nodeInfo) { + address_t srcObj = stack[nodeInfo.loadIndex]; + // check for array offset + if (UNLIKELY(!CycleDestArrayCheck(nodeInfo.loadOffset, srcObj))) { + return static_cast(0); + } + + // so far, it looks good + return LoadRefField(srcObj, nodeInfo.loadOffset); +} + +static inline address_t CycleEdgeLoadObject(const address_t stack[], const CyclePatternEdgeInfo &edgeInfo) { + address_t srcObj = stack[edgeInfo.srcNodeIndex]; + // check for array offset + if (UNLIKELY(!CycleDestArrayCheck(edgeInfo.loadOffset, srcObj))) { + return static_cast(0); + } + + return LoadRefField(srcObj, edgeInfo.loadOffset); +} + +// Cycle pattern profiling and abandant rules +// 1. match count profiling +// CyclePatternInfo.matchProfiling record if cycle match count and success count +// in TryFreeCycleAtMutator. low 32 bit for check count, high 32 bit for match sucess count. +// +// 2. CyclePatternInfo.matchCount +// If check count reach kCyclepPatternProfileCheckThreshold, check if pattern is matched during +// this period. If match success count exceed kCyclepPatternProfileCleanThreshold, matchCount++. +// Otherwise matchCount--; +// +// 3. If matchCount exceed kCyclepPatternProfileCancelThreshold, no change. +// +// 4. If matchCount is less than -kCyclepPatternProfileCancelThreshold, disable this cycle pattern. +// +// +// update match count. 
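+// Illustration of the matchProfiling packing (with the 32-bit split described above):
+//   matchProfiling == 0x0000000500000014 means 20 checks (low word) and 5 successful matches (high word);
+//   each check adds 1, and a successful match additionally adds (1ULL << kCyclepPatternProfileMatchCountShift).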
+// success is true iff match success count is less than threshold +// +// return true, if keep cycle pattern +// return false, if not keep cycle pattern +static inline bool UpdateCycleMatchCount(CyclePatternInfo &pattern, bool success) { + if (success) { + if (pattern.matchCount < kCyclepPatternProfileCancelThreshold) { + pattern.matchCount = kCyclepPatternProfileCancelThreshold; + } + return true; + } else { + // delta is -1 + if (pattern.matchCount <= -kCyclepPatternProfileCancelThreshold) { + return false; + } + --(pattern.matchCount); + return true; + } +} + +static void UpdateClassPatternAtCancel(const MClass *cls) { + GCTibGCInfo *gctibInfo = reinterpret_cast(cls->GetGctib()); + if (gctibInfo == nullptr) { + LOG(FATAL) << "The class has no gctibInfo:" << cls->GetName() << maple::endl; + return; + } + if (UNLIKELY(gctibInfo->headerProto & kCyclePatternBit) == 0) { + LOG2FILE(kLogtypeCycle) << "already canceled by other " << cls->GetName() << std::endl; + return; + } + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*gctibInfo); + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo)) { + if (cyclePatternInfo->invalidated != kCycleNotValid) { + return; + } + } + // gctib is updated only in + // STW: cycle pattern study + // Startup: dynamic load + // runtime: clear cycle pattern bit here + // need update min/max rc, this could be updated by multiple thread + gctibInfo->headerProto &= ~kCyclePatternBit; + LOG2FILE(kLogtypeCycle) << "prof_cancel_class: " << cls->GetName() << + std::hex << gctibInfo->headerProto << std::dec << std::endl; +} + +static void AddProfilingMatch(CyclePatternInfo &pattern, const MClass *cls, uint32_t patternIndex, bool matchSuccess) { + uint64_t matchProfile = pattern.matchProfiling; + uint64_t count = 0; + if (matchSuccess) { + count = matchProfile + 1 + (1ULL << kCyclepPatternProfileMatchCountShift); + } else { + count = matchProfile + 1; + } + if (static_cast(count) >= kCyclepPatternProfileCheckThreshold) { + pattern.matchProfiling = 0; + if ((count >> kCyclepPatternProfileMatchCountShift) < kCyclepPatternProfileCleanThreshold) { + if (!UpdateCycleMatchCount(pattern, false)) { + if (pattern.invalidated != kCyclePermernant) { + LOG2FILE(kLogtypeCycle) << "prof_cancel: " << + namemangler::EncodeName(std::string(cls->GetName())) << + ":" << GetSoNameFromCls(cls) << + " index " << (patternIndex) << + " " << MRT_CyclePatternValidityStr(static_cast(pattern.invalidated)) << + std::endl; + pattern.invalidated = kCycleNotValid; + UpdateClassPatternAtCancel(cls); + } + } + LOG2FILE(kLogtypeCycle) << "prof_abandon: " << + namemangler::EncodeName(std::string(cls->GetName())) << + ":" << GetSoNameFromCls(cls) << + " " << count << " " << + static_cast(count >> kCyclepPatternProfileMatchCountShift) << + " index " << (patternIndex) << + " prof " << static_cast(pattern.matchCount) << + " " << MRT_CyclePatternValidityStr(static_cast(pattern.invalidated)) << + std::endl; + } else { + (void)UpdateCycleMatchCount(pattern, true); + LOG2FILE(kLogtypeCycle) << "prof_ok: " << + namemangler::EncodeName(std::string(cls->GetName())) << + ":" << GetSoNameFromCls(cls) << + " " << static_cast(count) << " " << + static_cast(count >> kCyclepPatternProfileMatchCountShift) << + " index " << (patternIndex) << + " prof " << static_cast(pattern.matchCount) << + " " << MRT_CyclePatternValidityStr(static_cast(pattern.invalidated)) << + std::endl; + } + } else { + pattern.matchProfiling = count; + } +} + +bool 
CycleCollector::CheckAndReleaseCycle(address_t obj, uint32_t rootDelta, bool isRefProcess, + CyclePatternInfo &cyclePatternInfo, CyclePatternInfo *prevPattern) { + address_t stack[kCyclepPatternMaxNodeNum] = {}; + bool weakCollectedSet[kCyclepPatternMaxNodeNum] = {}; + uint32_t expectRC[kCyclepPatternMaxNodeNum] = {}; + // match first node, only need match rc + // low cost to skip not suitable pattern, avoid atomic + uint32_t node0ExpectRc = static_cast(static_cast(cyclePatternInfo.expectRc)); + if (((RefCount(obj) - rootDelta) != node0ExpectRc) || IsRCSkiped(obj)) { + prevPattern = nullptr; + return false; + } + uint32_t oldHeader = AtomicUpdateColor(obj, kRCCycleColorGray); + uint32_t rc = GetRCFromRCHeader(oldHeader); + if (((rc - rootDelta) != node0ExpectRc) || SkipRC(oldHeader)) { + // first node rc not match, skip prfoling, as this cycle is not checked intensively + prevPattern = nullptr; + return false; + } else if (isRefProcess) { + __MRT_ASSERT(IsRCOverflow(oldHeader) || (GetTotalWeakRCFromRCHeader(oldHeader) != 0), "root doesn't have weak rc"); + } + + // other thread might incref throgh weak proxy + bool hasFinal = IsObjFinalizable(obj); + stack[0] = obj; + expectRC[0] = rc; + + // match other nodes and edges + // check concurrent modification + CyclePatternNodeInfo *nodeInfos = GetCyclePatternNodeInfo(cyclePatternInfo); + CyclePatternEdgeInfo *edgeInfos = GetCyclePatternEdgeInfo(cyclePatternInfo); + if ((!MatchNodes(stack, *nodeInfos, cyclePatternInfo.nNodes + 1, hasFinal, isRefProcess, expectRC)) || + (!MatchEdges(stack, *edgeInfos, cyclePatternInfo.nNodes + 1, cyclePatternInfo.nEdges)) || + (!CheckStackColorGray(stack, cyclePatternInfo.nNodes + 1, weakCollectedSet, expectRC))) { + return false; + } + + // release objects in stack + if (hasFinal) { + if (!isRefProcess) { + RefCountLVal(stack[0]) -= rootDelta; + uint32_t resurrectWeakRC = ResurrectWeakRefCount(stack[0]); + if (resurrectWeakRC > 0) { + __MRT_ASSERT(resurrectWeakRC == 1, "unexpected weak count"); + RefCountLVal(stack[0]) &= ~(kResurrectWeakRcBits); + } + } + FinalizeCycleObjects(stack, cyclePatternInfo.nNodes + 1, weakCollectedSet); + } else if (!isRefProcess) { + ReleaseCycleObjects(stack, *nodeInfos, cyclePatternInfo.nNodes + 1, *edgeInfos, + cyclePatternInfo.nEdges, weakCollectedSet); + } else { + for (int32_t i = 0; i < (cyclePatternInfo.nNodes + 1); ++i) { + if (weakCollectedSet[i]) { + NRCMutator().WeakReleaseObj(stack[i]); + NRCMutator().DecWeak(stack[i]); + } + } + } + return true; +} + +// Match Cycle pattern and perf actions (add finalize, release, checkonly). +// +// This method is invoked when: +// 1. Dec Strong RC +// 2. Dec Weak RC +// 3. Reference processor check +// +// rootDelta: strong rc delta for root object +// is_weak: tobe removed +// cycle_finals: record finalizable objects in cycle +// cycle_weaks: record object and its strong rc iff object has weak rc in cycle +// +// return true if match success +// +// Implementation flows +// Loop1: Itreate root object's all cycle patterns and match +// 1. prepare work +// 1.1 update profile +// 1.2 clear state: stack/cycle_finals/cycle_weaks/all flags +// 2. check root object if strong rc is same with expected rc +// 2.1 quick check before atomic operation and check again after atomic +// 2.2 if cycleWeaks is needed, record root object, root must have weak rc +// 3. 
match other nodes, iterate pattern nodes, for each node +// 3.1 read object from src node with valid offset, if fail return false +// 3.2 check strong rc match expected rc, if fail return false +// 3.3 check object match expected type, if fail return false +// 4. match edges, itrate pattern edges +// 4.1 load object from source object with offset and check with expected object +// 5. check concurrent modification: if color still gray and success covert to white +// 6. matched pattern processing +// 6.1 not check and no finalizable object: direct release +// 6.2 not check and has finalizable object: add to finalizable +// 6.3 check only: add finalizable object into list +// 7. update profile +bool CycleCollector::TryFreeCycleAtMutator(address_t obj, uint32_t rootDelta, bool isRefProcess) { + MClass *cls = reinterpret_cast(obj)->GetClass(); + GCTibGCInfo *gctibInfo = reinterpret_cast(cls->GetGctib()); + if (gctibInfo == nullptr) { + LOG(FATAL) << "The class has no gctibInfo:" << cls->GetName() << maple::endl; + return false; + } + if (UNLIKELY(gctibInfo->headerProto & kCyclePatternBit) == 0) { + // This can heappen if object is created before pattern is invalidated. + ClearCyclePatternBit(obj); + return false; + } + CyclePatternInfo *cyclePatternInfo = GetCyclePatternInfo(*gctibInfo); + uint32_t patternIndex = 0; // current processing pattern index + uint32_t prevPatternIndex = 0; // prev valid pattern index + CyclePatternInfo *prevPattern = nullptr; // prev points to last valid pattern, skip canceled pattern. + for (; cyclePatternInfo != nullptr; cyclePatternInfo = GetNextCyclePattern(*cyclePatternInfo), ++patternIndex) { + if (prevPattern != nullptr && prevPattern->invalidated != kCyclePermernant) { + // add check count and judge if invalidate + AddProfilingMatch(*prevPattern, cls, prevPatternIndex, false); + } + if (cyclePatternInfo->invalidated == kCycleNotValid) { + continue; + } + prevPattern = cyclePatternInfo; + prevPatternIndex = patternIndex; + + if (!CheckAndReleaseCycle(obj, rootDelta, isRefProcess, *cyclePatternInfo, prevPattern)) { + continue; + } + + if (cyclePatternInfo->invalidated != kCyclePermernant) { + // add hit count and check count + AddProfilingMatch(*cyclePatternInfo, cls, patternIndex, true); + } + return true; // a match found + } + if (prevPattern != nullptr && prevPattern->invalidated != kCyclePermernant) { + // add check count and judge if invalidate + AddProfilingMatch(*prevPattern, cls, prevPatternIndex, false); + } + return false; // doesn't match any pattern +} + +static inline bool CheckSubClass(const MClass *objCls, const MClass *expectCls) { + MClass *parent = objCls->GetSuperClass(); + return parent == expectCls; +} + +// search pattern from obj, match all nodes in node patterns +bool CycleCollector::MatchNodes(address_t stack[], CyclePatternNodeInfo &infos, + int32_t nNodes, bool &hasFinal, bool isRefProcess, uint32_t expectRC[]) { + CyclePatternNodeInfo *curInfo = &infos; + // match start from second nodes in cycle + for (int32_t i = 1; i < nNodes; ++i) { + __MRT_ASSERT(curInfo->loadIndex < i, "invlaid load index"); + address_t curObj = CycleNodeLoadObject(stack, *curInfo); + if (!IS_HEAP_OBJ(curObj)) { + return false; + } + + uint32_t nodeExpectRc = static_cast(static_cast(curInfo->expectRc)); + if ((RefCount(curObj) != nodeExpectRc) || IsRCSkiped(curObj)) { + return false; + } + // mark gray atomic + uint32_t oldHeader = AtomicUpdateColor(curObj, kRCCycleColorGray); + uint32_t rc = GetRCFromRCHeader(oldHeader); + // check pattern type, rc + if (rc 
!= nodeExpectRc || SkipRC(oldHeader)) { + return false; + } + + if (!isRefProcess) { + if (GetResurrectWeakRCFromRCHeader(oldHeader) != 0 && !IsWeakCollectedFromRCHeader(oldHeader)) { + return false; + } + } + MClass *classInfo = MObject::Cast(curObj)->GetClass(); + MClass *expectCls = MObject::Cast(curInfo->expectType); + if (classInfo != expectCls) { + // 1. not check sub class + // 2. check sub class and fail + if (((static_cast(curInfo->flags) & kCycleNodeSubClass) == 0) || + (!CheckSubClass(classInfo, expectCls))) { + return false; + } + } + + // next info + curInfo = reinterpret_cast( + ((reinterpret_cast(curInfo)) + sizeof(CyclePatternNodeInfo))); + stack[i] = curObj; + expectRC[i] = rc; + hasFinal = hasFinal || IsObjFinalizable(curObj); + } + return true; +} + +// int8_t srcNodeIndex; +// int8_t destNodeIndex; +// int16_t loadOffset; +bool CycleCollector::MatchEdges(const address_t stack[], CyclePatternEdgeInfo &infos, int32_t nNodes, int32_t nEdges) { + CyclePatternEdgeInfo *curInfo = &infos; + for (int32_t i = 0; i < nEdges; ++i) { + if ((static_cast(curInfo->flags) & kCycleEdgeSkipMatch) == 0) { + address_t loadedObj = CycleEdgeLoadObject(stack, *curInfo); + __MRT_ASSERT(curInfo->destNodeIndex < nNodes, "destNodeIndex overflow"); + address_t expectedObj = stack[curInfo->destNodeIndex]; + if (loadedObj != expectedObj) { + return false; + } + } + + // next info + curInfo = reinterpret_cast( + ((reinterpret_cast(curInfo)) + sizeof(CyclePatternEdgeInfo))); + } + return true; +} + +// check if color is still gray and can successfully set white +// return true, if check success +// weakCollectedSet is set true, if weak collected bit is set in atomic operation +static bool AtomicCheckCycleCollectable(address_t objAddr, bool &weakCollectedSet, uint32_t rc) { + atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + if (UNLIKELY(SkipRC(oldHeader))) { + return false; + } + + if ((oldHeader & kRCCycleColorMask) != kRCCycleColorGray) { + return false; + } + + if (GetRCFromRCHeader(oldHeader) != rc) { + return false; + } + + newHeader = (oldHeader & ~kRCCycleColorMask) | kRCCycleColorWhite; + if ((!IsWeakCollectedFromRCHeader(oldHeader))) { + newHeader = (newHeader | kWeakCollectedBit); + weakCollectedSet = true; + } else { + weakCollectedSet = false; + } + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return true; +} + +// check if pattern match is valid and set weak collected bit if needed. +// 1. check if old color is still gray and new color is set to white +// 2. weak collected bit need set iff: weak collected bit not set && +// (weak rc > 1 || resurrect weak rc > 0) +// 3. if old color is not gray, update color and weak collected bit is fail +// 4. if node's check is fail, need revert early weak collected bit. 
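The rollback rule described above hinges on one compare-and-swap per node: a node only counts as collectable if its header is still gray and still carries the expected RC at the instant the color flips to white. A minimal standalone sketch of that transition (not part of this patch; the header layout and mask values are invented for illustration and do not match the runtime's real RC header):

    #include <atomic>
    #include <cstdint>

    // Hypothetical header layout, for illustration only: the low bits hold the
    // reference count and two high bits hold the cycle color.
    constexpr uint32_t kColorMask  = 0x3u << 30;
    constexpr uint32_t kColorGray  = 0x1u << 30;
    constexpr uint32_t kColorWhite = 0x2u << 30;

    // Succeeds only if the header is still gray with the expected RC; the color
    // is switched to white in the same CAS, so any concurrent change aborts the match.
    bool TryGrayToWhite(std::atomic<uint32_t> &header, uint32_t expectedRc) {
      uint32_t oldHeader = header.load();
      uint32_t newHeader = 0;
      do {
        if ((oldHeader & kColorMask) != kColorGray) {
          return false;  // color already changed by another thread
        }
        if ((oldHeader & ~kColorMask) != expectedRc) {
          return false;  // RC changed since the pattern was matched
        }
        newHeader = (oldHeader & ~kColorMask) | kColorWhite;
      } while (!header.compare_exchange_weak(oldHeader, newHeader));
      return true;
    }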
+bool CycleCollector::CheckStackColorGray(const address_t stack[], int32_t nNodes, + bool weakCollectedSet[], const uint32_t expectRC[]) { + bool checkPass = true; + int i; + for (i = 0; i < nNodes; ++i) { + address_t obj = stack[i]; + uint32_t rc = expectRC[i]; + bool weakCollectedBitSet = false; + if (AtomicCheckCycleCollectable(obj, weakCollectedBitSet, rc)) { + weakCollectedSet[i] = weakCollectedBitSet; + } else { + checkPass = false; + break; + } + } + if (!checkPass) { + LOG2FILE(kLogtypeGc) << "CheckStackColorGray fail with rollback " << std::endl; + for (int j = 0; j < i; ++j) { + if (weakCollectedSet[j]) { + AtomicClearWeakCollectable(stack[j]); + } + } + return false; + } + return true; +} + +// If any object is fianlizable in cycle, need put object in finalize list +void CycleCollector::FinalizeCycleObjects(const address_t stack[], int32_t nNodes, const bool weakCollectedSet[]) { + __MRT_ASSERT(nNodes <= kCyclepPatternMaxNodeNum, "overflow"); + address_t finalizableObjs[kCyclepPatternMaxNodeNum] = {}; + int32_t finalizableCount = 0; + // record if object is finalizable before add into finalizable queue, otherwise + // for cycle A<->B, A is finalizable + // 1. mutator thread find cycle and add A into finalizable queue + // 2. RP thread get invoked and execute A's finalize and trigger cycle pattern match again release A and B + // 3. mutator thread try processing B and find it is already release. + // + // If multiple object in cycle is finalizable, there might racing in add finalizable + // for cycle A<->B, A and B is finalizable + // 1. mutator thread find cycle and add A into finalizable queue + // 2. RP thread execute A's finalize and trigger cycle pattern match again, put B into finalize queue + // 3. mutator thread also put B into finalize queue + // solution is put A and B into finalize queue together + for (int32_t i = 0; i < nNodes; ++i) { + if (IsObjFinalizable(stack[i])) { + __MRT_ASSERT(weakCollectedSet[i] == true, "weak collected bit already set for finalizable object"); + finalizableObjs[finalizableCount] = stack[i]; + ++finalizableCount; + } else { + if (weakCollectedSet[i]) { + NRCMutator().DecWeak(stack[i]); + } + } + } + __MRT_ASSERT(finalizableCount > 0, "no finalizable found"); + ReferenceProcessor::Instance().AddFinalizables(finalizableObjs, finalizableCount, true); +} + +static uint32_t AtomicUpdateStrongRC(address_t objAddr, int32_t delta) { + atomic &headerAtomic = RefCountAtomicLVal(objAddr); + uint32_t oldHeader = headerAtomic.load(); + uint32_t newHeader = 0; + + do { + StatsRCOperationCount(objAddr); + if (UNLIKELY(SkipRC(oldHeader))) { + return oldHeader; + } + newHeader = static_cast((static_cast(oldHeader)) + delta); + uint32_t color = (delta > 0) ? kRCCycleColorBlack : kRCCycleColorBrown; + newHeader = (newHeader & ~kRCCycleColorMask) | color; + } while (!headerAtomic.compare_exchange_weak(oldHeader, newHeader)); + return oldHeader; +} + +// strong rc is not cleared and weak collected bit is set +// check if node can be release iff weak and resurrect weak is 0 +// 1. weak collected bit is set before cycle pattern match and all rc is zero +// 2. 
weak collected bit is set in cycle pattern match and only weak rc one left +void ReleaseObjInCycle(const address_t stack[], NaiveRCMutator &mutator, bool deferRelease, + const bool weakCollectedSet[], int32_t i){ + address_t obj = stack[i]; + if (weakCollectedSet[i]) { + if (TotalWeakRefCount(obj) == kWeakRCOneBit) { + // no other strong and weak reference, no racing + if (LIKELY(!deferRelease)) { + mutator.ReleaseObj(obj); + } else { + // no racing its safe to update rc without atomic + RefCountLVal(obj) &= ~(kRCBits | kWeakRcBits); + RCReferenceProcessor::Instance().AddAsyncReleaseObj(obj, true); + } + } else { + // has other weak reference, might racing, storng reference can not change + uint32_t strongRC = RefCount(obj); + uint32_t oldHeader __MRT_UNUSED = AtomicUpdateStrongRC(obj, -static_cast(strongRC)); + __MRT_ASSERT(GetWeakRCFromRCHeader(oldHeader) > 0, "weak rc must be none zero"); + if (LIKELY(!deferRelease)) { + mutator.WeakReleaseObj(obj); + } else { + // skip weak release might cause delayed release + LOG2FILE(kLogtypeGc) << "Skip Weak Release " << i << " " << std::hex << RCHeader(obj) << " " << + GCHeader(obj) << " " << std::dec << reinterpret_cast(obj)->GetClass()->GetName() << std::endl; + } + mutator.DecWeak(obj); + } + } else { + uint32_t strongRC = RefCount(obj); + if (strongRC > 1) { + (void)AtomicUpdateStrongRC(obj, 1 - static_cast(strongRC)); + } + mutator.DecRef(obj); + } +} + +// If none object is finalizable, invoke release +// release object need skip recurisve white object +const uint32_t kCycleDepthToAsyncReleaseThreshold = 5; +void CycleCollector::ReleaseCycleObjects(const address_t stack[], CyclePatternNodeInfo &nInfos, int32_t nNodes, + CyclePatternEdgeInfo &eInfos, int32_t nEdges, const bool weakCollectedSet[]) { + CyclePatternNodeInfo *curNInfo = &nInfos; + for (int32_t i = 1; i < nNodes; ++i) { + // decref(load_result) + // remove internal edges + address_t objAddr = stack[curNInfo->loadIndex]; + reffield_t *addr = reinterpret_cast(objAddr + curNInfo->loadOffset); + TLMutator().SatbWriteBarrier(objAddr, *addr); + *addr = 0; + curNInfo = reinterpret_cast( + ((reinterpret_cast(curNInfo)) + sizeof(CyclePatternNodeInfo))); + } + + CyclePatternEdgeInfo *curEInfo = &eInfos; + for (int32_t i = 0; i < nEdges; ++i) { + address_t objAddr = stack[curEInfo->srcNodeIndex]; + reffield_t *addr = reinterpret_cast(objAddr + curEInfo->loadOffset); + TLMutator().SatbWriteBarrier(objAddr, *addr); + *addr = 0; + curEInfo = reinterpret_cast( + ((reinterpret_cast(curEInfo)) + sizeof(CyclePatternEdgeInfo))); + } + + for (int32_t i = 0; i < nNodes; ++i) { + // clear strong rc/resurrect rc and dec weak rc + // if weak rc is not zero, there could be other threads dec weak rc concurrently + address_t obj = stack[i]; + uint32_t oldHeader = RCHeader(obj); + uint32_t reusrrectWeakRC = GetResurrectWeakRCFromRCHeader(oldHeader); + if (i == 0 && reusrrectWeakRC > 0) { + __MRT_ASSERT(reusrrectWeakRC == 1, "unexpected resurrect weak count, not 1"); + // none reference processing cycle pattern match, node's reusrrect rc must be one. 
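// Presumably AtomicUpdateRC<strongDelta, weakDelta, resurrectWeakDelta> applies the three
// deltas to the RC header in one atomic update, so <0, 0, -1> on the next line only drops
// the resurrect-weak count that was just asserted to be exactly one.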
+ (void)AtomicUpdateRC<0, 0, -1>(obj); + } else { + __MRT_ASSERT((reusrrectWeakRC == 0) || IsWeakCollected(obj), "unexpected weak count, not 0"); + } +#if RC_TRACE_OBJECT + TraceRefRC(stack[i], 0, " Release In Cycle Pattern"); +#endif + } + + NaiveRCMutator &mutator = NRCMutator(); +#if CONFIG_JSAN + bool deferRelease = false; + (void)kCycleDepthToAsyncReleaseThreshold; +#else + bool deferRelease = (mutator.CycleDepth() >= kCycleDepthToAsyncReleaseThreshold); +#endif + mutator.IncCycleDepth(); + for (int32_t i = 0; i < nNodes; ++i) { + ReleaseObjInCycle(stack, mutator, deferRelease, weakCollectedSet, i); + } + mutator.DecCycleDepth(); +} + +string GetSoNameFromCls(const MClass *elementClass) { + Dl_info dlinfo; + while (elementClass->IsArrayClass()) { + elementClass = elementClass->GetComponentClass(); + __MRT_ASSERT(elementClass != nullptr, "object array class's element cannot be null"); + } + if ((dladdr(elementClass, &dlinfo) != 0) && dlinfo.dli_fname != nullptr) { + string fullname = string(dlinfo.dli_fname); + // trim to libmaplecore-all.so + if (fullname == maple::fs::kLibcorePath) { +#ifdef OPS_ANDROID + return "libcore-all.so"; +#else // OPS_ANDROID + return "libmaplecore-all.so"; +#endif // OPS_ANDROID + } + return fullname; + } else { + return InterpSupport::GetSoName(); + } +} +} // end of namespace diff --git a/src/mrt/compiler-rt/src/collector/gc_reason.cpp b/src/mrt/compiler-rt/src/collector/gc_reason.cpp new file mode 100644 index 0000000000..ba21180576 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/gc_reason.cpp @@ -0,0 +1,121 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "gc_reason.h" +#include "gc_log.h" +#include "chosen.h" +#include "collector/stats.h" + +namespace maplert { +namespace { +// Minimum time between async GC (heuristic, native). +constexpr int64_t kMinAsyncGcIntervalNs = static_cast(maple::kSecondToNanosecond); +// 10ms for heuristic gc: gc usually takes longer than 10ms; if interval is +// smaller than 10ms, it tells us something is faulty or we should use blocking gc +constexpr int64_t kMinHeuGcIntervalNs = 10 * static_cast(maple::kMillisecondToNanosecond); + +// Set a safe initial value so that the first GC is able to trigger. +int64_t initHeuTriggerTimestamp = static_cast(timeutils::NanoSeconds()) - kMinHeuGcIntervalNs; +int64_t initNativeTriggerTimestamp = static_cast(timeutils::NanoSeconds()) - kMinAsyncGcIntervalNs; +} // namespace + +int64_t GCReasonConfig::lastGCTimestamp = initNativeTriggerTimestamp; + +void GCReasonConfig::IgnoreCallback() const { + LOG2FILE(kLogtypeGc) << name << " is triggered too frequently. Ignoring this request." << std::endl; + LOG(ERROR) << "[GC] " << name << " is triggered too frequently. Ignoring this request." 
<< maple::endl; +} + +inline bool GCReasonConfig::IsFrequentGC() const { + if (minIntervelNs == 0) { + return false; + } + int64_t now = static_cast(timeutils::NanoSeconds()); + return (now - lastTriggerTimestamp < minIntervelNs); +} + +inline bool GCReasonConfig::IsFrequentAsyncGC() const { + int64_t now = static_cast(timeutils::NanoSeconds()); + return (now - lastGCTimestamp < minIntervelNs); +} + +// heuristic gc is triggered by object allocation, +// the heap stats should take into consideration. +inline bool GCReasonConfig::IsFrequentHeuristicGC() const { + return IsFrequentAsyncGC(); +} + +bool GCReasonConfig::ShouldIgnore() const { + switch (reason) { + case kGCReasonHeu: + return IsFrequentHeuristicGC(); + case kGCReasonNative: + return IsFrequentAsyncGC(); + case kGCReasonOOM: + return IsFrequentGC(); + default: + return false; + } +} + +GCReleaseSoType GCReasonConfig::ShouldReleaseSo() const { + switch (reason) { + case kGCReasonUserNi: // fall through + case kGCReasonForceGC: + return kReleaseAll; + case kGCReasonTransistBG: + return Collector::Instance().InJankImperceptibleProcessState() ? kReleaseAppSo : kReleaseNone; + default: + return kReleaseNone; + } +} + +bool GCReasonConfig::ShouldTrimHeap() const { + switch (reason) { + case kGCReasonUserNi: // fall through + case kGCReasonForceGC: + return true; + case kGCReasonTransistBG: + return Collector::Instance().InJankImperceptibleProcessState(); + default: + return false; + } +} + +bool GCReasonConfig::ShouldCollectCycle() const { + switch (reason) { + case kGCReasonUserNi: + return true; + default: + return false; + } +} + +void GCReasonConfig::SetLastTriggerTime(int64_t timestamp) { + lastTriggerTimestamp = timestamp; + lastGCTimestamp = timestamp; +} + +GCReasonConfig reasonCfgs[] = { + { kGCReasonUser, "user", true, false, false, true, 0, 0 }, + { kGCReasonUserNi, "user_ni", true, false, false, true, 0, 0 }, + { kGCReasonOOM, "oom", true, false, false, false, 0, 0 }, + { kGCReasonForceGC, "force", true, false, false, true, 0, 0 }, + { kGCReasonTransistBG, "transist_background", true, false, false, true, 0, 0 }, + { kGCReasonHeu, "heuristic", false, false, true, true, kMinHeuGcIntervalNs, initHeuTriggerTimestamp }, + { kGCReasonNative, "native_alloc", false, false, true, true, kMinAsyncGcIntervalNs, initNativeTriggerTimestamp }, + { kGCReasonHeuBlocking, "heuristic_blocking", false, true, false, true, 0, 0 }, + { kGCReasonNativeBlocking, "native_alloc_blocking", false, true, false, true, 0, 0 }, +}; +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/gc_reference_processor.cpp b/src/mrt/compiler-rt/src/collector/gc_reference_processor.cpp new file mode 100644 index 0000000000..53b708f208 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/gc_reference_processor.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "collector/gc_reference_processor.h" +#include "yieldpoint.h" +#include "chosen.h" + +namespace maplert { +GCReferenceProcessor::GCReferenceProcessor() : ReferenceProcessor() { + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + refContext[type].discoverRefs = 0; + refContext[type].enqueueRefs = 0; + refContext[type].discoverCount = 0; + refContext[type].enqueueCount = 0; + } +} + +bool GCReferenceProcessor::ShouldStartIteration() { + if (processAllRefs.load()) { + processAllRefs.store(false); + return true; + } + if (hasBackgroundGC.load()) { + return true; + } + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + if (refContext[type].enqueueRefs != 0) { + return true; + } + } + return false; +} + +void GCReferenceProcessor::VisitGCRoots(RefVisitor &visitor) { + ReferenceProcessor::VisitGCRoots(visitor); + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + GCRefContext &ctx = refContext[type]; + if (ctx.enqueueRefs != 0) { + visitor(ctx.enqueueRefs); + } + } +} + +inline void SetNextReference(address_t next, address_t reference) { + if (next == 0) { + ReferenceSetPendingNext(reference, reference); + } else { + ReferenceSetPendingNext(reference, next); + } +} + +void GCReferenceProcessor::DiscoverReference(address_t reference) { + MClass *klass = reinterpret_cast(reference)->GetClass(); + uint32_t classFlag = klass->GetFlag(); + uint32_t type = ReferenceProcessor::GetRPTypeByClassFlag(classFlag); + GCRefContext &ctx = refContext[type]; + { + LockGuard guard(ctx.discoverLock); + if (ReferenceGetPendingNext(reference) != 0) { // avoid multiple threads racing discover + return; + } + address_t head = ctx.discoverRefs; + SetNextReference(head, reference); + ctx.discoverRefs = reference; + ++(ctx.discoverCount); + } +} + +// Concurrently remove reference with null or marked referent, save STW processing time +// 1. Lock discover ref and retrive whole list +// 2. start with prev and cur reference, always record the head and tail +// 3. Iterate and remove reference whose referent is null or marked +// 4. 
Merge discovered list back to context +void GCReferenceProcessor::ConcurrentProcessDisovered() { + Collector &collector = Collector::Instance(); + __MRT_ASSERT(collector.IsConcurrentMarkRunning(), "Not In Concurrent marking"); + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + GCRefContext &ctx = refContext[type]; + address_t prev = 0; + address_t cur; + address_t head; + uint32_t removedCount = 0; + { + LockGuard guard(ctx.discoverLock); + head = ctx.discoverRefs; + cur = ctx.discoverRefs; + ctx.discoverRefs = 0; + } + while (cur != 0) { + address_t referent = ReferenceGetReferent(cur); + address_t next = ReferenceGetPendingNext(cur); + if (next == cur) { + next = 0; + } + if (referent == 0 || !collector.IsGarbage(referent)) { + ++removedCount; + if (prev == 0) { + head = next; + } else { + SetNextReference(next, prev); + } + ReferenceSetPendingNext(cur, 0); + } else { + prev = cur; + } + if (next == 0) { + break; + } + cur = next; + } + if (head != 0) { + LockGuard guard(ctx.discoverLock); + address_t curHead = ctx.discoverRefs; + ctx.discoverRefs = head; + if (curHead != 0) { + ReferenceSetPendingNext(cur, curHead); + } + } + if (removedCount > 0) { + LOG2FILE(kLogtypeGc) << "ConcurrentProcessDisovered clear " << type << " " << removedCount << std::endl; + } + } +} + +void GCReferenceProcessor::InitEnqueueAtFork(uint32_t type, address_t refs) { + __MRT_ASSERT(type <= kRPPhantomRef, "Invalid type"); + GCRefContext &ctx = refContext[type]; + __MRT_ASSERT(ctx.enqueueRefs == 0, "not zero at fork"); + ctx.enqueueRefs = refs; +} + +// Iterate discoverd Reference: +// 1. check if it can be enqueued, referent is not null and dead +// 2. If enqueuable, add into enqueueRefs +// 3. Otherwise, clear pending next +// 4. clear discoveredref +void GCReferenceProcessor::ProcessDiscoveredReference(uint32_t flags) { + __MRT_ASSERT(WorldStopped(), "Not In STW"); + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; type++) { + if (!(flags & RPMask(type))) { + continue; + } + GCRefContext &ctx = refContext[type]; + address_t reference = ctx.discoverRefs; + while (reference != 0) { + address_t referent = ReferenceGetReferent(reference); + address_t next = ReferenceGetPendingNext(reference); + if (next == reference) { + next = 0; + } + // must be in heap or cleared during concurrent mark + if (referent != 0 && Collector::Instance().IsGarbage(referent)) { + ReferenceClearReferent(reference); + SetNextReference(ctx.enqueueRefs, reference); + ctx.enqueueRefs = reference; + ++(ctx.enqueueCount); + } else { + ReferenceSetPendingNext(reference, 0); + } + reference = next; + } + ctx.discoverRefs = 0; + ctx.discoverCount = 0; + } +} + +// Iterate enqueued Reference: +// 1. Leave Safe Region, avoid GC STW modify enqueueRefs +// 2. Check if available enqueueRefs, break if empty +// 3. Get Reference to proceess and update enqueueRefs +// 4. 
Invoke Enqueu method +void GCReferenceProcessor::EnqeueReferences() { + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + GCRefContext &ctx = refContext[type]; + // iterate all reference and invoke enqueue method, might racing with GC STW phase + while (true) { + ScopedObjectAccess soa; // leave safe region to sync with GC + if (ctx.enqueueRefs == 0) { + break; + } + ScopedHandles sHandles; + ObjHandle reference(ctx.enqueueRefs); + address_t next = ReferenceGetPendingNext(reference.AsRaw()); + __MRT_ASSERT(next != 0 && ReferenceGetReferent(reference.AsRaw()) == 0, "Invalid pending enqueue reference"); + if (next == reference.AsRaw()) { + next = 0; + } else { + TLMutator().SatbWriteBarrier(next); + } + ReferenceSetPendingNext(reference.AsRaw(), reference.AsRaw()); + --(ctx.enqueueCount); + ctx.enqueueRefs = next; + Enqueue(reference.AsRaw()); + } + } +} + +void GCReferenceProcessor::LogRefProcessorBegin() { + if (!GCLog().IsWriteToFile(kLogtypeRp)) { + return; + } + timeCurrentRefProcessBegin = timeutils::MicroSeconds(); + LOG2FILE(kLogtypeRp) << "[RefProcessor] Begin (" << timeutils::GetDigitDate() << ") Soft: " << + refContext[kRPSoftRef].enqueueCount << + " Weak " << refContext[kRPWeakRef].enqueueCount << + " Phantom " << refContext[kRPPhantomRef].enqueueCount << std::endl; +} + +void GCReferenceProcessor::LogRefProcessorEnd() { + if (!GCLog().IsWriteToFile(kLogtypeRp)) { + return; + } + uint64_t timeNow = timeutils::MicroSeconds(); + uint64_t timeConsumed = timeNow - timeCurrentRefProcessBegin; + uint64_t totalTimePassed = timeNow - timeRefProcessorBegin; + timeRefProcessUsed += timeConsumed; + float percentage = ((maple::kTimeFactor * timeRefProcessUsed) / totalTimePassed) / kPercentageDivend; + LOG2FILE(kLogtypeRp) << "[RefProcessor] End " << " (" << timeConsumed << "us" << + " [" << timeRefProcessUsed << "us]" << + " [ " << percentage << "%]" << std::endl; +} + +void GCReferenceProcessor::PreAddFinalizables(address_t[], uint32_t, bool needLock) { + if (needLock) { + finalizersLock.lock(); + } +} + +void GCReferenceProcessor::PostAddFinalizables(address_t[], uint32_t, bool needLock) { + if (needLock) { + finalizersLock.unlock(); + } +} +} diff --git a/src/mrt/compiler-rt/src/collector/mpl_thread_pool.cpp b/src/mrt/compiler-rt/src/collector/mpl_thread_pool.cpp new file mode 100644 index 0000000000..de9f665e8c --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/mpl_thread_pool.cpp @@ -0,0 +1,296 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "collector/mpl_thread_pool.h" + +#include +#include +#include +#include + +#include "securec.h" +#include "base/logging.h" +#include "chosen.h" + +// thread pool implementation +namespace maplert { +MplPoolThread::MplPoolThread(MplThreadPool *threadPool, const char *threadName, size_t threadId, size_t stackSize) + : schedCores(nullptr), + id(threadId), + tid(-1), + name(threadName), + pool(threadPool) { + pthread_attr_t attr; + CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), ""); + CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stackSize), stackSize); + CHECK_PTHREAD_CALL(pthread_create, (&pthread, nullptr, &WorkerFunc, this), "MplPoolThread init"); + CHECK_PTHREAD_CALL(pthread_setname_np, (pthread, threadName), "MplPoolThread SetName"); + CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "MplPoolThread init"); +} + +MplPoolThread::~MplPoolThread() { + CHECK_PTHREAD_CALL(pthread_join, (pthread, nullptr), "thread deinit"); + schedCores = nullptr; + pool = nullptr; +} + +void MplPoolThread::SetPriority(int32_t priority) { + int32_t result = setpriority(static_cast(PRIO_PROCESS), tid, priority); + if (result != 0) { + LOG(ERROR) << "Failed to setpriority to :" << priority; + } +} + +void *MplPoolThread::WorkerFunc(void *param) { + // set current thread as a gc thread. + (void)maple::tls::CreateTLS(); + StoreTLS(reinterpret_cast(true), maple::tls::kSlotIsGcThread); + + MplPoolThread *thread = reinterpret_cast(param); + MplThreadPool *pool = thread->pool; + + thread->tid = maple::GetTid(); + MRT_SetThreadPriority(thread->tid, pool->priority); + + while (!pool->IsExited()) { + MplTask *task = nullptr; + { + std::unique_lock taskLock(pool->taskMutex); + // hang up in threadSleepingCondVar when pool stopped or to many active thread + while (((pool->currActiveThreadNum > pool->maxActiveThreadNum) || !pool->IsRunning()) && !pool->IsExited()) { + // currActiveThreadNum start at maxThreadNum, dec before thread hangup in sleeping state + --(pool->currActiveThreadNum); + if (pool->currActiveThreadNum == 0) { + // all thread sleeping, pool in stop state, notify wait stop thread + pool->allThreadStopped.notify_all(); + } + pool->threadSleepingCondVar.wait(taskLock); + ++(pool->currActiveThreadNum); + } + // if no task available thread hung up in taskEmptyCondVar + while (pool->taskQueue.empty() && pool->IsRunning() && !pool->IsExited()) { + // currExecuteThreadNum start at 0, inc before thread wait for task + ++(pool->currWaittingThreadNum); + if (pool->currWaittingThreadNum == pool->maxActiveThreadNum) { + // all task is done, notify wait finish thread + pool->allWorkDoneCondVar.notify_all(); + } + pool->taskEmptyCondVar.wait(taskLock); + --(pool->currWaittingThreadNum); + } + if (!pool->taskQueue.empty() && pool->IsRunning() && !pool->IsExited()) { + task = pool->taskQueue.front(); + pool->taskQueue.pop(); + } + } + if (task != nullptr) { + if (thread->schedCores != nullptr) { + thread->schedCores->push_back(sched_getcpu()); + } + task->Execute(thread->id); + delete task; + } + } + { + std::unique_lock taskLock(pool->taskMutex); + --(pool->currActiveThreadNum); + if (pool->currActiveThreadNum == 0) { + // all thread sleeping, pool in stop state, notify wait stop thread + pool->allThreadStopped.notify_all(); + } + } + maple::tls::DestoryTLS(); + return nullptr; +} + +const int kMaxNameLen = 256; + +MplThreadPool::MplThreadPool(const char *poolName, int32_t threadNum, int32_t prior) + : priority(prior), + name(poolName), + running(false), + exit(false), + maxThreadNum(threadNum), + 
maxActiveThreadNum(threadNum), + currActiveThreadNum(maxThreadNum), + currWaittingThreadNum(0) { + // init and start thread + char threadName[kMaxNameLen]; + for (int32_t i = 0; i < maxThreadNum; ++i) { + // threadID 0 is main thread, sub threadID start at 1 + errno_t ret = snprintf_s(threadName, kMaxNameLen, (kMaxNameLen - 1), "Pool%s_%d", poolName, (i + 1)); + if (ret < 0) { + LOG(ERROR) << "snprintf_s " << "name = " << name << "threadId" << (i + 1) << + " in MplThreadPool::MplThreadPool return " << ret << " rather than 0." << maple::endl; + } + // default Sleeping + MplPoolThread *threadItem = new (std::nothrow) MplPoolThread(this, threadName, (i + 1), kDefaultStackSize); + if (threadItem == nullptr) { + LOG(FATAL) << "new MplPoolThread failed" << maple::endl; + } + threads.push_back(threadItem); + } + // pool init in stop state + Stop(); + LOG(DEBUGY) << "MplThreadPool init" << maple::endl; +} + +void MplThreadPool::Exit() { + std::unique_lock taskLock(taskMutex); + // set pool exit flag + exit.store(true, std::memory_order_relaxed); + + // notify all waitting thread exit + taskEmptyCondVar.notify_all(); + // notify all stopped thread exit + threadSleepingCondVar.notify_all(); + + // notify all WaitFinish thread return + allWorkDoneCondVar.notify_all(); + // notify all WaitStop thread return + allThreadStopped.notify_all(); + LOG(DEBUGY) << "MplThreadPool Exit" << maple::endl; +} + +MplThreadPool::~MplThreadPool() { + Exit(); + // wait until threads exit + for (auto thread : threads) { + delete thread; + } + threads.clear(); + ClearAllTask(); +} + +void MplThreadPool::SetPriority(int32_t prior) { + for (auto thread : threads) { + thread->SetPriority(prior); + } +} + +void MplThreadPool::SetMaxActiveThreadNum(int32_t num) { + std::unique_lock taskLock(taskMutex); + int32_t oldNum = maxActiveThreadNum; + if (num >= maxThreadNum) { + maxActiveThreadNum = maxThreadNum; + } else if (num > 0) { + maxActiveThreadNum = num; + } else { + LOG(ERROR) << "SetMaxActiveThreadNum invalid input val" << maple::endl;; + return; + } + // active more thread get to work when pool is running + if ((maxActiveThreadNum > oldNum) && (currWaittingThreadNum > 0) && IsRunning()) { + threadSleepingCondVar.notify_all(); + } +} + +void MplThreadPool::AddTask(MplTask *task) { + if (UNLIKELY(task == nullptr)) { + LOG(FATAL) << "failed to add a null task" << maple::endl; + } + std::unique_lock taskLock(taskMutex); + taskQueue.push(task); + // do not notify when pool isn't running, notify_all in start + // notify if there is active thread waiting for task + if (IsRunning() && (currWaittingThreadNum > 0)) { + taskEmptyCondVar.notify_one(); + } +} + +void MplThreadPool::AddTask(std::function func) { + AddTask(new (std::nothrow) MplLambdaTask(func)); +} + +void MplThreadPool::Start() { + // notify all sleeping threads get to work + std::unique_lock taskLock(taskMutex); + running.store(true, std::memory_order_relaxed); + threadSleepingCondVar.notify_all(); +} + +void MplThreadPool::DrainTaskQueue() { + __MRT_ASSERT(!IsRunning(), "thread pool is running"); + MplTask *task = nullptr; + do { + task = nullptr; + taskMutex.lock(); + if (!taskQueue.empty()) { + task = taskQueue.front(); + taskQueue.pop(); + } + taskMutex.unlock(); + if (task != nullptr) { + task->Execute(0); + delete task; + } + } while (task != nullptr); +} + +void MplThreadPool::WaitFinish(bool addToExecute, std::vector *schedCores) { + if (addToExecute) { + MplTask *task = nullptr; + do { + task = nullptr; + taskMutex.lock(); + if (!taskQueue.empty() && 
IsRunning() && !IsExited()) { + task = taskQueue.front(); + taskQueue.pop(); + } + taskMutex.unlock(); + if (task != nullptr) { + if (schedCores != nullptr) { + schedCores->push_back(sched_getcpu()); + } + task->Execute(0); + delete task; + } + } while (task != nullptr); + } + + // wait all task excute finish + // currWaittingThreadNum == maxActiveThreadNum indicate all work done + // no need to wait when pool stopped or exited + { + std::unique_lock taskLock(taskMutex); + while ((currWaittingThreadNum != maxActiveThreadNum) && IsRunning() && !IsExited()) { + allWorkDoneCondVar.wait(taskLock); + } + } + // let all thread sleeing for next start + // if threads not in sleeping mode, next start signal may be missed + Stop(); + // clean up task in GC thread, thread pool might receive "exit" in stop thread + DrainTaskQueue(); +} + +void MplThreadPool::Stop() { + // notify & wait all thread enter stopped state + std::unique_lock taskLock(taskMutex); + running.store(false, std::memory_order_relaxed); + taskEmptyCondVar.notify_all(); + while (currActiveThreadNum != 0) { + allThreadStopped.wait(taskLock); + } +} + +void MplThreadPool::ClearAllTask() { + std::unique_lock taskLock(taskMutex); + while (!taskQueue.empty()) { + MplTask *task = taskQueue.front(); + taskQueue.pop(); + delete task; + } +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/mrt_bitmap.cpp b/src/mrt/compiler-rt/src/collector/mrt_bitmap.cpp new file mode 100644 index 0000000000..4ce4694c1f --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/mrt_bitmap.cpp @@ -0,0 +1,103 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/mrt_bitmap.h" + +#include "chosen.h" +#include "syscall.h" + +namespace maplert { +// round value up to alignValue which must be a power of 2. 
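The comment above refers to the standard power-of-two round-up trick: adding (align - 1) pushes the value past the next boundary, and masking with ~(align - 1) snaps it back onto that boundary. A tiny self-contained version of the same arithmetic (the name AlignUp is made up here; the patch's own helper is AlignRight below):

    #include <cstdint>

    constexpr uint32_t AlignUp(uint32_t value, uint32_t align) {
      // Only valid when align is a power of two.
      return (value + align - 1) & ~(align - 1);
    }

    static_assert(AlignUp(13, 8) == 16, "rounds up to the next multiple of 8");
    static_assert(AlignUp(16, 8) == 16, "already-aligned values are unchanged");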
+ALWAYS_INLINE uint32_t MrtBitmap::AlignRight(uint32_t value, uint32_t alignValue) const noexcept { + return (value + alignValue - 1) & ~(alignValue - 1); +} + +void MrtBitmap::ResetCurEnd() { + curEnd = spaceStart + (*theAllocator).GetCurrentSpaceCapacity(); + LOG2FILE(kLogtypeGc) << "resetCurEnd Bitmap " << std::hex << spaceStart << " " << curEnd << std::dec << '\n'; +} + +ALWAYS_INLINE size_t MrtBitmap::GetBitmapSizeByHeap(size_t heapBytes) { + size_t nBytes; + size_t nBits; + + if ((heapBytes & ((static_cast(1) << kLogObjAlignment) - 1)) == 0) { + nBits = heapBytes >> kLogObjAlignment; + } else { + nBits = (heapBytes >> kLogObjAlignment) + 1; + } + + nBytes = AlignRight(static_cast(nBits), kBitsPerWord) >> kLogBitsPerByte; + return nBytes; +} + +void MrtBitmap::Initialize() { + if (isInitialized) { + return; + } + + size_t maxHeapBytes = (*theAllocator).GetMaxCapacity(); + spaceStart = (*theAllocator).HeapLowerBound(); + spaceEnd = spaceStart + maxHeapBytes; + curEnd = spaceStart + (*theAllocator).GetCurrentSpaceCapacity(); + + bitmapSize = GetBitmapSizeByHeap(maxHeapBytes); + size_t roundUpPage = AlignRight(static_cast(bitmapSize), maple::kPageSize); + void *ret = mmap(nullptr, roundUpPage, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (ret == MAP_FAILED) { + LOG(FATAL) << "maple failed to initialize MrtBitmap"; + } else { + MRT_PRCTL(ret, roundUpPage, "MrtBitmap_Initialize"); + } + + bitmapBegin = reinterpret_cast*>(ret); + isInitialized = true; + LOG2FILE(kLogtypeGc) << "Initialize Bitmap " << std::hex << spaceStart << " " << curEnd << std::dec << '\n'; + size_t curBitmapSize = GetBitmapSizeByHeap(curEnd - spaceStart); + if (memset_s(reinterpret_cast(bitmapBegin), curBitmapSize, 0, curBitmapSize) != EOK) { + LOG(FATAL) << "MrtBitmap init memset_s failed." << maple::endl; + } +} + +MrtBitmap::~MrtBitmap() { + if (munmap(bitmapBegin, AlignRight(static_cast(bitmapSize), maple::kPageSize)) != EOK) { + LOG(ERROR) << "munmap error in MrtBitmap destruction!" << maple::endl; + } + bitmapBegin = nullptr; +} + +#if MRT_DEBUG_BITMAP +void MrtBitmap::CopyBitmap(const MrtBitmap &bitmap) { + if (bitmapBegin != nullptr) { + if (munmap(bitmapBegin, AlignRight(bitmapSize, maple::kPageSize)) != 0) { + LOG(FATAL) << "munmap error in copy bitmap"; + } + bitmapBegin = nullptr; + } + isInitialized = bitmap.Initialized(); + bitmapSize = bitmap.Size(); + if (isInitialized) { + void *result = mmap(nullptr, AlignRight(bitmapSize, maple::kPageSize), + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (result == MAP_FAILED) { + LOG(FATAL) << "mmap(" << bitmapSize << ") failed in copy bitmap"; + } + bitmapBegin = reinterpret_cast*> (result); + if (memcpy_s(bitmapBegin, bitmapSize, bitmap.bitmapBegin, bitmapSize) != EOK) { + LOG(ERROR) << "memcpy_s error in copy bitmap"; + } + } +} +#endif +} diff --git a/src/mrt/compiler-rt/src/collector/native_gc.cpp b/src/mrt/compiler-rt/src/collector/native_gc.cpp new file mode 100644 index 0000000000..fc702911f0 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/native_gc.cpp @@ -0,0 +1,172 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/native_gc.h" + +#include +#include + +#include "panic.h" +#include "chosen.h" +#include "collector/stats.h" + +namespace maplert { +NativeGCStats NativeGCStats::instance; + +size_t NativeGCStats::GetNativeBytes() { + struct mallinfo mi = mallinfo(); + constexpr bool kMallocBytesNeedCast = (sizeof(size_t) > sizeof(mi.uordblks)); + constexpr bool kMappedBytesNeedCast = (sizeof(size_t) > sizeof(mi.hblkhd)); + size_t mallocBytes = kMallocBytesNeedCast ? static_cast(mi.uordblks) : mi.uordblks; + size_t mappedBytes = kMappedBytesNeedCast ? static_cast(mi.hblkhd) : mi.hblkhd; + if (UNLIKELY(mappedBytes > mallocBytes)) { + mallocBytes = mappedBytes; + } + return mallocBytes + nativeBytesRegistered.load(std::memory_order_relaxed); +} + +void NativeGCStats::CheckForGC() { + if (Collector::Instance().IsGcTriggered()) { + // skip native gc check if gc already triggered. + return; + } + const size_t currentNativeBytes = GetNativeBytes(); + const float factor = NativeGcFactor(currentNativeBytes); + if (LIKELY(factor < kTriggerNativeGcFactor)) { + return; + } + bool wait = (factor > kWaitNativeGcFactor && currentNativeBytes > kHugeNativeBytes) && + (!Collector::Instance().InStartupPhase()) && + Collector::Instance().InJankImperceptibleProcessState(); + LOG2FILE(kLogtypeGc) << "Trigger native GC" << (wait ? " and wait," : ",") << " factor: " << factor << '\n' << + " old native: " << oldNativeBytesAllocated.load(std::memory_order_relaxed) << '\n' << + " cur native: " << currentNativeBytes << '\n' << + " cur heap : " << stats::gcStats->CurAllocBytes() << '\n' << + " threshold : " << stats::gcStats->CurGCThreshold() << '\n' << + " reg bytes : " << nativeBytesRegistered.load(std::memory_order_relaxed) << '\n' << + " notif objs: " << nativeObjectNotified.load(std::memory_order_relaxed) << '\n' << + std::endl; + if (wait) { + Collector::Instance().InvokeGC(kGCReasonNativeBlocking); + } else { + Collector::Instance().InvokeGC(kGCReasonNative); + } +} + +static inline size_t NativeGcWatermark() { + const size_t heapGcThreshold = stats::gcStats->CurGCThreshold(); + return heapGcThreshold / NativeGCStats::kNativeWatermarkFactor + + NativeGCStats::kNativeWatermarkExtra; +} + +static inline double MemoryGrowthRate() { + return Collector::Instance().InJankImperceptibleProcessState() ? 
NativeGCStats::kBackgroundGrowthRate : + NativeGCStats::kFrontgroundGrowthRate; +} + +float NativeGCStats::NativeGcFactor(size_t currentNativeBytes) { + const size_t oldNativeBytes = oldNativeBytesAllocated.load(std::memory_order_relaxed); + if (oldNativeBytes > currentNativeBytes) { + oldNativeBytesAllocated.store(currentNativeBytes, std::memory_order_relaxed); + return kSkipNativeGcFactor; + } + const size_t newNativeBytes = currentNativeBytes - oldNativeBytes; + const size_t weightedNativeBytes = (newNativeBytes * NativeDiscountRatio::num / NativeDiscountRatio::den) + + (oldNativeBytes / kOldDiscountFactor); + const size_t watermarkNativeBytes = + static_cast(NativeGcWatermark() * MemoryGrowthRate()) * + NativeDiscountRatio::num / NativeDiscountRatio::den; + const size_t currentHeapBytes = stats::gcStats->CurAllocBytes(); + const size_t heapGcThreshold = stats::gcStats->CurGCThreshold(); + return static_cast(currentHeapBytes + weightedNativeBytes) / + static_cast(heapGcThreshold + watermarkNativeBytes); +} + +NativeEpochStats NativeEpochStats::instance; + +void NativeEpochStats::Init(uint32_t epochSeconds) { + epochInterval = static_cast((epochSeconds * kEpochSecondRatio) / MrtRpEpochIntervalMs()); + logNativeInfo = VLOG_IS_ON(opennativelog); + curGCWatermark = NativeGCStats::Instance().GetNativeBytes(); + curRPWatermark = curGCWatermark; + epochMin = epochMax = epochTotal = curGCWatermark; + curEpochIndex = 1; + RPTriggered = false; + + if (logNativeInfo) { + LOG(INFO) << "FigoNativeMEpoch Init " << " Interval " << curEpochIndex << + " GC WaterMark " << curGCWatermark << + " GC WaterMark " << curRPWatermark << + maple::endl; + } +} + +static inline float BytesToMB(size_t bytes) { + return (bytes * 1.0f) / maple::MB; +} + +// Check and record current native meomory +void NativeEpochStats::CheckNativeGC() { + size_t curNativeBytes = NativeGCStats::Instance().GetNativeBytes(); + if (++curEpochIndex == epochInterval) { + if (logNativeInfo) { + LOG(INFO) << "FigoNativeMEpoch " << curEpochIndex << "[" << BytesToMB(epochMin) << ", " << + BytesToMB(epochMax) << ", " << (BytesToMB(epochTotal) / epochInterval) << "] " << + "[" << BytesToMB(curGCWatermark) << ", " << BytesToMB(curRPWatermark) << "] " << maple::endl; + } + if (epochMin > (curGCWatermark * kEpochGCDeltaRatio)) { + if (RPTriggered || (epochMin > (curGCWatermark * kEpochGCEagerDeltaRatio))) { + LOG2FILE(kLogtypeGc) << "EpochMin " << BytesToMB(epochMin) << " cur " << BytesToMB(curGCWatermark) << std::endl; + MRT_TriggerGC(kGCReasonNative); + curGCWatermark = epochMin; + } else { + ReferenceProcessor::Instance().Notify(true); + curRPWatermark = epochMin; + RPTriggered = true; + } + } else if (epochMin > (curRPWatermark * kEpochRPDeltaRatio)) { + ReferenceProcessor::Instance().Notify(true); + curRPWatermark = epochMin; + } else { + RPTriggered = false; + } + if (epochMin < curGCWatermark) { + if (logNativeInfo) { + LOG(INFO) << "New watermark " << BytesToMB(curGCWatermark) << " To " << BytesToMB(epochMin) << maple::endl; + } + curGCWatermark = epochMin; + } + if (epochMin < curRPWatermark) { + if (logNativeInfo) { + LOG(INFO) << "New RP watermark " << BytesToMB(curRPWatermark) << " To " << BytesToMB(epochMin) << maple::endl; + } + curRPWatermark = epochMin; + } + epochTotal = curNativeBytes; + epochMax = curNativeBytes; + epochMin = curNativeBytes; + curEpochIndex = 0; + } else { + if (logNativeInfo) { + epochTotal += curNativeBytes; + if (curNativeBytes > epochMax) { + epochMax = curNativeBytes; + } + } + if (curNativeBytes < epochMin) 
{ + epochMin = curNativeBytes; + } + } +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collector/rc_reference_processor.cpp b/src/mrt/compiler-rt/src/collector/rc_reference_processor.cpp new file mode 100644 index 0000000000..df511924fe --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/rc_reference_processor.cpp @@ -0,0 +1,1224 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/rc_reference_processor.h" + +#include + +#include "chosen.h" +#include "yieldpoint.h" +#include "collector/native_gc.h" + +namespace maplert { +constexpr uint64_t kRPLogBufSize = 256; +#if CONFIG_JSAN +constexpr uint32_t kRCEpochIntervalMs = 50; +#else +constexpr uint32_t kRCEpochIntervalMs = 773; +#endif + +namespace { +// (default) Parameters to control (how fast) the references are processed +// {WeakRef, SoftRef, PhantomRefAndCleaner, Finalier, WeakGRT, ReleaseQueue} +// WeakGRT is processed every time, useless now. +uint32_t referenceLimit[kRPTypeNum] = { 50, 100, 100, 20, 0, 40 }; +// slow-down factor for low-speed tasks, ~6min +uint32_t agedReferenceLimit[kRPTypeNum] = { 50, 50, 50, 0, 0, 0 }; +// process all references after hungryLimit runs. +uint32_t hungryLimit = 100; +uint32_t agedHungryLimit = 400; + +uint64_t referenceProcessorCycles = 0; +uint64_t curRpIndex = 0; + +const char *kRefTypeMnemonics[kRPTypeNum] = { + "WeakRef", "SoftRef", "PhantomRef", "Finalizer", "WeakGRT", "ReleaseQueue", +}; +} + + +constexpr uint32_t kPendingFinalizeEpochMS = 8000; // ms between pending finalize processing +constexpr uint32_t kPendingFinalizerRpCount = (kPendingFinalizeEpochMS / kRCEpochIntervalMs) + 1; + +static void LogReferenceFrequencies() { + if (!GCLog().IsWriteToFile(kLogtypeRp)) { + return; + } + LOG2FILE(kLogtypeRp) << "[RefProcessor] Read Frequencies from File: " << std::endl; + LOG2FILE(kLogtypeRp) << " refLimit: "; + for (uint32_t i = 0; i < kRPTypeNum; i++) { + LOG2FILE(kLogtypeRp) << kRefTypeMnemonics[i] << "[" << referenceLimit[i] << "] "; + } + LOG2FILE(kLogtypeRp) << std::endl; + LOG2FILE(kLogtypeRp) << " agedRefLimit: "; + for (uint32_t i = 0; i < kRPTypeNum; i++) { + LOG2FILE(kLogtypeRp) << kRefTypeMnemonics[i] << "[" << agedReferenceLimit[i] << "] "; + } + LOG2FILE(kLogtypeRp) << std::endl; + LOG2FILE(kLogtypeRp) << " agedRefLimit: " << hungryLimit << std::endl; +} + +static void InitReferenceFlags() { + LogReferenceFrequencies(); +} + +uint32_t MrtRpEpochIntervalMs() { + return kRCEpochIntervalMs; +} + +// Aged Reference list structure +constexpr uint32_t kRefRetireThreshold = 1600; +// Node for references, with age information +struct TimedRef { + explicit TimedRef(address_t obj) : ref(obj), age(0) {} + address_t ref; + uint32_t age; +}; + +constexpr uint32_t kYoungRefParallelListNum = 4; +constexpr uint32_t kAgedRefParallelListNum = 8; + +// Policy used in reference processing +std::unique_ptr rpSoftPolicy = std::make_unique(); + +// queues and queue-header access lock +static LockType weakRefLock; +static ManagedForwardList 
weakReferences[kYoungRefParallelListNum]; // for young weakRefs +static ManagedForwardList workingWeakRefs; +static ManagedForwardList agedWeakRefs[kAgedRefParallelListNum]; // for aged weakRefs + +// SoftReference, PhantomReference +static LockType softRefLock; +static ManagedForwardList softRefs[kYoungRefParallelListNum]; // for young SoftRefs +static ManagedForwardList workingSoftRefs; +static ManagedForwardList agedSoftRefs[kAgedRefParallelListNum]; // for aged SoftRefs + +// cleaners shared the same queue with phantom refs +static LockType phantomRefLock; +static ManagedForwardList phantomRefs[kYoungRefParallelListNum]; // for young PhantomRefs and Cleaners +static ManagedForwardList workingPhantomRefs; +static ManagedForwardList agedPhantomRefs[kAgedRefParallelListNum]; // for aged PhantomRefs and Cleaners + +// stats, element in forward_list +static uint32_t agedWeakCount = 0; // only used in RP thread +static std::atomic weakCount = { 0 }; // used in mutator and RP thread, need atomic + +static uint32_t agedSoftCount = 0; +static std::atomic softCount = { 0 }; +// the count of aged_phantom/phantom, contatined cleaners +static uint32_t agedPhantomCount = 0; +static std::atomic phantomCount = { 0 }; + +// Context for different reference handling: visit, enqueue, context includes +// List/count/lock/type +struct ReferenceContext { + ManagedForwardList (&youngs)[kYoungRefParallelListNum]; + ManagedForwardList &working; + ManagedForwardList (&ages)[kAgedRefParallelListNum]; + LockType &lock; + uint32_t &agedCount; + std::atomic &youngCount; +}; + +static ReferenceContext referenceContext[kRPPhantomRef + 1] = { + { weakReferences, workingWeakRefs, agedWeakRefs, weakRefLock, agedWeakCount, weakCount }, + { softRefs, workingSoftRefs, agedSoftRefs, softRefLock, agedSoftCount, softCount }, + { phantomRefs, workingPhantomRefs, agedPhantomRefs, phantomRefLock, agedPhantomCount, phantomCount } +}; + +static void RCVisitReferences(ReferenceContext &context, AddressVisitor &visitor) { + for (TimedRef ref : context.working) { + visitor(ref.ref); + } + for (uint32_t i = 0; i < kYoungRefParallelListNum; ++i) { + ManagedForwardList &young = context.youngs[i]; + for (TimedRef ref : young) { + visitor(ref.ref); + } + } + for (uint32_t i = 0; i < kAgedRefParallelListNum; ++i) { + ManagedForwardList &aged = context.ages[i]; + for (address_t ref : aged) { + visitor(ref); + } + } +} + +void MrtVisitReferenceRoots(AddressVisitor visitor, uint32_t flags) { + if (flags & RPMask(kRPSoftRef)) { + RCVisitReferences(referenceContext[kRPSoftRef], visitor); + } + if (flags & RPMask(kRPPhantomRef)) { + RCVisitReferences(referenceContext[kRPPhantomRef], visitor); + } + if (flags & RPMask(kRPWeakRef)) { + RCVisitReferences(referenceContext[kRPWeakRef], visitor); + } + + if (flags & RPMask(kRPFinalizer)) { + RefVisitor finalizableVisitor = [&visitor](address_t &obj) { + visitor(obj); + }; + ReferenceProcessor::Instance().VisitFinalizers(finalizableVisitor); + } + if (flags & RPMask(kRPReleaseQueue)) { + RefVisitor releaseQueueVistor = [&visitor](address_t &obj) { + visitor(obj); + }; + RCReferenceProcessor::Instance().VisitAsyncReleaseObjs(releaseQueueVistor); + } +} + +// used in gc for process aged generation references +// remove reference from queue when reference is dead. 
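The reference lists above are deliberately sharded: new references are spread round-robin over several young lists, and the parallel visitor later hands out one list per task, so GC workers drain shards independently instead of contending on a single queue. A toy model of that layout, not part of this patch, with std::forward_list and std::mutex standing in for the runtime-specific ManagedForwardList and LockType:

    #include <array>
    #include <cstdint>
    #include <forward_list>
    #include <mutex>

    struct ShardedRefList {
      static constexpr uint32_t kShards = 4;  // mirrors kYoungRefParallelListNum
      std::array<std::forward_list<uintptr_t>, kShards> shards;
      std::mutex lock;
      uint32_t nextShard = 0;

      // Round-robin insertion; one lock on insert, but each shard can later be
      // drained by a separate worker without touching the others.
      void Push(uintptr_t ref) {
        std::lock_guard<std::mutex> guard(lock);
        shards[nextShard % kShards].push_front(ref);
        ++nextShard;
      }
    };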
+static inline void GCProcessRefs(ManagedForwardList &refList, RefVisitor &visitor) { + auto itor = refList.begin(); + while (itor != refList.end()) { + address_t &ref = *itor; + visitor(ref); + ++itor; + } +} + +static inline void GCProcessGenRefs(ManagedForwardList::iterator &begin, + ManagedForwardList::iterator &end, + RefVisitor &visitor) { + ManagedForwardList::iterator &itor = begin; + while (itor != end) { + TimedRef &ref = *itor; + visitor(ref.ref); + ++itor; + } +} + +// used in gc for process young generation references +static inline void GCProcessGenRefs(ManagedForwardList &refList, RefVisitor &visitor) { + auto begin = refList.begin(); + auto end = refList.end(); + GCProcessGenRefs(begin, end, visitor); +} + +// only used in concurrent mark sweep to process references in non-parallel mode. +void MRT_GCVisitReferenceRoots(function &visitor, uint32_t flags) { + ManagedForwardList::iterator begins[kYoungRefParallelListNum + 1]; + ManagedForwardList::iterator ends[kYoungRefParallelListNum + 1]; + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + if (!(flags & RPMask(type))) { + continue; + } + ReferenceContext &ctx = referenceContext[type]; + { + LockGuard guard(ctx.lock); + for (uint32_t i = 0; i < kYoungRefParallelListNum; ++i) { + begins[i] = ctx.youngs[i].begin(); + ends[i] = ctx.youngs[i].end(); + } + begins[kYoungRefParallelListNum] = ctx.working.begin(); + ends[kYoungRefParallelListNum] = ctx.working.end(); + } + for (uint32_t i = 0; i <= kYoungRefParallelListNum; ++i) { + GCProcessGenRefs(begins[i], ends[i], visitor); + } + for (uint32_t i = 0; i < kAgedRefParallelListNum; ++i) { + GCProcessRefs(ctx.ages[i], visitor); + } + } +} + +// only used in GC thread to process references in parallel mode. +// we process softReference & phantomReference & weakReference & cleaner in GC. +void MRT_ParallelVisitReferenceRoots(MplThreadPool &threadPool, RefVisitor &visitor, uint32_t flags) { + std::atomic taskIndex[kRPPhantomRef + 1]; + const uint32_t threadCount = static_cast(threadPool.GetMaxThreadNum() + 1); + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; type++) { + if (!(flags & RPMask(type))) { + continue; + } + std::atomic &index = taskIndex[type]; + index.store(0); + ReferenceContext &ctx = referenceContext[type]; + for (uint32_t i = 0; i < threadCount; ++i) { + threadPool.AddTask(new (std::nothrow) MplLambdaTask([&index, &ctx, &visitor](size_t) { + while (true) { + uint32_t old = index.fetch_add(1, std::memory_order_relaxed); + if (old < kYoungRefParallelListNum) { + GCProcessGenRefs(ctx.youngs[old], visitor); + } else if (old < (kYoungRefParallelListNum + kAgedRefParallelListNum)) { + GCProcessRefs(ctx.ages[old - kYoungRefParallelListNum], visitor); + } else if (old == (kYoungRefParallelListNum + kAgedRefParallelListNum)) { + GCProcessGenRefs(ctx.working, visitor); + } else { + return; + } + } + })); + } + } + threadPool.Start(); + threadPool.WaitFinish(true); +} + +// RC cycle pattern related, cycle pattern load and save +// 1. MRT_SendCyclePatternJob is invoked when load app cycle pattern, invoked in platform-rt +// 2. MRT_SetPeriodicSaveCpJob, invoked in platform-rt, set save method, after cycle pattern is +// learned, job is set and wait RP thread to process +// 3. 
MRT_SetPeriodicLearnCpJob, similar with period save job, but empty now +static std::atomic_bool hasCPJobs(false); +static LockType cpJobsLock; +static std::deque> cpJobs; // cycle pattern jobs +static bool periodSaveCpJobOn = false; +static std::function periodSaveCpJob = []() {}; + +// Submitting job +extern "C" void MRT_SendBackgroundGcJob(bool force) { + ReferenceProcessor::Instance().NotifyBackgroundGC(force); +} + +extern "C" void MRT_SendCyclePatternJob(function job) { + { + std::lock_guard lock(cpJobsLock); + cpJobs.push_back(std::move(job)); + hasCPJobs = true; + } + ReferenceProcessor::Instance().Notify(false); +} + +extern "C" void MRT_SetPeriodicSaveCpJob(std::function job) { + periodSaveCpJob = std::move(job); + periodSaveCpJobOn = true; +} + +extern "C" void MRT_SendSaveCpJob() { + if (periodSaveCpJobOn) { + MRT_SendCyclePatternJob(periodSaveCpJob); + } +} + +extern "C" void MRT_SetPeriodicLearnCpJob(std::function job ATTR_UNUSED) {} + +static void runAllCyclePatternJobs() { + while (true) { + function job; + { + std::lock_guard lock(cpJobsLock); + if (cpJobs.empty()) { + hasCPJobs = false; + break; + } + job = cpJobs.front(); + cpJobs.pop_front(); + } + job(); + } +} + +static void LogStatsReference(std::unordered_map &countMap, string &&prefix, const uint32_t total) { + constexpr uint32_t topClassNumInRefqueue = 10; + std::vector> vtMap; + for (auto iter = countMap.begin(); iter != countMap.end(); ++iter) { + vtMap.push_back(std::make_pair(iter->first, iter->second)); + } + std::sort(vtMap.begin(), vtMap.end(), + [](const std::pair &x, const std::pair &y) -> bool { + return x.second > y.second; + }); + LOG(INFO) << prefix << "'s top reference:" << maple::endl; + uint32_t count = 0; + uint32_t nullCount = 0; + for (auto iter = vtMap.begin(); iter != vtMap.end() && (count < topClassNumInRefqueue); ++iter, ++count) { + if (iter->first == nullptr) { + nullCount = iter->second; + continue; + } + const char *className = iter->first->GetName(); + LOG(INFO) << prefix << " " << className << " count: " << iter->second << "/" << total << maple::endl; + } + if (nullCount != 0) { + LOG(INFO) << prefix << " NULL ref count in " << nullCount << "/" << total << maple::endl; + } +} + +// Dump Reference Queue and finalizer hot type information. Only stats kMaxCountInRefqueue reference +// 1. stats individually for young/aged reference +// 2. stats referent/fianlizer type +// 3. find hot types and log +// Skip processing working as it might modifing during check and it has small impact on entire stats +// For aged list, it might modifiying while stats here, lock deosn't help. 
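// --- Illustrative sketch (editor-added, not part of this patch) ---
// LogStatsReference above builds a per-class histogram, sorts it in descending order and
// logs the hottest entries. The same pattern with plain std containers is sketched below;
// LogTopCounts, the std::string keys and the topN parameter are stand-ins for the
// MClass*-keyed map and topClassNumInRefqueue used by the runtime.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

inline void LogTopCounts(const std::unordered_map<std::string, uint32_t> &countMap,
                         uint32_t topN, uint32_t total) {
  std::vector<std::pair<std::string, uint32_t>> sorted(countMap.begin(), countMap.end());
  std::sort(sorted.begin(), sorted.end(),
            [](const auto &x, const auto &y) { return x.second > y.second; });
  uint32_t printed = 0;
  for (const auto &entry : sorted) {
    if (printed >= topN) {
      break;
    }
    std::cout << entry.first << " count: " << entry.second << "/" << total << std::endl;
    ++printed;
  }
}
// --- end of sketch ---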
+static __MRT_UNUSED void StatsReference(const uint32_t type) { + constexpr uint32_t maxCountInRefqueue = 4096; + ReferenceContext &ctx = referenceContext[type]; + std::unordered_map refClassCount; + uint32_t count = 0; + LOG(INFO) << kRefTypeMnemonics[type] << " young size : " << ctx.youngCount << + " aged " << ctx.agedCount << maple::endl; + AddressVisitor countReference = [&refClassCount, &count](address_t reference) { + if (reference == 0) { + return; + } + ++count; + address_t referent = MRT_LoadReferentField(reference, + reinterpret_cast(reference + WellKnown::kReferenceReferentOffset)); + if (referent) { + refClassCount[reinterpret_cast(referent)->GetClass()] += 1; + MRT_DecRef(referent); + } else { + refClassCount[NULL] += 1; + } + }; + for (uint32_t i = 0; i < kYoungRefParallelListNum && (count < maxCountInRefqueue); ++i) { + LockGuard guard(ctx.lock); + ManagedForwardList &young = ctx.youngs[i]; + for (TimedRef &timedRef : young) { + countReference(timedRef.ref); + if (count >= maxCountInRefqueue) { + break; + } + } + } + LogStatsReference(refClassCount, string(kRefTypeMnemonics[type]) + "young", count); + + count = 0; + refClassCount.clear(); + for (uint32_t i = 0; i < kAgedRefParallelListNum && (count < maxCountInRefqueue); ++i) { + ManagedForwardList &aged = ctx.ages[i]; + for (address_t ref : aged) { + countReference(ref); + if (count >= maxCountInRefqueue) { + break; + } + } + } + LogStatsReference(refClassCount, string(kRefTypeMnemonics[type]) + "aged", count); +} + +#if __MRT_DEBUG +static atomic outOfMemoryPrintLock(false); +void MRT_logRefqueuesSize() { + if (outOfMemoryPrintLock.exchange(true) == true) { + return; + } + LOG(INFO) << "========log top ref in queues now========" << maple::endl; + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + StatsReference(type); + } + ReferenceProcessor::Instance().LogFinalizeInfo(); + outOfMemoryPrintLock = false; +} +#else +void MRT_logRefqueuesSize() {} +#endif // #if __MRT_DEBUG + +static uint32_t refAddIndex[kRPPhantomRef + 1] = { 0, 0, 0 }; +void AddNewReference(address_t obj, uint32_t classFlag) { + static_assert((kRPWeakRef == 0) && (kRPSoftRef == (kRPWeakRef + 1)) && + (kRPPhantomRef == (kRPSoftRef + 1)), "RP type change"); + SetObjReference(obj); + ScopedObjectAccess soa; + // hold a reference here , use a fast version of MRT_IncRef(objaddr); + (void)AtomicUpdateRC<1, 0, 0>(obj); + uint32_t type = ReferenceProcessor::GetRPTypeByClassFlag(classFlag); + ReferenceContext &ctx = referenceContext[type]; + { + LockGuard guard(ctx.lock); + uint32_t youngIndex = refAddIndex[type] % kYoungRefParallelListNum; + refAddIndex[type] += 1; + ctx.youngs[youngIndex].push_front(TimedRef(obj)); + RCReferenceProcessor::Instance().CountRecent(type); + } + (void)ctx.youngCount.fetch_add(1, std::memory_order_relaxed); +} + +// Clear Reference and invoke enqueu method +// 1. leave safe region because it will modify reference and invoke java method +// 2. clear referent +// 3. 
check reference queu and invoke method +static __attribute__((noinline)) void ClearReferentAndEnqueue(address_t reference) { + ScopedObjectAccess soa; + if (ReferenceGetReferent(reference)) { + MRT_ReferenceClearReferent(reference); + } + ReferenceProcessor::Enqueue(reference); +} + +enum RefStatus { // the status/result of processing a reference + kNothingTodo = 0, + kReferentCleared, + kPromoteToOld, +}; + +static __attribute__((noinline)) RefStatus TryClearReferent(address_t reference, address_t referent, + const uint32_t reftype) { + if (reftype == kRPWeakGRT) { + __MRT_ASSERT(reference == 0, "reference null"); + return kReferentCleared; + } + if ((reftype == kRPSoftRef) || (reftype == kRPWeakRef)) { + __MRT_ASSERT(reference != 0, "reference not null"); + // suppose has multiple reference, like WGRT + ClearReferentAndEnqueue(reference); + return kReferentCleared; + } + if (reftype == kRPPhantomRef) { + __MRT_ASSERT(reference != 0, "reference not null"); + if (!IsObjFinalizable(referent)) { + ClearReferentAndEnqueue(reference); + return kReferentCleared; + } + } + return kNothingTodo; +} + +// do weak collection of references according to weak rc +// referent has local rc in current thread +static __attribute__((noinline)) RefStatus WeakRCCollection(address_t reference, + address_t referent, + const uint32_t reftype) { + if (IsWeakCollected(referent)) { + // weak bit is set + return TryClearReferent(reference, referent, reftype); + } + // If weak RC collect is performed for normal reference, weak grt, + // it has racing with muator, so refernt is loaded and Inced to keep operations safe + // we need sub delta in cycle pattern match and weak collect bit set. + if (AtomicCheckWeakCollectable(referent, 1)) { + // success + if (IsObjFinalizable(referent)) { + ReferenceProcessor::Instance().AddFinalizable(referent, true); + } else { + NRCMutator().WeakReleaseObj(referent); + NRCMutator().DecWeak(referent); + } + return TryClearReferent(reference, referent, reftype); + } else { + // fail + uint32_t referentRc = GetRCFromRCHeader(RCHeader(referent)); + if (IsValidForCyclePatterMatch(GCHeader(referent), referentRc - 1)) { + if (CycleCollector::TryFreeCycleAtMutator(referent, 1, true)) { + return TryClearReferent(reference, referent, reftype); + } + } + } + return kNothingTodo; +} + +// try-free weak global references +void TryFreeWeakGRs() { + ScopedObjectAccess soa; + ReferenceProcessor::Instance().SetProcessingType(kRPWeakGRT); + maple::irtVisitFunc visitor = [&](uint32_t index, address_t obj) { + if (!IS_HEAP_OBJ(obj)) { + return; + } + if (WeakRCCollection(0, obj, kRPWeakGRT) == kReferentCleared) { + maple::GCRootsVisitor::ClearWeakGRTReference(index, MObject::Cast(obj)->AsJobject()); + RCReferenceProcessor::Instance().CountProcessed(); + } + }; + maple::GCRootsVisitor::VisitWeakGRTConcurrent(visitor); +} + +static RefStatus ProcessEmptyRef(address_t reference, const uint32_t reftype, bool young){ + if (reftype == kRPPhantomRef) { + // cleaner doesn't have get method, so no load volatile mask + address_t collectedReferent = ReferenceGetReferent(reference); + if (IS_HEAP_OBJ(collectedReferent)) { + // weak collected bit is set, but finalize is not invoked yet + if (IsObjFinalizable(collectedReferent)) { + return kNothingTodo; + } + (void)young; + } + } + ClearReferentAndEnqueue(reference); + return kReferentCleared; +} + +static RefStatus ProcessMygoteRef(address_t reference){ + std::atomic &referentAddr = AddrToLValAtomic( + reinterpret_cast(reference + 
WellKnown::kReferenceReferentOffset)); + address_t mygoteReferent = referentAddr.load(std::memory_order_acquire); + if (mygoteReferent) { + return kNothingTodo; + } else { + ClearReferentAndEnqueue(reference); + return kReferentCleared; + } +} + +// process a single reference (softRef, phantomeRef) +// return value: +// REFERENT_CLEARED the referent is cleared (reference can be removed now) +// kNothingTodo: the reference cannot be removed +// Note: ref should not be nullptr +template +static RefStatus ProcessRef(address_t reference, const uint32_t reftype) { + if (IsMygoteObj(reference)) { + return ProcessMygoteRef(reference); + } + + // the reference is dead when rc becomes 1 + if (CanReleaseObj<-1, 0, 0>(RCHeader(reference)) == kReleaseObject) { + MRT_ReferenceClearReferent(reference); + return kReferentCleared; + } + + address_t referent = MRT_LoadReferentField(reference, + reinterpret_cast(reference + WellKnown::kReferenceReferentOffset)); + if (referent) { + // need refactored in moving gc + ScopedHandles sHandles; + ObjHandle referentRef(referent); + __MRT_ASSERT(IS_HEAP_ADDR(referent), "off heap referent"); + // policy may not work, cause of soft hard to become old + if (UNLIKELY(reftype == kRPSoftRef && !rpSoftPolicy->ShouldClearSoftRef())) { + if (young) { + SetReferenceActive(reference); + } + return kNothingTodo; + } + if (young && IsReferenceActive(reference)) { + // If reference is recently get, skip its processing + // avoid reclaim valid weak/soft cache too fast + // check after load, make this flag visible after mutator's change + // If this load happens later than muator referent load + ClearReferenceActive(reference); + return kNothingTodo; + } + // concurrent marking skip reference's referent, make it live in concurrent marking + MRT_PreRenewObject(referentRef.AsRaw()); + if (WeakRCCollection(reference, referent, reftype) == kReferentCleared) { + return kReferentCleared; + } + return kNothingTodo; + } else { + return ProcessEmptyRef(reference, reftype, young); + } +} + +// process aged reference queues: currently only for old-gen queues +// i.e., reference with empty referent. This can only be done for old-gen queues +// since reference in young-gen queues may not done initialization and set referent yet. +// GC may put reference in other readyToRemoveQueue and clean it's referent. 
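// --- Illustrative sketch (editor-added, not part of this patch) ---
// ProcessRefs and ProcessGenRefs below remove dead references from a singly linked
// ManagedForwardList, so they must keep an iterator to the node *before* the current one
// and call erase_after on it. The bare idiom, with a hypothetical shouldRemove predicate
// standing in for "referent was cleared", looks like this:
#include <cstdint>
#include <forward_list>

inline void RemoveCleared(std::forward_list<uintptr_t> &refs, bool (*shouldRemove)(uintptr_t)) {
  auto prev = refs.before_begin();
  auto itor = refs.begin();
  while (itor != refs.end()) {
    if (shouldRemove(*itor)) {
      ++itor;                  // advance first; the current node is about to be deleted
      refs.erase_after(prev);  // prev stays valid and still points at the node before itor
    } else {
      prev = itor;
      ++itor;
    }
  }
}
// --- end of sketch ---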
+static void ProcessRefs(ManagedForwardList &refList, const uint32_t reftype, uint32_t &agedCount) { + auto prev = refList.before_begin(); + auto itor = refList.begin(); + while (itor != refList.end()) { + ScopedObjectAccess soa; // avoid from being cleared/rc-changed by GC thread + address_t reference = *itor; + if (reference == 0) { + if (Collector::Instance().IsConcurrentMarkRunning()) { + // soa is a gc point, before modify the list, check if concurrent mark is running + continue; + } + ++itor; + refList.erase_after(prev); + --agedCount; + continue; + } + // RC will not moving, only keep it on stack roots + ScopedHandles sHandles; + ObjHandle referenceRef(reference); + if (ProcessRef(reference, reftype) == kReferentCleared) { + // referent is cleared, can now be removed from queue + RC_RUNTIME_DEC_REF(reference); // release ownership + RCReferenceProcessor::Instance().AddAgedProcessedCount(); + if (Collector::Instance().IsConcurrentMarkRunning()) { + *itor = 0; + continue; + } + + // now gc will not modify the list, we can safely remove the node + ++itor; // increase itor first, since current node will be deleted + refList.erase_after(prev); + --agedCount; + } else { + prev = itor; + ++itor; + } + } +} + +// process cleaner, weak-references (in young-generation working queue) +// return value: +// kReferentCleared: zombie field is cleared (reference can be removed now) +// kPromoteToOld: reference is old enough to move to old gen queue +// kNothingTodo: the reference cannot be removed from queue +// Note: ref.ref should not be nullptr +static __attribute__((noinline)) RefStatus ProcessGenRef(TimedRef &ref, const uint32_t reftype) { + ++(ref.age); + address_t reference = ref.ref; + RefStatus status = ProcessRef(reference, reftype); + if (status != kNothingTodo) { + return status; + } + + if (ref.age > kRefRetireThreshold) { + return kPromoteToOld; + } + return kNothingTodo; +} + +// process reference queues in a generational style +static uint32_t ProcessGenRefs(ManagedForwardList &refList, ManagedForwardList &agedRefList, + const uint32_t reftype, uint32_t &agedCount) { + uint32_t removedCount = 0; + auto prev = refList.before_begin(); + auto itor = refList.begin(); + while (itor != refList.end()) { + ScopedObjectAccess soa; // avoid from being cleared/rc-changed by GC thread + TimedRef &ref = *itor; + address_t reference = ref.ref; + if (reference == 0) { + if (Collector::Instance().IsConcurrentMarkRunning()) { + // soa is a gc point, before modify the list, check if concurrent mark is running + continue; + } + ++itor; + refList.erase_after(prev); + ++removedCount; + continue; + } + ScopedHandles sHandles; + ObjHandle refRef(reference); + RefStatus result = ProcessGenRef(ref, reftype); + if (result == kPromoteToOld) { + if (Collector::Instance().IsConcurrentMarkRunning()) { + continue; + } + // reference is old enough, move it to aged reference list + agedRefList.push_front(reference); + ++itor; // increase itor first, since current node will be deleted + refList.erase_after(prev); + ++agedCount; + ++removedCount; + // prev doesn't need to update + } else if (result == kReferentCleared) { + // reference is ready to be removed from the queue + RC_RUNTIME_DEC_REF(reference); // release ownership + RCReferenceProcessor::Instance().CountProcessed(); + ++removedCount; + if (Collector::Instance().IsConcurrentMarkRunning()) { + ref.ref = 0; + continue; + } + ++itor; // increase itor first, since current node will be deleted + refList.erase_after(prev); + } else { // nothing to do + prev = 
itor; + ++itor; + } + } + return removedCount; +} + +extern "C" void *MRT_CLASSINFO(Ljava_2Flang_2Fref_2FReference_3B); +extern "C" void *MRT_CLASSINFO(Ljava_2Flang_2Fref_2FFinalizerReference_3B); + +// used to sync reference-processor and its creator. +void MRT_WaitProcessReferencesStarted() { + ReferenceProcessor::Instance().WaitStarted(); +} + +// This method is used in qemu unitest (not invoked from zygote) +// keep hungryLimit to 1 might keep rp busy and make mutator not running +extern "C" void MRT_SetReferenceProcessMode(bool immediate) { + if (Collector::Instance().Type() != kNaiveRC) { + return; + } + constexpr uint32_t kUnitTestRPWaitTimeMS = 2; + constexpr uint32_t kUnitTestRPHungryLimit = 5; + constexpr uint32_t kUnitTestRPAgedHungryLimit = 10; + bool immediateRPMode = VLOG_IS_ON(immediaterp); + if (immediate || immediateRPMode) { + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + referenceLimit[i] = 0; + agedReferenceLimit[i] = 0; + } + hungryLimit = kUnitTestRPHungryLimit; + agedHungryLimit = kUnitTestRPAgedHungryLimit; + ReferenceProcessor::Instance().SetIterationWaitTimeMs(kUnitTestRPWaitTimeMS); + } +} + +extern "C" void MRT_StopProcessReferences(bool finalize) { + ReferenceProcessor::Instance().Stop(finalize); + ReferenceProcessor::Instance().Notify(false); +} + +extern "C" void MRT_WaitProcessReferencesStopped() { + ReferenceProcessor::Instance().WaitStop(); +} + +RCReferenceProcessor::RCReferenceProcessor() : ReferenceProcessor() { + iterationWaitTime = kRCEpochIntervalMs; + referenceFlags = 0; + agedReferenceFlags = 0; + hungryCount = 1; + agedHungryCount = 1; + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + numProcessedAgedRefs[i] = 0; + numLastProcessedAgedRefs[i] = 0; + recentCount[i] = 0; + agedReferenceCount[i] = 0; + } +} + +void RCReferenceProcessor::Init() { + ReferenceProcessor::Init(); + InitReferenceFlags(); + referenceFlags = kRPAllFlags; +} + +// Clear RC reference data structure: release queue, discovered reference list +// Move Pending finalizable to fianlizables. 
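// --- Illustrative sketch (editor-added, not part of this patch) ---
// The RC reference processor repeatedly uses a "swap then drain" scheme: mutators push
// into a shared queue under a short lock, and the RP thread swaps that queue into a
// private working queue before processing it lock-free (see asyncReleaseQueue and the
// young reference lists below). A minimal version with std::mutex/std::deque, using
// hypothetical names, is:
#include <cstdint>
#include <deque>
#include <mutex>

class SwapDrainQueue {
 public:
  void Push(uintptr_t obj) {
    std::lock_guard<std::mutex> guard(lock_);
    shared_.push_back(obj);
  }

  // Called by the single processing thread.
  template <typename Handler>
  void Drain(Handler handle) {
    {
      std::lock_guard<std::mutex> guard(lock_);
      working_.swap(shared_);  // O(1); producers never block on the long processing phase
    }
    while (!working_.empty()) {
      uintptr_t obj = working_.front();
      working_.pop_front();
      handle(obj);
    }
  }

 private:
  std::mutex lock_;
  std::deque<uintptr_t> shared_;   // filled by mutators
  std::deque<uintptr_t> working_;  // drained by the reference processor
};
// --- end of sketch ---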
+void RCReferenceProcessor::Fini() { + ReferenceProcessor::Fini(); + __MRT_ASSERT(RPRunning == false, "still running"); + __MRT_ASSERT(workingAsyncReleaseQueue.size() == 0, "working not empty"); + asyncReleaseQueue.clear(); + cpJobs.clear(); +} + +static void inline DiscoverReference(address_t &head, address_t enqueued) { + if (head == 0) { + ReferenceSetPendingNext(enqueued, enqueued); + } else { + ReferenceSetPendingNext(enqueued, head); + } + head = enqueued; +} + +static void inline DiscoverGenRefs(address_t &head, ManagedForwardList &refList, uint32_t &count) { + auto itor = refList.begin(); + auto end = refList.end(); + while (itor != end) { + TimedRef &ref = *itor; + address_t reference = ref.ref; + if (reference != 0 && ReferenceGetReferent(reference) == 0) { + DiscoverReference(head, reference); + ++count; + } + ++itor; + } +} + +static void inline DiscoverRefs(address_t &head, ManagedForwardList &refList, uint32_t &count) { + auto itor = refList.begin(); + auto end = refList.end(); + while (itor != end) { + address_t reference = *itor; + if (reference != 0 && ReferenceGetReferent(reference) == 0) { + DiscoverReference(head, reference); + ++count; + } + ++itor; + } +} + +address_t RCReferenceProcessor::TransferEnquenenableReferenceOnFork(uint32_t type) { + ReferenceContext &ctx = referenceContext[type]; + address_t enqueueables = 0; + uint32_t count = 0; + DiscoverGenRefs(enqueueables, ctx.working, count); + ctx.working.clear(); + for (uint32_t i = 0; i < kYoungRefParallelListNum; ++i) { + DiscoverGenRefs(enqueueables, ctx.youngs[i], count); + ctx.youngs[i].clear(); + } + for (uint32_t i = 0; i < kAgedRefParallelListNum; ++i) { + DiscoverRefs(enqueueables, ctx.ages[i], count); + ctx.ages[i].clear(); + } + return enqueueables; +} + +void RCReferenceProcessor::TransferFinalizblesOnFork(ManagedList &toFinalizables) { + __MRT_ASSERT(workingFinalizables.empty() == true, "working finalizable not empty"); + __MRT_ASSERT(runFinalizations.empty() == true, "working runFinalizations not empty"); + uint32_t transferedFinal = 0; + for (address_t obj : finalizables) { + toFinalizables.push_back(obj); + ++transferedFinal; + } + for (address_t obj : pendingFinalizablesPrev) { + toFinalizables.push_back(obj); + ++transferedFinal; + } + for (address_t obj : pendingFinalizables) { + toFinalizables.push_back(obj); + ++transferedFinal; + } + finalizables.clear(); + pendingFinalizablesPrev.clear(); + pendingFinalizables.clear(); + LOG(INFO) << "TransferFinalizblesOnFork " << transferedFinal << maple::endl; +} + +// Periodically check which reference need be processed +// 1. agedHungryCount reach threshold, process all references +// 2. hungryCount reach threshold, process all young references +// 3. otherwise check if specific type recent count reach threshold +// signle aged reference process will be determined after young is processed +bool RCReferenceProcessor::CheckAndSetReferenceFlags() { + ++agedHungryCount; + ++hungryCount; + if (agedHungryCount > agedHungryLimit) { + referenceFlags = kRPAllFlags; + agedReferenceFlags = kRPAllFlags; + agedHungryCount = 0; + hungryCount = 0; + LOG2FILE(kLogtypeRp) << "[RefProcessor] AgedHungryLimit(" << agedHungryLimit << + ") reached, ready to process all aged references." << std::endl; + } else if (hungryCount > hungryLimit) { + referenceFlags = kRPAllFlags; + agedReferenceFlags = 0; + hungryCount = 0; + LOG2FILE(kLogtypeRp) << "[RefProcessor] HungryLimit(" << hungryLimit << + ") reached, ready to process all references." 
<< std::endl; + } else { + uint32_t flags = 0; + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + if (recentCount[i] > referenceLimit[i]) { + flags |= (1U << i); + recentCount[i] = 0; + } + } + referenceFlags = flags; + agedReferenceFlags = 0; + } + return (referenceFlags != 0 || agedReferenceFlags != 0); +} + +bool RCReferenceProcessor::CheckAndUpdateAgedReference(uint32_t type) { + uint32_t mask = RPMask(type); + if ((agedReferenceFlags & mask) != 0 || agedReferenceCount[type] > agedReferenceLimit[type]) { + agedReferenceCount[type] = 0; + return true; + } + ++(agedReferenceCount[type]); + return false; +} + +void RCReferenceProcessor::LogRefProcessorBegin() { + if (!GCLog().IsWriteToFile(kLogtypeRp)) { + return; + } + timeCurrentRefProcessBegin = timeutils::MicroSeconds(); + char buf[kRPLogBufSize]; // doesn't need initialize + buf[0] = 0; + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + if (referenceFlags & (1U << i)) { + errno_t tmpResult1 = strcat_s(buf, sizeof(buf), kRefTypeMnemonics[i]); + errno_t tmpResult2 = strcat_s(buf, sizeof(buf), " "); + if (UNLIKELY(tmpResult1 != EOK || tmpResult2 != EOK)) { + LOG(ERROR) << "strcat_s() in maplert::logRefProcessorBegin() return " << tmpResult1 << " and " << + tmpResult2 << " rather than 0. " << maple::endl; + } + } + } + LOG2FILE(kLogtypeRp) << "[RefProcessor] Begin " << curRpIndex << " (" << timeutils::GetDigitDate() << + ") : (" << buf << ")" << std::endl; + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + numLastProcessedRefs[i] = numProcessedRefs[i]; + numLastProcessedAgedRefs[i] = numProcessedAgedRefs[i]; + } +} + +void RCReferenceProcessor::LogRefProcessorEnd() { + if (!GCLog().IsWriteToFile(kLogtypeRp)) { + return; + } + LOG2FILE(kLogtypeRp) << "[RefProcessor] Number Processed: "; + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + if (referenceFlags & (1U << i)) { + LOG2FILE(kLogtypeRp) << kRefTypeMnemonics[i] << " " << (numProcessedRefs[i] - numLastProcessedRefs[i]) << + " [" << numProcessedRefs[i] << "] "; + if (numProcessedAgedRefs[i] != numLastProcessedAgedRefs[i]) { + LOG2FILE(kLogtypeRp) << " (aged: " << (numProcessedAgedRefs[i] - numLastProcessedAgedRefs[i]) << + " [" << numProcessedAgedRefs[i] << "]) "; + } + } + } + LOG2FILE(kLogtypeRp) << std::endl; + + uint64_t timeNow = timeutils::MicroSeconds(); + uint64_t timeConsumed = timeNow - timeCurrentRefProcessBegin; + uint64_t totalTimePassed = timeNow - timeRefProcessorBegin; + timeRefProcessUsed += timeConsumed; + float percentage = ((maple::kTimeFactor * timeRefProcessUsed) / totalTimePassed) / kPercentageDivend; + LOG2FILE(kLogtypeRp) << "[RefProcessor] End " << curRpIndex << " (" << timeConsumed << "us" << + " [" << timeRefProcessUsed << "us]" << + " [ " << percentage << "%]" << + std::endl; + ++curRpIndex; +} + +void RCReferenceProcessor::AddAsyncReleaseObj(address_t obj, bool isMutator) { + __MRT_ASSERT(IS_HEAP_OBJ(obj), "Not valid object"); + __MRT_ASSERT(IsRCCollectable(obj), "Not collectable object"); + + if (isMutator) { + __MRT_ASSERT(IsObjResurrectable(obj) == false, "Add finalizable object to release queue in mutator"); + if (UNLIKELY(TLMutator().InSaferegion())) { + LOG(ERROR) << "__MRTMutatorDeferReleaseObj in safe region."; + DumpMutatorsListInfo(true); + } + if (UNLIKELY(Collector::Instance().IsConcurrentMarkRunning())) { + // if concurrent mark is running, can not add object into pending release + // as all object need been recrusively processed. 
+ return; + } + } else { + // GC concurrent sweep + if (UNLIKELY(IsObjResurrectable(obj))) { + AddFinalizable(obj, true); + return; + } + } + { + LockGuard guard(releaseQueueLock); + asyncReleaseQueue.push_back(obj); + } + CountRecent(kRPReleaseQueue); +} + +void RCReferenceProcessor::ClearAsyncReleaseObjs() { + __MRT_ASSERT(WorldStopped(), "Invoke when world not stop"); + asyncReleaseQueue.clear(); + workingAsyncReleaseQueue.clear(); +} + +void RCReferenceProcessor::ProcessAsyncReleaseObjs() { + SetProcessingType(kRPReleaseQueue); + { + // Swap release queue. Leave saferegion to avoid GC visit those changing queues. + ScopedObjectAccess soa; + LockGuard guard(releaseQueueLock); + workingAsyncReleaseQueue.swap(asyncReleaseQueue); + } + LOG2FILE(kLogtypeRp) << "Async Release Queue size " << workingAsyncReleaseQueue.size() << std::endl; + while (true) { + ScopedObjectAccess soa; + // because GC might clear this queue, check and fetch reference from deque in none-safe region + if (workingAsyncReleaseQueue.empty()) { + return; + } + address_t obj = workingAsyncReleaseQueue.front(); // no need in slv because no gc in release + workingAsyncReleaseQueue.pop_front(); + MRT_ReleaseObj(obj); + CountProcessed(); + } +} + +void RCReferenceProcessor::VisitAsyncReleaseObjs(const RefVisitor &vistor) { + for (address_t &obj : workingAsyncReleaseQueue) { + vistor(obj); + } + for (address_t &obj : asyncReleaseQueue) { + vistor(obj); + } +} + +// Visit pending finalizable list for RC +void RCReferenceProcessor::VisitFinalizers(RefVisitor &visitor) { + ReferenceProcessor::VisitFinalizers(visitor); + for (address_t &obj : pendingFinalizables) { + visitor(obj); + } + for (address_t &obj : pendingFinalizablesPrev) { + visitor(obj); + } +} + +// 1. IncRef for objects added into RC finalizable list, otherwise it will be live with RC 0 +// 2. Acquire finalizersLock because mutliple mutator might add finalizable objects spontaneously +// 3. 
Only STW can skip lock +void RCReferenceProcessor::PreAddFinalizables(address_t objs[], uint32_t count, bool needLock) { + for (uint32_t i = 0; i < count; ++i) { + address_t obj = objs[i]; + if (needLock) { + (void)AtomicUpdateRC<1, 0, 0>(obj); + } else { + (void)UpdateRC<1, 0, 0>(obj); + } + CountRecent(kRPFinalizer); + } + if (needLock) { + finalizersLock.lock(); + } else { + __MRT_ASSERT(WorldStopped(), "No lock when not STW"); + } +} + +void RCReferenceProcessor::PostAddFinalizables(address_t objs[], uint32_t count, bool needLock) { + (void)objs; + (void)count; + if (needLock) { + finalizersLock.unlock(); + } +} + +bool RCReferenceProcessor::CheckAndAddFinalizable(address_t obj) { + if (UNLIKELY(IsObjResurrectable(obj))) { + AddFinalizable(obj, true); + return true; + } + return false; +} + +bool RCReferenceProcessor::SpecializedAddFinalizable(address_t) { + return false; +} + +// Processes pending finalize periodically +void RCReferenceProcessor::PostProcessFinalizables() { + if ((referenceProcessorCycles % kPendingFinalizerRpCount) == 0) { + { + ScopedObjectAccess soa; + finalizersLock.lock(); + workingFinalizables.swap(pendingFinalizablesPrev); + pendingFinalizablesPrev.swap(pendingFinalizables); + finalizersLock.unlock(); + } + LOG2FILE(kLogtypeRp) << "referenceProcessorCycles " << referenceProcessorCycles << + " pending finalizer: working size " << workingFinalizables.size() << + " pending finalizer: waiting size " << pendingFinalizablesPrev.size() << + std::endl; + MRT_PHASE_TIMER("PendingFinalizer", kLogtypeRp); + ProcessFinalizablesList(workingFinalizables); + } +} + +// DecRef and weak reference processing for finalizable objects +void RCReferenceProcessor::PostFinalizable(address_t obj) { + if (IsWeakCollected(obj)) { + NaiveRCMutator &mutator = NRCMutator(); + // check if finalize object is ready to be released + // if its strong rc is 1 and weak rc is 1 + if (TotalRefCount(obj) == kWeakRCOneBit + 1) { + mutator.ReleaseObj(obj); + CountProcessed(); + return; + } else if (RefCount(obj) == 1) { + mutator.WeakReleaseObj(obj); + } + // else finalizer might have cycle with other finalizer object or resurrect after fianlize + mutator.DecWeak(obj); + } + // resume the pending DecRef + RC_RUNTIME_DEC_REF(obj); + CountProcessed(); +} + +void RCReferenceProcessor::PreIteration() { + ++referenceProcessorCycles; + ReferenceProcessor::PreIteration(); + if (UNLIKELY(hasCPJobs)) { + MRT_PHASE_TIMER("cycle pattern job", kLogtypeRp); + runAllCyclePatternJobs(); + } + + { + MRT_PHASE_TIMER("release", kLogtypeRp); + ProcessAsyncReleaseObjs(); + } +} + +// RC specific processing after RP iteration +// 1. Process Weak GRT in RC, GC clear weak GRT in STW +// 2. If reach hungry limit, try drain RC release queue +// 3. 
RC specifie GC trigger +// 3.1 Epoch native mode: trigger native GC if binder proxy count exceed: tobe in android +// 3.2 Epoch native mode: check native gc epoch stats and trigger gc +// 3.3 None epoch mode, trigger GC if reference count delta exceed threshold +void RCReferenceProcessor::PostIteration() { + { + MRT_PHASE_TIMER("FreeWeakGRT", kLogtypeRp); + TryFreeWeakGRs(); + } + + // release again in full processing + if (agedHungryCount == 0) { + MRT_PHASE_TIMER("release 2", kLogtypeRp); + ProcessAsyncReleaseObjs(); + } + + if (NativeEpochStats::Instance().isEnabled()) { + MRT_PHASE_TIMER("Epoch Check Native", kLogtypeRp); + NativeEpochStats::Instance().CheckNativeGC(); + } +} + +void RCReferenceProcessor::PreExitDoFinalize() { + LOG(INFO) << "Release all releasing-obj before stopped" << maple::endl; + ProcessAsyncReleaseObjs(); +} + +// Enqueue reference in RC will check before enque reference, steps include +// 1. iterate all reference type: weak, soft, phantom, check if need process current type +// 2. log, setting current processing type and prepare work +// 3. process young refs +// 4. check and process aged refs +static uint32_t youngToAgedIndex = 0; +void RCReferenceProcessor::EnqeueReferences() { + for (uint32_t type = 0; type <= kRPPhantomRef; ++type) { + if (HasReferenceFlag(type) == false) { + continue; + } + ReferenceContext &ctx = referenceContext[type]; + MRT_PHASE_TIMER(kRefTypeMnemonics[type], kLogtypeRp); + LOG2FILE(kLogtypeRp) << kRefTypeMnemonics[type] << ": young " << ctx.youngCount << + " age " << ctx.agedCount << std::endl; + if (type == kRPSoftRef) { + rpSoftPolicy->Init(); + } + SetProcessingType(type); + // processing youngs + { + uint32_t index = (youngToAgedIndex++) % kAgedRefParallelListNum; + uint32_t removeCount = ProcessGenRefs(ctx.working, ctx.ages[index], type, ctx.agedCount); + (void)ctx.youngCount.fetch_sub(removeCount, std::memory_order_relaxed); + } + for (uint32_t i = 0; i < kYoungRefParallelListNum; ++i) { + { + // Exchange current queue and working. Leave saferegion to avoid GC visit those changing queues. + ScopedObjectAccess soa; + ctx.lock.lock(); + ctx.working.swap(ctx.youngs[i]); + ctx.lock.unlock(); + } + uint32_t index = (youngToAgedIndex++) % kAgedRefParallelListNum; + uint32_t removeCount = ProcessGenRefs(ctx.working, ctx.ages[index], type, ctx.agedCount); + (void)ctx.youngCount.fetch_sub(removeCount, std::memory_order_relaxed); + } + if (CheckAndUpdateAgedReference(type)) { + for (uint32_t i = 0; i < kAgedRefParallelListNum; ++i) { + ProcessRefs(ctx.ages[i], type, ctx.agedCount); + } + } + } +} + +bool RCReferenceProcessor::ShouldStartIteration() { + if (processAllRefs.load()) { + referenceFlags = kRPAllFlags; + agedReferenceFlags = kRPAllFlags; + agedHungryCount = 0; + processAllRefs.store(false); + return true; + } + bool hasReferenctToProcess = CheckAndSetReferenceFlags(); + return (hasReferenctToProcess || hasCPJobs.load() || hasBackgroundGC.load()); +} +} diff --git a/src/mrt/compiler-rt/src/collector/rp_base.cpp b/src/mrt/compiler-rt/src/collector/rp_base.cpp new file mode 100644 index 0000000000..9aa6bfbd6b --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/rp_base.cpp @@ -0,0 +1,446 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/rp_base.h" + +#include "chosen.h" +#include "collector/stats.h" +#ifdef __ANDROID__ +#include "collie.h" +#endif + +namespace maplert { +constexpr uint32_t kDefaultGCRPTimeoutMs = 2000; +constexpr uint64_t kNanoPerSecond = 1000L * 1000 * 1000; // 1sec equals 10^9ns +ReferenceProcessor *ReferenceProcessor::instance = nullptr; +MethodMeta *ReferenceProcessor::enqueueMethod = nullptr; + +// Note: can only be called by reference-processing thread +extern "C" void *MRT_ProcessReferences(void *not_used __attribute__((unused))) { + ReferenceProcessor::Instance().Run(); + return nullptr; +} + +void CurrentHeapPolicy::Init() { + size_t freeMem = MRT_MaxMemory() - HeapStats::CurrentSize(); + maxInterval = static_cast(freeMem / (maple::MB)); +} + +void ReferenceProcessor::Create(CollectorType type) { + if (type == kNaiveRCMarkSweep) { + return; + } + __MRT_ASSERT(instance == nullptr, "RP already created"); + if (type == kNaiveRC) { + instance = new (std::nothrow) RCReferenceProcessor(); + if (instance == nullptr) { + LOG(FATAL) << "new RCReferenceProcessor failed" << maple::endl; + } + } else if (type == kMarkSweep) { + instance = new (std::nothrow) GCReferenceProcessor(); + if (instance == nullptr) { + LOG(FATAL) << "new GCReferenceProcessor failed" << maple::endl; + } + } else { + __MRT_ASSERT(false, "Unepxected type"); + } + __MRT_ASSERT(instance != nullptr, "RP created failed"); +} + +// create new GC reference processor to accept information +// Copy ready to qneuque reference from rc list to gc enqueue list +// Copy finalizables to gc reference processor +// Clean RC reference processor +// delete old reference processor +void ReferenceProcessor::SwitchToGCOnFork() { + RCReferenceProcessor *rcRefProcessor = static_cast(instance); + instance = nullptr; + Create(kMarkSweep); + GCReferenceProcessor *gcRefProcessor = static_cast(instance); + // copy ready to enque reference to gc reference processor + for (uint32_t type = kRPWeakRef; type <= kRPPhantomRef; ++type) { + address_t refs = rcRefProcessor->TransferEnquenenableReferenceOnFork(type); + gcRefProcessor->InitEnqueueAtFork(type, refs); + } + // copy finalizables to gc reference processor + ManagedList &toFinalizables = gcRefProcessor->finalizables; + rcRefProcessor->TransferFinalizblesOnFork(toFinalizables); + // clear old rc reference processor + rcRefProcessor->Fini(); + delete rcRefProcessor; +} + +ReferenceProcessor::ReferenceProcessor () { + threadHandle = 0; + RPStarted = false; + RPRunning = false; + doFinalizeOnStop = false; + curProcessingRef = kRPWeakRef; + for (uint32_t i = 0; i < kRPTypeNum; ++i) { + numProcessedRefs[i] = 0; + numLastProcessedRefs[i] = 0; + } + iterationWaitTime = kDefaultGCRPTimeoutMs; + processAllRefs = false; + hasBackgroundGC = false; + forceBackgroundGC = false; + catchBGGcJobTime = 0; + hasBackgroundGC.store(false, std::memory_order_release); + timeRefProcessorBegin = 0; + timeRefProcessUsed = 0; + timeCurrentRefProcessBegin = 0; +} + +bool ReferenceProcessor::IsCurrentRPThread() const { + pthread_t cur = ::pthread_self(); + return threadHandle == cur; +} + +void ReferenceProcessor::Run() { + Init(); + NotifyStrated(); + while 
(RPRunning) { + { + LogRefProcessorBegin(); + PreIteration(); + ProcessFinalizables(); + EnqeueReferences(); + PostIteration(); + LogRefProcessorEnd(); + } + { + MRT_PHASE_TIMER("RP waitting time", kLogtypeRp); + while (RPRunning) { + Wait(iterationWaitTime); + DoChores(); + if (ShouldStartIteration()) { + break; + } + } + } + } + + if (doFinalizeOnStop) { + PreExitDoFinalize(); + LOG(INFO) << "Do finalization before ReferenceProcessor stopped" << maple::endl; + ProcessFinalizables(); + } + LOG(INFO) << "ReferenceProcessor thread stopped" << maple::endl; +} + +void ReferenceProcessor::Init() { + threadHandle = ::pthread_self(); + RPRunning = true; + doFinalizeOnStop = false; + if (enqueueMethod == nullptr) { + enqueueMethod = WellKnown::GetMClassReference()->GetMethod("enqueue", "()Z"); + } + timeRefProcessorBegin = timeutils::MicroSeconds(); + timeRefProcessUsed = 0; + LOG(INFO) << "ReferenceProcessor thread started" << maple::endl; +} + +// Stop Reference Processor is only invoked at Fork or Runtime finliazaiton +// Should only invoke once. +void ReferenceProcessor::Stop(bool finalize) { + __MRT_ASSERT(RPRunning == true, "invalid RP status"); + doFinalizeOnStop = finalize; + RPRunning = false; + forceBackgroundGC = false; + hasBackgroundGC.store(false, std::memory_order_release); +} + +void ReferenceProcessor::WaitStop() { + ScopedEnterSaferegion saferegion; + pthread_t thread = threadHandle; + int tmpResult = ::pthread_join(thread, nullptr); + if (UNLIKELY(tmpResult != 0)) { + LOG(FATAL) << "::pthread_join() in maplert::MRT_WaitProcessReferencesStopped() return " << tmpResult << + " rather than 0. " << maple::endl; + } + RPStarted = false; + threadHandle = 0; +} + +// Invoked before switch collector, need clear all internal data structure +void ReferenceProcessor::PreSwitchCollector() {} + +void ReferenceProcessor::VisitFinalizers(RefVisitor &visitor) { + for (address_t &obj : finalizables) { + visitor(obj); + } + for (address_t &obj : workingFinalizables) { + visitor(obj); + } + for (address_t &obj : runFinalizations) { + visitor(obj); + } +} + +void ReferenceProcessor::VisitGCRoots(RefVisitor &visitor) { + VisitFinalizers(visitor); +} + +void ReferenceProcessor::Notify(bool processAll) { + if (processAll) { + processAllRefs.store(true); + } + wakeCondition.notify_one(); +} + +void ReferenceProcessor::NotifyBackgroundGC(bool force) { + if (forceBackgroundGC == false) { + catchBGGcJobTime = timeutils::NanoSeconds(); + if (force) { + forceBackgroundGC = true; + catchBGGcJobTime += kNanoPerSecond; + } + hasBackgroundGC.store(true, std::memory_order_release); + } else { + __MRT_ASSERT(hasBackgroundGC.load(std::memory_order_acquire) == true, + "force is true but hasBackgroundGC is false"); + } +} + +void ReferenceProcessor::RunBackgroundGC() { + uint64_t curTime = timeutils::NanoSeconds(); + if (hasBackgroundGC.load(std::memory_order_acquire) && + (curTime > catchBGGcJobTime) && (curTime - catchBGGcJobTime) > kNanoPerSecond) { + if (forceBackgroundGC || Collector::Instance().InJankImperceptibleProcessState()) { + Collector::Instance().InvokeGC(kGCReasonTransistBG); + forceBackgroundGC = false; + Collector::Instance().EndStartupPhase(); + LOG(INFO) << "End startup phase" << maple::endl; + } + hasBackgroundGC.store(false, std::memory_order_release); + } +} + +void ReferenceProcessor::Wait(uint32_t timeoutMilliSeconds) { + std::unique_lock lock(wakeLock); + std::chrono::milliseconds epoch(timeoutMilliSeconds); + wakeCondition.wait_for(lock, epoch); +} + +void 
ReferenceProcessor::NotifyStrated() { + { + std::unique_lock lock(startedLock); + __MRT_ASSERT(RPStarted == false, "unpexcted true, reference processor might not wait stopped"); + RPStarted = true; + } + startedCondition.notify_all(); +} + +void ReferenceProcessor::WaitStarted() { + std::unique_lock lock(startedLock); + if (RPStarted) { + return; + } + startedCondition.wait(lock, [this]{ return RPStarted; }); +} + +void ReferenceProcessor::PreIteration() { + if (UNLIKELY(hasBackgroundGC)) { + MRT_PHASE_TIMER("may trigger background gc", kLogtypeRp); + RunBackgroundGC(); + } +} + +void ReferenceProcessor::PreExitDoFinalize() {} + +// do non-rp-related stuff +void ReferenceProcessor::DoChores() { + // trigger heuristic gc + size_t threshold = stats::gcStats->CurGCThreshold(); + size_t allocated = (*theAllocator).AllocatedMemory(); + if (allocated >= threshold) { + Collector::Instance().InvokeGC(kGCReasonHeu); + } +} + +// Add finalizable objects into finalizable list. This could be invoked +// in GC thread or Java mutator (RC cycle pattern), at STW time or running time. +// needLock is true when RC mutator add or multiple GC threads add spontaneously. +// 1. pre hook, collector specific handling: RC/Lock/Assert +// 2. For each obj, set enqued bit, GC barrier, allocator hook +// 3. Add into finalizable list +// 2. PostHook, lock, stats +void ReferenceProcessor::AddFinalizables(address_t objs[], uint32_t count, bool needLock) { + PreAddFinalizables(objs, count, needLock); + + for (uint32_t i = 0; i < count; ++i) { + address_t obj = objs[i]; + __MRT_ASSERT(!IsEnqueuedObjFinalizable(obj), "alredy enqueued"); + + // change status befor pushback. After pushback, this addr is not safe: + // we may change another obj while reference-collector may free it at sametime + SetEnqueuedObjFinalizable(obj); + + // tell collector that the object prepare to renew, this is required for concurrent marking. + // it's safe to invoke at STW, as concurrent marking is false and no action happen + Mutator *mutator = TLMutatorPtr(); + if (mutator != nullptr) { + mutator->SatbWriteBarrier(obj); + } + + // This is the place where object resurrects. + (*theAllocator).OnFinalizableObjResurrected(obj); + + if (SpecializedAddFinalizable(obj) == false) { + finalizables.push_back(obj); + } + } + PostAddFinalizables(objs, count, needLock); +} + +void ReferenceProcessor::AddFinalizable(address_t obj, bool needLock) { + __MRT_ASSERT(!IsMygoteObj(obj), "too late to enqueue mygote finalizable obj"); + AddFinalizables(&obj, 1, needLock); +} + +// Process finalizable list +// 1. always process list head and remove processed fnalizables +// 2. Leave safe region (calling in RP thread) +// 3. timeout processing, finalizer watchdog: tobe in collector-platform +// 4. Invoke finalize method +// 5. 
post processing: collector specific action and stats +void ReferenceProcessor::ProcessFinalizablesList(ManagedList &list) { + auto itor = list.begin(); + while (itor != list.end()) { + // keep GC thread from scanning roots when finalizer list is updating + ScopedObjectAccess soa; + __MRT_ASSERT(!MRT_HasPendingException(), "should not exist pending exception"); + address_t finalizeObj = *itor; + if (IsMygoteObj(finalizeObj)) { + // don't want to touch mygote obj (makes its page private) + // it's okay because mygote finalizables will never be enqueued again (rc overflow) + list.pop_front(); + itor = list.begin(); + continue; + } + { +#ifdef __ANDROID__ + // set up finalizer watch-dog for this finalizer + MplCollieScope mcs(kProcessFinalizeCollie, MPLCOLLIE_FLAG_ABORT, maple::GetTid(), + [](void *finalizer) { + MClass *classInfo = reinterpret_cast(finalizer)->GetClass(); + LOG(ERROR) << "--- calling finalize() on " << finalizer << "(" << + classInfo->GetName() << ") took too long" << maple::endl; + }, reinterpret_cast(finalizeObj)); +#endif // __ANDROID__ + + MClass *classInfo = reinterpret_cast(finalizeObj)->GetClass(); + MethodMeta *finalizerMethod = classInfo->GetFinalizeMethod(); + if (finalizerMethod != nullptr) { + // finalize method return void + (void)finalizerMethod->InvokeJavaMethodFast(reinterpret_cast(finalizeObj)); + } else { + LOG(FATAL) << std::hex << finalizeObj << std::dec << " has no finalize method " << maple::endl; + } + } + MRT_ClearPendingException(); + ClearObjFinalizable(finalizeObj); + PostFinalizable(finalizeObj); + list.pop_front(); + itor = list.begin(); + } +} + +// Invoked from RP thread and should be in safe region +// 1. acquire finalizerProcessingLock, indicate RP is processsing finalizables, sync with runFinalization +// 2. swap finalizables with workingFinalizables +// 3. process working list +// 4. Invoke Post Process for collector specific operations +// 5. release finalizerProcessingLock lock +void ReferenceProcessor::ProcessFinalizables() { + MRT_PHASE_TIMER("Finalizer", kLogtypeRp); + SetProcessingType(kRPFinalizer); + finalizerProcessingLock.lock(); + { + // Exchange current queue and working queue. + // we leave saferegion to avoid GC visit those changing queues. + ScopedObjectAccess soa; + finalizersLock.lock(); + workingFinalizables.swap(finalizables); + finalizersLock.unlock(); + } + + LOG2FILE(kLogtypeRp) << "finalizer: working size " << workingFinalizables.size() << std::endl; + ProcessFinalizablesList(workingFinalizables); + PostProcessFinalizables(); + finalizerProcessingLock.unlock(); +} + +// Invoked from compiled code and should be in none safe region +// 1. wait finalizerProcessingLock, avoid multiple run finalization concurrently and use same runFinalizations list +// 2. swap finalizables with runFinalizations +// 3. process finalizables in runFinalizations +// 4. 
Sync with RP thread with running finalization processing, try get and release finalizerProcessingLock +void ReferenceProcessor::RunFinalization() { +#ifndef __OPENJDK__ + __MRT_CallAndAssertTrue(MRT_EnterSaferegion(), "calling runFinalization from safe region"); +#endif + runFinalizationLock.lock(); + (void)MRT_LeaveSaferegion(); + { + __MRT_ASSERT(runFinalizations.empty(), "not empty run finalization list"); + finalizersLock.lock(); + runFinalizations.swap(finalizables); + finalizersLock.unlock(); + } + ProcessFinalizablesList(runFinalizations); + runFinalizationLock.unlock(); + +#ifdef __OPENJDK__ + finalizerProcessingLock.lock(); +#else + __MRT_CallAndAssertTrue(MRT_EnterSaferegion(), "calling runFinalization from safe region"); + finalizerProcessingLock.lock(); + (void)MRT_LeaveSaferegion(); +#endif + finalizerProcessingLock.unlock(); +} + +void ReferenceProcessor::LogFinalizeInfo() { + LOG(INFO) << " finalizers size : " << finalizables.size() << " working " << workingFinalizables.size() << + " runFinalization " << runFinalizations.size() << maple::endl; +} + +void ReferenceProcessor::Enqueue(address_t reference) { + // clear possible pending exception. + MRT_ClearPendingException(); + // enqueue tells application this reference is now free-able + (void)enqueueMethod->InvokeJavaMethodFast(reinterpret_cast(reference)); + MRT_ClearPendingException(); +} + +void ReferenceProcessor::LogRefProcessorBegin() {} + +void ReferenceProcessor::LogRefProcessorEnd() {} + +bool ReferenceProcessor::ShouldClearReferent(GCReason reason) { + switch (reason) { + case kGCReasonForceGC: + return forceGcSoftPolicy.ShouldClearSoftRef(); + case kGCReasonOOM: + return oomGcSoftPolicy.ShouldClearSoftRef(); + default: + return heapGcSoftPolicy.ShouldClearSoftRef(); + } +} + +void ReferenceProcessor::InitSoftRefPolicy() { + heapGcSoftPolicy.Init(); +} +} diff --git a/src/mrt/compiler-rt/src/collector/stats.cpp b/src/mrt/compiler-rt/src/collector/stats.cpp new file mode 100644 index 0000000000..f4956b83a3 --- /dev/null +++ b/src/mrt/compiler-rt/src/collector/stats.cpp @@ -0,0 +1,250 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/stats.h" +#include "chosen.h" + +namespace maplert { +namespace stats { +ImmortalWrapper gcStats; + +GCStats::GCStats() + : numGcTriggered(0), + totalBytesCollected(0), + recentGcCount(0), + maxBytesCollected(0), + maxStopTheWorldTime(0), + numAllocAnomalies(0), + numRcAnomalies(0), + currentGcThreshold(kInitGCThreshold), + waterLevelLow(kAppGCWaterLevelLow), + waterLevel(kAppGCWaterLevel) {} + +void GCStats::BeginGCRecord() { + __MRT_ASSERT(curRec == nullptr, "curRec is already initialized. 
Please commit the last record."); + curRec = make_unique(); + curRec->isConcurrentMark = false; + curRec->async = false; + curRec->stw1Time = 0; + curRec->stw2Time = 0; + curRec->totalGcTime = 0; + curRec->objectsCollected = 0; + curRec->bytesCollected = 0; + curRec->bytesSurvived = 0; +} + +void GCStats::CommitGCRecord() { + __MRT_ASSERT(curRec != nullptr, "curRec is nullptr. Call BeginGCRecord() first!"); + OnGCFinished(std::move(curRec)); + curRec = nullptr; +} + +void GCStats::OnCollectorInit() { + if (Collector::Instance().Type() == kMarkSweep) { + waterLevel = kGCWaterLevelGCOnly; + } +} + +void GCStats::UpdateStatistics(const unique_ptr &rec) { + ++numGcTriggered; + totalBytesCollected += rec->bytesCollected; + ++recentGcCount; + maxBytesCollected = std::max(rec->bytesCollected, maxBytesCollected); + + // Strictly speaking, we need a compare-and-swap to atomically update this. + // But (1) this field is only updated by the GC thread, and (2) statistics is + // not exact. So we only need to ensure that individual reads and writes are + // atomic so that readers do not see garbage when reading those fields without + // locking. + if (rec->reason != kGCReasonUserNi) { + maxStopTheWorldTime = max(maxStopTheWorldTime.load(), rec->MaxSTWTime()); + } + + size_t maxHeapThreshold = (*theAllocator).GetMaxCapacity() - (maple::MB + maple::MB); + + // a simple heuristic to adjust the threshold according to + // 1. previous garbage ratio. + // 2. current heap capacity vs. max heap capacity. + // if the ratio is high, means we need to lower the water level to do more GC + size_t calculatedThr = static_cast(rec->bytesSurvived * waterLevelLow); + if (rec->reason != kGCReasonTransistBG && !Collector::Instance().InJankImperceptibleProcessState()) { + float totalBytes = static_cast(rec->bytesSurvived + rec->bytesCollected); + float garbageRatio = (static_cast(rec->bytesCollected)) / totalBytes; + // footprint is the heap range we have ever touched + // it's preferable to reduce the footprint from the allocator point of view (less fragmentation) + size_t footprint = (*theAllocator).GetCurrentSpaceCapacity(); + size_t gcAdvance = std::max((*theAllocator).AllocatedMemory(), rec->bytesSurvived) - rec->bytesSurvived; + size_t attemptedThr = static_cast(rec->bytesSurvived * waterLevel); + + if (attemptedThr <= footprint - gcAdvance && garbageRatio <= (1 - 1 / waterLevel) && + footprint <= static_cast(maxHeapThreshold * kHeapWaterLevel)) { + calculatedThr = attemptedThr; + } + } + + // caps and exceptions + size_t maxDeltaThreashold = currentGcThreshold + kMaxGCThresholdDelta; + calculatedThr = std::min(calculatedThr, maxDeltaThreashold); + if (Collector::Instance().InStartupPhase()) { + calculatedThr = static_cast(calculatedThr * kAppStartupHeapHeurstic); + } + LOG2FILE(kLogtypeGc) << "old threshold " << currentGcThreshold << " new threshold " << calculatedThr << std::endl; + currentGcThreshold = std::min(maxHeapThreshold, calculatedThr); +} + +void GCStats::Dump(const unique_ptr &rec) { + // Print a summary of the last GC. + size_t used = CurAllocBytes(); + size_t capacity = CurAllocatorCapacity(); + double utilization = MemoryUtilization(); + constexpr int kOneHundred = 100; + + std::ostringstream ost; + ost << processName.c_str() << " " << Collector::Instance().GetName() << + " GC for " << reasonCfgs[rec->reason].name << ": " << + (rec->async ? 
"async:" : "sync: ") << + "collected objects: " << rec->objectsCollected << + "(" << PrettyOrderInfo(rec->bytesCollected, "B") << "), " << + (utilization * kOneHundred) << "% utilization " << + "(" << PrettyOrderInfo(used, "B") << "/" << PrettyOrderInfo(capacity, "B") << "), " << + "max pause: " << PrettyOrderMathNano(rec->MaxSTWTime(), "s") << ", " << + "total pause: " << PrettyOrderMathNano(rec->TotalSTWTime(), "s") << ", " << + "total GC time: " << PrettyOrderMathNano(rec->totalGcTime, "s") << + maple::endl; + + // This will appear in LogCat. + LOG(INFO) << ost.str(); + + // Utilization is not printed to GCLog. Ensure we also get a copy there so + // that we don't need to collect the numbers from both sides. + LOG2FILE(kLogtypeGc) << "End of GC. GC statistics committed.\n" << + " current total allocated bytes: " << Pretty(used) << "\n" << + " current heap capacity: " << Pretty(capacity) << "\n" << + " heap utilization: " << (utilization * kOneHundred) << "%\n"; +} + +// This function is only called by the GC thread. +void GCStats::OnGCFinished(const unique_ptr &rec) { + UpdateStatistics(rec); + Dump(rec); +} + +void GCStats::OnAllocAnomaly() { + ++numAllocAnomalies; +} + +void GCStats::OnFreeObject(size_t size __attribute__((unused))) const { +} + +void GCStats::OnRCAnomaly() { + ++numRcAnomalies; +} + +size_t GCStats::CurAllocBytes() const { + return (*theAllocator).AllocatedMemory(); +} + +size_t GCStats::CurAllocatorCapacity() const { + return (*theAllocator).GetActualSize(); +} + +size_t GCStats::CurSpaceCapacity() const { + return (*theAllocator).GetCurrentSpaceCapacity(); +} + +size_t GCStats::CurGCThreshold() const { + return currentGcThreshold; +} + +void GCStats::InitialGCThreshold(const bool isSystem) { + if (isSystem) { + currentGcThreshold = kInitSystemGCThreshold; + waterLevelLow = kGCWaterLevelLow; + waterLevel = kGCWaterLevel; + } else { + currentGcThreshold = kInitGCThreshold; + waterLevelLow = kAppGCWaterLevelLow; + waterLevel = kAppGCWaterLevel; + } +} + +void GCStats::InitialGCProcessName() { + const std::string fileName = "/proc/self/status"; + std::ifstream file(fileName); + if (!file.is_open()) { + LOG(ERROR) << "InitialGCProcessName open file failed" << maple::endl; + return; + } + constexpr size_t bufSize = 64; + char buf[bufSize + 1] = { 0 }; + if (file.getline(buf, bufSize + 1, '\n')) { + processName = processName.assign(buf); + processName.erase(0, processName.find_last_of("\t") + 1); + } + file.close(); +} + +uint64_t GCStats::MaxSTWNanos() const { + return maxStopTheWorldTime; +} + +size_t GCStats::NumGCTriggered() const { + return numGcTriggered; +} + +size_t GCStats::AverageMemoryLeak() const { + return recentGcCount ? 
(static_cast(totalBytesCollected / recentGcCount)) : 0; +} + +size_t GCStats::TotalMemoryLeak() const { + return maxBytesCollected; +} + +double GCStats::MemoryUtilization() const { + size_t used = CurAllocBytes(); + size_t capacity = CurAllocatorCapacity(); + if (capacity != 0) { + return static_cast(used) / capacity; + } + return 0; +} + +size_t GCStats::NumAllocAnomalies() const { + // get the number of allocated objects since last time we rest the count + return numAllocAnomalies; +} + +size_t GCStats::NumRCAnomalies() const { + return numRcAnomalies; +} + +void GCStats::ResetMaxSTWNanos() { + maxStopTheWorldTime = 0; +} +void GCStats::ResetNumGCTriggered() { + numGcTriggered = 0; +} +void GCStats::ResetMemoryLeak() { + totalBytesCollected = 0; + recentGcCount = 0; + maxBytesCollected = 0; +} +void GCStats::ResetNumAllocAnomalies() { + numAllocAnomalies = 0; +} +void GCStats::ResetNumRCAnomalies() { + numRcAnomalies = 0; +} +} // namespace stats +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/collie.cpp b/src/mrt/compiler-rt/src/collie.cpp new file mode 100644 index 0000000000..16ea0afee8 --- /dev/null +++ b/src/mrt/compiler-rt/src/collie.cpp @@ -0,0 +1,450 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#ifdef __ANDROID__ +#include "collie.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef LOG_TAG +#define LOG_TAG "MAPLECOLLIE" +#include + +#define USE_LIBC_SEC +#ifdef USE_LIBC_SEC +#include "securec.h" +#endif +#include "chelper.h" + +#define ID_IS_INVALID(x) (UNLIKELY(((x) >= kCollieTypeMax) || ((x) < 0))) + +namespace maplert { +MplCollie mplCollie; + +// let monitor thread try to sleep +// it will sleep if count to kMaxDelayCount and no request from the watched threads +void MplCollie::CollieTrySleep(void) { + if (trCtl.delayCount.load() >= kMaxDelayCount) { + std::unique_lock lock(*trCtl.sleepMutexPtr.get()); + trCtl.threadInSleep = true; + trCtl.sleepCond->wait(lock, [this] { return !(trCtl.threadInSleep && runnable); }); + trCtl.delayCount = 0; + collieTimerRingSTW.timer = 0; + collieTimerRingNonSTW.timer = 0; + } +} + +// initialize a monitor node +void MplCollie::ResetNode(CollieNode &node, int type) { + int savedType = node.type; + if (memset_s(&node, sizeof(CollieNode), 0, sizeof(CollieNode)) != EOK) { + LOG(FATAL) << "ResetNode memset_s not return 0" << maple::endl; + } + node.type = CollieType(type); + node.isUsed = false; + switch (type) { + case kGCCollie: { + node.name = "WaitForGCFinish Collie"; + node.timeout = kMplWaitCheckInterval; + break; + } + case kProcessFinalizeCollie: { + node.name = "Finalizer Collie"; + node.timeout = kMplFinalizerTimeout; + break; + } + case kSTWCollie: { + node.name = "StopTheWorld Collie"; + node.timeout = kMplCollieMaxRecordVal; + break; + } + case kThreadDumpCollie: { + node.name = "ThreadDump Collie"; + node.timeout = kThreadDumpTimeout; + break; + } + 
default: { + node.type = CollieType(savedType); + } + } + int maxPromoteTimeout = ((VLOG_IS_ON(dumpgarbage) || VLOG_IS_ON(dumpheapbeforegc) || VLOG_IS_ON(dumpheapaftergc) || + VLOG_IS_ON(rcverify))) ? kMplWaitHeavyGcTimeout : kMplWaitGcTimeout; + node.promoteTimes = maxPromoteTimeout / kMplWaitCheckInterval; + node.Reset(); +} + +int MplCollie::CallbackShouldLimit(int flag ATTR_UNUSED) { + int ret = 0; + time_t now; + + now = time(nullptr); + if (timeCallback + kMplCollieCallbackTimewinMax < now) { + timeCallback = now; + } else { + if (++nrCallback > kMplCollieCallbackHistoryMax) { + ret = 1; + } + } + + return ret; +} + +void MplCollie::FatalPanicLocked(std::string &msg) { + if (VLOG_IS_ON(dumpgarbage) || VLOG_IS_ON(dumpheapbeforegc) || VLOG_IS_ON(dumpheapaftergc) || VLOG_IS_ON(rcverify)) { + LOG(ERROR) << "dump heap switch or verfiy rc is on, skip maple collie panic"; + return; + } +#if CONFIG_JSAN + LOG(ERROR) << msg << maple::endl; + LOG(ERROR) << "== Skip abort in JSAN version =="; +#else + android_set_abort_message(msg.c_str()); + sleep(2); + abort(); +#endif +} + +void MplCollie::FatalPanicStopTheWorld(std::string &msg) { + std::unique_lock lock(panicMutex); + + std::string dumpMsg; + if (!ID_IS_INVALID(stwID)) { + targetTid = collieNodesArray[stwID].tid; + } else { + targetTid = static_cast(maple::GetTid()); + dumpMsg = "cannot find stw id\n"; + } + maplert::MutatorList::Instance().VisitMutators([this, &dumpMsg](Mutator *mutator) { + if (!mutator->InSaferegion()) { + targetTid = static_cast(mutator->GetTid()); + dumpMsg = dumpMsg + " not in saferegion : " + std::to_string(targetTid); + } + }); + dumpMsg = "tid: " + std::to_string(targetTid) + "\n" + msg + dumpMsg; + FatalPanicLocked(dumpMsg); +} + +static void PromoteThreadPriority(pid_t tid) { + errno = 0; + int32_t priority = getpriority(static_cast(PRIO_PROCESS), tid); + if (UNLIKELY(errno != 0)) { + char errMsg[maple::kMaxStrErrorBufLen]; + (void)strerror_r(errno, errMsg, sizeof(errMsg)); + LOG(ERROR) << "getpriority() in failed with errno " << errno << ": " << errMsg; + return; + } + if (priority + maple::kPriorityPromoteStep > maple::kGCThreadStwPriority) { + priority += maple::kPriorityPromoteStep; + } else if (priority > maple::kGCThreadStwPriority) { + priority = maple::kGCThreadStwPriority; + } else { + return; + } + MRT_SetThreadPriority(tid, priority); +} + +void MplCollie::FatalPanic(std::string &msg, int tid) { + std::unique_lock lock(panicMutex); + msg = "tid: " + std::to_string(tid) + "\n" + msg; + FatalPanicLocked(msg); +} + +// trigger callback and caller-defined callback +void MplCollie::RunCallback(CollieNode &cb) { + if (CallbackShouldLimit(cb.flag)) { + LOG(ERROR) << "Too many callback triggerd in a short time!" 
<< maple::endl; + return; + } + + if (cb.callback) { + cb.callback(cb.arg); + } + + if (static_cast(cb.flag) & MPLCOLLIE_FLAG_ABORT) { + std::string msg = cb.name; + msg += " took too long: from "; + msg += std::to_string(cb.startTime); + msg += " to "; + msg += std::to_string(time(nullptr)); + if (MPLCOLLIE_FLAG_IS_STW(static_cast(cb.flag))) { + FatalPanicStopTheWorld(msg); + } else { + FatalPanic(msg, cb.tid); + } + } +} + +void MplCollie::CheckTimerRing(CollieNode callbackList[], CollieTimerRing &r, int &count) { + r.timer += kTimerRingCheckInterval; + CollieNode *cur = r.cl.GetHead(); + while (cur != nullptr && r.timer > cur->timeout) { + CollieNode *node = cur; + cur = cur->next; + r.cl.Remove(*node); + if ((static_cast(node->flag) & MPLCOLLIE_FLAG_PROMOTE_PRIORITY) && node->promoteTimes > 0) { + PromoteThreadPriority(node->tid); + node->timeout = r.timer + kMplWaitCheckInterval; + --(node->promoteTimes); + r.cl.Insert(*node); + } else { + if (stwID == node->type) { + stwID = kInvalidId; + } else { + callbackList[count] = *node; + ++count; + } + int type = node->type; + ResetNode(*node, type); + } + } +} + +// check the current timer ring position and run all the callbacks +void MplCollie::TimerRingTimeout() { + CollieNode callbackList[kCollieTypeMax]; + + int count = 0; + { + std::unique_lock lock(listMutex); + bool isEmpty = true; + // point to the current list timeout in timer ring + if (!collieTimerRingSTW.cl.IsEmpty()) { + CheckTimerRing(callbackList, collieTimerRingSTW, count); + isEmpty = false; + } + // if in stop the world, then stop the nonstw timer walking + if (ID_IS_INVALID(stwID)) { + if (!collieTimerRingNonSTW.cl.IsEmpty()) { + CheckTimerRing(callbackList, collieTimerRingNonSTW, count); + isEmpty = false; + } + } + if (isEmpty) { + ++(trCtl.delayCount); + } else { + trCtl.delayCount = 0; + } + } + // run timeout callback + for (int i = 0; i < count; ++i) { + LOG(ERROR) << "Trigger " << callbackList[i].name << " Callback Function (start time: " << + std::to_string(callbackList[i].startTime) << ")" << maple::endl; + RunCallback(callbackList[i]); + } +} + +// monitor thread main loop +void *MplCollie::CollieThreadHandle() { + pthread_setname_np(pthread_self(), "MapleCollie"); + + while (runnable) { + { + std::unique_lock lock(timerMutex); + timerCond.wait_for(lock, std::chrono::seconds(kTimerRingCheckInterval), [this] { return !runnable; }); + } + // timeout process + // step 1: get timeout info and run callback + TimerRingTimeout(); + + // step 2: goto sleep if needed + // if wake up from sleep state, time should be re-calc + CollieTrySleep(); + } + return nullptr; +} + +// force collie thread to end. +void MplCollie::ForceEnd() { + std::unique_lock lock(timerMutex); + runnable = false; + timerCond.notify_one(); +} + +void MplCollie::JoinThread() { + int ret = ::pthread_join(collieThread, nullptr); + if (UNLIKELY(ret != 0)) { + LOG(FATAL) << "failed to join maple collie thread!" 
<< maple::endl; + } +} + +void* MplCollie::CollieThreadEntry(void *arg) { + if (arg != nullptr) { + MplCollie *self = reinterpret_cast(arg); + self->CollieThreadHandle(); + } + + return nullptr; +} + +// initializing the data struct +void MplCollie::InitImp(void) { + trCtl.threadInSleep = false; + trCtl.delayCount = 0; + trCtl.sleepMutexPtr = std::make_unique(); + trCtl.sleepCond = std::make_unique(); + + for (int i = 0; i < kCollieTypeMax; ++i) { + ResetNode(collieNodesArray[i], i); + } + + collieTimerRingSTW.timer = 0; + collieTimerRingSTW.cl.Init(); + + collieTimerRingNonSTW.timer = 0; + collieTimerRingNonSTW.cl.Init(); + + stwID = kInvalidId; + // wait until last collie thread exits successfully. + runnable = true; + int ret = pthread_create(&collieThread, nullptr, MplCollie::CollieThreadEntry, this); + if (ret != EOK) { + LOG(ERROR) << "pthread_create return fail, MplCollie::InitImp" << maple::endl; + } +} + +// try wake up the monitor thread if it is sleeping +void MplCollie::CollieTryWake(void) { + std::unique_lock lock(*trCtl.sleepMutexPtr.get()); + if (trCtl.threadInSleep) { + trCtl.threadInSleep = false; + trCtl.sleepCond->notify_one(); + } +} + +void MplCollie::Init(void) { + std::unique_lock lock(initMutex); + if (UNLIKELY(!runnable)) { + InitImp(); + } +} + +void MplCollie::Fini() { + ForceEnd(); + { + std::unique_lock lock(listMutex); + collieTimerRingSTW.cl.Init(); + collieTimerRingNonSTW.cl.Init(); + } + CollieTryWake(); +} + +bool MplCollie::GetSTWPanic(void) { + if (ID_IS_INVALID(stwID)) { + return true; + } + CollieNode *node = &collieNodesArray[stwID]; + return ((static_cast(node->flag) & MPLCOLLIE_FLAG_ABORT) != 0); +} + +void MplCollie::SetSTWPanic(bool enable) { + if (ID_IS_INVALID(stwID)) { + return; + } + LOG(ERROR) << "Set STW Panic: " << (enable ? "true" : "false") << maple::endl; + CollieNode *node = &collieNodesArray[stwID]; + if (enable) { + node->flag = static_cast(static_cast(node->flag) | MPLCOLLIE_FLAG_ABORT); // set abort flag + } else { + node->flag = static_cast(static_cast(node->flag) & (~MPLCOLLIE_FLAG_ABORT)); // clear abort flag + } +} + +// fill the collie node, and put it into the timer ring +// @param flag used to control whether to call the FatalPanicLocked or PromoteThreadPriority function after timeout +// @param tid thread id being monitored +// @param func callback called after timeout +// @param arg args for callback +int MplCollie::Start(CollieType type, int flag, pid_t tid, void (*func)(void*), void *arg) { + if (UNLIKELY(!runnable)) { + return kInvalidId; + } + std::unique_lock lock(listMutex); + + CollieNode *node = &(collieNodesArray[type]); + if (node->isUsed) { + LOG(ERROR) << node->name << "node is in used" << maple::endl; + return kInvalidId; + } + node->Reset(); + + // fill node info + node->startTime = time(nullptr); + node->tid = tid; + node->arg = arg; + node->flag = flag; + node->callback = func; + + // add to timer ring + CollieTimerRing *r = MPLCOLLIE_FLAG_IS_STW(static_cast(node->flag)) ? &collieTimerRingSTW + : &collieTimerRingNonSTW; + node->timeout += r->timer; + r->cl.Insert(*node); + node->isUsed = true; + + if (static_cast(flag) & MPLCOLLIE_FLAG_FOR_STW) { + stwID = node->type; + } + + CollieTryWake(); // Wake up the collie monitor thread when inserting at the front. 
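+  // Also clear the delay counter so the monitor thread does not drift back to sleep
+  // right after this request has been queued.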
+ trCtl.delayCount = 0; + return node->type; +} + +void MplCollie::End(int type) { + if (UNLIKELY(!runnable)) { + return; + } + + if (ID_IS_INVALID(type)) { + LOG(ERROR) << "MplCollie::End : not valid type: " << type << maple::endl; + return; + } + + // get node from timer ring, add to free list + std::unique_lock lock(listMutex); + + if (!collieNodesArray[type].isUsed) { + LOG(ERROR) << "MplCollie::End : already release type: " << type << maple::endl; + return; + } + CollieNode *node = &(collieNodesArray[type]); + + if (static_cast(node->flag) & MPLCOLLIE_FLAG_FOR_STW) { + stwID = kInvalidId; + } + + // remove from timer ring + CollieTimerRing *r = MPLCOLLIE_FLAG_IS_STW(static_cast(node->flag)) ? &collieTimerRingSTW + : &collieTimerRingNonSTW; + r->cl.Remove(*node); + + ResetNode(*node, type); +} +} +#endif diff --git a/src/mrt/compiler-rt/src/errno_utils.cpp b/src/mrt/compiler-rt/src/errno_utils.cpp new file mode 100644 index 0000000000..8df489ea75 --- /dev/null +++ b/src/mrt/compiler-rt/src/errno_utils.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "errno_utils.h" + +#include +#include // Not . strerror_r is a POSIX function not in the C++ standard. + +namespace maplert { +// NOTE: We use strerror_r, because strerror is not thread-safe. The man page +// of strerror_l says it is thread-safe, but the man page contradicts with +// itself. +// +// Since we don't know whether we are using the XSI-compliant version or the GNU +// version of strerror_r, we use C++ function overloading to detect which +// strerror_r is provided. +// +// See: man strerror +using XSIStrErrorRType = int(*)(int, char*, size_t); +using GNUStrErrorRType = char*(*)(int, char*, size_t); + +// The XSI-compliant version +std::string DoErrnoToString(XSIStrErrorRType theStrErrorRFunction, int errNum, char *buf, size_t bufSize) { + std::string message; + + int result = theStrErrorRFunction(errNum, buf, bufSize); + if (result == 0) { + message = buf; + } else { + int anotherErrnum = errno; + + message = "Error while calling XSI-compliant strerror_r: "; + + switch (anotherErrnum) { + case EINVAL: + message += "The value of errNum is not a valid error number."; + break; + case ERANGE: + message += "Insufficient storage was supplied to contain the error description string."; + break; + default: + message += "Unexpected errno from strerror_r: " + std::to_string(anotherErrnum); + break; + } + } + + return message; +} + +// The GNU version +std::string DoErrnoToString(GNUStrErrorRType theStrErrorRFunction, int errNum, char *buf, size_t bufSize) { + std::string message; + + char *result = theStrErrorRFunction(errNum, buf, bufSize); + // On error, result will point to something like "Unkonwn error nnn". + message = result; + + return message; +} + +// Get the error message for an errno as a std::string. +// +// @param errNum The error code, from the errno macro. +// +// @return The error description string represented as an std::string. Handles +// strerror_r errors internally. 
+std::string ErrnoToString(int errNum) { + static constexpr size_t kErrorBufferSize = 256; + char buf[kErrorBufferSize]; // Cannot use std::string until C++17. Need to write to the buffer. + + // Because of function overloading, the C++ compiler will choose the + // appropriate DoErrnoToString variant depending on the actual type of + // ::strerror_r. + std::string message = DoErrnoToString(::strerror_r, errNum, buf, kErrorBufferSize); + + return message; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/exception/eh_personality.cpp b/src/mrt/compiler-rt/src/exception/eh_personality.cpp new file mode 100644 index 0000000000..b4149967e7 --- /dev/null +++ b/src/mrt/compiler-rt/src/exception/eh_personality.cpp @@ -0,0 +1,516 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "exception/eh_personality.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include "exception/mrt_exception.h" +#include "exception/mpl_exception.h" +#include "linker_api.h" + +namespace maplert { +const uint8_t TTYPE_ENCODING = 0x9B; +const uint8_t LP_START_ENCODING = 0xFF; +const uint8_t CALL_SITE_ENCODING = 0x01; + +bool EHTable::CanCatch(const MrtClass catchType, const void *exObj) { + if (exObj == nullptr) { + EHLOG(FATAL) << "exception object is null" << maple::endl; + } + return reinterpret_cast(exObj)->IsInstanceOf(*reinterpret_cast(catchType)); +} + +uintptr_t EHTable::ReadSData4(const uint8_t **p) const { + uint32_t value; + size_t size = sizeof(uint32_t); + errno_t result = memcpy_s(&value, size, *p, size); + if (result != EOK) { + EHLOG(ERROR) << "memcpy_s() in ReadSData4() return " << result << "rather than 0." 
<< maple::endl; + return reinterpret_cast(nullptr); + } + *p += size; + return static_cast(value); +} + +uintptr_t EHTable::ReadULEB128(const uint8_t **data) const { + uintptr_t result = 0; + uintptr_t shift = 0; + const uintptr_t shiftLength = 7; + unsigned char byte; + const uint8_t *p = *data; + do { + byte = *p++; + result |= static_cast(byte & 0x7F) << shift; + shift += shiftLength; + } while (byte & 0x80); + *data = p; + return result; +} + +uintptr_t EHTable::ReadTypeData(const uint8_t **data, bool use32Ref) { + const uint8_t *typePoint = *data; + // first get value + uintptr_t result = ReadSData4(&typePoint); + // then add relative offset + if (result != 0) { + result += reinterpret_cast(*data); + } + if (result != 0) { + if (use32Ref) { +#ifdef LINKER_LAZY_BINDING + (void)MRT_RequestLazyBindingForInitiative(reinterpret_cast(result)); +#endif // LINKER_LAZY_BINDING + result = static_cast(*(reinterpret_cast(result))); + } else { +#ifdef LINKER_LAZY_BINDING + (void)MRT_RequestLazyBindingForInitiative(reinterpret_cast(result)); +#endif // LINKER_LAZY_BINDING + result = *(reinterpret_cast(result)); + } + } + *data = typePoint; + return result; +} + +MrtClass EHTable::GetMplTypeInfo(uintptr_t tTypeIndex, const uint8_t *classInfo) { + if (classInfo == nullptr) { + EHLOG(FATAL) << "classInfo is 0 in GetMplTypeInfo" << maple::endl;; + } +#if defined(__aarch64__) + if (UINT64_MAX / sizeof(int) < tTypeIndex) { +#elif defined(__arm__) + if (UINT32_MAX / sizeof(int) < tTypeIndex) { +#endif + EHLOG(FATAL) << "tTypeIndex * sizeof(int) > UINT64_MAX" << maple::endl; + } else { + tTypeIndex *= sizeof(int); + } + classInfo -= tTypeIndex; +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + return reinterpret_cast(ReadTypeData(&classInfo, true)); +#else + return reinterpret_cast(ReadTypeData(&classInfo)); +#endif // LINKER_32BIT_REF_FOR_DEF_UNDEF +} + +extern "C" { +ATTR_NO_SANITIZE_ADDRESS +static void SetRegisters(const _Unwind_Exception &unwindException, const _Unwind_Context &context, + const ScanResults &results) { + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(unwindException); + +#if defined(__arm__) + const _Unwind_Exception *unwindExceptionAddress = &unwindException; + _Unwind_VRS_Set(const_cast<_Unwind_Context*>(&context), _UVRSC_CORE, kR0, _UVRSD_UINT32, + reinterpret_cast(&unwindExceptionAddress)); + _Unwind_VRS_Set(const_cast<_Unwind_Context*>(&context), _UVRSC_CORE, kR1, _UVRSD_UINT32, + &exceptionHeader->tTypeIndex); + _Unwind_VRS_Set(const_cast<_Unwind_Context*>(&context), _UVRSC_CORE, kPC, _UVRSD_UINT32, + const_cast(&results.landingPad)); +#else + _Unwind_SetGR(const_cast<_Unwind_Context*>(&context), __builtin_eh_return_data_regno(0), + reinterpret_cast(&unwindException)); + _Unwind_SetGR(const_cast<_Unwind_Context*>(&context), __builtin_eh_return_data_regno(1), + static_cast(exceptionHeader->tTypeIndex)); + _Unwind_SetIP(const_cast<_Unwind_Context*>(&context), results.landingPad); +#endif +} +} // extern "C" + +// maple exception table: +// +// No exception function >> +// .word 0xFFFFFFFF +// .word .Label.Cleanup +// +// No frame function(leaf function) >> +// .word 0x55555555 +// +// exception function >> +// .word mplETableOffset +// .byte lpStartEncoding +// .byte ttypeEncoding +// .uleb128 classInfoOffset +// .byte callSiteEncoding +// .uleb128 callSiteTableLength +// Call Site Table +// .uleb start +// .uleb length +// .uleb ladingPad +// .uleb actionEntry +// .... +// Action Table +// .byte tTypeIndex +// .byte ttypeOffset +// .... 
+// Exception Class Info +// .4byte class info +// + +void EHTable::ParseEHTableHeadInfo(const uint8_t *mLSDA) { + if (mLSDA == nullptr) { + LOG(FATAL) << "mLSDA is nullptr in function ScanExceptionTable for method" << maple::endl; + } + uint8_t lpStartEncoding = *mLSDA++; + MplCheck(lpStartEncoding == LP_START_ENCODING); + + uint8_t ttypeEncoding = *mLSDA++; + MplCheck(ttypeEncoding == TTYPE_ENCODING); + uintptr_t classInfoOffset = ReadULEB128(&mLSDA); + classInfoPoint = mLSDA + classInfoOffset; + + // Walk call-site table looking for range that includes current PC. + uint8_t callSiteEncoding = *mLSDA++; + MplCheck(callSiteEncoding == CALL_SITE_ENCODING); + uint32_t callSiteTableLength = static_cast(ReadULEB128(&mLSDA)); + callSiteTableStart = mLSDA; + callSiteTableEnd = callSiteTableStart + callSiteTableLength; + + actionTableStart = callSiteTableEnd; + curPtr = mLSDA; +} + +void EHTable::ScanExceptionTable(UnwindAction actions, bool nativeException, + const _Unwind_Exception &unwindException) { + MplCheck(nativeException); + // actions must be either _UA_SEARCH_PHASE or _UA_SEARCH_PHASE. + // either action shares the same scanning strategy. + MplCheck((static_cast(actions) & static_cast(kSearchPhase)) || + (static_cast(actions) & static_cast(kCleanupPhase))); + + if (type == kNoException || type == kNoFrame) { + return; + } + + uintptr_t ip = reinterpret_cast(currentPC); + // we should always adjust ip by -1 to make it point to the instruction which + // raises the signal or performs a function call, because this is the precise + // site triggering an exception. + // Note in PrepareToHandleJavaSignal we have already bias the pc to the next + // instruction of the instruction which raises a signal, like what bl instruction + // does. so it is safe to do so. + ip -= kPrevInsnOffset; + + // Get beginning current frame's code (as defined by the emitted dwarf code) + uintptr_t funcStart = reinterpret_cast(funcStartPoint); + uintptr_t ipOffset = ip - funcStart; + + jthrowable thrownObject = GetThrownObject(unwindException); + MplCheck(thrownObject != nullptr); + + while (curPtr < callSiteTableEnd) { + // There is one entry per call site. + // The call sites are non-overlapping in [start, start+length) + // The call sites are ordered in increasing value of start + uintptr_t start = ReadULEB128(&curPtr); + uintptr_t length = ReadULEB128(&curPtr); + uintptr_t landingPad = ReadULEB128(&curPtr); + uintptr_t actionEntry = ReadULEB128(&curPtr); + if ((start <= ipOffset) && (ipOffset < (start + length))) { + // Found the call site containing ip. + if (landingPad == 0) { + // No handler here + results.unwindReason = _URC_CONTINUE_UNWIND; + return; + } + landingPad = reinterpret_cast(funcStartPoint) + landingPad; + if (actionEntry == 0) { + // Found a cleanup + SetScanResultsValue(0, landingPad, _URC_HANDLER_FOUND, false); + return; + } + // Convert 1-based byte offset into + const uint8_t *action = actionTableStart + (actionEntry - 1); + // Scan action entries until you find a matching handler, or the end of action list. + // action list is a sequence of pairs (type index, offset of next action). + // if offset of next action is 0, the end of current action list is reached. + // right now type index would not be 0 for maple exception handling. 
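+      // Each iteration below reads one (type index, next-offset) pair; a catch type for which
+      // CanCatch() accepts the thrown object ends the search, while a zero offset breaks out
+      // and lets the outer loop continue with the next call-site entry.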
+ for (;;) { + uintptr_t tTypeIndex = *action++; + MplCheck(tTypeIndex != 0); + if (tTypeIndex > 0) { + // this is a catch (T) clause + const MrtClass catchType = GetMplTypeInfo(static_cast(tTypeIndex), + classInfoPoint); + MplCheck(catchType != nullptr); + if (CanCatch(catchType, thrownObject)) { + // Found a matching handler + // Save state and return _URC_HANDLER_FOUND + SetScanResultsValue(tTypeIndex, landingPad, _URC_HANDLER_FOUND, true); + return; + } + } + + const uint8_t actionOffset = *action; + // reach the end of action list + if (actionOffset == 0) { + // End of action list, no matching handler or cleanup found, + // which means the thrown exception can not be caught by this catch clause. + // For C++, we will return _URC_CONTINUE_UNWIND. + // For maple, we will continue to search call site table until we reach + // the last call site entry which is the cleanup entry. + break; + } else if (actionOffset == 125) { + // According to the sleb encoding format, when actionOffset is equal to 125, + // it is equivalent to action -3, and the parsing process of sleb reading is omitted here. + action -= 3; + } + } // there is no break out of this loop, only return end of while + } + } // there might be some tricky cases which break out of this loop + + // It is possible that no eh table entry specify how to handle + // this exception. By spec, terminate it immediately. + EHLOG(FATAL) << "ScanExceptionTable failed" << maple::endl; +} + +// set ScanResults Value +void EHTable::SetScanResultsValue(const uintptr_t tTypeIndex, const uintptr_t landingPad, + const UnwindReasonCode unwindReason, const bool caughtByJava) { + results.tTypeIndex = tTypeIndex; + results.landingPad = landingPad; + results.unwindReason = unwindReason; + results.caughtByJava = caughtByJava; +} + +extern "C" { +static void DumpInfoPersonality(int version, const _Unwind_Exception &unwindException, int line) { + EHLOG(ERROR) << "personality error line [" << line << "]" << maple::endl; + EHLOG(ERROR) << "version: " << version << maple::endl; + MRT_DumpException(GetThrownObject(unwindException), nullptr); + MplDumpStack("\n DumpInfoPersonality"); +} + +#if defined(__aarch64__) +static void DumpFrameInfo(UnwindAction actions, const _Unwind_Context &context) { + Dl_info addrInfo; + addrInfo.dli_sname = nullptr; + std::string unwindAction; + if (static_cast(actions) & static_cast(kSearchPhase)) { + unwindAction = "unwind actions: kSearchPhase "; + } + if (static_cast(actions) & static_cast(kCleanupPhase)) { + unwindAction = "unwind actions: kCleanupPhase "; + } + uintptr_t pc; + pc = _Unwind_GetIP(const_cast<_Unwind_Context*>(&context)); + int dladdrOk = dladdr(reinterpret_cast(pc), &addrInfo); + if (dladdrOk && addrInfo.dli_sname) { + EHLOG(INFO) << unwindAction << "function name of current frame: " << addrInfo.dli_sname << maple::endl; + } else { + EHLOG(INFO) << unwindAction << "pc of current frame: 0x" << std::hex << pc << std::dec << maple::endl; + } +} + +UnwindReasonCode __n2j_stub_personality(int version, UnwindAction actions, uintptr_t exceptionClass, + const _Unwind_Exception *unwindException, const _Unwind_Context *context) { + if (unwindException == nullptr) { + EHLOG(FATAL) << "unwindException is nullptr!" 
<< maple::endl; + } + if (version != 1 || context == nullptr) { + DumpInfoPersonality(version, *unwindException, __LINE__); + return _URC_FATAL_PHASE1_ERROR; + } + bool nativeException = (exceptionClass & kGetVendorAndLanguage) == (kOurExceptionClass & kGetVendorAndLanguage); + + if (nativeException && (static_cast(actions) & static_cast(kCleanupPhase))) { + // that n2j stub is the last chance to handle java exception. + // so we pretend that n2j frame catches this exception. + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(*unwindException); + exceptionHeader->tTypeIndex = 0; + ScanResults results; + uintptr_t ip; + ip = _Unwind_GetIP(const_cast<_Unwind_Context*>(context)); + if ((ip & 1) == 1) { + ip--; + } + results.landingPad = reinterpret_cast(ip); + + // when the stack backtracks to native frame, print the java stack and all the stack + // the five line can be deleted for performance + if (VLOG_IS_ON(eh)) { + MRT_DumpException(GetThrownObject(*unwindException)); + MplDumpStack("call stack for non-caught exception:"); + } + + MRT_ThrowExceptionUnsafe(GetThrownObject(*unwindException)); + MRT_ClearThrowingException(); + + // set the continuation with the handler + SetRegisters(*unwindException, *context, results); + free(GetThrownException(*unwindException)); + return _URC_INSTALL_CONTEXT; + } + + // should not reach here + DumpInfoPersonality(version, *unwindException, __LINE__); + return _URC_FATAL_PHASE1_ERROR; +} +#elif defined(__arm__) +UnwindReasonCode __n2j_stub_personality(_Unwind_State state, _Unwind_Exception *unwindException, + _Unwind_Context *context) { + if (unwindException == nullptr) { + EHLOG(FATAL) << "unwindException is nullptr!" << maple::endl; + } + if (context == nullptr) { + DumpInfoPersonality(state, *unwindException, __LINE__); + return _URC_FATAL_PHASE1_ERROR; + } + + if (state == (_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND)) { + constexpr int contextFPOffset = 12; + constexpr int contextPCOffset = 16; + uintptr_t *callerFP = *(reinterpret_cast(context) + contextFPOffset); + uintptr_t *currentPC = reinterpret_cast(context) + contextPCOffset; + *currentPC = *(callerFP + 1); + uintptr_t *currentFP = reinterpret_cast(context) + contextFPOffset; + *currentFP = *callerFP; + return _URC_CONTINUE_UNWIND; + } + // that n2j stub is the last chance to handle java exception. + // so we pretend that n2j frame catches this exception. + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(*unwindException); + exceptionHeader->tTypeIndex = 0; + ScanResults results; + uintptr_t ip; + _Unwind_VRS_Get(const_cast<_Unwind_Context*>(context), _UVRSC_CORE, kPC, _UVRSD_UINT32, &ip); + if ((ip & 1) == 1) { + ip--; + } + results.landingPad = reinterpret_cast(ip); + + // when the stack backtracks to native frame, print the java stack and all the stack + // the five line can be deleted for performance + if (VLOG_IS_ON(eh)) { + MRT_DumpException(GetThrownObject(*unwindException)); + MplDumpStack("call stack for non-caught exception:"); + } + + MRT_ThrowExceptionUnsafe(GetThrownObject(*unwindException)); + MRT_ClearThrowingException(); + + // set the continuation with the handler + SetRegisters(*unwindException, *context, results); + free(GetThrownException(*unwindException)); + return _URC_INSTALL_CONTEXT; +} +#endif + +#if defined(__aarch64__) +// this personality is invoked to enter the general handler of the java frame on stack top. 
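+// It scans the frame's maple EH table for the current pc; when a handler or cleanup landing pad is
+// found it primes the EH data registers (exception object and tTypeIndex) plus the landing pad via
+// SetRegisters and returns _URC_INSTALL_CONTEXT, otherwise it returns _URC_CONTINUE_UNWIND so the
+// unwinder proceeds to the caller frame.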
+UnwindReasonCode __mpl_personality_v0(int version, UnwindAction actions, uintptr_t exceptionClass, + const _Unwind_Exception *unwindException, const _Unwind_Context *context) { + if (unwindException == nullptr) { + EHLOG(FATAL) << "unwindException is nullptr!" << maple::endl; + } + if (version != 1 || context == nullptr) { + DumpInfoPersonality(version, *unwindException, __LINE__); + return _URC_FATAL_PHASE1_ERROR; + } + + bool nativeException = (exceptionClass & kGetVendorAndLanguage) == (kOurExceptionClass & kGetVendorAndLanguage); + + MplCheck(nativeException); + MplCheck((static_cast(actions) & static_cast(kCleanupPhase)) != 0); + + const uint32_t *pc = reinterpret_cast(_Unwind_GetIP(const_cast<_Unwind_Context*>(context))); + const uint32_t *mLSDA = nullptr; + const uint32_t *startPC = nullptr; + LinkerLocInfo info; + if (LinkerAPI::Instance().LocateAddress(pc, info, false)) { + startPC = reinterpret_cast(info.addr); + mLSDA = reinterpret_cast((uintptr_t)info.addr + info.size); + } else { + EHLOG(INFO) << "pc must be java method written with assembly" << maple::endl; + return _URC_CONTINUE_UNWIND; + } + EHTable ehTable(startPC, mLSDA, pc); + ehTable.ScanExceptionTable(actions, nativeException, *unwindException); + // If the stack-top java frame is abnormal, eh still calls __mpl_personality_v0 now because .cfi_personality. + if (ehTable.results.unwindReason == _URC_HANDLER_FOUND && ehTable.results.landingPad) { + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(*unwindException); + if (VLOG_IS_ON(eh)) { + DumpFrameInfo(actions, *context); + EHLOG(INFO) << "Catch Exception Succeeded : " << + reinterpret_cast(GetThrownObject(*unwindException))->GetClass()->GetName() << maple::endl; + } + SetRegisters(*unwindException, *context, ehTable.results); + if (!exceptionHeader->caughtByJava) { + free(GetThrownException(*unwindException)); + } // otherwise exception wrapper is freed in begin_catch + return _URC_INSTALL_CONTEXT; + } else { + // control flow reaches here if top-stack java frame is abnormal + return _URC_CONTINUE_UNWIND; + } +} + +#elif defined(__arm__) +UnwindReasonCode __mpl_personality_v0(_Unwind_State state, _Unwind_Exception *unwindException, + _Unwind_Context *context) { + if (unwindException == nullptr || context == nullptr) { + return _URC_FATAL_PHASE1_ERROR; + } + + if (state == (_US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND)) { + constexpr int contextFPOffset = 12; + constexpr int contextPCOffset = 16; + uintptr_t *callerFP = *(reinterpret_cast(context) + contextFPOffset); + uintptr_t *currentPC = reinterpret_cast(context) + contextPCOffset; + *currentPC = *(callerFP + 1); + uintptr_t *currentFP = reinterpret_cast(context) + contextFPOffset; + *currentFP = *callerFP; + return _URC_CONTINUE_UNWIND; + } + const uint32_t *pc; + _Unwind_VRS_Get(const_cast<_Unwind_Context*>(context), _UVRSC_CORE, kPC, _UVRSD_UINT32, &pc); + const uint32_t *mLSDA = nullptr; + const uint32_t *startPC = nullptr; + LinkerLocInfo info; + if (LinkerAPI::Instance().LocateAddress(pc, info, false)) { + startPC = reinterpret_cast(info.addr); + mLSDA = reinterpret_cast((uintptr_t)info.addr + info.size); + } else { + EHLOG(INFO) << "pc must be java method written with assembly" << maple::endl; + return _URC_CONTINUE_UNWIND; + } + EHTable ehTable(startPC, mLSDA, pc); + ehTable.ScanExceptionTable(kSearchPhase, true, *unwindException); + // If the stack-top java frame is abnormal, eh still calls __mpl_personality_v0 now because .cfi_personality. 
+ if (ehTable.results.unwindReason == _URC_HANDLER_FOUND && ehTable.results.landingPad) { + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(*unwindException); + if (VLOG_IS_ON(eh)) { + EHLOG(INFO) << "Catch Exception Succeeded : " << + reinterpret_cast(GetThrownObject(*unwindException))->GetClass()->GetName() << maple::endl; + } + SetRegisters(*unwindException, *context, ehTable.results); + if (!exceptionHeader->caughtByJava) { + free(GetThrownException(*unwindException)); + } // otherwise exception wrapper is freed in begin_catch + return _URC_INSTALL_CONTEXT; + } else { + // control flow reaches here if top-stack java frame is abnormal + return _URC_CONTINUE_UNWIND; + } +} +#endif +} // extern "C" +} // maplert diff --git a/src/mrt/compiler-rt/src/exception/exception_handling.cpp b/src/mrt/compiler-rt/src/exception/exception_handling.cpp new file mode 100644 index 0000000000..d54e9a7c8a --- /dev/null +++ b/src/mrt/compiler-rt/src/exception/exception_handling.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "exception/exception_handling.h" + +#include +#include +#include +#include +#include +#include "libs.h" +#include "exception/mpl_exception.h" +#include "exception/mrt_exception.h" +#include "exception_store.h" +#include "chosen.h" + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +namespace maplert { +void EHFrameInfo::LookupExceptionHandler(const _Unwind_Exception &unwindException) { + EHTable ehTable(GetStartPC(), GetEndPC(), GetJavaFrame().ip); + ehTable.ScanExceptionTable(kSearchPhase, true, unwindException); + mGeneralHandler = reinterpret_cast(ehTable.results.landingPad); + + if (ehTable.results.caughtByJava) { + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(unwindException); + exceptionHeader->tTypeIndex = ehTable.results.tTypeIndex; + exceptionHeader->caughtByJava = true; // for a matching handler + mCleanupCode = nullptr; + mCatchCode = reinterpret_cast(ehTable.results.landingPad); + } else { + mCatchCode = nullptr; + mCleanupCode = reinterpret_cast(ehTable.results.landingPad); + } + + // no general handler for this frame means it is an abnormal frame. + __MRT_ASSERT(mGeneralHandler != nullptr || (!javaFrame.HasFrameStructure()), ""); + return; +} + +void EHStackInfo::Build(_Unwind_Exception &unwindException, bool isRet) { + JavaFrame frame; + // If UnwindFinish is returned here, it means that no Java frame has been found + // on the stack in the current n2j block, so no build operation is required in + // the exception handling phase. 
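+  // Otherwise the loop below walks compiled Java frames newest-first, recording one EHFrameInfo per
+  // real frame and stopping at the first frame whose EH table yields a catch handler.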
+ if (MapleStack::GetLastJavaFrame(frame, nullptr, true) == kUnwindFinish) { + return; + } + + (void)isRet; + + if (VLOG_IS_ON(eh)) { + MplCheck(frame.IsCompiledFrame(), "Build should start at java frame"); + } + + size_t n = 0; + while (n < MAPLE_STACK_UNWIND_STEP_MAX && frame.IsCompiledFrame()) { + EHFrameInfo frameInfo(frame, unwindException); + + // the latest java frame might be a fake frame, we ignore this fake frame + const JavaFrame& calleeframe = frameInfo.GetJavaFrame(); + if (calleeframe.HasFrameStructure()) { + priEhStackInfo.push_back(frameInfo); + }; + + if (frameInfo.GetCatchCode()) { + if (VLOG_IS_ON(eh)) { + std::stringstream ss; + frameInfo.Dump(">>>> Building eh stack: handler at", ss); + EHLOG(INFO) << ss.str() << maple::endl; + } + break; + } + + if (VLOG_IS_ON(eh)) { + std::stringstream ss; + frameInfo.Dump(">>>> Building eh stack: unwind at", ss); + EHLOG(INFO) << ss.str() << maple::endl; + } + + (void)calleeframe.UnwindToNominalCaller(frame); + ++n; + } +} + +// chain all java frames in this exception-handling stack +// this is done by rewriting the return address of callee frame with the general +// handler of caller frame. +void EHStackInfo::ChainAllEHFrames(const _Unwind_Exception &unwindException, bool isRet, bool isImplicitNPE) { + MExceptionHeader *exceptionHeader = GetThrownExceptionHeader(unwindException); + // If the array is equal to 0, it represents a special scenario of a Java method + // implemented natively. + if (priEhStackInfo.size() == 0) { + (void)isRet; + (void)isImplicitNPE; + return; + } + + + EHFrameInfo *currentFrameInfo = &priEhStackInfo[0]; + for (unsigned i = 1; i < priEhStackInfo.size(); ++i) { + EHFrameInfo *callerFrameInfo = &priEhStackInfo[i]; + currentFrameInfo->ChainToCallerEHFrame(*callerFrameInfo); + currentFrameInfo = callerFrameInfo; + } + + // if exception is caught by java frame, the last frame in priEhStackInfo holds the handler. + // otherwise, we treat the return address of last frame in priEhStackInfo as the handler. + if (exceptionHeader->caughtByJava) { + MRT_SetReliableUnwindContextStatus(); + } else { + if (currentFrameInfo->GetJavaFrame().HasFrameStructure()) { + currentFrameInfo->ChainToGeneralHandler(currentFrameInfo->GetReturnAddress()); + } + MRT_ThrowExceptionUnsafe(GetThrownObject(unwindException)); + MRT_ClearThrowingException(); + } + +} + +// This function is called by HandleJavaSignalStub and aims to check whether the +// segv signal results into a java exception. If so it returns PrepareArgsForExceptionCatcher as +// the continuation point for HandleJavaSignalStub. +extern "C" uintptr_t IsThrowingExceptionByRet() { + // Check whether there is an exception thrown by TLS. + MrtClass *thrownObject = reinterpret_cast(maple::ExceptionVisitor::GetExceptionAddress()); + if (thrownObject != nullptr) { + return reinterpret_cast(&PrepareArgsForExceptionCatcher); + } else { + return 0; + } +} + +struct HandlerCatcherArgs { + uintptr_t uwException; + intptr_t typeIndex; + uintptr_t topJavaHandler; +}; + +extern "C" void MRT_GetHandlerCatcherArgs(struct HandlerCatcherArgs *cArgs) { + (void)cArgs; +} + +#if defined(__arm__) +extern "C" MRT_EXPORT UnwindReasonCode AdaptationFunc(_Unwind_State, _Unwind_Exception*, _Unwind_Context*) { + return _URC_CONTINUE_UNWIND; +} +#endif + + +ATTR_NO_SANITIZE_ADDRESS +void RaiseException(struct _Unwind_Exception &unwindException, bool isRet, bool isImplicitNPE) { + { + EHStackInfo ehStack; + ehStack.Build(unwindException, isRet); + + // 1. 
chain all java frames in this exception-handling stack + ehStack.ChainAllEHFrames(unwindException, isRet, isImplicitNPE); + } + + // 2. the entry point of exception handler is the extended epilogue of top java frame + // here we invoke _Unwind_Resume in libgcc which invokes _Unwind_RaiseException_Phase2. + // we adjust the personality to trick _Unwind_RaiseException_Phase2 to continue to + // the entry point of exception handler. +#if defined(__aarch64__) + unwindException.private_1 = 0; +#elif defined(__arm__) + // Add an adaptation function, the first time you enter __gnu_Unwind_Resume will be executed. + unwindException.unwinder_cache.reserved2 = reinterpret_cast(&AdaptationFunc); + unwindException.unwinder_cache.reserved3 = reinterpret_cast(__builtin_return_address(0)); +#endif + _Unwind_Resume(&unwindException); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/exception/mpl_exception.cpp b/src/mrt/compiler-rt/src/exception/mpl_exception.cpp new file mode 100644 index 0000000000..a9f0f55da1 --- /dev/null +++ b/src/mrt/compiler-rt/src/exception/mpl_exception.cpp @@ -0,0 +1,220 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "exception/mpl_exception.h" + +#include +#include + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include "libs.h" +#include "exception/mrt_exception.h" +#include "exception_store.h" + +namespace maplert { +static uint64_t exceptionTotalCount = 0; +list ehObjectList; +std::map ehObjectStackMap; +std::mutex ehObjectListLock; +std::mutex ehObjectStackMapLock; + + +static inline uint64_t IncExceptionTotalCount(uint64_t count) { + return __atomic_add_fetch(&exceptionTotalCount, count, __ATOMIC_ACQ_REL); +} + +static inline void RecordExceptionType(const std::string ehNumType) { + std::lock_guard lock(ehObjectListLock); + ehObjectList.push_back(ehNumType); +} + +static void RecordExceptionStack(std::string ehStack) { + std::lock_guard lock(ehObjectStackMapLock); + auto it = ehObjectStackMap.find(ehStack); + if (it == ehObjectStackMap.end()) { + if (UNLIKELY(!ehObjectStackMap.insert(std::make_pair(ehStack, 1)).second)) { + EHLOG(ERROR) << "ehObjectStackMap.insert() in RecordExceptionStack() failed." 
<< maple::endl; + } + } else { + it->second = it->second + 1; + } +} + +static void SetExceptionClass(_Unwind_Exception &unwindException) { +#if defined(__aarch64__) + unwindException.exception_class = kOurExceptionClass; +#elif defined(__arm__) + if (strcpy_s(unwindException.exception_class, sizeof(kOurExceptionClass), kOurExceptionClass) != EOK) { + LOG(FATAL) << "SetExceptionClass strcpy_s() not return 0" << maple::endl; + }; +#endif +} + +extern "C" { + +ATTR_NO_SANITIZE_ADDRESS +void MplThrow(const MrtClass &thrownObject, bool isRet, bool isImplicitNPE) { + MExceptionHeader *exceptionHeader = + &(MplExceptionFromThrownObject(const_cast(&thrownObject))->exceptionHeader); + + exceptionHeader->exceptionType = nullptr; + exceptionHeader->exceptionDestructor = nullptr; + + SetExceptionClass(exceptionHeader->unwindHeader); + + if (VLOG_IS_ON(eh)) { + uint64_t totalCount = IncExceptionTotalCount(1); + std::string exceptionType(reinterpret_cast(thrownObject)->GetClass()->GetName()); + std::string exceptionCount = std::to_string(totalCount); + EHLOG(INFO) << "Exception Type : " << exceptionType << maple::endl; + EHLOG(INFO) << "Total Exception Count : " << totalCount << maple::endl; + RecordExceptionType(exceptionType + ":" + exceptionCount); + + MplDumpStack("------------------ Dump Stack In Start To Throw Exception ------------------"); + + std::string exceptionStack; + MRT_DumpException(reinterpret_cast(thrownObject), &exceptionStack); + RecordExceptionStack(exceptionStack); + } + + RaiseException(exceptionHeader->unwindHeader, isRet, isImplicitNPE); + + + // This only happens when there is no handler, or some unexpected unwinding error happens. + MplDumpStack("------------------ Dump Stack As Throw Exception Fail ------------------"); + EHLOG(FATAL) << "Throw Exception failed" << maple::endl; +} + +static UnwindReasonCode UnwindBacktraceDumpStackCallback(const _Unwind_Context *context, void *ip) { + uintptr_t pc; +#if defined(__arm__) + _Unwind_VRS_Get(const_cast<_Unwind_Context*>(context), _UVRSC_CORE, kPC, _UVRSD_UINT32, &pc); +#else + pc = _Unwind_GetIP(const_cast<_Unwind_Context*>(context)); +#endif + if (*reinterpret_cast(ip) == pc) { + EHLOG(ERROR) << "It could be a recursive function, pc : " << pc << maple::endl; + return _URC_NORMAL_STOP; + } + *reinterpret_cast(ip) = pc; + if (pc) { + (void)JavaFrame::DumpProcInfo(pc); + } else { + EHLOG(ERROR) << "Unwind_Backtrace failed to get pc address :" << ip << maple::endl; + } + return _URC_NO_REASON; +} + +void MplDumpStack(const std::string &msg) { + EHLOG(INFO) << "----------------------------- dump call stack -------------------------------------" << maple::endl; + if (msg != "") { + EHLOG(INFO) << "reason: " << msg << maple::endl; + } + uintptr_t ip = 0; + (void)_Unwind_Backtrace(UnwindBacktraceDumpStackCallback, &ip); +} + +void MplCheck(bool ok, const std::string &msg) { + if (!ok) { + MplDumpStack(msg); + EHLOG(FATAL) << "Check failed" << maple::endl; + } +} + +static MrtClass *MplWrapException(MrtClass obj, const void *sigIp = nullptr) { + if (obj == nullptr) { + MRT_ThrowNullPointerExceptionUnw(); + // never returns here + } + const size_t kMplExceptionSize = sizeof(struct MplException); + // memory allocated here is freed when: + // 1. after exception is caught, during __java_begin_catch is called, + // 2. if exception is not caught, when we jump to the continuation of caller native frame. 
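+  // (Concretely: MCC_JavaBeginCatch below frees the wrapper on catch, and the personality routines
+  // free it when the exception is not caught by a Java frame.)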
+ MplException *mplException = reinterpret_cast(calloc(1, kMplExceptionSize)); + // must to be successful from malloc above + if (mplException == nullptr) { + EHLOG(ERROR) << "--eh-- exception object malloc failed." << maple::endl; + std::terminate(); + } + + // since this record exception at the wrapper + RC_RUNTIME_INC_REF(obj); + // both are set, first is for root visitor + // second is for personality + maple::ExceptionVisitor::SetThrowningException(reinterpret_cast(obj)); + mplException->exceptionHeader.sigIP = const_cast(sigIp); + mplException->thrownObject = reinterpret_cast(obj); + return reinterpret_cast(&(mplException->thrownObject)); +} + +void *MCC_JavaBeginCatch(const _Unwind_Exception *unwindException) { + maple::ExceptionVisitor::SetThrowningException(nullptr); + jthrowable thrown = GetThrownObject(*unwindException); + MRT_InvokeResetHandler(); + free(GetThrownException(*unwindException)); + return thrown; +} + +void MCC_ThrowException(MrtClass obj) { + MrtClass *exPtr = MplWrapException(obj); + MplThrow(*exPtr, true); +} + +// note: this function can not be called directly by java code. +void MRT_DecRefThrowExceptionUnw(MrtClass obj, const void *sigIp) { + MrtClass *exPtr = MplWrapException(obj, sigIp); + + // we do not hava chance to dec ref-count for exception when we throw exception + // directly in runtime native code since this function changes control flow. + RC_RUNTIME_DEC_REF(obj); + + MplThrow(*exPtr); +} + +void MRT_DecRefThrowExceptionRet(MrtClass obj, bool isImplicitNPE, const void *sigIp) { + MrtClass *exPtr = MplWrapException(obj, sigIp); + // we do not hava chance to dec ref-count for exception when we throw exception + // directly in runtime native code since this function changes control flow. + RC_RUNTIME_DEC_REF(obj); + + MplThrow(*exPtr, true, isImplicitNPE); +} + +void MCC_ThrowPendingException() { + jobject ex = maple::ExceptionVisitor::GetPendingException(); + if (ex == nullptr) { + EHLOG(FATAL) << "pending exception is null" << maple::endl; + } + + maple::ExceptionVisitor::SetPendingException(nullptr); + MrtClass *exPtr = MplWrapException(ex, nullptr); + + // we do not hava chance to dec ref-count for exception when we throw exception + // directly in runtime native code since this function changes control flow. + RC_RUNTIME_DEC_REF(ex); + + MplThrow(*exPtr, true); +} + +// alias for exported symbol +void MRT_CheckException(bool ok, std::string msg) __attribute__((alias("MplCheck"))); + +void MCC_RethrowException(MrtClass obj) __attribute__((alias("MCC_ThrowException"))); + +} // extern "C" +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/exception/mrt_exception.cpp b/src/mrt/compiler-rt/src/exception/mrt_exception.cpp new file mode 100644 index 0000000000..6fc4ce222e --- /dev/null +++ b/src/mrt/compiler-rt/src/exception/mrt_exception.cpp @@ -0,0 +1,615 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "exception/mrt_exception.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libs.h" +#include "base/logging.h" +#include "exception_store.h" + +#ifndef UNIFIED_MACROS_DEF +#define UNIFIED_MACROS_DEF +#include "unified.macros.def" +#endif + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + + +namespace maplert { +static uint64_t nativeExceptionTotalCount = 0; + +list nativeEhObjectList; +std::map nativeEhObjectStackMap; +std::mutex nativeEhObjectListLock; +std::mutex nativeEhObjectStackMapLock; + +static inline uint64_t IncNativeExceptionTotalCount(uint64_t count) { + return __atomic_add_fetch(&nativeExceptionTotalCount, count, __ATOMIC_ACQ_REL); +} + +static inline void RecordNativeExceptionType(const std::string ehNumType) { + std::lock_guard lock(nativeEhObjectListLock); + nativeEhObjectList.push_back(ehNumType); +} + +static void RecordNativeExceptionStack(std::string &ehStack) { + std::lock_guard lock(nativeEhObjectStackMapLock); + auto it = nativeEhObjectStackMap.find(ehStack); + if (it == nativeEhObjectStackMap.end()) { + if (nativeEhObjectStackMap.insert(std::make_pair(ehStack, 1)).second != true) { + EHLOG(ERROR) << "record_nateve_exception_stack insert() not return true" << maple::endl; + } + } else { + it->second = it->second + 1; + } +} + +static void ValidateThrowableException(const MrtClass ex) { + CHECK(ex != nullptr) << "the thrown exception must not be null" << maple::endl; + bool throwable = reinterpret_cast(ex)->IsInstanceOf(*WellKnown::GetMClassThrowable()); + CHECK(throwable) << "should not throw a nonthrowable object" << maple::endl; +} + +static MrtClass NewException(MrtClass classType, const char *kMsg = "unknown reason", MrtClass cause = nullptr) { + CHECK(classType != nullptr) << "invalid exception class type: null" << maple::endl; + if (classType == nullptr) { + return nullptr; + } + + MClass *exceptioncls = reinterpret_cast(classType); + MObject *newex = nullptr; + + ScopedHandles sHandles; + if (kMsg != nullptr && cause != nullptr) { + MethodMeta *exceptionConstruct = exceptioncls->GetDeclaredConstructor( + "(Ljava/lang/String;Ljava/lang/Throwable;)V"); + CHECK(exceptionConstruct != nullptr) << "failed to find constructor (Ljava/lang/String;Ljava/lang/Throwable;)V" << + maple::endl; + + ObjHandle jmsg(NewStringUTF(kMsg, strlen(kMsg))); + CHECK(jmsg() != 0) << "failed to create jstring object for exception message" << maple::endl; + + if (exceptionConstruct == nullptr || jmsg() == 0) { + EHLOG(FATAL) << "ex msg : " << kMsg << maple::endl; + } + newex = MObject::NewObject(*exceptioncls, exceptionConstruct, jmsg.AsRaw(), cause); + } else if (kMsg != nullptr && cause == nullptr) { + MethodMeta *exceptionConstruct = exceptioncls->GetDeclaredConstructor("(Ljava/lang/String;)V"); + CHECK(exceptionConstruct != nullptr) << "failed to find constructor (Ljava/lang/String;)V" << maple::endl; + + ObjHandle jmsg(NewStringUTF(kMsg, strlen(kMsg))); + CHECK(jmsg() != 0) << "failed to create jstring object for exception message" << maple::endl; + + if (exceptionConstruct == nullptr || jmsg() == 0) { + EHLOG(FATAL) << "ex msg : " << kMsg << maple::endl; + } + newex = MObject::NewObject(*exceptioncls, exceptionConstruct, jmsg.AsRaw()); + } else if (kMsg == nullptr && cause != nullptr) { + MethodMeta *exceptionConstruct = exceptioncls->GetDeclaredConstructor("(Ljava/lang/Throwable;)V"); + if (exceptionConstruct == nullptr) { + EHLOG(FATAL) << "failed to find constructor 
(Ljava/lang/Throwable;)V" << maple::endl; + } + newex = MObject::NewObject(*exceptioncls, exceptionConstruct, cause); + } else { + MethodMeta *exceptionConstruct = exceptioncls->GetDeclaredConstructor("()V"); + if (exceptionConstruct == nullptr) { + EHLOG(FATAL) << "failed to find constructor ()V" << maple::endl; + } + newex = MObject::NewObject(*exceptioncls, exceptionConstruct); + } + + // this function shoule never returns null. + if (newex == nullptr) { + EHLOG(ERROR) << "new exception object is nullptr" << maple::endl; + } + return reinterpret_cast(newex); +} + +extern "C" void MRT_DumpException(jthrowable exObj, std::string *exceptionStack) { + MObject *ex = reinterpret_cast(exObj); + if (ex == nullptr) { + return; + } + + bool throwable = ex->IsInstanceOf(*WellKnown::GetMClassThrowable()); + if (throwable) { + MClass *clsThrowable = ex->GetClass(); + if (clsThrowable == nullptr) { + EHLOG(ERROR) << "Cannot Find the corresponding Exception Class" << maple::endl; + return; + } + + if (exceptionStack != nullptr) { + exceptionStack->append(clsThrowable->GetName()).append("\n"); + } else { + EHLOG(INFO) << "-- exception class -- " << clsThrowable->GetName() << maple::endl; + } + + FieldMeta *fid = clsThrowable->GetField("backtrace"); + if (fid == nullptr) { + EHLOG(ERROR) << "Cannot Find the backtrace field in Class " << clsThrowable->GetName() << maple::endl; + return; + } + + jlongArray backtrace = reinterpret_cast(fid->GetObjectValue(ex)); + if (!backtrace) { + EHLOG(ERROR) << "Cannot Find the backtrace Array in Class " << clsThrowable->GetName() << maple::endl; + return; + } + MArray *backtraceArray = reinterpret_cast(backtrace); + uint32_t size = backtraceArray->GetLength(); + jlong *array = reinterpret_cast(backtraceArray->ConvertToCArray()); + + for (uint32_t i = 0; i < size; ++i) { + (void)JavaFrame::DumpProcInfo(static_cast(array[i]), exceptionStack); + } + RC_LOCAL_DEC_REF(backtrace); + } else { + EHLOG(ERROR) << "object is not an exception!" << maple::endl; + } +} + +extern "C" void MRT_DumpExceptionForLog(jthrowable exObj) { + MObject *ex = reinterpret_cast(exObj); + if (ex == nullptr) { + return; + } + + bool throwable = ex->IsInstanceOf(*WellKnown::GetMClassThrowable()); + if (throwable) { + MClass *clsThrowable = ex->GetClass(); + if (clsThrowable == nullptr) { + EHLOG(ERROR) << "Cannot Find the corresponding Exception Class" << maple::endl; + return; + } + + EHLOG(ERROR) << "-- exception class -- " << clsThrowable->GetName() << + maple::endl; + + FieldMeta *fid = clsThrowable->GetField("backtrace"); + if (fid == nullptr) { + EHLOG(ERROR) << "Cannot Find the backtrace field in Class " << clsThrowable->GetName() << maple::endl; + return; + } + + jlongArray backtrace = reinterpret_cast(fid->GetObjectValue(ex)); + if (!backtrace) { + EHLOG(ERROR) << "Cannot Find the backtrace Array in Class " << clsThrowable->GetName() << maple::endl; + return; + } + + MArray *backtraceArray = reinterpret_cast(backtrace); + uint32_t size = backtraceArray->GetLength(); + jlong *array = reinterpret_cast(backtraceArray->ConvertToCArray()); + + for (uint32_t i = 0; i < size; ++i) { + JavaFrame::DumpProcInfoLog(array[i]); + } + RC_LOCAL_DEC_REF(backtrace); + } else { + EHLOG(ERROR) << "object is not an exception!" 
<< maple::endl; + } +} + +extern "C" void MRT_DumpExceptionTypeCount(std::ostream &os) { + std::lock_guard lock(ehObjectListLock); + os << "-- eh object list --" << std::endl; + for (auto eh_object : ehObjectList) { + os << eh_object << std::endl; + } +} + +extern "C" void MRT_DumpExceptionStack(std::ostream &os) { + std::lock_guard lock(ehObjectStackMapLock); + os << "-- eh object-stack map --" << std::endl; + for (auto it = ehObjectStackMap.begin(); it != ehObjectStackMap.end(); ++it) { + os << "----eh stack----" << std::endl; + os << it->first << std::endl; + os << "----total count----: " << it->second << std::endl; + } +} + +extern "C" void MRT_DumpNativeExceptionTypeCount(std::ostream &os) { + std::lock_guard lock(nativeEhObjectListLock); + os << "-- native eh object list --" << std::endl; + for (auto &native_eh_object : nativeEhObjectList) { + os << native_eh_object << std::endl; + } +} + +extern "C" void MRT_DumpNativeExceptionStack(std::ostream &os) { + std::lock_guard lock(nativeEhObjectStackMapLock); + os << "-- native eh object-stack map --" << std::endl; + for (auto it = nativeEhObjectStackMap.begin(); it != nativeEhObjectStackMap.end(); ++it) { + os << "----native eh stack----" << std::endl; + os << it->first << std::endl; + os << "----total count----: " << it->second << std::endl; + } +} + +// "raise" an async exception from JNI code +extern "C" void MRT_ThrowExceptionSafe(jobject ex) { + ValidateThrowableException(ex); + MRT_ThrowExceptionUnsafe(ex); +} + +extern "C" void MRT_ThrowNewExceptionInternalType(MrtClass classType, const char *msg) { + MrtClass newex = NewException(classType, msg); + if (newex == nullptr) { + return; + } + MRT_ThrowExceptionSafe(reinterpret_cast(newex)); + RC_LOCAL_DEC_REF(newex); +} + +extern "C" void MRT_ThrowNewException(const char *className, const char *msg) { + jclass exceptionCls = MRT_ReflectClassForCharName(className, false, nullptr); + if (!exceptionCls) { + EHLOG(FATAL) << "exceptioncls is null." 
<< maple::endl; + } + MRT_ThrowNewExceptionInternalType(reinterpret_cast(exceptionCls), msg); +} + +extern "C" jobject MRT_PendingException() { + jobject e = maple::ExceptionVisitor::GetPendingException(); + if (e != nullptr) { + RC_LOCAL_INC_REF(e); + } + return e; +} + +extern "C" bool MRT_HasPendingException() { + jobject e = maple::ExceptionVisitor::GetPendingException(); + return (e != nullptr); +} + +extern "C" void MRT_ThrowExceptionUnsafe(jobject ex) { + if (VLOG_IS_ON(eh)) { + uint64_t nativeTotalCount = IncNativeExceptionTotalCount(1); + std::string nativeExceptionType(reinterpret_cast(ex)->GetClass()->GetName()); + std::string nativeExceptionCount = std::to_string(nativeTotalCount); + EHLOG(INFO) << "Native Exception Type: " << nativeExceptionType << maple::endl; + EHLOG(INFO) << "Total Native Exception Count: " << nativeTotalCount << maple::endl; + RecordNativeExceptionType(nativeExceptionType + ":" + nativeExceptionCount); + std::string nativeExceptionStack; + MRT_DumpException(reinterpret_cast(ex), &nativeExceptionStack); + RecordNativeExceptionStack(nativeExceptionStack); + } + + RC_RUNTIME_INC_REF(ex); + maple::ExceptionVisitor::SetPendingException(ex); +} + +extern "C" void MRT_CheckThrowPendingExceptionRet() { + jobject ex = MRT_PendingException(); + if (ex) { + MRT_ClearPendingException(); + MRT_DecRefThrowExceptionRet(ex); + } +} + +extern "C" void MRT_ClearPendingException() { + jobject e = maple::ExceptionVisitor::GetPendingException(); + if (e != nullptr) { + RC_RUNTIME_DEC_REF(e); + maple::ExceptionVisitor::SetPendingException(nullptr); + } +} + +extern "C" void MRT_ClearThrowingException() { + jobject e = maple::ExceptionVisitor::GetThrowningException(); + if (e != nullptr) { + RC_RUNTIME_DEC_REF(e); + maple::ExceptionVisitor::SetThrowningException(nullptr); + } +} + +// raise a sync exception from runtime native code +// These function will be removed later. 
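+// Naming convention in this file: the *Unw throwers unwind immediately through MplThrow, the *Ret
+// throwers take the isRet path, and the unsuffixed throwers further below only record a pending
+// exception. An illustrative (hypothetical) caller pattern for the pending-exception API, sketched
+// here purely as a comment:
+//   MRT_ThrowNullPointerException();       // records a pending NPE for the current thread
+//   ...                                    // native code finishes its cleanup
+//   MRT_CheckThrowPendingExceptionUnw();   // picks up the pending exception and unwinds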
+extern "C" void MRT_CheckThrowPendingExceptionUnw() { + jobject ex = MRT_PendingException(); + if (ex) { + MRT_ClearPendingException(); + MRT_DecRefThrowExceptionUnw(ex); + } +} + +ATTR_NO_SANITIZE_ADDRESS +extern "C" void ThrowExceptionUnw(MrtClass ex) { + jobject e = maple::ExceptionVisitor::GetPendingException(); + CHECK(e == nullptr) << "pending exception needs to be handled" << maple::endl; + + ValidateThrowableException(ex); + MRT_DecRefThrowExceptionUnw(ex); +} + +ATTR_NO_SANITIZE_ADDRESS +extern "C" void ThrowExceptionRet(MrtClass ex) { + jobject e = maple::ExceptionVisitor::GetPendingException(); + CHECK(e == nullptr) << "pending exception needs to be handled" << maple::endl; + + ValidateThrowableException(ex); + MRT_DecRefThrowExceptionRet(ex); +} + +extern "C" void ThrowNewExceptionInternalTypeUnw(MrtClass classType, const char *kMsg) { + MrtClass throwable = NewException(classType, kMsg); + ThrowExceptionUnw(throwable); +} + +extern "C" void ThrowNewExceptionInternalTypeRet(MrtClass classType, const char *kMsg) { + MrtClass throwable = NewException(classType, kMsg); + ThrowExceptionRet(throwable); +} + +extern "C" void MRT_ThrowNewExceptionRet(const char *className, const char *msg) { + jclass exceptionCls = MRT_ReflectClassForCharName(className, false, nullptr); + ThrowNewExceptionInternalTypeRet(reinterpret_cast(exceptionCls), msg); +} + +extern "C" void MRT_ThrowNewExceptionUnw(const char *className, const char *msg) { + jclass exceptionCls = MRT_ReflectClassForCharName(className, false, nullptr); + ThrowNewExceptionInternalTypeUnw(reinterpret_cast(exceptionCls), msg); +} + +extern "C" void MRT_ThrowImplicitNullPointerExceptionUnw(const void *sigIP) { + jobject e = maple::ExceptionVisitor::GetPendingException(); + CHECK(e == nullptr) << "pending exception needs to be handled" << maple::endl; + + MrtClass throwable = NewException(*WellKnown::GetMClassNullPointerException()); + ValidateThrowableException(throwable); + MRT_DecRefThrowExceptionRet(throwable, true, sigIP); +} + +extern "C" void MRT_ThrowNullPointerExceptionUnw() { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassNullPointerException()); +} + +extern "C" void MRT_ThrowArithmeticExceptionUnw() { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassArithmeticException()); +} + +extern "C" void MRT_ThrowInterruptedExceptionUnw() { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassInterruptedException()); +} + +extern "C" void MRT_ThrowClassCastExceptionUnw(const std::string msg) { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassClassCastException(), msg.c_str()); +} + +extern "C" void MRT_ThrowArrayStoreExceptionUnw(const std::string msg) { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassArrayStoreException(), msg.c_str()); +} + +extern "C" void MRT_ThrowArrayIndexOutOfBoundsExceptionUnw(int32_t length, int32_t index) { + // formatting information about exception + char msg[kBufferSize / 2] = { 0 }; // kBufferSize / 2 = 64 + // MRT_ThrowException_Unw does not return, so cannot apply for heap memory here. 
+ if (sprintf_s(msg, sizeof(msg), "length=%d; index=%d", length, index) < 0) { + EHLOG(ERROR) << "MRT_ThrowArrayIndexOutOfBoundsExceptionUnw sprintf_s return -1" << maple::endl; + } + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassArrayIndexOutOfBoundsException(), msg); +} + +extern "C" void MRT_ThrowUnsatisfiedLinkErrorUnw() { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassUnsatisfiedLinkError()); +} + +extern "C" void MRT_ThrowExceptionInInitializerErrorUnw(MrtClass cause) { + MrtClass throwable = NewException(*WellKnown::GetMClassExceptionInInitializerError(), nullptr, cause); + if (cause != nullptr) { + RC_RUNTIME_DEC_REF(cause); + } + ThrowExceptionUnw(throwable); +} + +extern "C" void MRT_ThrowNoSuchMethodErrorUnw(const std::string &msg) { + MrtClass throwable = NewException(*WellKnown::GetMClassNoSuchMethodError(), msg.c_str()); + ThrowExceptionUnw(throwable); +} + +extern "C" void MRT_ThrowNoSuchFieldErrorUnw(const std::string &msg) { + MrtClass throwable = NewException(*WellKnown::GetMClassNoSuchFieldError(), msg.c_str()); + ThrowExceptionUnw(throwable); +} + +extern "C" void MRT_ThrowNoClassDefFoundErrorUnw(const char *msg) { + MrtClass throwable = NewException(*WellKnown::GetMClassNoClassDefFoundError(), msg); + ThrowExceptionUnw(throwable); +} + +extern "C" void MRT_ThrowNoClassDefFoundErrorClassUnw(const void *classInfo) { + char msg[kBufferSize * 2] = { 0 }; // kBufferSize * 2 = 256 + { + // ThrowExceptionUnw does not return, so name must be in closed scope or + // use smart pointer. + std::string name; + (reinterpret_cast(const_cast(classInfo)))->GetTypeName(name); + if (sprintf_s(msg, sizeof(msg), "Could not initialize class %s", name.c_str()) < 0) { + LOG(ERROR) << "MRT_ThrowNoClassDefFoundErrorClassUnw sprintf_s return -1" << maple::endl; + } + } + MrtClass throwable = NewException(*WellKnown::GetMClassNoClassDefFoundError(), msg); + ThrowExceptionUnw(throwable); +} + +extern "C" void MRT_ThrowStringIndexOutOfBoundsExceptionUnw() { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassStringIndexOutOfBoundsException()); +} + +extern "C" void MRT_ThrowVerifyErrorUnw(const std::string &msg) { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassVerifyError(), msg.c_str()); +} + +// all no Unw suffixed functions are used to throw exception by recording pending +extern "C" void MRT_ThrowImplicitNullPointerException() { + jobject e = maple::ExceptionVisitor::GetPendingException(); + CHECK(e == nullptr) << "pending exception needs to be handled" << maple::endl; + + MrtClass throwable = NewException(WellKnown::GetMClassNullPointerException()); + MRT_ThrowExceptionSafe(reinterpret_cast(throwable)); + RC_LOCAL_DEC_REF(throwable); +} + +extern "C" void MRT_ThrowNullPointerException() { + MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassNullPointerException()); +} + +extern "C" void MRT_ThrowArithmeticException() { + MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassArithmeticException()); +} + +extern "C" void MRT_ThrowInterruptedException() { + MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassInterruptedException()); +} + +extern "C" void MRT_ThrowClassCastException(const std::string msg) { + MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassClassCastException(), msg.c_str()); +} + +extern "C" void MRT_ThrowArrayStoreException(const std::string msg) { + MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassArrayStoreException(), msg.c_str()); +} + +extern "C" void MRT_ThrowArrayIndexOutOfBoundsException(int32_t length, int32_t index) { + // 
formatting information about exception
+  char msg[kBufferSize / 2] = { 0 }; // kBufferSize / 2 = 64
+  // MRT_ThrowExceptionUnw does not return, so we cannot allocate heap memory here.
+  if (sprintf_s(msg, sizeof(msg), "length=%d; index=%d", length, index) < 0) {
+    EHLOG(ERROR) << "MRT_ThrowArrayIndexOutOfBoundsException sprintf_s return -1" << maple::endl;
+  }
+  ThrowNewExceptionInternalTypeRet(WellKnown::GetMClassArrayIndexOutOfBoundsException(), msg);
+}
+
+extern "C" void MRT_ThrowUnsatisfiedLinkError() {
+  MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassUnsatisfiedLinkError());
+}
+
+extern "C" void MRT_ThrowNoSuchMethodError(const std::string &msg) {
+  MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassNoSuchMethodError(), msg.c_str());
+}
+
+extern "C" void MRT_ThrowNoSuchFieldError(const std::string &msg) {
+  MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassNoSuchFieldError(), msg.c_str());
+}
+
+extern "C" void MRT_ThrowVerifyError(const std::string &msg) {
+  MRT_ThrowNewExceptionInternalType(*WellKnown::GetMClassVerifyError(), msg.c_str());
+}
+
+extern "C" void MRT_ThrowStringIndexOutOfBoundsException() {
+  MRT_ThrowNewExceptionInternalType(WellKnown::GetMClassStringIndexOutOfBoundsException());
+}
+
+extern "C" void MRT_ThrowExceptionInInitializerError(MrtClass cause) {
+  MrtClass throwable = NewException(*WellKnown::GetMClassExceptionInInitializerError(), nullptr, cause);
+  if (throwable == nullptr) {
+    EHLOG(FATAL) << "Failed to create new exception" << maple::endl;
+  }
+  if (cause != nullptr) {
+    RC_RUNTIME_DEC_REF(cause);
+  }
+  MRT_ThrowExceptionSafe(reinterpret_cast<jobject>(throwable));
+  RC_LOCAL_DEC_REF(throwable);
+}
+
+extern "C" void MRT_ThrowNoClassDefFoundError(const std::string &msg) {
+  MrtClass throwable = NewException(WellKnown::GetMClassNoClassDefFoundError(), msg.c_str());
+  if (throwable == nullptr) {
+    EHLOG(FATAL) << "Failed to create new exception" << maple::endl;
+  }
+  MRT_ThrowExceptionSafe(reinterpret_cast<jobject>(throwable));
+  RC_LOCAL_DEC_REF(throwable);
+}
+
+// all MCC prefixed functions are used only by the maple compiler to generate code
+extern "C" void MCC_CheckThrowPendingException() {
+  MRT_SetReliableUnwindContextStatus();
+  jobject ex = MRT_PendingException();
+  if (ex) {
+    MRT_ClearPendingException();
+    MRT_DecRefThrowExceptionRet(ex);
+  }
+}
+
+extern "C" void MCC_ThrowNullArrayNullPointerException() {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassNullPointerException(), "Attempt to get length of null array");
+}
+
+extern "C" void MCC_ThrowNullPointerException() {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassNullPointerException());
+}
+
+extern "C" void MCC_ThrowArithmeticException() {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassArithmeticException());
+}
+
+extern "C" void MCC_ThrowInterruptedException() {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassInterruptedException());
+}
+
+extern "C" void MCC_ThrowClassCastException(const char *msg) {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassClassCastException(), msg);
+}
+
+extern "C" void MCC_ThrowArrayIndexOutOfBoundsException(const char *msg) {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassArrayIndexOutOfBoundsException(), msg);
+}
+
+extern "C" void MCC_ThrowUnsatisfiedLinkError() {
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassUnsatisfiedLinkError());
+}
+
+extern "C" void MCC_ThrowSecurityException() {
+#ifndef __OPENJDK__
+  ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassSecurityException());
+#endif // 
__OPENJDK__ +} + +extern "C" void MCC_ThrowExceptionInInitializerError(MrtClass cause) { + MrtClass throwable = NewException(*WellKnown::GetMClassExceptionInInitializerError(), nullptr, cause); + ThrowExceptionUnw(throwable); +} + +extern "C" void MCC_ThrowNoClassDefFoundError(const MrtClass classInfo) { + char msg[kBufferSize * 2] = { 0 }; // kBufferSize * 2 = 256 + { + std::string name; + reinterpret_cast(classInfo)->GetTypeName(name); + if (sprintf_s(msg, sizeof(msg), "Could not initialize class %s", name.c_str()) < 0) { + EHLOG(ERROR) << "MRT_ThrowNoClassDefFoundErrorUnw sprintf_s return -1" << maple::endl; + } + } + + MRT_ThrowNoClassDefFoundErrorUnw(msg); +} + +extern "C" void MCC_ThrowStringIndexOutOfBoundsException() { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassStringIndexOutOfBoundsException()); +} +} // namespace maplert + diff --git a/src/mrt/compiler-rt/src/exception/stack_unwinder.cpp b/src/mrt/compiler-rt/src/exception/stack_unwinder.cpp new file mode 100644 index 0000000000..2b6d727c80 --- /dev/null +++ b/src/mrt/compiler-rt/src/exception/stack_unwinder.cpp @@ -0,0 +1,1106 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "exception/stack_unwinder.h" + +#include +#include +#include +#include +#include +#include + +#include "libs.h" +#include "mm_config.h" +#include "panic.h" +#include "syscall.h" +#include "exception_store.h" +#include "exception/mpl_exception.h" +#include "chosen.h" +#include "collie.h" +#include "mutator_list.h" +#include "interp_support.h" + +namespace maplert { +const int kBuffSize = 24; +static RemoteUnwinder remoteUnwinder; + +#define MAPLE_RECORD_METHOD_INFO_AT_START_PROC + +// analyze the instruction: +// .word __methods__Lexcp01802_3B+56-. +// pc belongs to section .text.java +// desc - pc +// desc belongs to section .rodata +// method info - desc +// method info belongs to .data +// using uintptr_t as offset means we should put section .rodata after section .text.java, thus use its signed +// counter-part. 
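The comment above describes how a compiled method's descriptor is located: a 32-bit word emitted just before the method's first instruction stores the distance from that word to the descriptor, so GetMethodDesc only adds the word's signed value to the word's own address. A self-contained sketch of such self-relative addressing (illustrative layout; the real kMethodDescOffset and MethodDesc are defined elsewhere):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// A word that stores "target - &word" as a signed 32-bit self-relative offset.
struct SelfRelativeWord {
  int32_t offset;
  const void *Resolve() const {
    return reinterpret_cast<const char*>(&offset) + offset;
  }
};

// Lay the word and its target out in one object, like the .word emitted in front
// of a compiled method and the descriptor it points at.
struct Blob {
  SelfRelativeWord word;
  int32_t payload;  // stands in for the MethodDesc
};

static Blob blob{{0}, 42};

int main() {
  // The compiler/linker would normally emit this value; here we compute it by hand.
  blob.word.offset = static_cast<int32_t>(offsetof(Blob, payload) - offsetof(Blob, word));
  printf("%d\n", *static_cast<const int32_t*>(blob.word.Resolve()));  // prints 42
  return 0;
}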
+const MethodDesc *JavaFrame::GetMethodDesc(const uint32_t *startPC) { + const uint32_t *offsetAddr = startPC - kMethodDescOffset; + const MethodDesc *methodDesc = reinterpret_cast( + reinterpret_cast(offsetAddr) + static_cast(*offsetAddr)); + return methodDesc; +} + +uint64_t *JavaFrame::GetMethodMetadata(const uint32_t *startPC) { + const MethodDesc *methodDesc = GetMethodDesc(startPC); + const uint32_t *offsetAddr = &(methodDesc->metadataOffset); + uint64_t *metadata = reinterpret_cast( + reinterpret_cast(offsetAddr) + static_cast(*offsetAddr)); + return metadata; +} + +jclass JavaFrame::GetDeclaringClass(const uint64_t *md) { + if (md != nullptr) { + const MethodMetaBase *methodMeta = reinterpret_cast(md); + void *cls = methodMeta->GetDeclaringClass(); + if (cls == nullptr) { + LOG(FATAL) << "class should not be null" << maple::endl; + } + // some faked compact methods don't have a valid declaringClass + // for e.g., the methods in duplicateFunc.s and hashcode.s + if (MRT_ReflectIsClass(reinterpret_cast(cls))) { + return reinterpret_cast(cls); + } + } + return nullptr; +} + +// class name mangled by maple +void JavaFrame::GetMapleClassName(std::string &name, const uint64_t *md) { + if (md != nullptr) { + jclass cls = GetDeclaringClass(md); + if (cls != nullptr) { + name.append(reinterpret_cast(cls)->GetName()); + } else { + name.append("figo.internal.class"); + } + } +} + +void JavaFrame::GetJavaClassName(std::string &name, const uint64_t *md) { + if (md != nullptr) { + jclass cls = GetDeclaringClass(md); + if (cls) { + reinterpret_cast(cls)->GetBinaryName(name); + } else { + name.append("figo.internal.class"); + } + } +} + +// fill in the string of method and type name +void JavaFrame::GetJavaMethodSignatureName(std::string &name, const void *md) { + if (md != nullptr) { + const MethodMetaBase *methodMeta = reinterpret_cast(md); + const std::string mathodName = methodMeta->GetName(); + name.append(mathodName); + } +} + +// fill in the string of method +void JavaFrame::GetJavaMethodName(std::string &name, const void *md) { + if (md != nullptr) { + const MethodMetaBase *methodMeta = reinterpret_cast(md); + name = methodMeta->GetName(); + } +} + +bool JavaFrame::GetMapleMethodFullName(std::string &name, const uint64_t *md) { + if (md != nullptr) { + GetJavaClassName(name, md); + const MethodMetaBase *methodMeta = reinterpret_cast(md); + const std::string methodName = methodMeta->GetName(); + std::string sigName; + methodMeta->GetSignature(sigName); + name.append("|").append(methodName).append("|").append(sigName); + return true; + } + return false; +} + +// ||([Ljava/lang/String;)V +bool JavaFrame::GetMapleMethodFullName(std::string &name, const uint64_t *md, const void *ip) { + if (GetMapleMethodFullName(name, md)) { + return true; + } else { + Dl_info addrInfo; + int dladdrOk = dladdr(ip, &addrInfo); + if (dladdrOk && addrInfo.dli_sname) { + name.append(addrInfo.dli_sname); + EHLOG(ERROR) << "Method meta data is invalid " << reinterpret_cast(md) << + " for " << addrInfo.dli_sname << " at PC " << reinterpret_cast(ip) << maple::endl; + return true; + } + } + return false; +} + +// .([Ljava/lang/String;)V +bool JavaFrame::GetJavaMethodFullName(std::string &name, const uint64_t *md) { + if (md != nullptr) { + GetJavaClassName(name, md); + const MethodMetaBase *methodMeta = reinterpret_cast(md); + const std::string mathodName = methodMeta->GetName(); + std::string sigName; + methodMeta->GetSignature(sigName); + name.append(".").append(mathodName).append(sigName); + return true; + } + return 
false; +} + +static bool DumpProcInfoForJava( + uintptr_t pc, const LinkerLocInfo &procInfo, const LinkerMFileInfo &mplInfo, std::string *symbolInfo) { + string methodName; + const uint64_t *md = JavaFrame::GetMethodMetadata(reinterpret_cast(procInfo.addr)); + if (UNLIKELY(!JavaFrame::GetJavaMethodFullName(methodName, md))) { + EHLOG(ERROR) << "JavaFrame::GetJavaMethodFullName() in DumpProcInfoForJava() return false." << maple::endl; + return false; + } + uintptr_t lineNumber = pc - 1 - reinterpret_cast(procInfo.addr); + + if (symbolInfo != nullptr) { + char ipBuf[kBuffSize] = {0}; + if (UNLIKELY(sprintf_s(ipBuf, sizeof(ipBuf), "0x%llx", static_cast(pc)) == -1)) { + EHLOG(ERROR) << "sprintf_s() in DumpProcInfoForJava() return -1 for failed." << maple::endl; + return false; + } + symbolInfo->append("\t").append(ipBuf).append(" in ").append(methodName).append(" at ").append( + mplInfo.name).append(":").append(std::to_string(lineNumber)).append("\n"); + } else { + EHLOG(INFO) << "\t" << std::hex << "0x" << pc << " in " << methodName << + " at " << mplInfo.name << ":" << std::dec << lineNumber << maple::endl; + } + return true; +} + +uint64_t JavaFrame::GetRelativePc(const std::string &soName, uint64_t pc, LinkerMFileInfo *mplInfo) { + if (mplInfo == nullptr) { + mplInfo = LinkerAPI::Instance().GetLinkerMFileInfoByName(soName); + } + if (mplInfo == nullptr) { + LOG(ERROR) << "mplInfo is nullptr" << maple::endl; + return 0; + } + + if (mplInfo->elfBase == nullptr) { + (void)LinkerAPI::Instance().CheckLinkerMFileInfoElfBase(*mplInfo); + } + return pc - reinterpret_cast(mplInfo->elfBase); +} + +void JavaFrame::DumpProcInfoLog(jlong elem) { + uint64_t ip = static_cast(elem); + LinkerLocInfo info; + LinkerMFileInfo *mplInfo = nullptr; + bool isJava = LinkerAPI::Instance().GetJavaTextInfo(reinterpret_cast(ip), &mplInfo, info, false); + if (isJava) { + uint64_t *md = JavaFrame::GetMethodMetadata(reinterpret_cast(info.addr)); + std::string declaringClass; + std::string methodName; + std::string fileName; + JavaFrame::GetJavaClassName(declaringClass, md); + JavaFrame::GetJavaMethodSignatureName(methodName, md); + fileName = mplInfo->name; + uint64_t lineNumber = JavaFrame::GetRelativePc(fileName, static_cast(ip), mplInfo); + EHLOG(INFO) << declaringClass << ":" << methodName << ":" << fileName << ":" << lineNumber << maple::endl; + } else if (mplInfo != nullptr) { + std::string fileName = mplInfo->name; + uint64_t lineNumber = JavaFrame::GetRelativePc(fileName, static_cast(ip), mplInfo); + EHLOG(ERROR) << fileName << ":" << lineNumber << maple::endl; + } else { + EHLOG(ERROR) << "Could not get mplinfo." 
<< maple::endl; + } +} + +// use procInfo first if it is java code, then pc +bool JavaFrame::DumpProcInfo(uintptr_t pc, std::string *symbolInfo) { + LinkerLocInfo info; + LinkerMFileInfo *mplInfo = nullptr; + bool isJava = LinkerAPI::Instance().GetJavaTextInfo(reinterpret_cast(pc), &mplInfo, info, false); + if (isJava) { + return DumpProcInfoForJava(pc, info, *mplInfo, symbolInfo); + } else { + Dl_info addrInfo; + int dladdrOk = dladdr(reinterpret_cast(pc), &addrInfo); + if (dladdrOk && addrInfo.dli_sname) { + std::string fileName(addrInfo.dli_fname); + uintptr_t lineNumber = pc - reinterpret_cast(addrInfo.dli_saddr); + EHLOG(INFO) << "\t" << std::hex << "0x" << pc << " in " << addrInfo.dli_sname << + " at " << fileName << ":" << std::dec << lineNumber << maple::endl; + } else { + EHLOG(ERROR) << "\t" << std::hex << "0x" << pc << " in (unresolved symbol): dladdr() failed" << + std::dec << maple::endl; + return false; + } + } + return true; +} + +bool JavaFrame::JavaMethodHasNoFrame(const uint32_t *ip) { + if (ip == nullptr) { + return false; + } + + LinkerLocInfo info; + const uint32_t *end_proc = nullptr; + if (LinkerAPI::Instance().LocateAddress(const_cast(ip), info, false)) { + end_proc = const_cast( + reinterpret_cast((reinterpret_cast(info.addr) + info.size))); + } else { + EHLOG(INFO) << "ip must be java method written with assembly. pc : " << ip << maple::endl; + return true; + } + + // LSDA is placed just at end_proc, so end_proc is the start address of LSDA. + if (*end_proc == kAbnormalFrameTag) { + return true; + } + return false; +} + +extern "C" void MRT_UpdateLastUnwindFrameLR(const uint32_t *lr) { + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + initialContext.frame.lr = lr; + initialUnwindContext.SetEffective(true); +} + +void UnwindContext::UpdateFrame(const uint32_t *pc, CallChain *fp) { + frame.ip = pc; + frame.fa = fp; + frame.ra = nullptr; + frame.lr = nullptr; + interpFrame = nullptr; +} + +extern "C" void MRT_UpdateLastJavaFrame(const uint32_t *pc, void *fa) { + CallChain *fp = reinterpret_cast(fa); + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + initialContext.UpdateFrame(pc, fp); + initialUnwindContext.SetEffective(true); +} + +extern "C" void MRT_UpdateLastUnwindContext(const uint32_t *pc, CallChain *fp, UnwindContextStatus status) { + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + initialContext.UpdateFrame(pc, fp); + initialContext.status = status; + if (status != UnwindContextStatusIsIgnored) { + initialContext.SetInterpFrame(nullptr); + initialContext.TagDirectJavaCallee(false); + } + initialUnwindContext.SetEffective(true); +} + +extern "C" void MRT_UpdateLastUnwindContextIfReliable(const uint32_t *pc, void *fa) { + CallChain *fp = reinterpret_cast(fa); + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + if (initialContext.status == UnwindContextIsReliable) { + initialContext.UpdateFrame(pc, fp); + } + 
initialUnwindContext.SetEffective(true); +} + +extern "C" void MRT_SetRiskyUnwindContext(const uint32_t *pc, void *fa) { + CallChain *fp = reinterpret_cast(fa); + MRT_UpdateLastUnwindContext(pc, fp, UnwindContextIsRisky); +} + +extern "C" void MRT_SetReliableUnwindContextStatus() { + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + initialContext.status = UnwindContextIsReliable; + initialContext.SetInterpFrame(nullptr); + initialContext.TagDirectJavaCallee(false); + initialUnwindContext.SetEffective(true); +} + +#if !defined(__ANDROID__) || defined(__arm__) +extern "C" void MCC_SetRiskyUnwindContext(uint32_t *pc, void *fp) +__attribute__ ((alias ("MRT_SetRiskyUnwindContext"))); + +extern "C" void MCC_SetReliableUnwindContext() +__attribute__ ((alias ("MRT_SetReliableUnwindContextStatus"))); +#endif // __ANDROID__ + +extern "C" void MRT_SetIgnoredUnwindContextStatus() { + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + initialContext.status = UnwindContextStatusIsIgnored; + initialUnwindContext.SetEffective(true); +} + +extern "C" void MCC_SetIgnoredUnwindContext() +__attribute__ ((alias ("MRT_SetIgnoredUnwindContextStatus"))); + +extern "C" void MRT_SaveLastUnwindContext(CallChain *frameAddr) { + DCHECK(frameAddr != nullptr) << "frameAddr is nullptr in MRT_SaveLastUnwindContext" << maple::endl; + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + + initialUnwindContext.SetEffective(false); + UnwindData *unwindData = R2CFrame::GetUnwindData(frameAddr); + unwindData->pc = initialContext.frame.ip; + unwindData->fa = initialContext.frame.fa; + unwindData->lr = initialContext.frame.lr; + unwindData->ucstatus = static_cast(initialContext.status); + unwindData->interpFrame = reinterpret_cast(initialContext.GetInterpFrame()); + unwindData->directCallJava = static_cast(initialContext.HasDirectJavaCallee()); + initialUnwindContext.SetEffective(true); +} + +extern "C" void MRT_RestoreLastUnwindContext(CallChain *frameAddr) { + InitialUnwindContext &initialUnwindContext = maplert::TLMutator().GetInitialUnwindContext(); + UnwindContext &initialContext = initialUnwindContext.GetContext(); + initialUnwindContext.SetEffective(false); + initialContext.RestoreUnwindContextFromR2CStub(frameAddr); + initialUnwindContext.SetEffective(true); +} + +extern "C" void MRT_SetCurrentCompiledMethod(void *func) { + maplert::TLMutator().SetCurrentCompiledMethod(func); +} +extern "C" void *MRT_GetCurrentCompiledMethod() { + return maplert::TLMutator().GetCurrentCompiledMethod(); +} + +ATTR_NO_SANITIZE_ADDRESS +UnwindState BasicFrame::UnwindToMachineCaller(BasicFrame &caller) const { + if (VLOG_IS_ON(eh)) { + Check(this->HasFrameStructure(), ">> try to unwind an abnormal frame"); + } + + CallChain *curFp = this->fa; // backup current frame address + CallChain *callerFp = curFp->callerFrameAddress; + caller.fa = callerFp; + caller.ip = this->ra; + Check(caller.ip == curFp->returnAddress, "fatal: error callee frame for stack unwinding"); + + if (caller.IsAnchorFrame()) { + caller.ra = nullptr; + return kUnwindFinish; + } else { + caller.ra = callerFp->returnAddress; + return kUnwindSucc; + } +} + 
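UnwindToMachineCaller above steps through the machine frame chain: each frame anchor stores the caller's frame address and the return address, so one unwind step is two loads. A toy model of that walk, assuming the same two-slot frame record (this is not the runtime's CallChain type):

#include <cstdio>

// Two-slot frame record mirroring what the unwinder assumes at each frame anchor.
struct ToyFrame {
  ToyFrame *callerFrameAddress;  // saved frame pointer of the caller
  const void *returnAddress;     // saved return address (LR)
};

// Walk until a null frame pointer, which plays the role of the anchor frame here.
void WalkChain(const ToyFrame *fa) {
  while (fa != nullptr) {
    printf("frame %p returns to %p\n", static_cast<const void*>(fa), fa->returnAddress);
    fa = fa->callerFrameAddress;
  }
}

int main() {
  ToyFrame bottom{nullptr, nullptr};
  ToyFrame middle{&bottom, reinterpret_cast<const void*>(0x1000)};
  ToyFrame top{&middle, reinterpret_cast<const void*>(0x2000)};
  WalkChain(&top);
  return 0;
}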
+void BasicFrame::Check(bool val, const char *msg) const {
+  if (!val) {
+    Dump(msg);
+    MplDumpStack(msg);
+    std::abort();
+  }
+}
+
+// this method is able to obtain the nominal caller for java methods whose prologue and epilogue
+// are eliminated during compilation.
+ATTR_NO_SANITIZE_ADDRESS
+UnwindState JavaFrame::UnwindToNominalCaller(JavaFrame &caller) const {
+  if (VLOG_IS_ON(eh)) {
+    Check(!this->IsAnchorFrame(), ">> try to unwind up from stack bottom frame");
+  }
+
+  if (this->HasFrameStructure()) {
+    // in this case the parent frame is also the nominal caller.
+    UnwindState state = BasicFrame::UnwindToMachineCaller(caller);
+    caller.md = nullptr;
+    caller.lr = nullptr;
+    return state;
+  } else {
+    // in this case we construct a nominal caller.
+    caller.fa = fa;
+    caller.ip = ra;
+    caller.lr = nullptr;
+    caller.md = nullptr;
+
+    if (caller.IsAnchorFrame()) {
+      return kUnwindFinish;
+    }
+
+    CallChain *callerFp = caller.fa;
+    DCHECK(callerFp != nullptr) << "caller frame address is null in JavaFrame::UnwindToNominalCaller" << maple::endl;
+    caller.ra = callerFp->returnAddress;
+    return kUnwindSucc;
+  }
+}
+
+UnwindState JavaFrame::UnwindToJavaCallerFromRuntime(JavaFrame &frame, bool isEH) {
+  size_t n = 0;
+  while (n < MAPLE_STACK_UNWIND_STEP_MAX) {
+    if (frame.IsAnchorFrame() || (isEH && frame.IsR2CFrame())) {
+      return kUnwindFinish;
+    }
+
+    (void)frame.UnwindToNominalCaller(frame);
+
+    if (frame.IsCompiledFrame()) {
+      return kUnwindSucc;
+    }
+    ++n;
+  }
+
+  return kUnwindFail;
+}
+
+bool UnwindContext::IsStackBottomContext() const {
+  if (interpFrame == nullptr) {
+    return frame.IsAnchorFrame();
+  } else {
+    return false;
+  }
+}
+
+ATTR_NO_SANITIZE_ADDRESS
+void UnwindContext::RestoreUnwindContextFromR2CStub(CallChain *fa) {
+  __MRT_ASSERT(fa != nullptr, "fatal error: frame pointer should not be null");
+  UnwindData *unwindData = R2CFrame::GetUnwindData(fa);
+  status = static_cast(unwindData->ucstatus);
+  frame.ip = unwindData->pc;
+  frame.fa = unwindData->fa;
+  frame.lr = unwindData->lr;
+  frame.ra = nullptr;
+  interpFrame = reinterpret_cast(unwindData->interpFrame);
+
+  CallChain *fp = frame.fa;
+  if (fp != nullptr) {
+    if (frame.lr != nullptr) {
+      frame.ra = frame.lr;
+    } else {
+      frame.ra = fp->returnAddress;
+    }
+  }
+}
+
+UnwindState UnwindContext::UnwindToCallerContextFromR2CStub(UnwindContext& caller) const {
+  __MRT_ASSERT(frame.IsR2CFrame(), "fatal error: current frame is not R2C stub");
+  __MRT_ASSERT(status == UnwindContextIsReliable || status == UnwindContextIsRisky,
+               "fatal error: R2C stub is not reliable for unwinding stack");
+
+  UnwindData *unwindData = R2CFrame::GetUnwindData(frame.fa);
+  UnwindContextStatus stubStatus = static_cast(unwindData->ucstatus);
+
+  switch (stubStatus) {
+    case UnwindContextStatusIsIgnored: {
+      caller.status = UnwindContextStatusIsIgnored;
+      caller.interpFrame = reinterpret_cast(unwindData->interpFrame);
+      __MRT_ASSERT(caller.interpFrame != nullptr, "fatal error: interp frame is null");
+      caller.frame.Reset();
+      break;
+    }
+    case UnwindContextIsReliable: {
+      (void)frame.UnwindToNominalCaller(caller.frame);
+      caller.status = UnwindContextIsReliable;
+      caller.interpFrame = nullptr;
+      break;
+    }
+    case UnwindContextIsRisky: {
+      caller.RestoreUnwindContextFromR2CStub(frame.fa);
+      caller.status = UnwindContextIsReliable;
+      if (VLOG_IS_ON(eh)) {
+        __MRT_ASSERT(caller.interpFrame == nullptr, "fatal error: interp frame should be null");
+      }
+      break;
+    }
+    // Normally, only one case can reach here: a thread is executing MRT_SaveLastUnwindContext when it
+    // suddenly receives a signal from another thread asking it to unwind its stack. In that case there is no
+    // guarantee that the status slot of the stub has been updated, so it may hold an invalid status.
+    default: {
+      EHLOG(ERROR) << "Top function must be MRT_SaveLastUnwindContext, \
+          otherwise memory has been overwritten" << maple::endl;
+      return kUnwindFinish;
+    }
+  }
+
+  if (caller.IsStackBottomContext()) {
+    return kUnwindFinish;
+  }
+  return kUnwindSucc;
+}
+
+UnwindState UnwindContext::UnwindToCallerContext(UnwindContext& caller, bool) const {
+  // R2C stub is treated specially for unwinding stack because it saves the unwind context transition.
+  if (frame.IsR2CFrame()) {
+    return UnwindToCallerContextFromR2CStub(caller);
+  }
+
+  switch (status) {
+    case UnwindContextStatusIsIgnored: {
+      // caller frame is obtained from *interpFrame* and must be an interpreted frame
+      frame.Check(interpFrame != nullptr, "caller frame is expected to be an interpreted frame");
+      return UnwindContextInterpEx::UnwindToCaller(interpFrame, caller);
+    }
+    case UnwindContextIsReliable: {
+      caller.interpFrame = nullptr;
+      caller.status = UnwindContextIsReliable;
+      // caller frame is obtained by checking the structure of the current context frame
+      return frame.UnwindToNominalCaller(caller.frame);
+    }
+    case UnwindContextIsRisky: {
+      if (VLOG_IS_ON(eh)) {
+        // caller frame is obtained from the value saved in UnwindContext.frame, and it must be a compiled java frame
+        frame.Check(frame.IsCompiledFrame(), "a compiled java frame is expected");
+      }
+
+      caller.frame = frame;
+      caller.status = UnwindContextIsReliable;
+      caller.interpFrame = nullptr;
+      return kUnwindSucc;
+    }
+
+    default: {
+      LOG(FATAL) << "UnwindToCallerContext status invalid" << maple::endl;
+    }
+  }
+}
+
+// obtain an unwind context as the starting point for unwinding the current thread.
+UnwindState UnwindContext::GetStackTopUnwindContext(UnwindContext &context) {
+  UnwindContext &lastContext = maplert::TLMutator().GetLastJavaContext();
+  switch (lastContext.status) {
+    case UnwindContextIsReliable: {
+      lastContext.frame.Check(lastContext.interpFrame == nullptr, "");
+      MRT_UNW_GETCALLERFRAME(context.frame);
+      context.status = UnwindContextIsReliable;
+      context.interpFrame = nullptr;
+      break;
+    }
+    case UnwindContextIsRisky: {
+      lastContext.frame.Check(lastContext.interpFrame == nullptr, "");
+      context.status = UnwindContextIsReliable;
+      context.frame = lastContext.frame;
+
+      if (context.IsStackBottomContext()) {
+        return kUnwindFinish;
+      }
+      // restore the return address from the frame structure since it is not maintained in this case
+      if (context.frame.lr != nullptr) {
+        context.frame.ra = context.frame.lr;
+      } else {
+        CallChain *curFp = lastContext.frame.fa;
+        context.frame.ra = curFp->returnAddress;
+      }
+      break;
+    }
+    case UnwindContextStatusIsIgnored: {
+      lastContext.frame.Check(lastContext.interpFrame != nullptr, "");
+      context.status = UnwindContextStatusIsIgnored;
+      context.interpFrame = lastContext.interpFrame;
+      break;
+    }
+
+    default: std::abort();
+  }
+  return kUnwindFinish;
+}
+
+void JavaFrameInfo::ResolveFrameInfo() {
+  if (resolved) {
+    return;
+  }
+
+  const uint32_t *ip = javaFrame.GetFramePC();
+  if (ip == nullptr) {
+    return;
+  }
+  LinkerLocInfo info;
+  if (LinkerAPI::Instance().LocateAddress(const_cast(ip), info, false)) {
+    startIp = static_cast(info.addr);
+    endIp = reinterpret_cast(reinterpret_cast(info.addr) + info.size);
+    resolved = true;
+  } else {
+    EHLOG(INFO) << "Frame is java method written with assembly. 
pc : " << ip << maple::endl; + startIp = nullptr; + endIp = nullptr; + return; + } + ResolveMethodMetadata(); +} + +void MapleStack::FastRecordCurrentJavaStack(std::vector& uwContextStack, size_t steps, bool resolveMD) { + if (VLOG_IS_ON(eh)) { + MplDumpStack("------------------ Fast Record Current Java Stack ------------------"); + } + UnwindContext uwContext; + (void) UnwindContext::GetStackTopUnwindContext(uwContext); + + size_t n = 0; + while (n < steps && !uwContext.IsStackBottomContext()) { + // Here you need to exclude the stack of the native implementation of the java method, + // Otherwise the LocalAddress used in the later GetCallClass will trigger abort + if (uwContext.IsCompiledContext()) { + if (resolveMD) { + uwContext.frame.ResolveMethodMetadata(); + } + uwContextStack.push_back(uwContext); + ++n; + } else if (uwContext.IsInterpretedContext()) { + uwContextStack.push_back(uwContext); + ++n; + } + + UnwindContext caller; + if (UNLIKELY(uwContext.UnwindToCallerContext(caller) == kUnwindFinish)) { + break; + } + uwContext = caller; + } +} + +void *MapleStack::VisitCurrentJavaStack(UnwindContext& uwContext, + std::function const filter) { + if (VLOG_IS_ON(eh)) { + MplDumpStack("------------------ Visit Current Java Stack ------------------"); + } + + (void) UnwindContext::GetStackTopUnwindContext(uwContext); + + size_t n = 0; + void *retval = nullptr; + while (n < MAPLE_STACK_UNWIND_STEP_MAX && !uwContext.IsStackBottomContext()) { + if (uwContext.IsCompiledContext() || + uwContext.IsInterpretedContext()) { + retval = filter(uwContext); + if (retval != nullptr) { + return retval; + } + ++n; + } + + UnwindContext caller; + if (UNLIKELY(uwContext.UnwindToCallerContext(caller) == kUnwindFinish)) { + break; + } + uwContext = caller; + } + return retval; +} + +void MapleStack::VisitJavaStackRoots(const UnwindContext &initialContext, const AddressVisitor &func, uint32_t tid) { + UnwindContext context; + UnwindState state = MapleStack::GetLastJavaContext(context, initialContext, tid); + if (state != kUnwindSucc) { + return; + } + + while (!context.IsStackBottomContext()) { + if (context.IsCompiledContext()) { + // precise stack scan stack local var + JavaFrameInfo frameInfo(context.frame); + if (!frameInfo.IsAsmFrame()) { + frameInfo.VisitGCRoots(func); + } + } else if (context.IsInterpretedContext()) { + // visit gc roots in interp frame + UnwindContextInterpEx::VisitGCRoot(context, func); + } + + UnwindContext caller; + if (UNLIKELY(context.UnwindToCallerContext(caller) == kUnwindFinish)) { + break; + } + context = caller; + } + return; +} + +static UnwindReasonCode UnwindBacktraceCallback(const _Unwind_Context *context, void *arg) { + uintptr_t pc; +#if defined(__arm__) + _Unwind_VRS_Get(const_cast<_Unwind_Context*>(context), _UVRSC_CORE, kPC, _UVRSD_UINT32, &pc); +#else + pc = _Unwind_GetIP(const_cast<_Unwind_Context*>(context)); +#endif + std::vector *stack = reinterpret_cast*>(arg); + if (pc) { + (*stack).push_back(pc); + } else { + LOG(INFO) << "UnwindBacktrace is failed to get pc" << maple::endl; + } + return _URC_NO_REASON; +} + +void MapleStack::FastRecordCurrentStackPCsByUnwind(std::vector &callStack, size_t step) { + (void)_Unwind_Backtrace(UnwindBacktraceCallback, &callStack); + if (callStack.size() > step) { + callStack.erase(callStack.begin() + step, callStack.end()); + } +} + +struct sigaction act; +struct sigaction oldact; + +// The LOG interface cannot be used in this function to prevent deadlocks. 
+static void RemoteUnwindHandler(int, siginfo_t*, void *ucontext) {
+  sigaction(SIGRTMIN, &oldact, nullptr);
+  Mutator *mutator = CurrentMutatorPtr();
+  if (mutator == nullptr) {
+    LOG(FATAL) << "current mutator should not be nullptr " << ucontext << maple::endl;
+  }
+
+#if defined(__aarch64__)
+  mcontext_t *mcontext = &(reinterpret_cast<ucontext_t*>(ucontext)->uc_mcontext);
+  if (!mutator->InSaferegion()) {
+    if (LinkerAPI::Instance().IsJavaText(reinterpret_cast(mcontext->pc))) {
+      InitialUnwindContext &initialUnwindContext = mutator->GetInitialUnwindContext();
+      UnwindContext &initialContext = initialUnwindContext.GetContext();
+      initialContext.frame.fa = reinterpret_cast<CallChain*>(mcontext->regs[Register::kFP]);
+      initialContext.frame.ip = reinterpret_cast<const uint32_t*>(mcontext->pc + kAarch64InsnSize);
+    } else {
+      while (!remoteUnwinder.IsUnwinderIdle()) {}
+      remoteUnwinder.SetRemoteIdle(true);
+      return;
+    }
+  }
+#elif defined(__arm__)
+  if (!mutator->InSaferegion()) {
+    while (!remoteUnwinder.IsUnwinderIdle()) {}
+    remoteUnwinder.SetRemoteIdle(true);
+    return;
+  }
+#endif
+
+  // The last java frame update is finished, wake up the requesting thread to unwind.
+  remoteUnwinder.SetRemoteUnwindStatus(kRemoteLastJavaFrameUpdataFinish);
+
+  // Wait for the requesting thread to finish unwinding.
+  if (!remoteUnwinder.WaitRemoteUnwindStatus(kRemoteUnwindFinish)) {
+    while (!remoteUnwinder.IsUnwinderIdle()) {}
+    remoteUnwinder.SetRemoteIdle(true);
+    return;
+  }
+
+  remoteUnwinder.SetRemoteUnwindStatus(kRemoteTargetThreadSignalHandlingExited);
+  while (!remoteUnwinder.IsUnwinderIdle()) {}
+  remoteUnwinder.SetRemoteIdle(true);
+}
+
+void CheckMutatorListLock(uint32_t tid) {
+  uint64_t times = 0;
+  constexpr uint64_t sleepTime = 5000; // 5us
+  constexpr uint64_t maxTimes = 1000000; // 1sec
+  while (!MutatorList::Instance().TryLock()) {
+    ++times;
+    if (times > maxTimes) {
+      LOG(FATAL) << "Get Mutator Timeout : tid = " << tid << maple::endl;
+    }
+    timeutils::SleepForNano(sleepTime);
+  }
+}
+
+void RecordStackFromUnwindContext(std::vector &callStack, size_t steps, UnwindContext &context) {
+  size_t n = 0;
+  while (n < steps && !context.IsStackBottomContext()) {
+    // The program counter (PC) holds the address of the next instruction to be executed, so the address of the
+    // current instruction is the recorded PC minus one instruction size.
+    // The stack of the native implementation of a java method must be excluded here, otherwise the LocalAddress
+    // used later in GetCallClass will trigger an abort.
+    if (!UnwindContextInterpEx::TryRecordStackFromUnwindContext(callStack, context)) {
+      // The lowest ip bit in an r2c frame is marked with 1 and needs to be cleared before adjusting the address.
+      if (reinterpret_cast(context.frame.ip) & kDexMethodTag) {
+        callStack.push_back(reinterpret_cast(context.frame.ip) - kDexMethodTag - kAarch64InsnSize);
+      } else {
+        callStack.push_back(reinterpret_cast(context.frame.ip) - kAarch64InsnSize);
+      }
+    }
+
+    ++n;
+    UnwindContext caller;
+    if (UNLIKELY(context.UnwindToCallerContext(caller) == kUnwindFinish)) {
+      break;
+    }
+    context = caller;
+  }
+}
+
+// At present, java methods implemented in native code are not included when printing the call stack in maple,
+// because such methods are compiled by g++ and carry no maple metadata, so they cannot be resolved when the
+// call stack is printed.
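RemoteUnwindHandler above and RecordStackPCs (below) cooperate through a handshake: the requesting thread signals the target, the target publishes its last Java frame from the signal handler and waits, and the requester unwinds before releasing the target. A reduced sketch of that pattern on Linux, keeping the handler limited to async-signal-safe operations (atomics only, no logging); the names and the published payload are illustrative, not the RemoteUnwinder API:

#include <atomic>
#include <cstdio>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

enum Status { kIdle, kContextPublished, kUnwindDone };
static std::atomic<int> gStatus{kIdle};
static std::atomic<const void*> gTargetPc{nullptr};

static void Handler(int, siginfo_t*, void*) {
  // Publish a cheap payload and spin until the requester is done; no locks, no LOG here.
  gTargetPc.store(reinterpret_cast<const void*>(&Handler), std::memory_order_release);
  gStatus.store(kContextPublished, std::memory_order_release);
  while (gStatus.load(std::memory_order_acquire) != kUnwindDone) {
  }
}

static void *TargetThread(void*) {
  while (gStatus.load(std::memory_order_acquire) != kUnwindDone) {
    usleep(1000);  // the target just runs normally until it is signalled
  }
  return nullptr;
}

int main() {
  struct sigaction act = {};
  act.sa_sigaction = Handler;
  act.sa_flags = SA_SIGINFO | SA_RESTART;
  sigemptyset(&act.sa_mask);
  sigaction(SIGRTMIN, &act, nullptr);

  pthread_t target;
  pthread_create(&target, nullptr, TargetThread, nullptr);
  pthread_kill(target, SIGRTMIN);                                    // "unwind request"
  while (gStatus.load(std::memory_order_acquire) != kContextPublished) {
  }
  printf("target published pc %p\n", gTargetPc.load());              // requester "unwinds"
  gStatus.store(kUnwindDone, std::memory_order_release);             // release the target
  pthread_join(target, nullptr);
  return 0;
}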
+void MapleStack::FastRecordCurrentStackPCs(std::vector &callStack, size_t steps) { + if (VLOG_IS_ON(eh)) { + MplDumpStack("------------------ Fast Record Current Java Stack PCs ------------------"); + } + + UnwindContext context; + (void)UnwindContext::GetStackTopUnwindContext(context); + + RecordStackFromUnwindContext(callStack, steps, context); +} + +void MapleStack::RecordStackPCs(std::vector &callStack, uint32_t tid, size_t steps) { + std::lock_guard lock(remoteUnwinder.GetRemoteUnwindLock()); + // Reserve memory space for the vector. When you push back, the memory will not be + // applied. Avoid deadlock with target thread + callStack.reserve(steps); + + if (tid == static_cast(maple::GetTid())) { + LOG(ERROR) << "The target thread is the same as the current thread" << maple::endl; + remoteUnwinder.SetUnwinderIdle(true); + return; + } + + CheckMutatorListLock(tid); + + Mutator *mutator = MutatorList::Instance().GetMutator(tid); + if (mutator == nullptr) { + MutatorList::Instance().Unlock(); + return; + } + + if (!remoteUnwinder.WaitForRemoteIdle()) { + MutatorList::Instance().Unlock(); + return; + } + + remoteUnwinder.SetUnwinderIdle(false); + remoteUnwinder.SetRemoteIdle(false); + remoteUnwinder.SetRemoteUnwindStatus(kRemoteUnwindIdle); + + // register signal hander function + (void)memset_s(&act, sizeof(act), 0, sizeof(act)); + (void)memset_s(&oldact, sizeof(oldact), 0, sizeof(oldact)); + act.sa_sigaction = RemoteUnwindHandler; + act.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK; + sigemptyset(&act.sa_mask); + if (sigaction(SIGRTMIN, &act, &oldact) != 0) { + LOG(ERROR) << "sigaction failed" << maple::endl; + return; + } + + LOG(INFO) << "send signal to tid " << tid << maple::endl; + + // *************************************************************************** + // From now on until the SignalHandler exits, you can't use the LOG interface, + // because if the target thread is in the LOG state, a deadlock will occur. + // *************************************************************************** + if (maple::TgKill(static_cast(maple::GetPid()), tid, SIGRTMIN) != 0) { + MutatorList::Instance().Unlock(); + remoteUnwinder.SetRemoteIdle(true); + remoteUnwinder.SetUnwinderIdle(true); + sigaction(SIGRTMIN, &oldact, nullptr); + return; + } + + // waiting for the target thread to complete the last java frame update + if (!remoteUnwinder.WaitRemoteUnwindStatus(kRemoteLastJavaFrameUpdataFinish)) { + MutatorList::Instance().Unlock(); + remoteUnwinder.SetUnwinderIdle(true); + return; + } + + UnwindContext uwContext; + InitialUnwindContext &initialUnwindContext = mutator->GetInitialUnwindContext(); + + // At this time, the target thread has entered the signal handling. The validity of the + // corresponding mutator can be guaranteed and so the lock can be released. + MutatorList::Instance().Unlock(); + UnwindState state = kUnwindFail; + if (initialUnwindContext.IsEffective()) { + const UnwindContext &initialContext = initialUnwindContext.GetContext(); + state = GetLastJavaContext(uwContext, initialContext, tid); + } + + if (state == kUnwindFail) { + // After the kill signal is sent, it must be exited by the handshake + // mechanism, otherwise the corresponding thread will always wait. 
+ remoteUnwinder.SetRemoteUnwindStatus(kRemoteUnwindFinish); + if (!remoteUnwinder.WaitRemoteUnwindStatus(kRemoteTargetThreadSignalHandlingExited)) { + remoteUnwinder.SetUnwinderIdle(true); + return; + } + remoteUnwinder.SetUnwinderIdle(true); + return; + } + + RecordStackFromUnwindContext(callStack, steps, uwContext); + + // After the kill signal is sent, it must be exited by the handshake + // mechanism, otherwise the corresponding thread will always wait. + remoteUnwinder.SetRemoteUnwindStatus(kRemoteUnwindFinish); + + if (!remoteUnwinder.WaitRemoteUnwindStatus(kRemoteTargetThreadSignalHandlingExited)) { + remoteUnwinder.SetUnwinderIdle(true); + return; + } + remoteUnwinder.SetUnwinderIdle(true); +} + +UnwindState GetNextJavaFrame(UnwindContext &context) { + UnwindState state = kUnwindFail; + while (!context.IsStackBottomContext()) { + if (context.IsCompiledContext()) { + context.interpFrame = nullptr; + context.status = UnwindContextIsReliable; + state = kUnwindSucc; + break; + } + + if (context.IsInterpretedContext()) { + context.status = UnwindContextStatusIsIgnored; + state = kUnwindSucc; + break; + } + + UnwindContext caller; + if (UNLIKELY(context.UnwindToCallerContext(caller) == kUnwindFinish)) { + state = kUnwindFinish; + break; + } + context = caller; + } + return state; +} + +// NEED: support enable USE_ZTERP in phone. Similar with other functions, need refactor. +UnwindState MapleStack::GetLastJavaContext(UnwindContext &context, const UnwindContext &initialContext, uint32_t tid) { + if (VLOG_IS_ON(eh)) { + EHLOG(INFO) << "GetLastJavaContext tid : " << tid << maple::endl; + } + if (initialContext.IsStackBottomContext()) { + return kUnwindFinish; + } + + UnwindState state = kUnwindFail; + + switch (initialContext.status) { + case UnwindContextStatusIsIgnored: { + initialContext.frame.Check(initialContext.interpFrame != nullptr, ""); + context.status = UnwindContextStatusIsIgnored; + context.interpFrame = initialContext.interpFrame; + context.frame.Reset(); + state = kUnwindSucc; + break; + } + case UnwindContextIsReliable: + case UnwindContextIsRisky: { + JavaFrame frame = initialContext.frame; + CallChain *curFp = frame.fa; + if (VLOG_IS_ON(eh)) { + MplCheck(curFp != 0, "latest java frame address should not be null"); + } + if (frame.lr) { + // this frame does not have frame structure. + frame.ra = frame.lr; + } else { + // this frame does have frame structure so we obtain return address as ABI described. + frame.ra = curFp->returnAddress; + } + + frame.SetMetadata(nullptr); + context.frame = frame; + context.status = initialContext.status; + state = GetNextJavaFrame(context); + break; + } + default: std::abort(); + } + return state; +} + + +// If the parameter isEH is true, it means that the function may recognize n2j frame, +// which needs to be set to true when calling the function in exception handling, and +// false when stacking. 
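GetLastJavaContext above and GetLastJavaFrame below recover the return address with the same rule: a frame that still carries a live LR in the context had no frame structure, so LR is used directly; otherwise the return address is read from the on-stack frame record. A small sketch of that rule with toy types (not the runtime's JavaFrame or CallChain):

#include <cassert>
#include <cstdint>

struct ToyRecord { const uint32_t *returnAddress; };  // on-stack frame record

// Prefer the side-channel LR (frameless method); otherwise read the frame record.
const uint32_t *RecoverReturnAddress(const uint32_t *lr, const ToyRecord &record) {
  return lr != nullptr ? lr : record.returnAddress;
}

int main() {
  uint32_t insnA = 0;
  uint32_t insnB = 0;
  ToyRecord record{&insnA};
  assert(RecoverReturnAddress(nullptr, record) == &insnA);  // full frame: use the record
  assert(RecoverReturnAddress(&insnB, record) == &insnB);   // frameless: use LR
  return 0;
}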
+UnwindState MapleStack::GetLastJavaFrame(JavaFrame &frame, const UnwindContext *initialContext, bool isEH) { + bool unwindAnotherThread = true; + if (initialContext == nullptr) { + initialContext = &(TLMutator().GetLastJavaContext()); + unwindAnotherThread = false; + } + + UnwindContextStatus status = initialContext->status; + + UnwindState state = kUnwindFail; + if (status == UnwindContextIsReliable && !unwindAnotherThread) { + MRT_UNW_GETCALLERFRAME(frame); + state = frame.UnwindToJavaCallerFromRuntime(frame, isEH); + } else { + frame = initialContext->frame; + + // For rare cases (like in reference collector thread), *initialContext* may holds an anchor frame. + if (frame.IsAnchorFrame()) { + state = kUnwindFinish; + return state; + } else { + CallChain *curFp = frame.fa; + MplCheck(curFp != 0, "latest java frame address should not be null"); + + if (frame.lr) { + // this frame does not have frame structure. + frame.ra = frame.lr; + } else { + // this frame does have frame structure so we obtain return address as ABI described. + frame.ra = curFp->returnAddress; + } + + frame.SetMetadata(nullptr); + } + + if (frame.IsCompiledFrame()) { + state = kUnwindSucc; + } else { + state = frame.UnwindToJavaCallerFromRuntime(frame); + } + } + + if (VLOG_IS_ON(eh)) { + std::stringstream ss; + frame.Dump(">>>> Get stack-top java frame ", ss); + EHLOG(INFO) << ss.str() << maple::endl << + "\t" << "frame context is " << + (status == UnwindContextIsReliable ? "Reliable" : "Risky") << + " -- Unwind " << (state == kUnwindSucc ? "Succeeded" : "Finished") << maple::endl; + } + + return state; +} + +#ifdef MAPLE_RECORD_METHOD_INFO_AT_START_PROC +extern "C" MRT_EXPORT bool MapleGetJavaMethodNameByIP(const uint32_t *ip, std::string &funcName, uint64_t *funcOffset) { + LinkerLocInfo info; + bool isJava = LinkerAPI::Instance().LocateAddress(ip, info, false); + if (isJava) { + if (VLOG_IS_ON(eh)) { + EHLOG(INFO) << std::hex << "obtain java method name for pc " << reinterpret_cast(ip) << " (" << + info.addr << " ~ " << reinterpret_cast(info.addr) + info.size << ")" << + std::dec << maple::endl; + } + + uint64_t *md = static_cast( + JavaFrame::GetMethodMetadata(reinterpret_cast(info.addr))); + if (UNLIKELY(!JavaFrame::GetMapleMethodFullName(funcName, md, ip))) { + EHLOG(ERROR) << "JavaFrame::GetMapleMethodFullName() in MapleGetJavaMethodNameByIP() return false." << + maple::endl; + } + if (funcOffset == nullptr) { + EHLOG(FATAL) << "funcOffset is nullptr" << maple::endl; + } + *funcOffset = reinterpret_cast(ip) - reinterpret_cast(info.addr); + return funcName.size() > 0; + } + return false; +} +#endif // MAPLE_RECORD_METHOD_INFO_AT_START_PROC +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/file_layout.cpp b/src/mrt/compiler-rt/src/file_layout.cpp new file mode 100644 index 0000000000..584dc595ff --- /dev/null +++ b/src/mrt/compiler-rt/src/file_layout.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "file_layout.h" + +namespace maple { +std::string GetLayoutTypeString(uint32_t type) { + switch (type) { + case kLayoutBootHot: + return "BootHot"; + case kLayoutBothHot: + return "BothHot"; + case kLayoutRunHot: + return "RunHot"; + case kLayoutStartupOnly: + return "StartupOnly"; + case kLayoutUsedOnce: + return "UsedOnce"; + case kLayoutExecuted: + return "UsedMaybe"; + case kLayoutUnused: + return "Unused"; + default: + std::cerr << "no such type" << std::endl; + return ""; + } +} +} // namespace maple diff --git a/src/mrt/compiler-rt/src/gc_log.cpp b/src/mrt/compiler-rt/src/gc_log.cpp new file mode 100644 index 0000000000..1b656d3207 --- /dev/null +++ b/src/mrt/compiler-rt/src/gc_log.cpp @@ -0,0 +1,247 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "gc_log.h" + +#include +#include "panic.h" +#include "collector/stats.h" +#include "cinterface.h" +#include "linker_api.h" + +#define SET_LOG_FLAGS(type, usefile, verbose, open) do { \ + writeToFile[type] = MRT_ENVCONF(usefile, usefile##_DEFAULT) || VLOG_IS_ON(verbose); \ + openFileOnStartup[type] = MRT_ENVCONF(open, open##_DEFAULT) || VLOG_IS_ON(verbose); \ +} while (0) + +namespace maplert { +class NullBuffer : public std::streambuf { + public: + int overflow(int c) override { + return c; + } +}; + +class NullStream : public std::ostream { + public: + NullStream() : std::ostream(&sb) {} + ~NullStream() = default; + private: + NullBuffer sb; +}; + +static NullStream nullStream; + +namespace { +const char *kPrefixName[kLogtypeNum] = { + "gclog_", "rctracelog_", "rplog_", "cyclelog_", "allocatorfragmentlog_", "allocatorlog_", "mixlog_", "stderr_" +}; + +// Orders of magnitudes. Note: The upperbound of uint64_t is 16E (16 * (1024 ^ 6)) +const char *kOrdersOfManitude[] = { "", "K", "M", "G", "T", "P", "E" }; + +// Orders of magnitudes. 
Note: The upperbound of uint64_t is 16E (16 * (1024 ^ 6)) +const char *kOrdersOfMagnitudeFromNano[] = { "n", "u", "m", nullptr }; + +// number of digits in a pretty format segment (100,000,000 each has three digits) +constexpr int kNumDigitsPerSegment = 3; +} + +void GCLogImpl::Init(bool gcLog) { + openGCLog = gcLog; + SetFlags(); + if (doWriteToFile && doOpenFileOnStartup) { + OpenFile(); + } +} + +void GCLogImpl::SetFlags() { + SET_LOG_FLAGS(kLogtypeGc, MRT_GCLOG_USE_FILE, opengclog, MRT_GCLOG_OPEN_ON_STARTUP); + SET_LOG_FLAGS(kLogtypeRcTrace, MRT_RCTRACELOG_USE_FILE, openrctracelog, MRT_RCTRACELOG_OPEN_ON_STARTUP); + SET_LOG_FLAGS(kLogtypeRp, MRT_RPLOG_USE_FILE, openrplog, MRT_RPLOG_OPEN_ON_STARTUP); + SET_LOG_FLAGS(kLogtypeCycle, MRT_CYCLELOG_USE_FILE, opencyclelog, MRT_CYCLELOG_OPEN_ON_STARTUP); + SET_LOG_FLAGS(kLogTypeAllocFrag, MRT_ALLOCFRAGLOG_USE_FILE, allocatorfragmentlog, MRT_ALLOCFRAGLOG_OPEN_ON_STARTUP); + SET_LOG_FLAGS(kLogTypeAllocator, MRT_ALLOCATORLOG_USE_FILE, allocatorlog, MRT_ALLOCATOR_OPEN_ON_STARTUP); + SET_LOG_FLAGS(kLogTypeMix, MRT_MIXLOG_USE_FILE, openmixlog, MRT_MIXLOG_OPEN_ON_STARTUP); + + if (openGCLog) { + writeToFile[kLogtypeGc] = true; + openFileOnStartup[kLogtypeGc] = true; + } + doWriteToFile = false; + doOpenFileOnStartup = false; + for (int i = 0; i < kLogtypeNum; ++i) { + if (i == kLogtypeGcOrStderr) { +#ifdef __ANDROID__ + writeToFile[i] = false; + (void)file[i].rdbuf()->pubsetbuf(nullptr, 0); // set 0 as unbuffered stream to save memory when gclog disabled + os[i] = &nullStream; + doWriteToFile = doWriteToFile || writeToFile[i]; + doOpenFileOnStartup = doOpenFileOnStartup || openFileOnStartup[i]; +#else + writeToFile[i] = true; + os[i] = &std::cerr; +#endif + } else { + (void)file[i].rdbuf()->pubsetbuf(nullptr, 0); // set 0 as unbuffered stream to save memory when gclog disabled + os[i] = &nullStream; + doWriteToFile = doWriteToFile || writeToFile[i]; + doOpenFileOnStartup = doOpenFileOnStartup || openFileOnStartup[i]; + } + } +} + +void GCLogImpl::OpenFile() { + if (!doWriteToFile) { + return; + } + + pid_t pid = getpid(); + std::string dateDigit = timeutils::GetDigitDate(); +#ifdef __ANDROID__ + std::string dirName = util::GetLogDir(); +#else + std::string dirName = "."; +#endif + + for (int i = 0; i < kLogtypeNum; ++i) { + if (!writeToFile[i] || (i == kLogtypeGcOrStderr)) { + continue; + } + + std::string baseName = kPrefixName[i] + std::to_string(pid) + "_" + dateDigit + ".txt"; + std::string fileName = dirName + "/" + baseName; + file[i] = std::ofstream(fileName, std::ofstream::app); // Assignment closes the old file. 
+ if (!file[i]) { + LOG(ERROR) << "GCLogImpl::OpenFile(): fail to open the file" << maple::endl; + continue; + } + buffer[i] = new (std::nothrow) char[kGcLogBufSize]; + if (buffer[i] == nullptr) { + LOG(ERROR) << "GCLogImpl::OpenFile(): new char[kGcLogBufSize] failed" << maple::endl; + continue; + } + (void)file[i].rdbuf()->pubsetbuf(buffer[i], kGcLogBufSize); + os[i] = &file[i]; + } +#ifdef __ANDROID__ + writeToFile[kLogtypeGcOrStderr] = writeToFile[kLogtypeGc]; + os[kLogtypeGcOrStderr] = &file[kLogtypeGc]; +#endif +} + +void GCLogImpl::CloseFile() { + if (!doWriteToFile) { + return; + } + + for (int i = 0; i < kLogtypeNum; ++i) { + if (writeToFile[i] && (i != kLogtypeGcOrStderr)) { + if (os[i] == &file[i]) { + os[i] = &nullStream; + file[i].close(); + delete[] buffer[i]; + buffer[i] = nullptr; + } + } + } +} + +void GCLogImpl::OnPreFork() { + CloseFile(); +} + +void GCLogImpl::OnPostFork() { + SetFlags(); + OpenFile(); +} + +void GCLogImpl::OnGCStart() { + std::string dateDigit = timeutils::GetDigitDate(); + Stream(kLogtypeGc) << "Begin GC log. Time: " << dateDigit << '\n'; + Stream(kLogtypeGc) << "Current allocated: " << Pretty(stats::gcStats->CurAllocBytes()) << + " Current threshold: " << Pretty(stats::gcStats->CurGCThreshold()) << '\n'; + Stream(kLogtypeGc) << "Current allocated native bytes (before GC): " << Pretty(MRT_GetNativeAllocBytes()) << '\n'; +} + +void GCLogImpl::OnGCEnd() { + Stream(kLogtypeGc) << "Current allocated native bytes (after GC): " << Pretty(MRT_GetNativeAllocBytes()) << '\n'; + Stream(kLogtypeGc) << "End of GC log.\n\n"; + Stream(kLogtypeGc).flush(); +} + +GCLogImpl gcLogInstance; + +GCLogImpl &GCLog() { + return gcLogInstance; +} + +std::string Pretty(uint64_t number) { + std::string orig = std::to_string(number); + int pos = static_cast(orig.length()) - kNumDigitsPerSegment; + while (pos > 0) { + orig.insert(pos, ","); + pos -= kNumDigitsPerSegment; + } + return orig; +} + +std::string Pretty(int64_t number) { + std::string orig = std::to_string(number); + int pos = static_cast(orig.length()) - kNumDigitsPerSegment; + while (pos > 0) { + orig.insert(pos, ","); + pos -= kNumDigitsPerSegment; + } + return orig; +} + +std::string Pretty(uint32_t number) { + return Pretty(static_cast(number)); +} + +// Useful for informatic units, such as KiB, MiB, GiB, ... +std::string PrettyOrderInfo(uint64_t number, std::string unit) { + size_t order = 0; + const uint64_t factor = 1024; + + while (number > factor) { + number /= factor; + order += 1; + } + + const char *prefix = kOrdersOfManitude[order]; + const char *infix = order > 0 ? 
"i" : ""; // 1KiB = 1024B, but there is no "1iB" + + return std::to_string(number) + std::string(prefix) + std::string(infix) + unit; +} + +// Useful for scientific units where number is in nanos: ns, us, ms, s +std::string PrettyOrderMathNano(uint64_t number, std::string unit) { + size_t order = 0; + const uint64_t factor = 1000; // show in us if under 10ms + + while (number > factor && kOrdersOfMagnitudeFromNano[order] != nullptr) { + number /= factor; + order += 1; + } + + const char *prefix = kOrdersOfMagnitudeFromNano[order]; + if (prefix == nullptr) { + prefix = ""; + } + + return std::to_string(number) + std::string(prefix) + unit; +} +} \ No newline at end of file diff --git a/src/mrt/compiler-rt/src/heap_stats.cpp b/src/mrt/compiler-rt/src/heap_stats.cpp new file mode 100644 index 0000000000..32706e7ddd --- /dev/null +++ b/src/mrt/compiler-rt/src/heap_stats.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "heap_stats.h" +#include "chosen.h" + +namespace maplert { +HeapStats heapStats; +address_t HeapStats::heapStartAddr = 0; +size_t HeapStats::heapCurrentSize = 0; +atomic heapProfile; + +struct HeapProfileInit { + public: + HeapProfileInit() { + heapProfile.store(HEAP_PROFILE, std::memory_order_relaxed); + } +}; +HeapProfileInit heapProfileInit; + +// Enable/Disable heapstats collection +extern "C" void MRT_SetHeapProfile(int hp) { + static constexpr int heapProfileDisable = 0; + static constexpr int heapProfileEnable = 1; + switch (hp) { + case heapProfileDisable: + // stop heap usage counting (android.os.debug.stopAllocAccounting) + heapStats.DisableHeapStats(); + break; + case heapProfileEnable: + // start heap usage counting (android.os.debug.startAllocAccounting) + heapStats.EnableHeapStats(); + break; + default: + break; + } +} + +HeapStats::HeapStats() + : bytesAlloc(0), + bytesFreed(0), + numAlloc(0), + numFreed(0) {} + +// Reset total and per mutator heapstats +void HeapStats::ResetHeapStats() { + bytesAlloc = 0; + bytesFreed = 0; + numAlloc = 0; + numFreed = 0; + (*theAllocator).GetAllocAccount().ResetWindowTotal(); +} + +// Sum heapstats from all mutators, unused because the per-mutator data is not maintained +void HeapStats::SumHeapStats() { + // this has lock, be careful + (*theAllocator).ForEachMutator([this](AllocMutator &mutator) { + auto &account = mutator.GetAllocAccount(); + bytesAlloc += account.TotalAllocdBytes(); + bytesFreed += account.TotalFreedBytes(); + numAlloc += account.TotalAllocdObjs(); + numFreed += account.TotalFreedObjs(); + account.ResetWindowTotal(); + }); +} + +// Begin heap usage stats collection +void HeapStats::EnableHeapStats() { + heapProfile.store(1, std::memory_order_relaxed); +} + +// End heap usage stats collection +void HeapStats::DisableHeapStats() { + heapProfile.store(0, std::memory_order_relaxed); + auto &account = (*theAllocator).GetAllocAccount(); + bytesAlloc = account.TotalAllocdBytes(); + bytesFreed = account.TotalFreedBytes(); + numAlloc = account.TotalAllocdObjs(); + 
numFreed = account.TotalFreedObjs(); + account.ResetWindowTotal(); +} + +void HeapStats::PrintHeapStats() { + LOG2FILE(kLogtypeGc) << "[HEAPSTATS] Heap statistics:" << maple::endl; + LOG2FILE(kLogtypeGc) << "[HEAPSTATS] allocated " << bytesAlloc << maple::endl; + LOG2FILE(kLogtypeGc) << "[HEAPSTATS] freed " << bytesFreed << maple::endl; + LOG2FILE(kLogtypeGc) << "[HEAPSTATS] number of allocation " << numAlloc << maple::endl; + LOG2FILE(kLogtypeGc) << "[HEAPSTATS] number of free " << numFreed << maple::endl; +} +} diff --git a/src/mrt/compiler-rt/src/libs.cpp b/src/mrt/compiler-rt/src/libs.cpp new file mode 100644 index 0000000000..b503ae12cc --- /dev/null +++ b/src/mrt/compiler-rt/src/libs.cpp @@ -0,0 +1,537 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "libs.h" +#include +#include +#include "exception/exception_handling.h" +#include "tracer.h" +#include "mm_config.h" +#include "mrt_libs_api.h" +#include "mrt_monitor_api.h" +#include "exception/mrt_exception.h" +#include "mrt_handleutil.h" +#include "mrt_methodhandle_mpl.h" +#include "profile.h" +#include "object_base.h" +#include "sizes.h" + +namespace maplert { +// we may inline this? +// array-length: Returns the number of elements of the array +// referenced by 'p' +int32_t MCC_DexArrayLength(const void *p) { + if (p == nullptr) { + MRT_ThrowNullPointerExceptionUnw(); + return -1; + } + return *reinterpret_cast((reinterpret_cast(p) + kJavaArrayLengthOffset)); +} + +int32_t MCC_JavaArrayLength(const void *p) __attribute__((alias("MCC_DexArrayLength"))); + +// we may inline this? +// fill-array-data: Fills the array referenced by d with the static data 's'. +void MCC_DexArrayFill(void *d, void *s, int32_t len) { + __MRT_ASSERT(d && s, "MCC_DexArrayFill arg nullptr"); + char *dest = reinterpret_cast(d) + kJavaArrayContentOffset; + size_t lenSizet = len; + // We trust the frontend to supply the correct length + errno_t rv = memcpy_s(dest, lenSizet, s, lenSizet); + if (rv != EOK) { + LOG(ERROR) << "memcpy_s failed. Reason: " << rv << ", dest: " << dest << ", destMax: " << + lenSizet << ", src: " << s << ", count: " << lenSizet << ", SECUREC_MEM_MAX_LEN: " << + SECUREC_MEM_MAX_LEN << maple::endl; + LOG(FATAL) << "check logcat and get more info " << maple::endl; + } +} + +void MCC_JavaArrayFill(void *d, void *s, int32_t len) __attribute__((alias("MCC_DexArrayFill"))); + +// we may move this to mpl2mpl where Java EH is lowered. +void *MCC_DexCheckCast(void *i __attribute__((unused)), void *c __attribute__((unused))) { + // eventually, remove this builtin function. + // and from the be lowerer + __MRT_ASSERT(0, "MCC_DexCheckCast"); + return nullptr; +} + +void *MCC_JavaCheckCast(void *i __attribute__((unused)), void *c __attribute__((unused))) + __attribute__((alias("MCC_DexCheckCast"))); + +// Moves the class object of a class identified by type_id +// (e.g. Object.class) into vx. 
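+// (Effectively an identity function here: the compiler apparently passes the classinfo address,
+// which already serves as the java.lang.Class reference, so there is nothing to load.)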
+void *MCC_GetReferenceToClass(void *p2tyid) { + return p2tyid; +} + +bool MCC_DexInstanceOf(void *obj, void *javaClass) { + if (UNLIKELY(obj == nullptr)) { + return false; + } + MObject *o = reinterpret_cast(obj); + MClass *c = reinterpret_cast(javaClass); + DCHECK(c != nullptr) << "javaClass is nullptr." << maple::endl; + return o->IsInstanceOf(*c); +} + +bool MCC_JavaInstanceOf(void *obj, void *javaClass) __attribute__((alias("MCC_DexInstanceOf"))); + +bool MCC_IsAssignableFrom(jclass subClass, jclass superClass) { + MClass *mSub = reinterpret_cast(subClass); + MClass *mSuper = reinterpret_cast(superClass); + DCHECK(mSub != nullptr) << "mSub is nullptr." << maple::endl; + DCHECK(mSuper != nullptr) << "mSuper is nullptr." << maple::endl; + return mSuper->IsAssignableFrom(*mSub); +} + +void MCC_DexInterfaceCall(void *dummy __attribute__((unused))) { + // placeholder for doing the right interface call at runtime + __MRT_ASSERT(0, "MCC_DexInterfaceCall not implemented yet"); +} + +void MCC_JavaInterfaceCall(void *dummy __attribute__((unused))) __attribute__((alias("MCC_DexInterfaceCall"))); + +#if defined(__aarch64__) +jvalue MCC_DexPolymorphicCall(jstring calleeName, jstring protoString, int paramNum, jobject methodHandle, ...) { + va_list args; + va_start(args, methodHandle); + VArg vargs(args); + MClass *callerClass = GetContextCls(vargs, reinterpret_cast(calleeName)); + POLYRETURNTYPE result = PolymorphicCallEnter(reinterpret_cast(calleeName), + reinterpret_cast(protoString), static_cast(paramNum), + reinterpret_cast(methodHandle), vargs, callerClass); + va_end(args); + return result; +} + +jvalue MCC_JavaPolymorphicCall(jstring calleeName, jstring protoString, int paramNum, jobject methodHandle, ...) + __attribute__((alias("MCC_DexPolymorphicCall"))); +#endif + +void MRT_BuiltinSyncEnter(address_t obj) { + VLOG(monitor) << "MRT_BuiltinSyncEnter obj = " << obj << maple::endl; + if (UNLIKELY(obj == 0)) { + MRT_ThrowNullPointerExceptionUnw(); + } + + if (maple::IThread *tSelf = maple::IThread::Current()) { + void *ra = __builtin_return_address(0); + tSelf->PushLockedMonitor(reinterpret_cast(obj), ra, (uint32_t)maple::kMplWaitingOn); + jint ret = maple::ObjectBase::MonitorEnter(obj); + tSelf->UpdateLockMonitorState(); + if (UNLIKELY(ret == JNI_ERR)) { + MRT_CheckThrowPendingExceptionUnw(); + } + } +} + +void MRT_BuiltinSyncExit(address_t obj) { + VLOG(monitor) << "MRT_BuiltinSyncExit obj = " << obj << maple::endl; + if (UNLIKELY(obj == 0)) { + MRT_ThrowNullPointerExceptionUnw(); + } + + if (maple::IThread *tSelf = maple::IThread::Current()) { + jint ret = maple::ObjectBase::MonitorExit(obj); + tSelf->PopLockedMonitor(); + if (UNLIKELY(ret == JNI_ERR)) { + MRT_CheckThrowPendingExceptionUnw(); + } + } +} + +void MRT_DumpMethodUse(std::ostream &os) { + DumpMethodUse(os); +} + +// Instrumentation/tracing facilities +static bool ResolveAddr2Name(std::string &funcname, uint64_t addr) { + LinkerLocInfo info; +#ifdef RECORD_METHOD + MRT_DisableMetaProfile(); +#endif + bool result = LinkerAPI::Instance().LocateAddress(reinterpret_cast(addr), info, true); +#ifdef RECORD_METHOD + MRT_EnableMetaProfile(); +#endif + if (result) { + funcname = info.sym; + return true; + } + Dl_info dlInfo; + if (dladdr(reinterpret_cast(addr), &dlInfo) && dlInfo.dli_sname) { + funcname = dlInfo.dli_sname; + return dlInfo.dli_sname; + } + return false; +} + +#if defined(__aarch64__) +#define ENTRY_POINT_IMPL(name) \ + asm(" .text\n" \ + " .align 2\n" \ + " .globl __" #name \ + "__\n" \ + " .type __" #name \ + "__, 
%function\n" \ + "__" #name \ + "__:\n" \ + " /* save arguments. must match frame below. */" \ + " stp X29, X30, [sp,-16]!\n" \ + " stp X0, X1, [SP,-16]!\n" \ + " stp X2, X3, [SP,-16]!\n" \ + " stp X4, X5, [SP,-16]!\n" \ + " stp X6, X7, [SP,-16]!\n" \ + " stp D0, D1, [SP,-16]!\n" \ + " stp D2, D3, [SP,-16]!\n" \ + " stp D4, D5, [SP,-16]!\n" \ + " stp D6, D7, [SP,-16]!\n" \ + " mov X0, SP\n" \ + " bl " #name \ + "Impl\n" \ + " ldp D6, D7, [SP], 16\n" \ + " ldp D4, D5, [SP], 16\n" \ + " ldp D2, D3, [SP], 16\n" \ + " ldp D0, D1, [SP], 16\n" \ + " ldp X6, X7, [SP], 16\n" \ + " ldp X4, X5, [SP], 16\n" \ + " ldp X2, X3, [SP], 16\n" \ + " ldp X0, X1, [SP], 16\n" \ + " ldp X29, X30, [SP], 16\n" \ + " ret\n"); // this return send the control back to the original caller, + // not the next instruction of the jump instruction +#elif defined(__arm__) +#define ENTRY_POINT_IMPL(name) \ + asm(" .text\n" \ + " .align 2\n" \ + " .globl __" #name \ + "__\n" \ + " .type __" #name \ + "__, %function\n" \ + "__" #name \ + "__:\n" \ + "\n"); +#endif + +#define ENTRY_POINT_BL(name) ENTRY_POINT_IMPL(name) +#define ENTRY_POINT_JMP(name) ENTRY_POINT_IMPL(name) + +#ifdef __ANDROID__ +// for save context and terminate system +asm(" .text\n" + " .align 2\n" + " .globl MRT_BuiltinAbortSaferegister\n" + " .type MRT_BuiltinAbortSaferegister, %function\n" + "MRT_BuiltinAbortSaferegister:\n" +#if defined(__aarch64__) + " brk #1\n" + " ret\n" +#endif + " .size MRT_BuiltinAbortSaferegister, .-MRT_BuiltinAbortSaferegister"); + +#else +extern "C" void MRT_BuiltinAbortSaferegister(maple::address_t addr ATTR_UNUSED, const char *clsName ATTR_UNUSED) { + util::PrintBacktrace(); + abort(); +} +#endif + +struct AArch64RegSet { + uint64_t x0; + uint64_t x1; + uint64_t x2; + uint64_t x3; + uint64_t x4; + uint64_t x5; + uint64_t x6; + uint64_t x7; + uint64_t x8; + uint64_t x9; + uint64_t x10; + uint64_t x11; + uint64_t x12; + uint64_t x13; + uint64_t x14; + uint64_t x15; + uint64_t x16; + uint64_t x17; + uint64_t x18; + uint64_t x19; + uint64_t x20; + uint64_t x21; + uint64_t x22; + uint64_t x23; + uint64_t x24; + uint64_t x25; + uint64_t x26; + uint64_t x27; + uint64_t x28; + uint64_t x29; + uint64_t x30; + uint64_t sp; +}; + +const int kMaxRegConst = 128; +static void PrintRegister(const std::string reg, uint64_t val) { + char content[kMaxRegConst]; + int ret = sprintf_s(content, sizeof(content), "\t - phy reg %s\t%zx\t%zu", reg.c_str(), val, val); + if (ret < EOK) { + LOG(ERROR) << "sprintf_s failed. 
buffer: " << content << maple::endl; + return; + } +#ifdef __ANDROID__ + LOG(INFO) << content << maple::endl; +#else + printf("%s\n", content); +#endif +} + +extern "C" void PrintAArch64RegSet(AArch64RegSet *pregs) { + const char *msg = "dump phy reg for aarch64, note x16 and x17 are unreliable due to plt or signal handler stub"; +#ifdef __ANDROID__ + LOG(INFO) << msg << maple::endl; +#else + printf("%s\n", msg); +#endif + DCHECK(pregs != nullptr) << "pregs is nullptr in PrintAArch64RegSet" << maple::endl; + PrintRegister("x0", pregs->x0); + PrintRegister("x1", pregs->x1); + PrintRegister("x2", pregs->x2); + PrintRegister("x3", pregs->x3); + PrintRegister("x4", pregs->x4); + PrintRegister("x5", pregs->x5); + PrintRegister("x6", pregs->x6); + PrintRegister("x7", pregs->x7); + PrintRegister("x8", pregs->x8); + PrintRegister("x9", pregs->x9); + PrintRegister("x10", pregs->x10); + PrintRegister("x11", pregs->x11); + PrintRegister("x12", pregs->x12); + PrintRegister("x13", pregs->x13); + PrintRegister("x14", pregs->x14); + PrintRegister("x15", pregs->x15); + PrintRegister("x16", pregs->x16); + PrintRegister("x17", pregs->x17); + PrintRegister("x18", pregs->x18); + PrintRegister("x19", pregs->x19); + PrintRegister("x20", pregs->x20); + PrintRegister("x21", pregs->x21); + PrintRegister("x22", pregs->x22); + PrintRegister("x23", pregs->x23); + PrintRegister("x24", pregs->x24); + PrintRegister("x25", pregs->x25); + PrintRegister("x26", pregs->x26); + PrintRegister("x27", pregs->x27); + PrintRegister("x28", pregs->x28); + PrintRegister("x29", pregs->x29); + PrintRegister("x30", pregs->x30); + PrintRegister("sp", pregs->sp); + + // dump anything of interest here +} + +// undocumented entry points +ENTRY_POINT_BL(MplDtEnter) +ENTRY_POINT_JMP(MplDtExit) +ENTRY_POINT_JMP(MplFuncProfile) +ENTRY_POINT_BL(MplTraceWithBreak) + +struct frame { + double d6; + double d7; + double d4; + double d5; + double d2; + double d3; + double d0; + double d1; + uint64_t x6; + uint64_t x7; + uint64_t x4; + uint64_t x5; + uint64_t x2; + uint64_t x3; + uint64_t x0; + uint64_t x1; + uint64_t x29; // FP + uint64_t x30; // LR +}; + +extern "C" { +const unsigned int kNumLimit = 10; +const unsigned int kCodeOffset = 4; +void DecodeName(const char *name, char *dname) { + size_t i = 0; + size_t j = 0; + + if (name == nullptr || dname == nullptr) { + return; + } + size_t len = strlen(name); + const size_t maxLen = 512; + if (len >= maxLen) { + return; + } + + while (i < len) { + unsigned char c = name[i++]; + if (c == '_') { + if (name[i] == '_') { + dname[j++] = name[i++]; + } else { + // _XX: '_' followed by ascii code in hex + c = name[i++]; + auto v = static_cast((c <= '9') ? c - '0' : c - 'A' + kNumLimit); + unsigned asc = v << kCodeOffset; + c = name[i++]; + v = static_cast((c <= '9') ? 
c - '0' : c - 'A' + kNumLimit);
+        asc += v;
+        dname[j++] = static_cast<char>(asc);
+
+        if (asc == '(' || asc == ')' || asc == ';') {
+          while (name[i] == 'A') {
+            dname[j++] = '[';
+            i++;
+          }
+        }
+      }
+    } else {
+      dname[j++] = c;
+    }
+  }
+  dname[j] = '\0';
+  return;
+}
+
+static __attribute__((used)) __thread int nEntered = 0;
+const int kStackCapacity = 1024;
+static __attribute__((used)) __thread uint64_t funcStack[kStackCapacity];
+
+void MplFuncProfileImpl(struct frame *fr) {
+  if (!VLOG_IS_ON(methodtrace)) {
+    return;
+  }
+  std::string func("unknown");
+  std::string soname("unknown");
+  std::ostringstream funcaddrstr;
+  DCHECK(fr != nullptr) << "fr is nullptr in MplFuncProfileImpl" << maple::endl;
+  uint64_t faddr = fr->x30 - 4; // 4 for bl to this call
+  if (CheckMethodResolved(faddr)) {
+    return;
+  }
+#if RECORD_FUNC_NAME
+  bool result = ResolveAddr2Name(func, faddr);
+  if (!result) {
+    LOG(WARNING) << "resolve " << std::hex << "0x" << faddr << " failed " << std::dec;
+    funcaddrstr << std::hex << "0x" << faddr;
+    func = funcaddrstr.str();
+  } else {
+    soname = maple::LinkerAPI::Instance().GetMFileNameByPC(reinterpret_cast(faddr), false);
+  }
+#endif
+  RecordMethod(faddr, func, soname);
+}
+
+// No floating point can be used in this function.
+__attribute__((used)) void MplDtEnterImpl(struct frame *fr) {
+  if (!VLOG_IS_ON(methodtrace)) {
+    return;
+  }
+  std::string func("unknown");
+  std::ostringstream funcaddrstr;
+  DCHECK(fr != nullptr) << "fr is nullptr in MplDtEnterImpl" << maple::endl;
+  uint64_t faddr = fr->x30 - 4; // 4 for bl to this call
+  bool result = ResolveAddr2Name(func, faddr);
+  if (!result) {
+    LOG(WARNING) << "resolve " << std::hex << "0x" << faddr << " failed " << std::dec;
+  }
+  pthread_t self;
+  self = pthread_self();
+  char dfunc[512]; // max length of dfunc
+  int top = nEntered++;
+  if (top < kStackCapacity && top >= 0) {
+    funcStack[top] = faddr;
+  }
+  DecodeName(func.c_str(), dfunc);
+  (void)fprintf(stderr, "enter: [%d:%p] %s : %16p\n", top, reinterpret_cast<void*>(self), dfunc,
+                reinterpret_cast<void*>(fr->x0));
+#if CONFIG_TRACE
+  const int minDFuncLength = 12;
+  if (IsTracingEnabled()) {
+    if (strlen(dfunc) <= minDFuncLength) {
+      return;
+    }
+
+    GetTracer()->LogMethodTraceEvent(dfunc, 0x0);
+  }
+#endif
+}
+
+__attribute__((used)) void MplDtExitImpl(struct frame *fr) {
+  if (!VLOG_IS_ON(methodtrace)) {
+    return;
+  }
+
+  pthread_t self;
+  self = pthread_self();
+  std::string func;
+  char dfunc[512] = { 0 }; // max length of dfunc
+  uint64_t faddr = 0;
+  int top = --nEntered;
+  if (top < kStackCapacity && top >= 0) {
+    faddr = funcStack[top];
+  }
+  if (faddr != 0) {
+    if (ResolveAddr2Name(func, faddr)) {
+      DecodeName(func.c_str(), dfunc);
+    }
+  }
+  DCHECK(fr != nullptr) << "fr is nullptr in MplDtExitImpl" << maple::endl;
+  fprintf(stderr, " exit: [%d:%p] %s : rv %16p\n", top, reinterpret_cast<void*>(self),
+          (faddr != 0) ? dfunc : "unknown", reinterpret_cast<void*>(fr->x0));
+#if CONFIG_TRACE
+  const int minDfuncLength = 12;
+  if (IsTracingEnabled()) {
+    if (strlen(dfunc) <= minDfuncLength) {
+      return;
+    }
+
+    GetTracer()->LogMethodTraceEvent(dfunc, 0x1);
+  }
+#endif
+}
+const int kAddrRegOffset = 8;
+// No floating point can be used in this function.
+__attribute__((used)) void MplTraceImpl(struct frame *fr) { + std::string func; + char dfunc[512]; // max length of dfunc + DCHECK(fr != nullptr) << "fr is nullptr in MplTraceImpl" << maple::endl; + if (ResolveAddr2Name(func, fr->x30 - kAddrRegOffset)) { + DecodeName(func.c_str(), dfunc); + (void)fprintf(stderr, "%s : %16p\n", dfunc, reinterpret_cast(fr->x0)); + } +} + +__attribute__((noinline)) void SetBreakPoint() { +} + +// No floating point can be used in this function. +__attribute__((used)) void MplTraceWithBreakImpl(struct frame *fr) { + MplTraceImpl(fr); + SetBreakPoint(); +} +} + +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/libs_fast.cpp b/src/mrt/compiler-rt/src/libs_fast.cpp new file mode 100644 index 0000000000..ec3f53ec76 --- /dev/null +++ b/src/mrt/compiler-rt/src/libs_fast.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "securec.h" +#include "libs.h" +#include "mm_config.h" +#include "panic.h" +#include "thread_offsets.h" +#include "exception/mrt_exception.h" + +namespace maplert { +extern "C" { +// lockword in java object is 32bit. from lockword.h: +// +// ThinLock: +// |10|98|765432109876|5432109876543210| +// |00|xx|lock count |thread id | +#define THREADID(lockword) (lockword & 0xFFFF) +#define STATE_THREADID(lockword) (lockword & 0xC000FFFF) +#define LOCKSTATE(lockword) (lockword & 0xC0000000) +#define LOCKCOUNT(lockword) (lockword & 0x0FFF0000) +const int kLockCountMax = 0x0FFF0000; +const int kLockCount1 = 0x00010000; + +// This version will be converted to assembly code +void MCC_SyncEnterFast2(address_t obj) { + maple::IThread *tSelf = maple::IThread::Current(); + + if (LIKELY((tSelf != nullptr) && (obj != 0))) { + // fast path + std::atomic &lockword = AddrToLValAtomic(obj + kLockWordOffset); + uint32_t oldLockword = lockword.load(); + uint32_t oldLockword2 = oldLockword; + oldLockword2 &= 0xEFFFFFFF; + uint32_t threadID = *reinterpret_cast((reinterpret_cast(tSelf) + kThreadIdOffset)); + // Note: threadID bit 16-31 should be zero, shold we check it here? 
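+    // Worked example of the layout documented above (values are illustrative): a free object
+    // has lockword 0, so the CAS below installs just the thread id; a lockword of 0x0002002A
+    // means thread 0x2A holds the thin lock twice (THREADID -> 0x2A, LOCKCOUNT -> 2 * kLockCount1),
+    // and each recursive acquire adds kLockCount1 until LOCKCOUNT reaches kLockCountMax.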
+ if (LIKELY(oldLockword2 == 0)) { // nobody owns the lock + uint32_t newLockword = oldLockword | threadID; // just set threadID + if (lockword.compare_exchange_weak(oldLockword, newLockword)) { + goto SYNC_ENTER_FAST_PATH_EXIT; + } + } else if (threadID == STATE_THREADID(oldLockword)) { // thin lock owned by this thread + if (LOCKCOUNT(oldLockword) < kLockCountMax) { // not exceed thin-lock capability + if (lockword.compare_exchange_weak(oldLockword, oldLockword + kLockCount1)) { + goto SYNC_ENTER_FAST_PATH_EXIT; + } + } + } + } + + // slow path + MRT_BuiltinSyncEnter(obj); + return; + +SYNC_ENTER_FAST_PATH_EXIT: + tSelf->PushLockedMonitor(reinterpret_cast(obj), __builtin_return_address(0), + static_cast(maple::kMplLocked)); +} + +void MCC_SyncEnterFast0(address_t obj) __attribute__((alias("MCC_SyncEnterFast2"))); +void MCC_SyncEnterFast1(address_t obj) __attribute__((alias("MCC_SyncEnterFast2"))); +void MCC_SyncEnterFast3(address_t obj) __attribute__((alias("MCC_SyncEnterFast2"))); + +void MCC_SyncExitFast(address_t obj) { + maple::IThread *tSelf = maple::IThread::Current(); + + if (LIKELY((tSelf != nullptr) && (obj != 0))) { + // fast path + std::atomic &lockword = AddrToLValAtomic(obj + kLockWordOffset); + uint32_t oldLockword = lockword.load(); + uint32_t threadID = *reinterpret_cast((reinterpret_cast(tSelf) + kThreadIdOffset)); + if (threadID == STATE_THREADID(oldLockword)) { // thin lock owned by this thread + uint32_t newLockword = oldLockword & 0x10000000; // default to count == 1 case, need to clear threadID + if (UNLIKELY(LOCKCOUNT(oldLockword) > 0)) { // count > 1, just --count + newLockword = oldLockword - kLockCount1; + } + if (lockword.compare_exchange_weak(oldLockword, newLockword)) { + goto SYNC_EXIT_FAST_PATH_EXIT; + } + } + } + + // slow path + MRT_BuiltinSyncExit(obj); + return; + +SYNC_EXIT_FAST_PATH_EXIT: + tSelf->PopLockedMonitor(); +} + +JNIEnv *MCC_PreNativeCall(jobject) { + // at this point, currentThread cannot be null + maple::IThread *currentThread = maple::IThread::Current(); + return currentThread->PreNativeCall(); +} + +void MCC_PostNativeCall(JNIEnv *env) { + // at this point, currentThread cannot be null + maple::IThread *currentThread = maple::IThread::Current(); + currentThread->PostNativeCall(env); +} + +} // extern "C" +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker.cpp b/src/mrt/compiler-rt/src/linker/linker.cpp new file mode 100644 index 0000000000..6bddeac917 --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker.cpp @@ -0,0 +1,979 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "linker/linker.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "file_system.h" +#include "linker/linker_inline.h" +#include "linker/linker_debug.h" +#include "linker/linker_cache.h" +#include "linker/linker_lazy_binding.h" +#ifdef LINKER_DECOUPLE +#include "linker/decouple/linker_decouple.h" +#endif +#include "collector/cp_generator.h" + +using namespace maple; + +namespace maplert { +using namespace linkerutils; +FeatureName Linker::featureName = kFLinker; +bool Linker::HandleSymbol() { + bool ret = true; + if (!HandleMethodSymbol()) { + ret = false; + } + if (!HandleDataSymbol()) { + ret = false; + } + return ret; +} + +// Handle all methods relevant symbols for the .so file of 'handle', by traverse 'handles' list. +bool Linker::HandleMethodSymbol() { + if (!LOG_NDEBUG) { + (void)(pInvoker->ForEachDoAction(pInvoker->Get(), &Debug::DumpMethodSymbol)); + } + bool ret = true; + if (!(pInvoker->ForEachDoAction(this, &Linker::ResolveMethodSymbol))) { + ret = false; + } + if (!(pInvoker->ForEachDoAction(this, &Linker::RelocateMethodSymbol))) { + ret = false; + } + if (!(pInvoker->ForEachDoAction(this, &Linker::ResolveVTableSymbol))) { + ret = false; + } + if (!(pInvoker->ForEachDoAction(this, &Linker::ResolveITableSymbol))) { + ret = false; + } + if (!LOG_NDEBUG) { + (void)(pInvoker->ForEachDoAction(pInvoker->Get(), &Debug::DumpMethodUndefSymbol)); + } + return ret; +} + +// Handle all data relevant symbols for the .so file of 'handle', by traverse 'handles' list. +bool Linker::HandleDataSymbol() { + if (!LOG_NDEBUG) { + (void)(pInvoker->ForEachDoAction(pInvoker->Get(), &Debug::DumpDataSymbol)); + } + bool ret = true; + if (!(pInvoker->ForEachDoAction(this, &Linker::ResolveDataSymbol))) { + ret = false; + } + if (!(pInvoker->ForEachDoAction(this, &Linker::RelocateDataSymbol))) { + ret = false; + } + if (!(pInvoker->ForEachDoAction(this, &Linker::ResolveSuperClassSymbol))) { + ret = false; + } + if (!(pInvoker->ForEachDoAction(this, &Linker::ResolveGCRootSymbol))) { + ret = false; + } + + if (!LOG_NDEBUG) { + (void)(pInvoker->ForEachDoAction(pInvoker->Get(), &Debug::DumpDataUndefSymbol)); + } + return ret; +} + +bool Linker::HandleSymbol(LinkerMFileInfo &mplInfo) { + // Ignore the system class loader. + if (!pInvoker->IsSystemClassLoader(reinterpret_cast(mplInfo.classLoader)) && + GetLoadState() == kLoadStateApk) { + pInvoker->SubMultiSoPendingCount(); + } + bool ret = true; + if (!HandleMethodSymbol(mplInfo)) { + ret = false; + } + if (!HandleDataSymbol(mplInfo)) { + ret = false; + } + return ret; +} + +// Handle all methods relevant symbols for the .so file of 'handle', by traverse 'handles' list. +bool Linker::HandleMethodSymbol(LinkerMFileInfo &mplInfo) { + if (!LOG_NDEBUG) { + (void)(pInvoker->DoAction(pInvoker->Get(), &Debug::DumpMethodSymbol, mplInfo)); + } + if (!mplInfo.IsFlag(kIsLazy)) { + // If not resolved symbols exist after previous trial, we need a full traversal. + if (methodHasNotResolved) { + methodHasNotResolved = !pInvoker->ForEachDoAction(this, &Linker::ResolveMethodSymbol); + } else { + methodHasNotResolved = !pInvoker->DoAction(this, &Linker::ResolveMethodSymbol, mplInfo); + } + + if (NeedRelocateSymbol(mplInfo.name)) { // Ignore the procedure for profiling top apps + // If not relocated symbols exist after previous trial, we need a full traversal. 
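+      // The has-not-resolved/relocated flags are sticky across loads: once any maple file left
+      // work undone, the next call falls back from the single-file DoAction pass to a
+      // ForEachDoAction sweep over every loaded file (the same pattern is used for the resolve,
+      // relocate, vtable and itable stages).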
+ if (methodHasNotRelocated) { + methodHasNotRelocated = !pInvoker->ForEachDoAction(this, &Linker::RelocateMethodSymbol); + } else { + methodHasNotRelocated = !pInvoker->DoAction(this, &Linker::RelocateMethodSymbol, mplInfo); + } + } + + // If not resolved symbols exist after previous trial, we need a full traversal. + if (vtableHasNotResolved) { + vtableHasNotResolved = !pInvoker->ForEachDoAction(this, &Linker::ResolveVTableSymbol); + } else { + vtableHasNotResolved = !pInvoker->DoAction(this, &Linker::ResolveVTableSymbol, mplInfo); + } + + // If not resolved symbols exist after previous trial, we need a full traversal. + if (itableHasNotResolved) { + itableHasNotResolved = !pInvoker->ForEachDoAction(this, &Linker::ResolveITableSymbol); + } else { + itableHasNotResolved = !pInvoker->DoAction(this, &Linker::ResolveITableSymbol, mplInfo); + } + } + + if (!LOG_NDEBUG) { + (void)(pInvoker->DoAction(pInvoker->Get(), &Debug::DumpMethodUndefSymbol, mplInfo)); + } + return !methodHasNotResolved && !methodHasNotRelocated && !vtableHasNotResolved && !itableHasNotResolved; +} + +// Handle all data relevant symbols for the .so file of 'handle', by traverse 'handles' list. +bool Linker::HandleDataSymbol(LinkerMFileInfo &mplInfo) { + if (!LOG_NDEBUG) { + (void)(pInvoker->DoAction(pInvoker->Get(), &Debug::DumpDataSymbol, mplInfo)); + } + if (!mplInfo.IsFlag(kIsLazy)) { + // If not resolved symbols exist after previous trial, we need a full traversal. + if (dataHasNotResolved) { + dataHasNotResolved = !pInvoker->ForEachDoAction(this, &Linker::ResolveDataSymbol); + } else { + dataHasNotResolved = !pInvoker->DoAction(this, &Linker::ResolveDataSymbol, mplInfo); + } + + if (NeedRelocateSymbol(mplInfo.name)) { // Ignore the procedure for profiling top apps + // If not relocated symbols exist after previous trial, we need a full traversal. + if (dataHasNotRelocated) { + dataHasNotRelocated = !pInvoker->ForEachDoAction(this, &Linker::RelocateDataSymbol); + } else { + dataHasNotRelocated = !pInvoker->DoAction(this, &Linker::RelocateDataSymbol, mplInfo); + } + } + + // If not resolved symbols exist after previous trial, we need a full traversal. + if (superClassHasNotResolved) { + superClassHasNotResolved = !pInvoker->ForEachDoAction(this, &Linker::ResolveSuperClassSymbol); + } else { + superClassHasNotResolved = !pInvoker->DoAction(this, &Linker::ResolveSuperClassSymbol, mplInfo); + } + } + + // If not resolved symbols exist after previous trial, we need a full traversal. + if (gcRootListHasNotResolved) { + gcRootListHasNotResolved = !pInvoker->ForEachDoAction(this, &Linker::ResolveGCRootSymbol); + } else { + gcRootListHasNotResolved = !pInvoker->DoAction(this, &Linker::ResolveGCRootSymbol, mplInfo); + } + + if (!LOG_NDEBUG) { + (void)(pInvoker->DoAction(pInvoker->Get(), &Debug::DumpDataUndefSymbol, mplInfo)); + } + return !dataHasNotResolved && !dataHasNotRelocated && !superClassHasNotResolved && !gcRootListHasNotResolved; +} + +// Resolve all undefined methods symbols for the .so file of 'handle', by traverse 'handles' list. 
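+// Rough flow, per the body below: every MUID in the kMethodUndef table is looked up across the
+// loaded maple files (or through the linker cache when LINKER_RT_CACHE is enabled); found
+// addresses are written back into the table, and anything still missing sets
+// kIsMethodUndefHasNotResolved unless the file is lazily bound.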
+bool Linker::ResolveMethodSymbol(LinkerMFileInfo &mplInfo) { + mplInfo.SetFlag(kIsMethodUndefHasNotResolved, false); + if (mplInfo.IsFlag(kIsMethodDefResolved) || mplInfo.IsFlag(kIsLazy)) { + return true; + } + size_t undefSize = mplInfo.GetTblSize(kMethodUndef); + if (undefSize == 0) { + LINKER_LOG(ERROR) << "undefSize=" << undefSize << ", " << mplInfo.name << maple::endl; + mplInfo.SetFlag(kIsMethodDefResolved, true); + return true; + } + AddrSlice addrSlice(mplInfo.GetTblBegin(kMethodUndef), undefSize); + MuidSlice muidSlice(mplInfo.GetTblBegin(kMethodUndefMuid), undefSize); + pInvoker->GetClassLoaderList(mplInfo, mplInfo.clList); +#ifdef LINKER_RT_CACHE + pInvoker->Get()->ResolveMethodSymbol(mplInfo, addrSlice, muidSlice); + if (!mplInfo.IsFlag(kIsLazy)) { + pInvoker->Get()->FreeMethodUndefTable(mplInfo); + } +#else + for (size_t i = 0; i < undefSize; ++i) { + LinkerOffsetType tmpAddr = 0; + if (!pInvoker->ForEachLookUp(muidSlice[i].muid, pInvoker, + &LinkerInvoker::LookUpMethodSymbolAddress, mplInfo, tmpAddr)) { // Not found + if (!mplInfo.IsFlag(kIsLazy)) { + mplInfo.SetFlag(kIsMethodUndefHasNotResolved, true); + } + } else { // Found + addrSlice[i].addr = tmpAddr; + } + } +#endif + if (!mplInfo.IsFlag(kIsMethodUndefHasNotResolved)) { + mplInfo.SetFlag(kIsMethodDefResolved, true); + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to resolve all MUIDs for " << mplInfo.name << maple::endl; + return false; + } +} + +#ifdef LINKER_LAZY_BINDING +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF +void Linker::InitMethodSymbolLazy32(LinkerMFileInfo &mplInfo, size_t defSize) { + if (mplInfo.IsFlag(kIsLazy)) { + size_t undefSize = mplInfo.GetTblSize(kMethodUndef); + LinkerAddrTableItem *pUndefTable = mplInfo.GetTblBegin(kMethodUndef); + if (undefSize != 0 && pUndefTable != nullptr) { + for (size_t i = 0; i < undefSize; ++i) { + // + kBindingStateMethodUndef:0x5 + pUndefTable[i].addr = pInvoker->AddrToUint32( + reinterpret_cast(__BindingProtectRegion__ + static_cast(pUndefTable[i].addr))); + } + } else { + LINKER_LOG(ERROR) << "failed, --lazy-binding, pUndefTable is invalid in " << mplInfo.name << maple::endl; + } + LinkerAddrTableItem *pDefTable = mplInfo.GetTblBegin(kMethodDef); + if (pDefTable != nullptr) { + for (size_t i = 0; i < defSize; ++i) { + // + kBindingStateMethodDef:0x6 + pDefTable[i].addr = pInvoker->AddrToUint32( + reinterpret_cast(__BindingProtectRegion__ + static_cast(pDefTable[i].addr))); + } + } else { + LINKER_LOG(ERROR) << "failed, --lazy-binding, pDefTable is null in " << mplInfo.name << maple::endl; + } + } +} + +void Linker::InitDataSymbolLazy32(LinkerMFileInfo &mplInfo, size_t defSize) { + if (mplInfo.IsFlag(kIsLazy)) { + size_t undefSize = mplInfo.GetTblSize(kDataUndef); + LinkerAddrTableItem *pUndefTable = mplInfo.GetTblBegin(kDataUndef); + if (undefSize != 0 && pUndefTable != nullptr) { + for (size_t i = 0; i < undefSize; ++i) { + // + kBindingStateCinfUndef:1, or + kBindingStateDataUndef:3 + pUndefTable[i].addr = pInvoker->AddrToUint32( + reinterpret_cast(__BindingProtectRegion__ + reinterpret_cast(pUndefTable[i].addr))); + } + } else { + LINKER_LOG(ERROR) << "failed, --lazy-binding, pUndefTable is invalid in " << mplInfo.name << maple::endl; + } + LinkerAddrTableItem *pDefTable = mplInfo.GetTblBegin(kDataDef); + if (pDefTable != nullptr) { + for (size_t i = 0; i < defSize; ++i) { + // + kBindingStateCinfDef:2, or + kBindingStateDataDef:4 + pDefTable[i].addr = pInvoker->AddrToUint32( + reinterpret_cast(__BindingProtectRegion__ + 
reinterpret_cast(pDefTable[i].addr))); + } + } else { + LINKER_LOG(ERROR) << "failed, --lazy-binding, pDefTable is null in " << mplInfo.name << maple::endl; + } + } +} +#endif // LINKER_32BIT_REF_FOR_DEF_UNDEF +#endif // LINKER_LAZY_BINDING + +// Init all defined method symbols. +void Linker::InitMethodSymbol(LinkerMFileInfo &mplInfo) { + // Fill the address by its offset. + if (!mplInfo.IsFlag(kIsRelMethodOnce)) { + mplInfo.SetFlag(kIsRelMethodOnce, true); + size_t defSize = mplInfo.GetTblSize(kMethodDef); + if (defSize == 0) { + return; + } + + // Check if it's lazy-binding flag for methods. + if (mplInfo.GetTblSize(kMethodDefOrig) != 0) { + LINKER_VLOG(mpllinker) << "applied compiler --lazy-binding option in " << mplInfo.name << maple::endl; + mplInfo.SetFlag(kIsLazy, true); + } + + LinkerAddrTableItem *pTable = mplInfo.GetTblBegin(kMethodDefOrig); + if (pTable == nullptr) { + LINKER_LOG(ERROR) << "failed, pTable is null in " << mplInfo.name << maple::endl; + __MRT_ASSERT(0, "InitMethodSymbol(), pTable is null!\n"); + } + + if (!mplInfo.IsFlag(kIsLazy)) { + for (size_t i = 0; i < defSize; ++i) { + AdjustDefTableAddress(mplInfo, *pTable, i); + } + } + +#ifdef LINKER_LAZY_BINDING +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + InitMethodSymbolLazy32(mplInfo, defSize); +#endif // LINKER_32BIT_REF_FOR_DEF_UNDEF +#endif // LINKER_LAZY_BINDING + } +} + +// Relocate all defined method symbols. +bool Linker::RelocateMethodSymbol(LinkerMFileInfo &mplInfo) { + mplInfo.SetFlag(kIsMethodDefHasNotResolved, false); + if (mplInfo.IsFlag(kIsMethodRelocated) || mplInfo.IsFlag(kIsLazy) || mplInfo.name == maple::fs::kLibcorePath) { + return true; + } + size_t defSize = mplInfo.GetTblSize(kMethodDef); + if (defSize == 0) { + mplInfo.SetFlag(kIsMethodRelocated, true); + LINKER_LOG(ERROR) << "methodDef table is null in" << mplInfo.name << maple::endl; + return true; + } + AddrSlice addrSlice(mplInfo.GetTblBegin(kMethodDef), defSize); + MuidSlice muidSlice(mplInfo.GetTblBegin(kMethodDefMuid), defSize); + pInvoker->GetClassLoaderList(mplInfo, mplInfo.clList); +#ifdef LINKER_RT_CACHE + pInvoker->Get()->RelocateMethodSymbol(mplInfo, addrSlice, muidSlice); + pInvoker->Get()->FreeMethodDefTable(mplInfo); +#else + // Start binary search def from here. + for (size_t i = 0; i < defSize; ++i) { + LinkerOffsetType tmpAddr = 0; + if (!pInvoker->ForEachLookUp(muidSlice[i].muid, pInvoker, + &LinkerInvoker::LookUpMethodSymbolAddress, mplInfo, tmpAddr)) { // Not found + // Never reach here. 
+ LINKER_LOG(ERROR) << "failed to relocate MUID=" << muidSlice[i].muid.ToStr() << " in " << mplInfo.name << + ", tmpAddr=" << tmpAddr; + mplInfo.SetFlag(kIsMethodDefHasNotResolved, true); + } else { // Found + addrSlice[i].addr = tmpAddr; + } + } +#endif + if (!mplInfo.IsFlag(kIsMethodDefHasNotResolved)) { + mplInfo.SetFlag(kIsMethodRelocated, true); + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to relocate all MUIDs for " << mplInfo.name << maple::endl; + return false; + } +} + +bool Linker::ResolveUndefVTableSymbol(LinkerMFileInfo &mplInfo, bool fromUndef, size_t index, + VTableSlice &vTableSlice, size_t i) { + size_t methodUndefSize = mplInfo.GetTblSize(kMethodUndef); + AddrSlice methodUndefSlice(mplInfo.GetTblBegin(kMethodUndef), methodUndefSize); + size_t methodDefSize = mplInfo.GetTblSize(kMethodDef); + AddrSlice methodDefSlice(mplInfo.GetTblBegin(kMethodDefOrig), methodDefSize); + bool hasNotResolved = false; + if (fromUndef && index < methodUndefSize) { + if (mplInfo.IsFlag(kIsLazy) && pInvoker->GetAddrBindingState(methodUndefSlice, index) != kBindingStateResolved) { + size_t muidSize = mplInfo.GetTblSize(kMethodUndefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo.GetTblBegin(kMethodUndefMuid), muidSize); + void *addr = pInvoker->Get()->ResolveMethodSymbol( + mplInfo, methodUndefSlice, muidSlice, index, false); + if (addr != nullptr) { + vTableSlice[i].index = reinterpret_cast(addr); + LINKER_VLOG(lazybinding) << "resolved lazily, " << fromUndef << ", addr=" << + methodUndefSlice[index].Address() << " in " << mplInfo.name << maple::endl; + } else { + LINKER_LOG(ERROR) << "not resolved lazily, " << fromUndef << ", " << index << ", " << methodUndefSize << ", " << + methodDefSize << " in " << mplInfo.name << maple::endl; + hasNotResolved = true; + } + } else { + LINKER_DLOG(mpllinker) << "undef, " << std::hex << "addr=" << methodUndefSlice[index].Address() << + " in " << mplInfo.name << maple::endl; + vTableSlice[i].index = reinterpret_cast(methodUndefSlice[index].Address()); + } + } else if (!fromUndef && index < methodDefSize) { + LINKER_DLOG(mpllinker) << "def, " << std::hex << "addr=" << methodDefSlice[index].Address() << " in " << + mplInfo.name << maple::endl; + vTableSlice[i].index = reinterpret_cast(GetDefTableAddress(mplInfo, methodDefSlice, + static_cast(index), true)); + } else { + LINKER_VLOG(mpllinker) << "not resolved, " << fromUndef << ", " << index << ", " << methodUndefSize << ", " << + methodDefSize << " in " << mplInfo.name << maple::endl; + hasNotResolved = true; + } + return hasNotResolved; +} + +// Resolve all VTable symbols for the .so file of 'handle'. +bool Linker::ResolveVTableSymbol(LinkerMFileInfo &mplInfo) { + if (mplInfo.IsFlag(kIsVTabResolved) || mplInfo.IsFlag(kIsLazy)) { + return true; + } + + bool hasNotResolved = false; + size_t vSize = mplInfo.GetTblSize(kVTable); + if (vSize == 0) { + mplInfo.SetFlag(kIsVTabResolved, true); + LINKER_LOG(ERROR) << "vTable is null in " << mplInfo.name << maple::endl; + return true; + } + VTableSlice vTableSlice = VTableSlice(mplInfo.GetTblBegin(kVTable), vSize); + + for (size_t i = 0; i < vSize; ++i) { + LinkerRef ref(static_cast(vTableSlice[i].index)); + if (ref.IsVTabIndex()) { // Index of undefine table. + hasNotResolved = ResolveUndefVTableSymbol(mplInfo, ref.IsTabUndef(), ref.GetTabIndex(), vTableSlice, i); + } else if (vTableSlice[i].index & kNegativeNum) { // Offset of address. 
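+      // Presumably a (sign-flagged) relative offset rather than a table index;
+      // GetDataRef() below rewrites the slot in place to the absolute address it refers to.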
+ DataRefOffset *data = reinterpret_cast(&(vTableSlice[i].index)); +#ifdef USE_32BIT_REF + // To allow re-parse Vtable by the proper way. + if (!(static_cast(vTableSlice[i].index) < kDsoLoadedAddressEnd && + static_cast(vTableSlice[i].index) >= kDsoLoadedAddressStart)) { + vTableSlice[i].index = data->GetDataRef(); + } else { + LINKER_DLOG(mpllinker) << "not re-parse for patch, " << std::hex << "addr=" << vTableSlice[i].index << + ">X>" << data->GetDataRef() << " in " << mplInfo.name << maple::endl; + } +#else + vTableSlice[i].index = data->GetDataRef(); +#endif + } + } + if (!hasNotResolved) { + mplInfo.SetFlag(kIsVTabResolved, true); + LINKER_VLOG(mpllinker) << "successfully resolved VTable for " << mplInfo.name << maple::endl; + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to resolve all VTable for " << mplInfo.name << maple::endl; + return false; + } +} + +bool Linker::ResolveUndefITableSymbol( + LinkerMFileInfo &mplInfo, bool fromUndef, size_t index, ITableSlice &iTableSlice, size_t i) { + size_t methodUndefSize = mplInfo.GetTblSize(kMethodUndef); + AddrSlice methodUndefSlice(mplInfo.GetTblBegin(kMethodUndef), methodUndefSize); + size_t methodDefSize = mplInfo.GetTblSize(kMethodDef); + AddrSlice methodDefSlice(mplInfo.GetTblBegin(kMethodDefOrig), methodDefSize); + bool hasNotResolved = false; + if (fromUndef && index < methodUndefSize && !methodUndefSlice.Empty()) { + if (mplInfo.IsFlag(kIsLazy) && pInvoker->GetAddrBindingState(methodUndefSlice, index) != kBindingStateResolved) { + size_t muidSize = mplInfo.GetTblSize(kMethodUndefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo.GetTblBegin(kMethodUndefMuid), muidSize); + void *addr = pInvoker->Get()->ResolveMethodSymbol( + mplInfo, methodUndefSlice, muidSlice, index, false); + if (addr != nullptr) { + iTableSlice[i].index = reinterpret_cast(addr); + LINKER_VLOG(lazybinding) << "resolved lazily, " << fromUndef << ", addr=" << + methodUndefSlice[index].Address() << " in " << mplInfo.name << maple::endl; + } else { + LINKER_LOG(ERROR) << "not resolved lazily, " << fromUndef << ", " << index << ", " << methodUndefSize << ", " << + methodDefSize << " in " << mplInfo.name << maple::endl; + hasNotResolved = true; + } + } else { + iTableSlice[i].index = reinterpret_cast(methodUndefSlice[index].Address()); + } + } else if (!fromUndef && index < methodDefSize && !methodDefSlice.Empty()) { + iTableSlice[i].index = reinterpret_cast(GetDefTableAddress(mplInfo, methodDefSlice, + static_cast(index), true)); + } else { + hasNotResolved = true; + } + return hasNotResolved; +} + +// Resolve all ITable symbols for the .so file of 'handle'. +bool Linker::ResolveITableSymbol(LinkerMFileInfo &mplInfo) { + if (mplInfo.IsFlag(kIsITabResolved) || mplInfo.IsFlag(kIsLazy)) { + return true; + } + + bool hasNotResolved = false; + size_t iSize = mplInfo.GetTblSize(kITable); + if (iSize == 0) { + mplInfo.SetFlag(kIsITabResolved, true); + LINKER_LOG(ERROR) << "iTable is null in " << mplInfo.name << maple::endl; + return true; + } + ITableSlice iTableSlice = ITableSlice(mplInfo.GetTblBegin(kITable), iSize); + + for (size_t i = 0; i < iSize; ++i) { + if (i % 2 == 1) { // i % 2 is 1 means the number is odd. + LinkerRef ref(static_cast(iTableSlice[i].index)); + if (ref.IsITabIndex()) { // Index of undefine table. 
+ hasNotResolved = ResolveUndefITableSymbol(mplInfo, ref.IsTabUndef(), ref.GetTabIndex(), iTableSlice, i); + } + } else { // Offset of address +#ifdef USE_32BIT_REF + MByteRef32 *ref = reinterpret_cast(&(iTableSlice[i].index)); + void *addr = ref->GetRef(); + ref->SetRef(addr); +#else + MByteRef *ref = reinterpret_cast(&(iTableSlice[i].index)); + void *addr = ref->GetRef(); + ref->SetRef(addr); +#endif + } + } + + if (!hasNotResolved) { + mplInfo.SetFlag(kIsITabResolved, true); + LINKER_VLOG(mpllinker) << "successfully resolved ITable for " << mplInfo.name << maple::endl; + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to resolve all ITable for " << mplInfo.name << maple::endl; + return false; + } +} + +// Resolve all undefined data symbols for the .so file of 'handle', by traverse 'handles' list. +bool Linker::ResolveDataSymbol(LinkerMFileInfo &mplInfo) { + mplInfo.SetFlag(kIsDataUndefHasNotResolved, false); + if (mplInfo.IsFlag(kIsDataDefResolved) || mplInfo.IsFlag(kIsLazy)) { + return true; + } + size_t undefSize = mplInfo.GetTblSize(kDataUndef); + if (undefSize == 0) { + mplInfo.SetFlag(kIsDataDefResolved, true); + LINKER_LOG(ERROR) << "DataUndef table is null in " << mplInfo.name << maple::endl; + return true; + } + AddrSlice addrSlice(mplInfo.GetTblBegin(kDataUndef), undefSize); + MuidSlice muidSlice(mplInfo.GetTblBegin(kDataUndefMuid), undefSize); + pInvoker->GetClassLoaderList(mplInfo, mplInfo.clList); +#ifdef LINKER_RT_CACHE + pInvoker->Get()->ResolveDataSymbol(mplInfo, addrSlice, muidSlice); + if (!mplInfo.IsFlag(kIsLazy)) { + pInvoker->Get()->FreeDataUndefTable(mplInfo); + } +#else + for (size_t i = 0; i < undefSize; ++i) { + LinkerOffsetType tmpAddr = 0; + if (!pInvoker->ForEachLookUp(muidSlice[i].muid, pInvoker, + &LinkerInvoker::LookUpDataSymbolAddress, mplInfo, tmpAddr)) { // Not found + if (!mplInfo.IsFlag(kIsLazy)) { + mplInfo.SetFlag(kIsDataUndefHasNotResolved, true); + } + } else { // Found + addrSlice[i].addr = tmpAddr; + } + } +#endif // LINKER_RT_CACHE + if (!mplInfo.IsFlag(kIsDataUndefHasNotResolved)) { + mplInfo.SetFlag(kIsDataDefResolved, true); + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to resolve all MUIDs for " << mplInfo.name << maple::endl; + return false; + } +} + +inline void Linker::AdjustDefTableAddress( + const LinkerMFileInfo &mplInfo, LinkerAddrTableItem &pTable, size_t index) { +#ifndef LINKER_ADDRESS_VIA_BASE +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + (&pTable)[index].addr = pInvoker->AddrToUint32((&pTable)[index].AddressFromOffset()); +#else + (&pTable)[index].addr = reinterpret_cast((&pTable)[index].AddressFromOffset()); +#endif // USE_32BIT_REF +#else // LINKER_ADDRESS_VIA_BASE +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + (&pTable)[index].addr = pInvoker->AddrToUint32((&pTable)[index].AddressFromBase(mplInfo.elfBase)); +#else + (&pTable)[index].addr = reinterpret_cast((&pTable)[index].Address()); +#endif // USE_32BIT_REF +#endif // LINKER_ADDRESS_VIA_BASE + (void)(&mplInfo); +} + +// Init all defined data symbols. +void Linker::InitDataSymbol(LinkerMFileInfo &mplInfo) { + // Resolve the literal initialization entries based on the Literal pool + void **cTable = mplInfo.GetTblBegin(kDataConstStr); + if (cTable != nullptr) { + for (size_t i = 0; i < mplInfo.GetTblSize(kDataConstStr); ++i) { + DataRef *constStringRef = reinterpret_cast(&cTable[i]); + MString *oldStrObj = constStringRef->GetDataRef(); + DCHECK(oldStrObj != nullptr); + cTable[i] = GetOrInsertLiteral(*oldStrObj); + } + } + + // Fill the address by its offset. 
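+  // (Each kDataDef entry initially holds an offset; AdjustDefTableAddress() turns it into a
+  // runtime address exactly once, guarded by kIsRelDataOnce, and is skipped for lazy binding.)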
+ if (!mplInfo.IsFlag(kIsRelDataOnce)) { + mplInfo.SetFlag(kIsRelDataOnce, true); + size_t defSize = mplInfo.GetTblSize(kDataDef); + if (defSize == 0) { + return; + } + +#ifdef LINKER_LAZY_BINDING + // Check if it's lazy-binding flag for cinf or data. + if (mplInfo.GetTblSize(kDataDefOrig) != 0) { + LINKER_VLOG(mpllinker) << "applied compiler --lazy-binding option in " << mplInfo.name << maple::endl; + mplInfo.SetFlag(kIsLazy, true); + } +#endif // LINKER_LAZY_BINDING + + LinkerAddrTableItem *pTable = mplInfo.GetTblBegin(kDataDefOrig); + if (pTable == nullptr) { + LINKER_LOG(ERROR) << "failed, pTable is null in " << mplInfo.name << maple::endl; + __MRT_ASSERT(0, "InitDataSymbol(), pTable is null!\n"); + } + +#ifdef LINKER_LAZY_BINDING + if (!mplInfo.IsFlag(kIsLazy)) { +#endif // LINKER_LAZY_BINDING + for (size_t i = 0; i < defSize; ++i) { + AdjustDefTableAddress(mplInfo, *pTable, i); + } +#ifdef LINKER_LAZY_BINDING + } +#endif // LINKER_LAZY_BINDING + +#ifdef LINKER_LAZY_BINDING +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + InitDataSymbolLazy32(mplInfo, defSize); +#endif // LINKER_32BIT_REF_FOR_DEF_UNDEF +#endif // LINKER_LAZY_BINDING + } +} + +// Relocate all defined data symbols. +bool Linker::RelocateDataSymbol(LinkerMFileInfo &mplInfo) { + mplInfo.SetFlag(kIsDataDefHasNotResolved, false); + if (mplInfo.IsFlag(kIsDataRelocated) || mplInfo.IsFlag(kIsLazy) || mplInfo.name == maple::fs::kLibcorePath) { + return true; + } + size_t defSize = mplInfo.GetTblSize(kDataDef); + if (defSize == 0) { + mplInfo.SetFlag(kIsDataRelocated, true); + LINKER_LOG(ERROR) << "dataDef table is null in " << mplInfo.name << maple::endl; + return true; + } + AddrSlice addrSlice(mplInfo.GetTblBegin(kDataDef), defSize); + MuidSlice muidSlice(mplInfo.GetTblBegin(kDataDefMuid), defSize); +#ifdef LINKER_RT_CACHE + pInvoker->Get()->RelocateDataSymbol(mplInfo, addrSlice, muidSlice); + pInvoker->Get()->FreeDataDefTable(mplInfo); +#else + pInvoker->GetClassLoaderList(mplInfo, mplInfo.clList); + for (size_t i = 0; i < defSize; ++i) { + LinkerOffsetType tmpAddr = 0; + if (!pInvoker->ForEachLookUp(muidSlice[i].muid, pInvoker, + &LinkerInvoker::LookUpDataSymbolAddress, mplInfo, tmpAddr)) { // Not found + // Never reach here. 
+ LINKER_LOG(ERROR) << "failed to relocate MUID=" << muidSlice[i].muid.ToStr() << " in " << mplInfo.name << + ", tmpAddr=" << tmpAddr; + mplInfo.SetFlag(kIsDataDefHasNotResolved, true); + } else { // Found + addrSlice[i].addr = tmpAddr; + } + } +#endif // LINKER_RT_CACHE + if (!mplInfo.IsFlag(kIsDataDefHasNotResolved)) { + mplInfo.SetFlag(kIsDataRelocated, true); + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to relocate all MUIDs for " << mplInfo.name << maple::endl; + return false; + } +} + +bool Linker::DoResolveSuperClassSymbol(LinkerMFileInfo &mplInfo, IndexSlice &superTableSlice, + const AddrSlice &dataUndefSlice, const AddrSlice &dataDefSlice, size_t i) { + bool fromUpper = false; + LinkerRef ref(superTableSlice[i].index); + if (ref.IsIndex()) { + size_t index = ref.GetIndex(); + bool fromUndef = ref.IsFromUndef(); + if (fromUndef && index < dataUndefSlice.Size() && !dataUndefSlice.Empty()) { + if (mplInfo.IsFlag(kIsLazy) && pInvoker->GetAddrBindingState(dataUndefSlice, index) != kBindingStateResolved) { + size_t muidSize = mplInfo.GetTblSize(kDataUndefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo.GetTblBegin(kDataUndefMuid), muidSize); + void *addr = pInvoker->Get()->ResolveClassSymbol( + mplInfo, dataUndefSlice, muidSlice, index, nullptr, fromUpper, false); + if (addr != nullptr) { + superTableSlice[i].index = reinterpret_cast(addr); + LINKER_VLOG(lazybinding) << "resolved lazily, " << "addr=" << dataUndefSlice[index].Address() << maple::endl; + } else { + LINKER_LOG(ERROR) << "not resolved lazily, " << fromUndef << ", " << index << ", " << dataUndefSlice.Size() << + ", " << dataDefSlice.Size() << " in " << mplInfo.name << maple::endl; + return true; + } + } else { + superTableSlice[i].index = reinterpret_cast(dataUndefSlice[index].Address()); + LINKER_DLOG(mpllinker) << "undef, addr=" << std::hex << superTableSlice[i].index << maple::endl; + } + } else if (!fromUndef && index < dataDefSlice.Size() && !dataDefSlice.Empty()) { + if (mplInfo.IsFlag(kIsLazy) && pInvoker->GetAddrBindingState(dataDefSlice, index) != kBindingStateResolved) { + size_t muidSize = mplInfo.GetTblSize(kDataDefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo.GetTblBegin(kDataDefMuid), muidSize); + void *addr = pInvoker->Get()->ResolveClassSymbol( + mplInfo, dataDefSlice, muidSlice, index, nullptr, fromUpper, true); + if (addr != nullptr) { + superTableSlice[i].index = reinterpret_cast(addr); + LINKER_VLOG(lazybinding) << "resolved lazily, " << "addr=" << dataDefSlice[index].Address() << maple::endl; + } else { + LINKER_LOG(ERROR) << "not resolved lazily, " << fromUndef << ", " << index << ", " << dataUndefSlice.Size() << + ", " << dataDefSlice.Size() << " in " << mplInfo.name << maple::endl; + return true; + } + } else { + superTableSlice[i].index = reinterpret_cast(dataDefSlice[index].Address()); + LINKER_DLOG(mpllinker) << "def, addr=" << std::hex << superTableSlice[i].index << maple::endl; + } + } else { + return true; + } + } + return false; +} + +// Resolve all super-class symbols for the .so file of 'handle'. 
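+// Each kDataSuperClass entry is either already an address or an index into the data def/undef
+// tables; DoResolveSuperClassSymbol() patches indices to class addresses and, for lazily bound
+// files, resolves the class through the lazy-binding path on the spot.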
+bool Linker::ResolveSuperClassSymbol(LinkerMFileInfo &mplInfo) { + if (mplInfo.IsFlag(kIsSuperClassResolved) || mplInfo.IsFlag(kIsLazy)) { + return true; + } + + bool hasNotResolved = false; + size_t superSize = mplInfo.GetTblSize(kDataSuperClass); + if (superSize == 0) { + mplInfo.SetFlag(kIsSuperClassResolved, true); + LINKER_LOG(ERROR) << "dataSuperClass table is null in " << mplInfo.name << maple::endl; + return true; + } + IndexSlice superTableSlice = IndexSlice(mplInfo.GetTblBegin(kDataSuperClass), superSize); + size_t dataUndefSize = mplInfo.GetTblSize(kDataUndef); + AddrSlice dataUndefSlice(mplInfo.GetTblBegin(kDataUndef), dataUndefSize); + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + for (size_t i = 0; i < superSize; ++i) { + hasNotResolved = DoResolveSuperClassSymbol(mplInfo, superTableSlice, dataUndefSlice, dataDefSlice, i); + } + if (!hasNotResolved) { + mplInfo.SetFlag(kIsSuperClassResolved, true); + LINKER_VLOG(mpllinker) << "successfully resolved super-class for " << mplInfo.name << maple::endl; + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to resolve all super-class for " << mplInfo.name << maple::endl; + return false; + } +} + +// Resolve all super-class symbols for the .so file of 'handle'. +bool Linker::ResolveGCRootSymbol(LinkerMFileInfo &mplInfo) { + if (mplInfo.IsFlag(kIsGCRootListResolved)) { + LINKER_DLOG(mpllinker) << "already resolved for " << mplInfo.name << maple::endl; + return true; + } + + bool hasNotResolved = false; + size_t gcRootSize = mplInfo.GetTblSize(kDataGcRoot); + if (gcRootSize == 0) { + mplInfo.SetFlag(kIsGCRootListResolved, true); + LINKER_LOG(ERROR) << "gcRootTable is null in " << mplInfo.name << maple::endl; + return true; + } + LinkerGCRootTableItem *gcRootTable = mplInfo.GetTblBegin(kDataGcRoot); + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + + for (size_t i = 0; i < gcRootSize; ++i) { + LinkerRef ref(gcRootTable[i].index); + if (ref.IsIndex()) { // Index + size_t index = ref.GetIndex(); + if (!ref.IsFromUndef() && index < dataDefSize) { + gcRootTable[i].index = reinterpret_cast(GetDefTableAddress(mplInfo, + dataDefSlice, static_cast(index), false)); + } else { + hasNotResolved = true; + } + } + } + if (!hasNotResolved) { + mplInfo.SetFlag(kIsGCRootListResolved, true); + LINKER_VLOG(mpllinker) << "successfully resolved GCRoots for " << mplInfo.name << maple::endl; + return true; + } else { + LINKER_VLOG(mpllinker) << "failed to resolve all GCRoots for " << mplInfo.name << maple::endl; + return false; + } +} + +// Look up the MUID in both method table and data table. +// Also see void *dlsym(void *handle, const char *symbol) +void *Linker::LookUpSymbolAddress(const void *handle, const MUID &muid) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromHandle, handle); + if (mplInfo != nullptr) { + return LookUpSymbolAddress(mplInfo, muid); + } + return nullptr; +} + +// Look up the MUID in both method table and data table. 
+// Also see void *dlsym(void *handle, const char *symbol) +void *Linker::LookUpSymbolAddress(LinkerMFileInfo &mplInfo, const MUID &muid) { + void *symbol = nullptr; + size_t index = 0; + symbol = reinterpret_cast(pInvoker->LookUpMethodSymbolAddress(mplInfo, muid, index)); + if (symbol == nullptr) { + symbol = reinterpret_cast(pInvoker->LookUpDataSymbolAddress(mplInfo, muid, index)); + } + return symbol; +} + +// Look up the MUID in both method table and data table. +// Also see void *dlsym(void *handle, const char *symbol) +void *Linker::LookUpSymbolAddress(const MUID &muid) { + void *addr = nullptr; + auto handle = [this, &muid, &addr](LinkerMFileInfo &mplInfo)->bool { + addr = this->LookUpSymbolAddress(mplInfo, muid); + if (addr != nullptr) { + LINKER_VLOG(mpllinker) << "found address of " << muid.ToStr() << ", in " << mplInfo.name << maple::endl; + return true; + } + return false; + }; + (void)pInvoker->mplInfoList.FindIf(handle); + return addr; +} + +void Linker::InitLinkerMFileInfoIgnoreSysCL(LinkerMFileInfo &mplInfo) { + mplInfo.hash = pInvoker->GetValidityCode(mplInfo); + mplInfo.hashOfDecouple = pInvoker->GetValidityCodeForDecouple(mplInfo); + bool isBoot = (mplInfo.classLoader == nullptr || + pInvoker->IsBootClassLoader(reinterpret_cast(mplInfo.classLoader))); + mplInfo.SetFlag(kIsBoot, isBoot); + if (!pInvoker->IsSystemClassLoader(reinterpret_cast(mplInfo.classLoader)) && + GetLoadState() == kLoadStateApk) { + pInvoker->AddMultiSoPendingCount(); + if (GetAppLoadState() == kAppLoadBaseOnlyReady) { + SetAppLoadState(kAppLoadBaseOnly); + + // For multi-so, set kGlobalAppBaseStr as + // "/data/app/com.sina.weibolite-ylociBbJwvlbG4TkHkRGkQ==/maple/arm64/" + // without "mapleclasses.so" + size_t posSeparator = mplInfo.name.rfind("/"); + SetAppBaseStr(mplInfo.name.substr(0, posSeparator + 1)); + LINKER_VLOG(mpllinker) << "GlobalAppBaseStr=" << GetAppBaseStr().c_str() << " in " << mplInfo.name << maple::endl; + } else if (GetAppLoadState() == kAppLoadBaseOnly) { + size_t posSeparator = mplInfo.name.rfind("/"); + if (mplInfo.name.substr(0, posSeparator + 1) != GetAppBaseStr()) { + LINKER_VLOG(lazybinding) << "kAppLoadBaseOnly-->kAppLoadBaseAndOthers, " << mplInfo.name << maple::endl; + SetAppLoadState(kAppLoadBaseAndOthers); + } + } + } +} + +void Linker::InitLinkerMFileInfoTableAddr(LinkerMFileInfo &mplInfo) const { + static const void *mapleCoreElfStart = nullptr; + uint64_t *rangeStart = static_cast(GetSymbolAddr(mplInfo.handle, kRangeTableFunc, true)); + uint64_t *rangeEnd = static_cast(GetSymbolAddr(mplInfo.handle, kRangeTableEndFunc, true)); + mplInfo.tableAddr = reinterpret_cast(rangeStart); + uint64_t maxLength = static_cast(rangeEnd - rangeStart) / kTable2ndDimMaxCount; + mplInfo.rangeTabSize = static_cast(maxLength); + + mplInfo.elfStart = GetSymbolAddr(mplInfo.handle, kMapleStartFunc, true); + mplInfo.elfEnd = GetSymbolAddr(mplInfo.handle, kMapleEndFunc, true); + + // Update the start and end address of the maple file(ELF). according by libmaplecore-all.so + if (mapleCoreElfStart == mplInfo.elfStart) { + mplInfo.elfStart = nullptr; + } + + if (mplInfo.elfStart == nullptr || mplInfo.elfEnd == nullptr) { + // Update with the start of range table. + void *start = mplInfo.tableAddr; + mplInfo.elfStart = (mplInfo.elfStart == nullptr || mplInfo.elfStart > start) ? start : mplInfo.elfStart; + // Update with outline of range table end. + void *end = mplInfo.tableAddr + maxLength; + mplInfo.elfEnd = mplInfo.elfEnd > end ? mplInfo.elfEnd : end; + // Start from kVTable, ignore kRange. 
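+    // Every tableAddr row stores a [begin, end] pair (kTable1stIndex/kTable2ndIndex); the loop
+    // below widens elfStart/elfEnd over all of them so the computed range covers the whole
+    // maple image when the dedicated start/end symbols are unavailable.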
+ for (size_t i = static_cast(kVTable); i < static_cast(maxLength); ++i) { + // Update the start address. + start = mplInfo.tableAddr[i][kTable1stIndex]; + mplInfo.elfStart = (start != nullptr && mplInfo.elfStart > start) ? start : mplInfo.elfStart; + // Update the end address. + end = mplInfo.tableAddr[i][kTable2ndIndex]; + mplInfo.elfEnd = mplInfo.elfEnd > end ? mplInfo.elfEnd : end; + } + } + if (mplInfo.name == maple::fs::kLibcorePath) { + mapleCoreElfStart = mplInfo.elfStart; + } +} + +// Set the maple file's handle and path name. +// We save them in a map, then we can query the path by handle in the map. +// Also see GetDlopenMapleFiles(). +bool Linker::InitLinkerMFileInfo(LinkerMFileInfo &mplInfo, int32_t pos) { + InitLinkerMFileInfoTableAddr(mplInfo); + // Ignore the system class loader. + InitLinkerMFileInfoIgnoreSysCL(mplInfo); + InitMethodSymbol(mplInfo); + InitDataSymbol(mplInfo); + +#ifdef LINKER_DECOUPLE + DecoupleMFileInfo *dpInfo = reinterpret_cast(&mplInfo); + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + size_t dataUndefSize = mplInfo.GetTblSize(kDataUndef); + dpInfo->dataDefSlice = AddrSlice(dpInfo->GetTblBegin(kDataDefOrig), dataDefSize); + dpInfo->dataUndefSlice = AddrSlice(dpInfo->GetTblBegin(kDataUndef), dataUndefSize); + pInvoker->Get()->InitDecoupledClasses(dpInfo); +#endif // LINKER_DECOUPLE + + mplInfo.startHotStrTab = reinterpret_cast(GetSymbolAddr(mplInfo.handle, kStartHotStrTabBegin, true)); + mplInfo.bothHotStrTab = reinterpret_cast(GetSymbolAddr(mplInfo.handle, kBothHotStrTabBegin, true)); + mplInfo.runHotStrTab = reinterpret_cast(GetSymbolAddr(mplInfo.handle, kRunHotStrTabBegin, true)); + mplInfo.coldStrTab = reinterpret_cast(GetSymbolAddr(mplInfo.handle, kColdStrTabBegin, true)); + mplInfo.coldStrTabEnd = reinterpret_cast(GetSymbolAddr(mplInfo.handle, kColdStrTabEnd, true)); + + mplInfo.rometadataFieldStart = GetSymbolAddr(mplInfo.handle, kMetadataFieldStart, true); + mplInfo.rometadataFieldEnd = GetSymbolAddr(mplInfo.handle, kMetadataFieldEnd, true); + + mplInfo.rometadataMethodStart = GetSymbolAddr(mplInfo.handle, kMetadataMethodStart, true); + mplInfo.rometadataMethodEnd = GetSymbolAddr(mplInfo.handle, kMetadataMethodEnd, true); + + mplInfo.romuidtabStart = GetSymbolAddr(mplInfo.handle, kMuidTabStart, true); + mplInfo.romuidtabEnd = GetSymbolAddr(mplInfo.handle, kMuidTabEnd, true); + + pInvoker->mplInfoNameMap.Append(mplInfo); + pInvoker->mplInfoHandleMap.Append(mplInfo); + pInvoker->mplInfoList.Append(mplInfo, pos); + pInvoker->mplInfoListCLMap.Append(mplInfo, pos); + bool isSuccess = pInvoker->mplInfoElfAddrSet.Append(mplInfo); + __MRT_ASSERT(isSuccess, "mplInfoElfAddrSet.insert failed\n"); + if (mplInfo.IsFlag(kIsLazy)) { + isSuccess = pInvoker->mplInfoElfAddrLazyBindingSet.Append(mplInfo); + __MRT_ASSERT(isSuccess, "mplInfoElfAddrLazyBindingSet.insert failed\n"); +#if defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + pInvoker->PreLinkLazyMethod(&mplInfo); +#endif // defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + } + return mplInfo.IsFlag(kIsLazy); +} + +void Linker::FreeAllCacheTables(const MObject *classLoader) const { +#ifdef LINKER_RT_CACHE + auto handle = [this](LinkerMFileInfo &mplInfo) { + this->pInvoker->Get()->FreeAllTables(mplInfo); + }; + pInvoker->mplInfoListCLMap.ForEach(classLoader, handle); +#endif + (void)classLoader; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_api.cpp b/src/mrt/compiler-rt/src/linker/linker_api.cpp new file mode 100644 index 0000000000..fec0f089d8 --- 
/dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_api.cpp @@ -0,0 +1,974 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "linker_api.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "file_system.h" +#include "object_type.h" +#include "linker/linker.h" +#include "linker/linker_inline.h" +#include "linker/linker_model.h" +#include "linker/linker_cache.h" +#include "linker/linker_debug.h" +#include "linker/linker_hotfix.h" +#include "linker/linker_lazy_binding.h" +#include "linker/linker_gctib.h" +#ifdef LINKER_DECOUPLE +#include "linker/decouple/linker_decouple.h" +#endif +#include "utils/name_utils.h" +#include "file_layout.h" +#include "collector/cp_generator.h" + +using namespace maple; +namespace maplert { +using namespace linkerutils; +void LinkerInvoker::PreInit() { + pLoader = &LoaderAPI::Instance(); +#ifdef LINKER_RT_CACHE + SetCachePath(kLinkerRootCachePath); +#endif +} +void LinkerInvoker::PostInit() { + multiSoPendingCount = 0; +} +void LinkerInvoker::UnInit() { + multiSoPendingCount = 0; +} +#ifdef LINKER_RT_CACHE +void LinkerInvoker::SetCachePath(const char *path) { + Get()->SetPath(path); +} +bool LinkerInvoker::GetCachePath(LinkerMFileInfo &mplInfo, std::string &path, LinkerCacheType cacheType) { + return Get()->GetPath(mplInfo, path, cacheType, maplert::linkerutils::GetLoadState()); +} +#endif +bool LinkerInvoker::LinkClassLazily(jclass klass) { + Get()->LinkClass(reinterpret_cast(klass)); + return reinterpret_cast(klass)->IsLazyBinding(); +} +bool LinkerInvoker::ReGenGctib4Class(jclass classInfo) { +#ifdef LINKER_DECOUPLE + MplGctibAnalysis &gctib = Get()->GetGctibResolver(); + return gctib.ReGenGctib4Class(reinterpret_cast(classInfo)); +#else + return ReGenGctib(reinterpret_cast(classInfo)); +#endif +} +uint64_t LinkerInvoker::DumpMetadataSectionSize(std::ostream &os, void *handle, const std::string sectionName) { + return Get()->DumpMetadataSectionSize(os, handle, sectionName); +} + +void LinkerInvoker::DumpAllMplSectionInfo(std::ostream &os) { + Get()->DumpAllMplSectionInfo(os); +} +void LinkerInvoker::DumpAllMplFuncProfile(std::unordered_map> &funcProfileRaw) { + Get()->DumpAllMplFuncProfile(funcProfileRaw); +} +void LinkerInvoker::DumpAllMplFuncIRProfile(std::unordered_map &funcProfileRaw) { + Get()->DumpAllMplFuncIRProfile(funcProfileRaw); +} +void LinkerInvoker::DumpBBProfileInfo(std::ostream &os) { + Get()->DumpBBProfileInfo(os); +} +void LinkerInvoker::ClearAllMplFuncProfile() { + auto handle = [](LinkerMFileInfo &mplInfo)->void { + size_t size = mplInfo.GetTblSize(kMethodProfile); + LinkerFuncProfileTableItem *pTable = mplInfo.GetTblBegin(kMethodProfile); + if (size == 0) { + return; + } + for (size_t i = 0; i < size; ++i) { + LinkerFuncProfileTableItem item = pTable[i]; + if (item.callTimes) { + (void)mplInfo.funProfileMap.insert(std::make_pair(static_cast(i), + FuncProfInfo(item.callTimes, static_cast(kLayoutBootHot)))); + } + } + }; + mplInfoList.ForEach(handle); +} + +// 1.ignore 
zygote process
+// 2.ignore system library in app process
+// 3.ignore library whose javaTextTableSize < 100KB
+// 4.release cold string table, JAVA_TEXT_TABLE and read-only memory of muid-tables:
+//   METHOD_INF_TABLE, METHOD_DEF_MUID_TABLE, DATA_DEF_MUID_TABLE, METHOD_MUID_INDEX_TABLE
+void LinkerInvoker::ReleaseBootPhaseMemory(bool isZygote, bool isSystemServer) {
+  if (isZygote) {
+    // release other zygote-only boot-memory
+    return;
+  }
+  auto workingLinkerMFileInfoList = mplInfoList.Clone();
+  for (auto mplInfo : workingLinkerMFileInfoList) {
+    // ignore system library memory release in app process, which reduces the release by about 7MB~8MB
+    if (!isSystemServer && !mplInfo->BelongsToApp()) {
+      continue;
+    }
+
+    // according to statistics, the library memory is small if its javaTextTableSize < 100KB
+    // ignore all the sections release with the small library
+    size_t javaTextTableSize = mplInfo->GetTblSize(kJavaText);
+    if (javaTextTableSize < kLeastReleaseMemoryByteSize) {
+      continue;
+    }
+
+    // release .java_text
+    void *startAddr = mplInfo->GetTblBegin(kJavaText);
+    void *endAddr = mplInfo->GetTblEnd(kJavaText);
+    ReleaseMemory(startAddr, endAddr);
+
+    mplInfo->ReleaseReadOnlyMemory();
+  }
+}
+
+bool LinkerInvoker::CheckLinkerMFileInfoElfBase(LinkerMFileInfo &mplInfo) {
+  if (mplInfo.elfEnd == nullptr) {
+    mplInfo.elfEnd = GetSymbolAddr(mplInfo.handle, "MRT_GetMapleEnd", true);
+  }
+  int result = dl_iterate_phdr(
+      [](struct dl_phdr_info *phdrInfo, size_t, void *data)->int {
+        if (!LOG_NDEBUG) {
+          std::string name(phdrInfo->dlpi_name);
+          if (name.find("libmaple") != std::string::npos) {
+            LINKER_DLOG(mpllinker) << "name=" << phdrInfo->dlpi_name << " (" << phdrInfo->dlpi_phnum << " segments)" <<
+                "base=" << std::hex << phdrInfo->dlpi_addr << maple::endl;
+            for (int i = 0; i < phdrInfo->dlpi_phnum; ++i) {
+              LINKER_DLOG(mpllinker) << "header[" << i << "][type:" << phdrInfo->dlpi_phdr[i].p_type << "]: address=" <<
+                  std::hex << reinterpret_cast(phdrInfo->dlpi_addr + phdrInfo->dlpi_phdr[i].p_vaddr) <<
+                  maple::endl;
+            }
+          }
+        }
+        LinkerMFileInfo *mplInfo = static_cast(data);
+        if (mplInfo->name != phdrInfo->dlpi_name) {
+          // If not equal, check the next dl_phdr_info
+          return 0;
+        }
+        mplInfo->elfBase = reinterpret_cast(phdrInfo->dlpi_addr);
+        return 1;
+      }, &mplInfo);
+  // To match abnormal path, such as /data/maple/lib64/libmaplecore-all.so -> /system/lib64/libmaplecore-all.so
+  bool flagResult = static_cast(result);
+  if (!flagResult) {
+    result = dl_iterate_phdr(
+        [](struct dl_phdr_info *phdrInfo, size_t, void *data)->int {
+          LinkerMFileInfo *mplInfo = static_cast(data);
+          size_t pos = mplInfo->name.rfind('/');
+          if (pos == std::string::npos) {
+            return 0;
+          }
+          std::string fileName = mplInfo->name.substr(pos + 1);
+          std::string name(phdrInfo->dlpi_name);
+          if (name.find(fileName) == std::string::npos) {
+            // If not equal, check the next dl_phdr_info
+            return 0;
+          }
+          mplInfo->elfBase = reinterpret_cast(phdrInfo->dlpi_addr);
+          LINKER_VLOG(mpllinker) << "for abnormal path, elfBase=" << std::hex << mplInfo->elfBase << ", for " <<
+              mplInfo->name << maple::endl;
+          return 1;
+        }, &mplInfo);
+  }
+
+  LINKER_VLOG(mpllinker) << "base=" << mplInfo.elfBase << ", " <<
+      GetSymbolAddr(mplInfo.handle, "MRT_GetMapleEnd", true) <<
+      ", range={" << mplInfo.elfStart << ", " << mplInfo.elfEnd << "}, for " << mplInfo.name << maple::endl;
+  return static_cast(result);
+}
+
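+// A minimal standalone sketch of the dl_iterate_phdr() pattern used above:
+// walk every loaded object and record the relocation base of the one whose
+// path matches exactly. FindLoadBase is a hypothetical helper shown for
+// illustration only; the runtime code above additionally falls back to a
+// basename match for remapped paths and logs each program header.
+static uintptr_t FindLoadBase(const std::string &path) {
+  struct Query {
+    const std::string *path;
+    uintptr_t base;
+  } query = { &path, 0 };
+  (void)dl_iterate_phdr(
+      [](struct dl_phdr_info *info, size_t, void *data)->int {
+        Query *q = static_cast<Query*>(data);
+        if (*q->path != info->dlpi_name) {
+          return 0; // not this object, keep iterating
+        }
+        q->base = static_cast<uintptr_t>(info->dlpi_addr); // load (relocation) base
+        return 1; // a non-zero return stops the iteration
+      }, &query);
+  return query.base;
+}
+
+// Check if the maple file of name is open for the maple file.
+// Return true, if it's valid maple file handle, or return false.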
+bool LinkerInvoker::ContainLinkerMFileInfo(const std::string &name) { + return mplInfoNameMap.Find(name); +} + +// Check if the handle is open for the maple file. +// Return true, if it's valid maple file handle, or return false. +bool LinkerInvoker::ContainLinkerMFileInfo(const void *handle) { + return mplInfoHandleMap.Find(handle); +} + +bool LinkerInvoker::GetJavaTextInfo(const void *addr, LinkerMFileInfo **mplInfo, LinkerLocInfo &info, bool getName) { + *mplInfo = GetLinkerMFileInfo(kFromPC, addr); + if (*mplInfo != nullptr && LocateAddress(**mplInfo, addr, info, getName)) { + return true; + } + LINKER_VLOG(mpllinker) << "failed, not found " << addr << maple::endl; + return false; +} + +void LinkerInvoker::GetStrTab(jclass klass, StrTab &strTab) { + const MClass *dCl = reinterpret_cast(klass); + if (dCl->IsProxy() && dCl->GetNumOfSuperClasses() >= 1) { + dCl = dCl->GetSuperClassArray()[1]; + } + constexpr int32_t cacheSize = 4; + LinkerMFileCache *lc = static_cast(maple::tls::GetTLS(maple::tls::kSlotMFileCache)); + if (lc == nullptr) { + lc = new (std::nothrow) LinkerMFileCache; + if (lc == nullptr) { + LOG(FATAL) << "new LinkerMFileCache fail" << maple::endl; + } + } + LinkerMFileInfo *mplInfo = nullptr; + for (uint8_t i = 0; i < cacheSize; ++i) { + if (lc->clsArray[i] == dCl) { + mplInfo = lc->mpArray[i]; + } + } + if (mplInfo == nullptr) { + mplInfo = GetLinkerMFileInfo(kFromMeta, dCl, dCl->IsLazyBinding()); + lc->clsArray[lc->idx] = const_cast(dCl); + lc->mpArray[lc->idx] = mplInfo; + lc->idx = (lc->idx + 1) % cacheSize; + maple::tls::StoreTLS(static_cast(lc), maple::tls::kSlotMFileCache); + } + if (mplInfo == nullptr) { + LINKER_VLOG(mpllinker) << dCl->GetName() << ", mplInfo is nullptr" << maple::endl; + return; + } + strTab.startHotStrTab = mplInfo->startHotStrTab; + strTab.bothHotStrTab = mplInfo->bothHotStrTab; + strTab.runHotStrTab = mplInfo->runHotStrTab; + strTab.coldStrTab = mplInfo->coldStrTab; +} + +char *LinkerInvoker::GetCString(jclass klass, uint32_t srcIndex) { + const MClass *dCl = reinterpret_cast(klass); + constexpr int32_t realIndexStart = 2; + LinkerMFileInfo *mplInfo = GetLinkerMFileInfo(kFromMeta, dCl, dCl->IsLazyBinding()); + if (mplInfo == nullptr) { + LINKER_VLOG(mpllinker) << dCl->GetName() << ", mplInfo is nullptr" << maple::endl; + return nullptr; + } + char *cStrStart = nullptr; + uint32_t index = srcIndex & 0xFFFFFFFF; + // 0x03 is 0011, index & 0x03 is to check isHotReflectStr. 
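+  // Layout recap (as implied by the code below): the two low bits of the
+  // index are a hotness tag and the remaining bits are a byte offset into
+  // the selected string table:
+  //   bits 1..0 : 0  -> cold table (coldStrTab)
+  //               !0 -> one of the hot tables, chosen by (tag - kCStringShift)
+  //   bits 31..2: byte offset within that table (index >> 2)
+  // For example, an index of 0x1D has low bits 0b01 (a hot table) and maps to
+  // byte offset 0x1D >> 2 = 0x07 in that table.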
+ bool isHotReflectStr = (index & 0x03) != 0; + uint32_t cStrIndex = index >> realIndexStart; + if (isHotReflectStr) { + uint32_t tag = (index & 0x03) - kCStringShift; + if (tag == static_cast(kLayoutBootHot)) { + cStrStart = mplInfo->startHotStrTab; + } else if (tag == static_cast(kLayoutBothHot)) { + cStrStart = mplInfo->bothHotStrTab; + } else { + cStrStart = mplInfo->runHotStrTab; + } + } else { + cStrStart = mplInfo->coldStrTab; + } + return cStrStart + cStrIndex; +} + +void LinkerInvoker::DestroyMFileCache() { + LinkerMFileCache *lc = static_cast(maple::tls::GetTLS(maple::tls::kSlotMFileCache)); + if (lc != nullptr) { + delete lc; + lc = nullptr; + maple::tls::StoreTLS(nullptr, maple::tls::kSlotMFileCache); + } +} + +// Thread-Unsafe +bool LinkerInvoker::UpdateMethodSymbolAddress(jmethodID method, uintptr_t newAddr) { + MethodMeta *methodInfo = reinterpret_cast(method); + MClass *klass = methodInfo->GetDeclaringClass(); + if (klass == nullptr) { + LINKER_LOG(FATAL) << "klass is nullptr" << maple::endl; + } + // build symbol name + std::string symbolName(klass->GetName()); + symbolName += "|"; + symbolName += methodInfo->GetName(); + symbolName += "|"; + symbolName += methodInfo->GetSignature(); + // generate unique id + MUID symbolId = GetMUID(namemangler::EncodeName(symbolName), true); + if (!UpdateMethodSymbolAddressDef(klass, symbolId, newAddr)) { + return false; + } + UpdateNode node(klass, symbolId, 0, newAddr); + // traverse all libraries and update external reference table + (void)ForEachDoAction(this, &LinkerInvoker::UpdateMethodSymbolAddressUndef, &node); + // constructor, private or static method + if (methodInfo->IsDirectMethod()) { + return true; // all done + } + uintptr_t oldAddr = reinterpret_cast(methodInfo->GetFuncAddress()); + node.oldAddr = oldAddr; +#ifdef LINKER_DECOUPLE + if (!MRT_CLASS_IS_DECOUPLE(klass)) { + // traverse all libraries and update vtable and itable (non-decoupled class) + (void)ForEachDoAction(this, &LinkerInvoker::UpdateMethodSymbolAddressDecouple, &node); + } else { + // update vtable and itable in the perm space (decoupled class) + MRT_VisitDecoupleObjects([oldAddr, newAddr](address_t objAddr) { + int tag = DecoupleAllocHeader::GetTag(objAddr); + if (static_cast(tag) != kITabAggregate && static_cast(tag) != kVTabArray) { + return; // next block + } + size_t blockSize = DecoupleAllocHeader::GetSize(objAddr); + size_t size = static_cast(blockSize / sizeof(LinkerVTableItem)); + LinkerVTableItem *pTable = reinterpret_cast(objAddr); + if (size != 0 && pTable != nullptr) { + for (size_t i = 0; i < size; ++i) { + if (static_cast(pTable[i].index) == oldAddr) { // compare by address + LINKER_VLOG(mpllinker) << "Update [perm]" << (pTable + i) << ":" << tag << ":" << oldAddr << maple::endl; + pTable[i].index = newAddr; + } + } + } + }); + } +#else + // traverse all libraries and update vtable and itable (non-decoupled class) + (void)ForEachDoAction(this, &LinkerInvoker::UpdateMethodSymbolAddressDecouple, &node); +#endif + return true; +} + +jclass LinkerInvoker::GetSuperClass(ClassMetadata **addr) { + MClass *klass = reinterpret_cast(__atomic_load_n(addr, __ATOMIC_ACQUIRE)); + // To check if the super class is not resolved. + // If so, we'd try to resolve it once again. 
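+  // An unresolved slot does not hold a real ClassMetadata pointer yet: it
+  // holds a LinkerRef-encoded table index. IsFromUndef() below says whether
+  // the index points into the undefined-symbol table (to be satisfied by
+  // another .so) or into this file's own definition table, and GetIndex()
+  // gives the position inside that table; the branches then read the real
+  // address out of the corresponding slice.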
+ LinkerRef ref(klass); + if (ref.IsIndex()) { + size_t index = ref.GetIndex(); + bool fromUndef = ref.IsFromUndef(); + LINKER_LOG(ERROR) << "unresolved super class=" << klass << ", " << index << + ", fromUndef=" << fromUndef << maple::endl; + LinkerMFileInfo *mplInfo = SearchAddress(addr); + if (mplInfo == nullptr) { + LINKER_LOG(FATAL) << "LinkerMFileInfo is null, addr=" << addr << ", " << klass << ", " << index << maple::endl; + return 0; + } + size_t dataUndefSize = mplInfo->GetTblSize(kDataUndef); + AddrSlice dataUndefSlice(mplInfo->GetTblBegin(kDataUndef), dataUndefSize); + size_t dataDefSize = mplInfo->GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo->GetTblBegin(kDataDef), dataDefSize); + if (fromUndef && index < dataUndefSize && !dataUndefSlice.Empty()) { + if (mplInfo->IsFlag(kIsLazy)) { + LINKER_LOG(FATAL) << "should not resolve lazily, " << fromUndef << ", " << index << ", " << dataUndefSize << + ", " << dataDefSize << " in " << mplInfo->name << maple::endl; + } else { + klass = reinterpret_cast(dataUndefSlice[index].Address()); + LINKER_LOG(ERROR) << "(UNDEF), klass=" << klass->GetName() << "addr=" << + dataUndefSlice[index].Address() << " in " << mplInfo->name << maple::endl; + } + } else if (!fromUndef && index < dataDefSize && !dataDefSlice.Empty()) { + if (mplInfo->IsFlag(kIsLazy)) { + LINKER_LOG(FATAL) << "(DEF), should not resolve lazily, " << fromUndef << ", " << index << ", " << + dataUndefSize << ", " << dataDefSize << " in " << mplInfo->name << maple::endl; + } else { + klass = reinterpret_cast(GetDefTableAddress(*mplInfo, dataDefSlice, + static_cast(index), false)); + LINKER_LOG(ERROR) << "(DEF), klass=" << klass->GetName() << "addr=" << + dataDefSlice[index].Address() << " in " << mplInfo->name << maple::endl; + } + } else { + LINKER_LOG(FATAL) << "(DEF), not resolved, " << fromUndef << ", " << index << ", " << dataUndefSize << ", " << + dataDefSize << " in " << mplInfo->name << maple::endl; + } + } + + return reinterpret_cast(klass); +} + +// Locate the address for method, which defined in the .so of 'handle'. 
+// Also see int dladdr(void *addr, Dl_info *info) +bool LinkerInvoker::LocateAddress(const void *handle, const void *addr, LinkerLocInfo &info, bool getName) { + LinkerMFileInfo *mplInfo = GetLinkerMFileInfo(kFromHandle, handle); + if (UNLIKELY(mplInfo != nullptr)) { + return LocateAddress(&mplInfo, addr, info, getName); + } + return false; +} + +void LinkerInvoker::ResolveColdClassSymbol(jclass cls) { + MClass *klass = reinterpret_cast(cls); + static std::mutex resolveClassSymbolMutex; + LinkerMFileInfo *mplInfo = SearchAddress(*klass, kTypeClass); + if (mplInfo == nullptr) { + LINKER_DLOG(mpllinker) << "not find so for class=" << klass->GetName() << maple::endl; + return; + } + { + std::lock_guard lock(resolveClassSymbolMutex); + if (!klass->IsColdClass()) { + return; + } + +#ifdef LINKER_DECOUPLE + if (mplInfo->IsFlag(kIsLazy) && mplInfo->GetDecoupleLevel() != 0) { + LINKER_LOG(FATAL) << "failed to resolve class = " << klass->GetName() << " for " << mplInfo->name << + ", lazy=" << klass->IsLazyBinding() << ", cold=" << klass->IsColdClass() << ", decouple=" << + klass->IsDecouple() << maple::endl; + } +#endif + + __MRT_Profile_ClassMeta(*klass); +#ifdef LINKER_DECOUPLE + if (!MRT_CLASS_IS_DECOUPLE(klass)) { + ResolveVTableSymbolByClass(*mplInfo, klass, true); + ResolveVTableSymbolByClass(*mplInfo, klass, false); + } +#else + ResolveVTableSymbolByClass(*mplInfo, klass, true); + ResolveVTableSymbolByClass(*mplInfo, klass, false); +#endif + + ResolveSuperClassSymbolByClass(*mplInfo, klass); + klass->ReSetFlag(0xF7FF); // 0xF7FF is Cold Flag, Clear Cold Flag + } +} + +jclass LinkerInvoker::InvokeClassLoaderLoadClass(jobject clsLoader, const std::string &className) { + MObject *classLoader = reinterpret_cast(clsLoader); + MethodMeta *method = nullptr; + MClass *classLoaderClass = reinterpret_cast(MRT_ReflectGetObjectClass( + reinterpret_cast(classLoader))); + + // To find the loadClass() method (for the classLoader class) in cached map. + method = mplClassLoaderLoadClassMethodMap.Find(classLoaderClass); + // If no cached method found + if (method == nullptr) { + // Find the loadClass() method in classLoader class or its super class. 
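+    // "(Ljava/lang/String;)Ljava/lang/Class;" is the standard JNI/dex method
+    // descriptor for `Class loadClass(String name)`: one reference argument of
+    // type java.lang.String, returning java.lang.Class.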
+ method = classLoaderClass->GetMethod("loadClass", "(Ljava/lang/String;)Ljava/lang/Class;"); + if (method == nullptr) { + LINKER_LOG(ERROR) << "failed, MethodMeta is null, " << classLoaderClass->GetName() << maple::endl; + return nullptr; + } + // Insert it into cached map + mplClassLoaderLoadClassMethodMap.Append(classLoaderClass, method); + } + + std::string dotName = nameutils::SlashNameToDot(className); + MClass *ret = nullptr; + ScopedHandles sHandles; + ObjHandle classLoaderRef(classLoader); + ObjHandle javaClassName(NewStringUTF(dotName.c_str(), static_cast(dotName.length()))); + // Method shouldn't be nullptr + if (!method->NeedsInterp()) { // Method is in compiled code + uintptr_t funcAddr = method->GetFuncAddress(); + ret = RuntimeStub::FastCallCompiledMethod(funcAddr, classLoaderRef(), javaClassName()); + } else { // Method needs to be interpreted + address_t param = javaClassName.AsRaw(); + ret = reinterpret_cast(MRT_ReflectInvokeMethodAjobject( + classLoaderRef.AsJObj(), reinterpret_cast(method), reinterpret_cast(¶m))); + } + LINKER_VLOG(lazybinding) << "(" << dotName << "), classLoader=" << classLoaderRef() << ", ret=" << ret << + ", has exception=" << MRT_HasPendingException() << maple::endl; + ObjHandle ex(MRT_PendingException()); + if (ex() != 0) { + if (VLOG_IS_ON(lazybinding)) { + std::string exceptionString; + MRT_DumpException(reinterpret_cast(ex()), &exceptionString); + LINKER_VLOG(mpllinker) << "(" << dotName << "), exception=" << exceptionString.c_str() << maple::endl; + LINKER_LOG(ERROR) << "(), Pending Exception: " << reinterpret_cast(ex())->GetClass()->GetName() << + ", with classLoader=" << classLoaderRef() << maple::endl; + } + MRT_ClearPendingException(); // Not throw by MRT_ThrowException_Unw((MObject*)ex); + // not dec ret because its class type and off heap + return nullptr; // Not found + } + return reinterpret_cast(ret); +} + +void *LinkerInvoker::LookUpSymbolAddress(const MUID &muid) { + return Get()->LookUpSymbolAddress(muid); +} + +MUID LinkerInvoker::GetMUID(const std::string symbol, bool forSystem) { + MUID muid; + if (symbol.empty()) { + LINKER_LOG(ERROR) << "failed, symbol is null." << maple::endl; + return muid; // All fields are 0 + } + GenerateMUID(symbol.c_str(), muid); + + if (forSystem) { + muid.SetSystemNameSpace(); + } else { + muid.SetApkNameSpace(); + } + return muid; +} + +bool LinkerInvoker::Add(ObjFile &objFile, jobject classLoader) { + std::lock_guard lock(mLinkLock); + // Filter non-maple .so out. + // + // Maple so includes two type: + // Type 1: maple so + // e.g. /system/lib64/libmaplecore-all.so + // /system/lib64/libmapleframework.so + // Type 2: apk wrapped maple so + // e.g. /system/priv-app/HwSystemServer/HwSystemServer.apk!/maple/arm64/mapleclasses.so + // /system/app/KeyChain/KeyChain.apk!/maple/arm64/mapleclasses.so + const void *handle = objFile.GetHandle(); + std::string libName = objFile.GetName(); + LINKER_VLOG(mpllinker) << "old name=" << libName << ", handle=" << handle << ", cl=" << classLoader << maple::endl; + std::string::size_type index = libName.rfind('/'); + if (index == std::string::npos) { + libName = maple::fs::kSystemLibPath + libName; + } + // Eliminate . and .. + if (libName.find("/./") != std::string::npos || libName.find("/../") != std::string::npos) { + if (libName.length() > PATH_MAX) { + LINKER_LOG(ERROR) << "failed: path exceeds limit! 
" << libName.length() << ", " << libName << maple::endl; + return false; + } + char canonical[PATH_MAX + 1] = { 0 }; + if (realpath(libName.c_str(), canonical)) { + libName = canonical; + } else { + return false; + } + } + + MFileInfoSource searchKey[] = { kFromHandle, kFromName }; + const void *data[] = { handle, libName.c_str() }; + for (uint32_t i = 0; i < sizeof(searchKey) / sizeof(MFileInfoSource); ++i) { + auto resInfo = GetLinkerMFileInfo(searchKey[i], data[i]); + if (resInfo != nullptr) { + objFile.SetMplInfo(*resInfo); + LINKER_VLOG(mpllinker) << "failed:" << libName << ", handle=" << handle << ", cl=" << classLoader << maple::endl; + return false; + } + } + + LINKER_VLOG(mpllinker) << "new name=" << libName << ", handle=" << handle << ", cl=" << classLoader << maple::endl; + CreateMplInfo(objFile, classLoader); + return true; +} + +void LinkerInvoker::CreateMplInfo(ObjFile &objFile, jobject classLoader) { + LinkerMFileInfo *mplInfo; +#ifdef LINKER_DECOUPLE + mplInfo = new (std::nothrow) DecoupleMFileInfo(); +#else + mplInfo = new (std::nothrow) LinkerMFileInfo(); +#endif + if (mplInfo == nullptr) { + LINKER_LOG(FATAL) << "new mplInfo failed" << maple::endl; + } + mplInfo->name = objFile.GetName(); + mplInfo->handle = const_cast(objFile.GetHandle()); + mplInfo->classLoader = classLoader; + mplInfo->SetFlag(kIsMethodDefResolved, false); + mplInfo->SetFlag(kIsDataDefResolved, false); + mplInfo->SetFlag(kIsVTabResolved, false); + mplInfo->SetFlag(kIsITabResolved, false); + int32_t pos = -1; // pos -1 means isn't hotfix + if (objFile.GetUniqueID() == 0) { // hotfix + pos = 0; + } + if (Get()->InitLinkerMFileInfo(*mplInfo, pos)) { + objFile.SetLazyBinding(); + } + objFile.SetMplInfo(*mplInfo); +} + +// Resolve all undefined symbols for all the maple .so. +bool LinkerInvoker::Resolve() { + std::lock_guard lock(mLinkLock); + bool ret = true; + if (!Get()->HandleSymbol()) { + ret = false; + } +#ifdef LINKER_DECOUPLE + if (!Get()->HandleDecouple()) { + ret = false; + } +#endif + return ret; +} + +// Resolve all undefined symbols for the single maple .so of 'handle'. +bool LinkerInvoker::Resolve(LinkerMFileInfo &mplInfo, bool decouple) { + std::lock_guard lock(mLinkLock); + bool ret = true; + if (!Get()->HandleSymbol(mplInfo)) { + ret = false; + } +#ifdef LINKER_DECOUPLE + if (decouple && !Get()->HandleDecouple(&mplInfo)) { + ret = false; + } +#else + (void)decouple; +#endif + + LINKER_VLOG(mpllinker) << "pre clinit " << mplInfo.name << maple::endl; + // invoke __MRT_PreinitModuleClasses() to preinit specified classes for current .so file + (void)GetSymbolAddr(mplInfo.handle, "MRT_PreinitModuleClasses", true); + return ret; +} + +#ifdef LINKER_DECOUPLE +bool LinkerInvoker::HandleDecouple(std::vector &mplList) { + if (mplList.size() > 0) { + std::lock_guard lock(mLinkLock); + return Get()->HandleDecouple(); + } + return true; +} +#endif + +// Notify all resolving jobs finished. +void LinkerInvoker::FinishLink(jobject classLoader) { + std::lock_guard lock(mLinkLock); + Get()->FreeAllCacheTables(reinterpret_cast(classLoader)); +} + +// Link all the maple .so library added by Add() before. +bool LinkerInvoker::Link() { + return Resolve(); +} + +// Just resolve the single maple file. MUST invoked Add() before. 
+bool LinkerInvoker::Link(LinkerMFileInfo &mplInfo, bool decouple) { + return Resolve(mplInfo, decouple); +} + +void LinkerInvoker::SetLoadState(LoadStateType state) { + maplert::linkerutils::SetLoadState(state); +#ifdef LINKER_RT_CACHE + Get()->Reset(); +#endif // LINKER_RT_CACHE +} + +void LinkerInvoker::SetLinkerMFileInfoClassLoader(const ObjFile &objFile, jobject classLoader) { + LinkerMFileInfo *mplInfo = nullptr; + mplInfo = GetLinkerMFileInfo(kFromHandle, objFile.GetHandle()); + if (mplInfo == nullptr) { + LINKER_LOG(ERROR) << "handle exists, failed to change classLoader, name=" << objFile.GetName() << ", handle=" << + objFile.GetHandle() << ", classLoader=" << classLoader << maple::endl; + return; + } + LinkerMFileInfo *mplInfo2 = GetLinkerMFileInfoByName(objFile.GetName()); + if (mplInfo2 == nullptr || mplInfo2 != mplInfo) { + LINKER_LOG(ERROR) << "name not exists or not equal with handle, " << "failed to change classLoader, name=" << + objFile.GetName() << ", handle=" << objFile.GetHandle() << ", classLoader=" << classLoader << maple::endl; + return; + } + mplInfo->classLoader = classLoader; +} + +void LinkerInvoker::SetClassLoaderParent(jobject classLoader, jobject newParent) { + std::lock_guard lock(mLinkLock); + Get()->SetClassLoaderParent(reinterpret_cast(classLoader), reinterpret_cast(newParent)); +} + +bool LinkerInvoker::InsertClassesFront(ObjFile &objFile, jobject classLoader) { + if (!Add(objFile, classLoader)) { + LINKER_LOG(ERROR) << "InsertClassesFront failed" << maple::endl; + return false; + } + std::lock_guard lock(mLinkLock); + Get()->InsertClassesFront(reinterpret_cast(classLoader), *(objFile.GetMplInfo()), + objFile.GetName()); + return true; +} +bool LinkerInvoker::IsFrontPatchMode(const std::string &path) { + std::lock_guard lock(mLinkLock); + return Get()->IsFrontPatchMode(path); +} +void LinkerInvoker::SetPatchPath(std::string &path, int32_t mode) { + std::lock_guard lock(mLinkLock); + Get()->SetPatchPath(path, mode); +} + +void LinkerInvoker::InitArrayCache(uintptr_t pc, uintptr_t addr) { + LinkerLocInfo locInfo; + LinkerMFileInfo *mplInfo = nullptr; + DataRefOffset *srcAddr = reinterpret_cast(addr); + bool isJava = GetJavaTextInfo(reinterpret_cast(pc), &mplInfo, locInfo, false); + if (!isJava) { + MRT_ThrowNullPointerExceptionUnw(); + return; + } + DataRefOffset *tabStart = mplInfo->GetTblBegin(kArrayClassCacheIndex); + size_t tableSize = mplInfo->GetTblSize(kArrayClassCacheIndex); + if (tabStart > srcAddr || + ((reinterpret_cast(tabStart) + tableSize) <= reinterpret_cast(srcAddr))) { + MRT_ThrowNullPointerExceptionUnw(); + return; + } + + size_t index = srcAddr - tabStart; + DataRefOffset *classNameTabStart = mplInfo->GetTblBegin(kArrayClassCacheNameIndex); + DataRefOffset *classNameItem = classNameTabStart + index; + char *className = classNameItem->GetDataRef(); + uint64_t *md = JavaFrame::GetMethodMetadata(reinterpret_cast(locInfo.addr)); + MClass *callerCls = reinterpret_cast(JavaFrame::GetDeclaringClass(md)); + MClass *classArrayCache = MClass::GetClassFromDescriptor(callerCls, className); + if (classArrayCache == nullptr) { + MRT_CheckThrowPendingExceptionUnw(); + return; + } + srcAddr->SetRawValue(classArrayCache->AsUintptr()); +} +#ifdef __cplusplus +extern "C" { +#endif +__attribute__((aligned(4096), visibility("default"))) +uint8_t __BindingProtectRegion__[kBindingStateMax] = { 0 }; + +static int64_t sigvLazyBindingCountCinfUndef = 0; +static int64_t sigvLazyBindingCountCinfDef = 0; +static int64_t sigvLazyBindingCountDataUndef = 0; +static 
int64_t sigvLazyBindingCountDataDef = 0; +static int64_t sigvLazyBindingCountMethodUndef = 0; +static int64_t sigvLazyBindingCountMethodDef = 0; + +static inline int64_t IncSigvLazyBindingCount(int64_t &count) { + return __atomic_add_fetch(&count, 1, __ATOMIC_ACQ_REL); +} + +void InitProtectedRegion() { + if (mprotect(__BindingProtectRegion__, 4096, PROT_NONE)) { // 4096 is 2^12, region page size is 4k. + LINKER_LOG(ERROR) << "protect __BindingProtectRegion__ failed" << maple::endl; + } + LINKER_VLOG(lazybinding) << "__BindingProtectRegion__=" << static_cast(__BindingProtectRegion__) << + maple::endl; +} + +bool MRT_RequestLazyBindingForSignal(const SignalInfo &data) { + void *pc = data.pc; + void *offset = data.offset; + delete &data; + return MRT_RequestLazyBinding(offset, pc, true); +} + +bool MRT_RequestLazyBindingForInitiative(const void *data) { + LinkerMFileInfo *mplInfo = LinkerAPI::As().GetLinkerMFileInfoByAddress(data, true); + if (mplInfo == nullptr) { + LINKER_VLOG(lazybinding) << "data=" << data << ", not found LinkerMFileInfo." << maple::endl; + return false; + } + return MRT_RequestLazyBinding(data, 0, false); +} + +bool MRT_RequestLazyBinding(const void *offset, const void *pc, bool fromSignal) { + BindingState state = + LinkerAPI::As().GetAddrBindingState(*(reinterpret_cast(offset))); + LINKER_VLOG(lazybinding) << "*offset=" << + reinterpret_cast(*(reinterpret_cast(offset))) << + ", __BindingProtectRegion__=" << reinterpret_cast(__BindingProtectRegion__) << maple::endl; + bool res = false; + LazyBinding *lazyBinding = LinkerAPI::As().Get(); + switch (state) { + case kBindingStateCinfUndef: + LINKER_VLOG(lazybinding) << "cinf undef SEGV count=" << + IncSigvLazyBindingCount(sigvLazyBindingCountCinfUndef) << maple::endl; + res = lazyBinding->HandleSymbol(offset, pc, state, fromSignal); + break; + case kBindingStateDataUndef: + LINKER_VLOG(lazybinding) << "data undef SEGV count=" << + IncSigvLazyBindingCount(sigvLazyBindingCountDataUndef) << maple::endl; + res = lazyBinding->HandleSymbol(offset, pc, state, fromSignal); + break; + case kBindingStateCinfDef: + LINKER_VLOG(lazybinding) << "cinf def SEGV count=" << + IncSigvLazyBindingCount(sigvLazyBindingCountCinfDef) << maple::endl; + res = lazyBinding->HandleSymbol(offset, pc, state, fromSignal); + break; + case kBindingStateDataDef: + LINKER_VLOG(lazybinding) << "data def SEGV count=" << + IncSigvLazyBindingCount(sigvLazyBindingCountDataDef) << maple::endl; + res = lazyBinding->HandleSymbol(offset, pc, state, fromSignal); + break; + case kBindingStateMethodUndef: + LINKER_VLOG(lazybinding) << "method undef SEGV count=" << + IncSigvLazyBindingCount(sigvLazyBindingCountMethodUndef) << maple::endl; + res = lazyBinding->HandleSymbol(offset, pc, state, fromSignal); + break; + case kBindingStateMethodDef: + LINKER_VLOG(lazybinding) << "method def SEGV count=" << + IncSigvLazyBindingCount(sigvLazyBindingCountMethodDef) << maple::endl; + res = lazyBinding->HandleSymbol(offset, pc, state, fromSignal); + break; + default: + // Handle exception here... + if (VLOG_IS_ON(lazybinding)) { + LINKER_LOG(ERROR) << "wrong state " << static_cast(state) << "! 
offset=" << offset << ", pc=" << pc << + ", __BindingProtectRegion__=" << reinterpret_cast(__BindingProtectRegion__) << maple::endl; + LinkerAPI::As().DumpStackInfoInLog(); + } + break; + } + return res; +} + +int32_t MCC_FixOffsetTableVtable(uint32_t offsetVal, char *offsetEntry) { +#ifdef LINKER_DECOUPLE + Decouple *decouple = LinkerAPI::As().Get(); + // highest bit is fix flag + const uint32_t kMplOffsetFixFlag = 0x80000000; + (void)offsetVal; // placeholder para + int32_t offsetValue = *(reinterpret_cast(offsetEntry)); + if (static_cast(offsetValue) & kMplOffsetFixFlag) { + uint32_t offsetIndex = (static_cast(offsetValue) & (~kMplOffsetFixFlag)); + LinkerOffsetValItem tmpItem; + offsetEntry -= reinterpret_cast(&tmpItem.offset) - reinterpret_cast(&tmpItem); + LinkerOffsetValItem *offsetTable = reinterpret_cast(offsetEntry) - offsetIndex; + LinkerOffsetKeyTableInfo *keyTableInfo = reinterpret_cast(offsetTable) - 1; + if (offsetIndex >= keyTableInfo->vtableOffsetTableSize) { + LINKER_LOG(FATAL) << "offsetIndex = " << offsetIndex << maple::endl; + } + return decouple->FixOffsetTableLazily(reinterpret_cast(offsetTable), offsetIndex); + } else { + return offsetValue; + } +#else + (void)offsetVal; + (void)offsetEntry; + return -1; +#endif +} + +int32_t MCC_FixOffsetTableField(uint32_t offsetVal, char *offsetEntry) { +#ifdef LINKER_DECOUPLE + Decouple *decouple = LinkerAPI::As().Get(); + // highest bit is fix flag + const uint32_t kMplOffsetFixFlag = 0x80000000; + (void)offsetVal; // placeholder para + uint32_t offsetValue = *(reinterpret_cast(offsetEntry)); + if (offsetValue & kMplOffsetFixFlag) { + uint32_t offsetIndex = (offsetValue & (~kMplOffsetFixFlag)); + LinkerOffsetValItem tmpItem; + offsetEntry -= reinterpret_cast(&tmpItem.offset) - reinterpret_cast(&tmpItem); + LinkerOffsetValItem *offsetTable = reinterpret_cast(offsetEntry) - offsetIndex; + LinkerOffsetKeyTableInfo *keyTableInfo = reinterpret_cast(offsetTable) - 1; + if (offsetIndex < keyTableInfo->vtableOffsetTableSize) { + LINKER_LOG(FATAL) << "offsetIndex = " << offsetIndex << maple::endl; + } + return decouple->FixOffsetTableLazily(reinterpret_cast(offsetTable), offsetIndex); + } else { + return static_cast(offsetValue); + } +#else + (void)offsetVal; + (void)offsetEntry; + return -1; +#endif +} + +void MRT_FixOffsetTableLazily(LinkerOffsetValItemLazyLoad &offsetEntry) { +#ifdef LINKER_DECOUPLE + Decouple *decouple = LinkerAPI::As().Get(); + // highest bit is fix flag + const uint32_t kMplOffsetFixFlag = 0x80000000; + uint32_t offsetValue = static_cast(offsetEntry.offset); + if (offsetValue & kMplOffsetFixFlag) { + uint32_t offsetIndex = (offsetValue & (~kMplOffsetFixFlag)); + (void)decouple->FixOffsetTableLazily(reinterpret_cast(&offsetEntry - offsetIndex), offsetIndex); + } +#else + (void)offsetEntry; +#endif +} + +void MRT_FixStaticAddrTable(LinkerStaticAddrItem &addrTableItem) { +#ifdef LINKER_DECOUPLE + Decouple *decouple = LinkerAPI::As().Get(); + MplStaticDecouple &staticResolver = decouple->GetStaticResolver(); + uint32_t signalIndex = static_cast(addrTableItem.index); + MplStaticAddrTabHead *staticAddrTabHead = reinterpret_cast(&addrTableItem - signalIndex) - 1; + LinkerMFileInfo *mplInfo = staticResolver.GetLinkerMFileInfoFromHead(*staticAddrTabHead); + LinkerStaticAddrItem *addrTableItems = reinterpret_cast(&addrTableItem - signalIndex); + LinkerStaticDecoupleClass *keyTableItems = mplInfo->GetTblBegin(kStaticDecoupleKey); + size_t keyTableSize = mplInfo->GetTblSize(kStaticDecoupleKey); + if (UNLIKELY(addrTableItems 
== nullptr || keyTableItems == nullptr || keyTableSize == 0)) { + LINKER_LOG(FATAL) << "Fail to get static address table from " << mplInfo->name << maple::endl; + return; + } + bool resolved = staticResolver.FixClassClinit(*keyTableItems, *addrTableItems, signalIndex, false); + LINKER_VLOG(staticdcp) << "exit. class initialized=" << resolved << maple::endl; +#else + (void)addrTableItem; +#endif +} + +bool MRT_IsLazyBindingState(const uint8_t *address) { + if (address >= __BindingProtectRegion__ && address < &__BindingProtectRegion__[kBindingStateMax]) { + return true; + } + return false; +} + +void MRT_FixStaticAddrTableLazily(LinkerStaticAddrItem &addrTableItem) { +#ifdef LINKER_DECOUPLE + Decouple *decouple = LinkerAPI::As().Get(); + MplStaticDecouple &staticResolver = decouple->GetStaticResolver(); + LINKER_VLOG(staticdcp) << "entered, addrTableItem=" << std::hex << &addrTableItem << maple::endl; + uint32_t signalIndex = static_cast(addrTableItem.index); + MplStaticAddrTabHead *staticAddrTabHead = reinterpret_cast(&addrTableItem - signalIndex) - 1; + LinkerMFileInfo *mplInfo = staticResolver.GetLinkerMFileInfoFromHead(*staticAddrTabHead); + LinkerStaticAddrItem *addrTableItems = reinterpret_cast(&addrTableItem - signalIndex); + LinkerStaticDecoupleClass *keyTableItems = mplInfo->GetTblBegin(kStaticDecoupleKey); + size_t keyTableSize = mplInfo->GetTblSize(kStaticDecoupleKey); + if (UNLIKELY(addrTableItems == nullptr || keyTableItems == nullptr || keyTableSize == 0)) { + LINKER_LOG(FATAL) << "Fail to get static address table from " << mplInfo->name << " keyTableSize=" << + keyTableSize << " addrTableItems=" << std::hex << addrTableItems << " keyTableItems=" << keyTableItems << + maple::endl; + return; + } + + int32_t index = staticResolver.GetClassInfoIndex(*addrTableItems, signalIndex); + ClassMetadata *clsCallee = staticResolver.GetClassMetadata(mplInfo, keyTableItems[index].callee); + if (UNLIKELY(clsCallee == nullptr)) { + LINKER_LOG(FATAL) << "exit: null pointer to current class" << maple::endl; + return; + } + bool isResolved = true; + if (static_cast(signalIndex) <= (index + static_cast(keyTableItems[index].fieldsNum))) { + isResolved = staticResolver.SetStaticFieldAddr(mplInfo, *clsCallee, + keyTableItems[signalIndex], addrTableItems[signalIndex]); + } else { + isResolved = staticResolver.SetStaticMethodAddr(mplInfo, *clsCallee, + keyTableItems[signalIndex], addrTableItems[signalIndex]); + } + bool initialized = staticResolver.FixClassClinit(*keyTableItems, *addrTableItems, signalIndex, true); + LINKER_VLOG(staticdcp) << "exit. isResolved = " << isResolved << " clinit = " << initialized << maple::endl; +#else + (void)addrTableItem; +#endif +} + +// init array class cache reference by compiler code +bool MRT_RequestInitArrayCache(SignalInfo *info) { + uintptr_t pc = reinterpret_cast(info->pc); + // array cache addr + uintptr_t addr = reinterpret_cast(info->offset); + delete info; + LinkerAPI::As().InitArrayCache(pc, addr); + return true; +} +#ifdef __cplusplus +} +#endif +} // namespace maple + diff --git a/src/mrt/compiler-rt/src/linker/linker_cache.cpp b/src/mrt/compiler-rt/src/linker/linker_cache.cpp new file mode 100644 index 0000000000..8056ea256f --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_cache.cpp @@ -0,0 +1,1289 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "linker/linker_cache.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "mrt_object.h" +#include "linker/linker_model.h" +#include "linker/linker_inline.h" +#include "mpl_cache_first_file.h" +namespace maplert { +#ifdef LINKER_RT_CACHE +using namespace linkerutils; +const uint64_t kCacheMagicNumber = 0xcac7e5272799710bull; +namespace { + constexpr char kLinkerLazyInvalidSoName[] = "LAZY"; + constexpr char kLinkerInvalidName[] = "X"; +} +static constexpr struct LinkerCache::CacheInfo methodUndef = { + LinkerCache::kMethodUndefIndex, + &LinkerInvoker::LookUpMethodSymbolAddress, + kIsMethodUndefHasNotResolved, + kIsMethodUndefCacheValid, + true, + true +}; +static constexpr struct LinkerCache::CacheInfo dataUndef = { + LinkerCache::kDataUndefIndex, + &LinkerInvoker::LookUpDataSymbolAddress, + kIsDataUndefHasNotResolved, + kIsDataUndefCacheValid, + false, + true +}; +static constexpr struct LinkerCache::CacheInfo methodDef = { + LinkerCache::kMethodDefIndex, + &LinkerInvoker::LookUpMethodSymbolAddress, + kIsMethodDefHasNotResolved, + kIsMethodCacheValid, + true, + false +}; +static constexpr struct LinkerCache::CacheInfo dataDef = { + LinkerCache::kDataDefIndex, + &LinkerInvoker::LookUpDataSymbolAddress, + kIsDataDefHasNotResolved, + kIsDataCacheValid, + false, + false +}; + +const MUID LinkerCache::kInvalidHash = {{{ 0 }}}; +FeatureName LinkerCache::featureName = kFLinkerCache; +void LinkerCache::SetPath(const std::string &path) { + LINKER_VLOG(mpllinker) << pCachePath << " --> " << path << maple::endl; + pCachePath = path; +} + +LinkerMFileInfo *LinkerCache::FindLinkerMFileInfo(uint16_t soid, const MFileCacheInf &inf, MplInfoStore &store) { + if (soid >= store.size()) { + store.resize(soid + 1); + } + auto res = store[soid]; + if (res == nullptr) { + res = pInvoker.GetLinkerMFileInfoByName(inf.GetName(soid)); + store[soid] = res; + } + return res; +} + +bool LinkerCache::GetPath(LinkerMFileInfo &mplInfo, std::string &path, + LinkerCacheType cacheType, LoadStateType loadState) { + if (GetFastPath(mplInfo, cacheType, path)) { + return true; + } + if (loadState == kLoadStateBoot || loadState == kLoadStateSS) { + path = pCachePath; // Use preset value from SetCachePath(). + if (path.empty()) { + if (loadState == kLoadStateBoot) { + path = kLinkerRootCachePath; + } else { // If loadState is equal to kLoadStateSS + path = kLinkerSystemCachePath; + } + } + } else if (loadState == kLoadStateApk) { + path = GetAppInfo(); + } else { + LINKER_LOG(ERROR) << "(" << static_cast(cacheType) << "), failed to prepare folder, loadState=" << + loadState << maple::endl; + return false; + } + path.append("/").append(kLinkerCacheFold).append("/"); + if (!PrepareFolder(path)) { + LINKER_LOG(ERROR) << "(" << static_cast(cacheType) << "), failed to prepare folder:" << path << + maple::endl; + return false; + } + // Use the full path in case of the same so name. + // e.g. 
+ // /system/lib64/libmaplecore-all.so + // ==> _system_lib64_libmaplecore-all.so + // + // /system/app/KeyChain/KeyChain.apk!/maple/arm64/mapleclasses.so + // ==> _system_app_KeyChain_KeyChain.apk!_maple_arm64_mapleclasses.so + std::string name = mplInfo.name; + std::replace(name.begin(), name.end(), '/', '_'); + path.append(name); + FinishPath(mplInfo, cacheType, path); + LINKER_VLOG(mpllinker) << "(" << static_cast(cacheType) << "), loadState=" << loadState << ", " << path << + maple::endl; + return true; +} + +bool LinkerCache::GetFastPath(LinkerMFileInfo &mplInfo, LinkerCacheType cacheType, std::string &path) { + if (cacheType == LinkerCacheType::kLinkerCacheLazy && mplInfo.rep.lazyCachePath.length() != 0) { + path = mplInfo.rep.lazyCachePath; + return true; + } else if (cacheType == LinkerCacheType::kLinkerCacheMethodUndef && mplInfo.rep.methodUndefCachePath.length() != 0) { + path = mplInfo.rep.methodUndefCachePath; + return true; + } else if (cacheType == LinkerCacheType::kLinkerCacheMethodDef && mplInfo.rep.methodDefCachePath.length() != 0) { + path = mplInfo.rep.methodDefCachePath; + return true; + } else if (cacheType == LinkerCacheType::kLinkerCacheDataUndef && mplInfo.rep.dataUndefCachePath.length() != 0) { + path = mplInfo.rep.dataUndefCachePath; + return true; + } else if (cacheType == LinkerCacheType::kLinkerCacheDataDef && mplInfo.rep.dataDefCachePath.length() != 0) { + path = mplInfo.rep.dataDefCachePath; + return true; + } + return false; +} + +void LinkerCache::FinishPath(LinkerMFileInfo &mplInfo, LinkerCacheType cacheType, std::string &path) { + if (cacheType == LinkerCacheType::kLinkerCacheLazy) { + path += GetLinkerCacheTypeStr(cacheType); + mplInfo.rep.lazyCachePath = path; + } else if (cacheType == LinkerCacheType::kLinkerCacheMethodUndef) { + path += GetLinkerCacheTypeStr(cacheType); + mplInfo.rep.methodUndefCachePath = path; + } else if (cacheType == LinkerCacheType::kLinkerCacheMethodDef) { + path += GetLinkerCacheTypeStr(cacheType); + mplInfo.rep.methodDefCachePath = path; + } else if (cacheType == LinkerCacheType::kLinkerCacheDataUndef) { + path += GetLinkerCacheTypeStr(cacheType); + mplInfo.rep.dataUndefCachePath = path; + } else if (cacheType == LinkerCacheType::kLinkerCacheDataDef) { + path += GetLinkerCacheTypeStr(cacheType); + mplInfo.rep.dataDefCachePath = path; + } else { + path += GetLinkerCacheTypeStr(cacheType); + } +} + +void LinkerCache::Reset() { + pCachePath.clear(); +} + +inline LinkerCacheType LinkerCache::GetLinkerCacheType(CacheIndex cacheIndex) { + switch (cacheIndex) { + case kMethodUndefIndex: + return LinkerCacheType::kLinkerCacheMethodUndef; + case kMethodDefIndex: + return LinkerCacheType::kLinkerCacheMethodDef; + case kDataUndefIndex: + return LinkerCacheType::kLinkerCacheDataUndef; + case kDataDefIndex: + return LinkerCacheType::kLinkerCacheDataDef; + }; +} + +// Load the table from FS cache if exist. +// Return true if the cache exists and is valid, or return false. 
+bool LinkerCache::LoadTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + if (LINKER_INSTALL_STATE) { + return false; + } + LinkerCacheType cacheType = GetLinkerCacheType(cacheIndex); + std::string path; + if (!GetPath(mplInfo, path, cacheType, GetLoadState())) { + LINKER_DLOG(mpllinker) << "(" << cacheIndex << "), failed to prepare folder for " << + path << ", " << errno << maple::endl; + return false; + } + if (LoadTable(mplInfo, path, cacheIndex)) { + LINKER_VLOG(mpllinker) << "(" << cacheIndex << "), runtime load cache:" << path << " OK" << maple::endl; + return true; + } + LINKER_DLOG(mpllinker) << "load runtime path:" << path << " fail" << maple::endl; + // try to load cache generated in HOTA/Install process + if (LoadInstallCache(mplInfo, cacheType)) { + LINKER_VLOG(mpllinker) << "(" << cacheIndex << "), install load cache:" << path << " OK" << maple::endl; + return true; + } + return false; +} + +bool LinkerCache::LoadTable(LinkerMFileInfo &mplInfo, const std::string &path, CacheIndex cacheIndex) { + bool res = true; + BufferSlice buf; + size_t cacheSize = 0; + void *content = LoadCache(path, cacheSize); + if (content == nullptr) { + res = false; + goto END; + } + buf = BufferSlice(reinterpret_cast(content), cacheSize); + // update before clhash and clsocnt. + UpdateProperty(mplInfo, cacheIndex); + if (!LoadFooter(buf, cacheIndex)) { + res = false; + goto END; + } + if (!LoadMeta(mplInfo, buf, cacheIndex)) { + res = false; + goto END; + } + LoadData(mplInfo, buf, cacheIndex); + if (buf.Size() != 0) { + res = false; + LINKER_LOG(ERROR) << "invalid format, still have cache dat to resolve:" << mplInfo.name << maple::endl; + } +END: + if (content != MAP_FAILED) { + munmap(content, cacheSize); + } + if (!res) { + LINKER_VLOG(mpllinker) << "(" << cacheIndex << "), to remove " << path << maple::endl; + RemoveTable(mplInfo, cacheIndex); // In case of more exceptions, from here to re-save the cache. 
+  }
+  return res;
+}
+
+void *LinkerCache::LoadCache(const std::string &path, size_t &cacheSize) {
+  struct stat sb;
+  int fd = open(path.c_str(), O_RDONLY);
+  if (fd < 0) {
+    if (errno != ENOENT && errno != EACCES) {
+      LINKER_LOG(ERROR) << "failed to open " << path << ", " << errno << maple::endl;
+    } else {
+      if (errno == EACCES) {
+        LINKER_LOG(ERROR) << "EACCES, failed to open " << path << ", " << errno << maple::endl;
+      }
+    }
+    return nullptr;
+  }
+  // shared lock for read
+  if (flock(fd, LOCK_SH) < 0) {
+    LINKER_LOG(ERROR) << "failed to flock(" << errno << ") " << path << maple::endl;
+    close(fd);
+    return nullptr;
+  }
+  if (fstat(fd, &sb) < 0) {
+    LINKER_LOG(ERROR) << "to obtain file size fail(" << errno << ") " << path << maple::endl;
+    close(fd);
+    return nullptr;
+  }
+  cacheSize = sb.st_size;
+  // {head:|MinVersion(int32)|MaxVersion(int32)|ValidityCode(MUID)|clhash(MUID)|clSoCount(uint16)|},
+  // {data:|mapSize(uint32)|}, {footer:|contentLen(uint32)|cacheType(uint8)|CacheValidity(MUID)|magicNumber(uint64)|}
+  const int minSize = sizeof(int32_t) * 2 + sizeof(MUID) * 2 + sizeof(uint16_t) + sizeof(uint32_t) +
+      sizeof(uint32_t) + sizeof(uint8_t) + sizeof(MUID) + sizeof(uint64_t);
+  if (cacheSize < minSize) {
+    LINKER_LOG(INFO) << "failed, read no data for " << path << ", " << errno << maple::endl;
+    close(fd);
+    return nullptr;
+  }
+  void *content = mmap(nullptr, cacheSize, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0);
+  if (content == MAP_FAILED) {
+    LINKER_LOG(ERROR) << "failed to mmap " << cacheSize << " (" << errno << ") for " << path << maple::endl;
+    close(fd);
+    return nullptr;
+  }
+  if (madvise(content, cacheSize, MADV_WILLNEED | MADV_SEQUENTIAL) < 0) {
+    LINKER_LOG(ERROR) << "madvise failed(" << errno << ") " << path << maple::endl;
+  }
+  close(fd);
+  return content;
+}
+
+bool LinkerCache::LoadFooter(BufferSlice &buf, CacheIndex cacheIndex) {
+  size_t size = buf.Size();
+  // 1. checking magic number.
+  size -= sizeof(kCacheMagicNumber); // 8 bytes
+  uint64_t magicNum = *(reinterpret_cast(&buf[size]));
+  if (magicNum != kCacheMagicNumber) {
+    LINKER_LOG(ERROR) << "magic number checking failed," << " wrong number:" << std::hex << magicNum << maple::endl;
+    return false;
+  }
+  // 2. checking the validity.
+  size -= sizeof(MUID);
+  MUID lastCacheValidity = *(reinterpret_cast(&buf[size]));
+  // Generate the digest for validity, excluding the length of content.
+  MUID rtCacheValidity;
+  GenerateMUID(buf.Data(), size, rtCacheValidity);
+  if (lastCacheValidity != rtCacheValidity) {
+    LINKER_LOG(ERROR) << "cache validity checking failed," << rtCacheValidity.ToStr() <<
+        " vs. " << lastCacheValidity.ToStr() << maple::endl;
+    return false;
+  }
+  // 3. Read the cache type
+  size -= sizeof(uint8_t); // 1 byte
+  uint8_t type = *(reinterpret_cast(&buf[size]));
+  uint8_t wantedType = static_cast(cacheIndex);
+  if (type != wantedType) {
+    LINKER_LOG(ERROR) << "cache index type failed," << wantedType << " vs. " << type << maple::endl;
+    return false;
+  }
+  // 4. Read the length of content.
+  size -= sizeof(uint32_t); // 4 bytes
+  uint32_t contentSize = *(reinterpret_cast(&buf[size]));
+  if (contentSize != size) {
+    LINKER_LOG(ERROR) << "cache length checking failed," << contentSize <<
+        " vs. " << size << maple::endl;
+    return false;
+  }
+  buf = Slice(buf.Data(), size);
+  return true;
+}
+
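+// A minimal standalone sketch of the read path used by LoadCache() above:
+// take a shared flock so a concurrent writer (SaveTable() takes LOCK_EX
+// before truncating) cannot rewrite the file mid-read, then map it read-only.
+// MapReadLocked is a hypothetical helper shown for illustration only; the
+// minimum-size check, madvise() hints and logging above are omitted.
+static void *MapReadLocked(const char *path, size_t &size) {
+  int fd = open(path, O_RDONLY);
+  if (fd < 0) {
+    return nullptr;
+  }
+  struct stat sb;
+  if (flock(fd, LOCK_SH) < 0 || fstat(fd, &sb) < 0 || sb.st_size == 0) {
+    close(fd);
+    return nullptr;
+  }
+  size = static_cast<size_t>(sb.st_size);
+  void *content = mmap(nullptr, size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0);
+  close(fd); // closing releases the flock; the private read-only mapping remains usable
+  return (content == MAP_FAILED) ? nullptr : content;
+}
+
+bool LinkerCache::LoadMeta(LinkerMFileInfo &mplInfo, BufferSlice &buf, CacheIndex cacheIndex) {
+  // 1. Read maximum version.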
+ int32_t maxVersion = *(reinterpret_cast(buf.Data())); + if (maxVersion != 0 && maxVersion != GetMaxVersion()) { + LINKER_LOG(ERROR) << "failed to check max version," << maxVersion << + " vs. " << GetMaxVersion() << maple::endl; + return false; + } + buf += sizeof(maxVersion); // 4 bytes + // 2. Read minimum version. + int32_t minVersion = *(reinterpret_cast(buf.Data())); + if (minVersion != 0 && minVersion != GetMinVersion()) { + LINKER_LOG(ERROR) << "failed to check min version," << minVersion << " vs. " << + GetMinVersion() << maple::endl; + return false; + } + buf += sizeof(minVersion); // 4 bytes + // 3. Read the hash from cache file, comparing with .so. + MUID lastSoValidity = *(reinterpret_cast(buf.Data())); + MUID rtSoValidity = pInvoker.GetValidityCode(mplInfo); + if (lastSoValidity != rtSoValidity) { + LINKER_LOG(ERROR) << "so validity checking failed," << rtSoValidity.ToStr() << " vs. " << + lastSoValidity.ToStr() << " in " << mplInfo.name << maple::endl; + return false; + } + buf += sizeof(lastSoValidity); // 8 bytes + // 4. Read the clhash from cache file + MUID lastClValidity = *(reinterpret_cast(buf.Data())); + MUID rtClValidity = pClHash[cacheIndex]; + if (lastClValidity != rtClValidity) { + LINKER_LOG(ERROR) << "classloader validity checking failed, " << rtClValidity.ToStr() << + " vs. " << lastClValidity.ToStr() << " in " << mplInfo.name << maple::endl; + return false; + } + buf += sizeof(lastClValidity); // 8 bytes + // 5. Read the classloader .so count. + uint16_t lastClSoCnt = *(reinterpret_cast(buf.Data())); + uint16_t rtClSoCnt = pClSoCnt[cacheIndex]; + if (lastClSoCnt != rtClSoCnt) { + LINKER_LOG(ERROR) << "classloader so count checking failed, " << rtClSoCnt << " vs. " + << lastClSoCnt << " in " << mplInfo.name << maple::endl; + return false; + } + buf += sizeof(lastClSoCnt); // 2 bytes + return true; +} + +bool LinkerCache::LoadData(LinkerMFileInfo &mplInfo, BufferSlice &buf, CacheIndex cacheIndex) { + // 1. Read the map size + uint32_t mapSize = *(reinterpret_cast(buf.Data())); + buf += sizeof(mapSize); // 4 bytes + if (mapSize == 0) { + return true; + } + // 2. Read So Name List Info. + LoadNameList(buf, cacheIndex); + // 3. Read Bucket for resolving table. + uint32_t mapBucketCnt = *(reinterpret_cast(buf.Data())); + buf += sizeof(mapBucketCnt); // 4 bytes + auto &cacheMap = *GetCacheMap(mplInfo, cacheIndex); + cacheMap.rehash(mapBucketCnt); + for (uint32_t i = 0; i < mapSize; ++i) { + // 4. Read the undef index. + uint32_t undefIndex = *(reinterpret_cast(buf.Data())); + buf += sizeof(undefIndex); + // 5. Read the addr index. + uint32_t addrIndex = *(reinterpret_cast(buf.Data())); + buf += sizeof(addrIndex); + // 6. Read the soid + uint16_t soid = *(reinterpret_cast(buf.Data())); + buf += sizeof(soid); + LinkerCacheTableItem pItem(addrIndex, soid); + cacheMap.insert(std::make_pair(undefIndex, pItem)); + } + return true; +} + +bool LinkerCache::LoadNameList(BufferSlice &buf, CacheIndex cacheIndex) { + auto &inf = pMplCacheInf[cacheIndex]; + // 6. listsize + uint32_t listSize = *reinterpret_cast(buf.Data()); + buf += sizeof(listSize); + for (uint32_t i = 0; i < listSize; ++i) { + // 7. name size + uint32_t nameSize = *reinterpret_cast(buf.Data()); + buf += sizeof(nameSize); + // 8. name content + const char *name = buf.Data(); + buf += nameSize; + // 9. so hash. 
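+    // (Each name-list record is therefore [nameSize:u32][name bytes][MUID],
+    // mirroring what SaveNameList() writes below.)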
+ const MUID *muid = reinterpret_cast(buf.Data()); + buf += sizeof(*muid); + inf.Append(std::string(name, nameSize), *muid); + } + return true; +} + +MplCacheMapT *LinkerCache::GetCacheMap(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + switch (cacheIndex) { + case kMethodUndefIndex: + return &(mplInfo.rep.methodUndefCacheMap); + case kMethodDefIndex: + return &(mplInfo.rep.methodDefCacheMap); + case kDataUndefIndex: + return &(mplInfo.rep.dataUndefCacheMap); + case kDataDefIndex: + return &(mplInfo.rep.dataDefCacheMap); + }; +} + +// Save the table into FS cache. +bool LinkerCache::SaveTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + bool res = true; + std::string path; + std::string buffer; + int fd = GetCacheFd(mplInfo, path, cacheIndex); + if (fd < 0) { + res = false; + goto END; + } + // exclusive lock for write + if (flock(fd, LOCK_EX) < 0) { + LINKER_LOG(ERROR) << "failed to flock(" << errno << ") " << path << maple::endl; + res = false; + goto END; + } + if (ftruncate(fd, 0) < 0) { + LINKER_LOG(ERROR) << "failed to ftruncate zero(" << errno << ") path:" << path << maple::endl; + res = false; + goto END; + } + UpdateProperty(mplInfo, cacheIndex); + SaveMeta(mplInfo, buffer, cacheIndex); + SaveData(mplInfo, buffer, cacheIndex); + if (!WriteTable(fd, buffer, cacheIndex)) { + res = false; + } +END: + if (!res) { + RemoveTable(mplInfo, cacheIndex); // In case of more exceptions, from here to re-save the cache. + } + return CleanSavingTable(mplInfo, fd, res); +} + +int LinkerCache::GetCacheFd(LinkerMFileInfo &mplInfo, std::string &path, CacheIndex cacheIndex) { + int fd = -1; + LinkerCacheType cacheType = GetLinkerCacheType(cacheIndex); + if (LINKER_INSTALL_STATE) { + if (mplInfo.IsFlag(kIsLazy)) { + LINKER_LOG(WARNING) << "(" << cacheIndex << "), not save install cache for lazy binding." << maple::endl; + return -1; + } + fd = maple::MplCacheFirstFile::GetFd(mplInfo.name, static_cast(cacheType)); + if (fd < 0) { + LINKER_VLOG(mpllinker) << "(" << cacheIndex << ") get invalid fd when install" << maple::endl; + return -1; + } + } else { + if (!GetPath(mplInfo, path, cacheType, GetLoadState())) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to prepare folder for " << + path << ", " << errno << maple::endl; + return -1; + } + fd = open(path.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR); + if (fd == -1) { + if (errno != EACCES) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to open " << + path << ", " << errno << maple::endl; + } else { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), EACCES, failed to open " << + path << ", " << errno << maple::endl; + } + return -1; + } + } + return fd; +} + +void LinkerCache::SaveMeta(LinkerMFileInfo &mplInfo, std::string &buffer, CacheIndex cacheIndex) { + // Prepare all the data in buffer firstly. + // 1. Write maximum version. + int32_t maxVersion = GetMaxVersion(); + buffer.append(reinterpret_cast(&maxVersion), sizeof(maxVersion)); + // 2. Write minimum version. + int32_t minVersion = GetMinVersion(); + buffer.append(reinterpret_cast(&minVersion), sizeof(minVersion)); + // 3. Write the hash from .so. + MUID soValidity = pInvoker.GetValidityCode(mplInfo); + buffer.append(reinterpret_cast(&soValidity), sizeof(soValidity)); + // 4. Write the clhash from classloader *.so + buffer.append(reinterpret_cast(&pClHash[cacheIndex]), sizeof(pClHash[cacheIndex])); + // 5. Write the classloader .so count. 
+ buffer.append(reinterpret_cast(&pClSoCnt[cacheIndex]), sizeof(pClSoCnt[cacheIndex])); +} + +bool LinkerCache::SaveNameList(std::string &buffer, CacheIndex cacheIndex) { + auto &inf = pMplCacheInf[cacheIndex]; + const auto &nameList = inf.NameList(); + // 1. Write the listsize + uint32_t listSize = static_cast(nameList.size()); + buffer.append(reinterpret_cast(&listSize), sizeof(listSize)); + for (const auto &item : nameList) { + // 2. Write the name size + const std::string &name = item.first; + uint32_t size = static_cast(name.size()); + buffer.append(reinterpret_cast(&size), sizeof(size)); + // 3. Write the name data + buffer.append(name.data(), name.size()); + // 4. Write the so hash. + const MUID &hash = item.second; + buffer.append(reinterpret_cast(&hash), sizeof(hash)); + } + return true; +} + +bool LinkerCache::SaveData(LinkerMFileInfo &mplInfo, std::string &buffer, CacheIndex cacheIndex) { + const auto &cacheMap = *GetCacheMap(mplInfo, cacheIndex); + // 1. Write the size and nBucket of map. + uint32_t mapSize = static_cast(cacheMap.size()); + buffer.append(reinterpret_cast(&mapSize), sizeof(mapSize)); + if (mapSize == 0) { + return true; + } + // 2. Write the so name list info. + SaveNameList(buffer, cacheIndex); + // 3. Write the bucket number. + uint32_t mapBucketCnt = static_cast(cacheMap.bucket_count()); + buffer.append(reinterpret_cast(&mapBucketCnt), sizeof(mapBucketCnt)); + for (auto it = cacheMap.begin(); it != cacheMap.end(); ++it) { + const LinkerCacheTableItem &tableItem = it->second; + // 4. Write the undef index. + uint32_t undefIndex = it->first; + buffer.append(reinterpret_cast(&undefIndex), sizeof(undefIndex)); + // 5. Write the addr index. + uint32_t addrIndex = tableItem.AddrId(); + buffer.append(reinterpret_cast(&addrIndex), sizeof(addrIndex)); + // 6. Write the so index. + uint16_t soid = tableItem.SoId(); + buffer.append(reinterpret_cast(&soid), sizeof(soid)); + } + return true; +} + +bool LinkerCache::WriteTable(int fd, std::string &buffer, CacheIndex cacheIndex) { + // 1. Append content data + uint32_t bufferSize = static_cast(buffer.size()); + buffer.append(reinterpret_cast(&bufferSize), sizeof(bufferSize)); + // 2. Append cache index type. + uint8_t type = static_cast(cacheIndex); + buffer.append(reinterpret_cast(&type), sizeof(type)); + // 2. Append validity crc. + MUID cacheValidity; + GenerateMUID(buffer.data(), buffer.size(), cacheValidity); + buffer.append(reinterpret_cast(&cacheValidity), sizeof(cacheValidity)); + // 3. Append magic number. + buffer.append(reinterpret_cast(&kCacheMagicNumber), sizeof(kCacheMagicNumber)); + // Write for all cache data. + if (write(fd, buffer.data(), buffer.size()) < 0) { + LINKER_LOG(ERROR) << "failed to write cache content," << errno << maple::endl; + return false; + } + return true; +} + +bool LinkerCache::CleanSavingTable(LinkerMFileInfo &mplInfo, int fd, bool res) { +#ifdef LINKER_DECOUPLE_CACHE + // chmod from 600 to 644. When installing/updating app and generating decouple cache, need to + // read these cache as OTHER + if (res) { + if (fchmod(fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) < 0) { + LINKER_LOG(ERROR) << "failed to chmod cache," << errno << " name:" << mplInfo.name << maple::endl; + res = false; + } + } +#endif + if (!LINKER_INSTALL_STATE) { + close(fd); + } + return res; +} + +// For debug only. 
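+// For reference, SaveTable() serializes (in order): SaveMeta() -> max version,
+// min version, .so validity MUID, classloader validity MUID and classloader
+// .so count; SaveData() -> map size, the .so name list, the bucket count and
+// one {undefIndex, addrIndex, soid} record per entry; WriteTable() then
+// appends the content length, the cache index type, a validity MUID computed
+// over the buffer and the magic number. DumpTable() below renders its view of
+// such a cache file into a human-readable <path>.dump for debugging.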
+bool LinkerCache::DumpTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + bool res = false; + std::ifstream in; + std::ofstream out; + BufferSlice buf; + std::stringstream ss; + if (!InitDump(mplInfo, in, out, cacheIndex)) { + goto END; + } + // Read all the data in buffer firstly. + in.seekg(0, std::ios::end); + buf = Slice(new char[in.tellg()], in.tellg()); + in.seekg(0, std::ios::beg); + if (!in.read(buf.Data(), buf.Size())) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to read all data," << errno << maple::endl; + return false; + } + if (!DumpMetaValidity(ss, buf) || !DumpMetaVersion(mplInfo, ss, buf) || !DumpMetaCl(ss, buf, cacheIndex)) { + goto END; + } + for (uint32_t type = 0; type < 2; ++type) { // 2 types is undef:0 or def:1 + if (type == 0) { + ss << "\n===== UNDEF =====\n"; + } else { + ss << "\n===== DEF =====\n"; + } + auto mapSize = DumpMap(ss, buf); + if (mapSize < 0) { + goto END; + } else if (mapSize == 0) { + continue; + } + if (!DumpData(ss, buf, mapSize)) { + goto END; + } + } + if (!DumpFile(out, ss)) { + goto END; + } + res = true; +END: + in.close(); + out.close(); + if (!buf.Empty()) { + delete []buf.Data(); + } + return res; +} + +bool LinkerCache::InitDump(LinkerMFileInfo &mplInfo, std::ifstream &in, std::ofstream &out, CacheIndex cacheIndex) { + LinkerCacheType cacheType = GetLinkerCacheType(cacheIndex); + std::string path; + if (!GetPath(mplInfo, path, cacheType, GetLoadState())) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to prepare folder for " << + path << ", " << errno << maple::endl; + return false; + } + in.open(path, std::ios::binary); + bool ifStream = (!in); + if (ifStream) { + if (errno != ENOENT && errno != EACCES) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to open " << path << ", " << errno << maple::endl; + } + return false; + } + std::string dump = path + ".dump"; + out.open(dump, std::ios::trunc); + bool ofstream = (!out); + if (ofstream) { + if (errno != EACCES) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to open " << dump << ", " << errno << maple::endl; + } + return false; + } + return true; +} + +bool LinkerCache::DumpMetaValidity(std::stringstream &ss, BufferSlice &buf) { + if (buf.Size() < 50) { // 50 is count all sizeof. + LINKER_LOG(ERROR) << "failed, read no data," << errno << maple::endl; + return false; + } + // 0. Read EOF. + uint32_t eof = *(reinterpret_cast(&buf[buf.Size() - sizeof(eof)])); + if (eof != static_cast(EOF)) { + LINKER_LOG(ERROR) << "cache EOF checking failed, eof=" << std::hex << eof << maple::endl; + return false; + } + // 1. Read the validity. + MUID lastCacheValidity = *(reinterpret_cast(buf.Data())); + buf += sizeof(lastCacheValidity); + // 2. Read the length of content. + uint32_t contentSize = *(reinterpret_cast(buf.Data())); + buf += sizeof(contentSize); + size_t size = buf.Size() - sizeof(eof); + ss << "LEN:\t" << contentSize << "\n"; + ss << "\t" << contentSize << " vs. " << static_cast(size) << "\n"; + if (contentSize != static_cast(size)) { + LINKER_LOG(ERROR) << "cache length checking failed," << contentSize << " vs. " << + static_cast(size) << maple::endl; + ss << "\t[FAILED]" << "\n"; + return false; + } + ss << "VALIDITY:\t" << lastCacheValidity.ToStr() << "\n"; + // Generate the digest for validity, excluding the length of content. + MUID cacheValidity; + GenerateMUID(buf.Data(), size, cacheValidity); + ss << "\t" << lastCacheValidity.ToStr() << " vs. 
" << cacheValidity.ToStr() << "\n"; + if (lastCacheValidity != cacheValidity) { + LINKER_LOG(ERROR) << "cache validity checking failed" << maple::endl; + ss << "\t[FAILED]" << "\n"; + return false; + } + return true; +} + +bool LinkerCache::DumpMetaVersion(LinkerMFileInfo &mplInfo, std::stringstream &ss, BufferSlice &buf) { + // 3. Read maximum version. + int32_t maxVersion = *(reinterpret_cast(buf.Data())); + ss << "MAX:\t" << maxVersion << "\n"; + ss << "\t" << maxVersion << " vs. " << GetMaxVersion() << "\n"; + if (maxVersion != GetMaxVersion()) { + LINKER_LOG(ERROR) << "failed to check max version for " << maxVersion << " vs. " << GetMaxVersion() << maple::endl; + ss << "\t[FAILED]" << "\n"; + return false; + } + buf += sizeof(maxVersion); + // 4. Read minimum version. + int32_t minVersion = *(reinterpret_cast(buf.Data())); + ss << "MIN:\t" << maxVersion << "\n"; + ss << "\t" << minVersion << " vs. " << GetMinVersion() << "\n"; + if (minVersion != GetMinVersion()) { + LINKER_LOG(ERROR) << "failed to check min version for " << minVersion << " vs. " << GetMinVersion() << maple::endl; + ss << "\t[FAILED]" << "\n"; + return false; + } + buf += sizeof(minVersion); + // 5. Read the hash from cache file, comparing with .so. + MUID lastSoValidity = *(reinterpret_cast(buf.Data())); + MUID soValidity = pInvoker.GetValidityCode(mplInfo); + ss << "SO_HASH:\t" << lastSoValidity.ToStr() << "\n"; + ss << "\t" << lastSoValidity.ToStr() << " vs. " << soValidity.ToStr() << "\n"; + if (lastSoValidity != soValidity) { + LINKER_LOG(ERROR) << "so validity checking failed, " << soValidity.ToStr() << + " vs. " << lastSoValidity.ToStr() << " in " << mplInfo.name << maple::endl; + ss << "\t[FAILED]" << "\n"; + return false; + } + buf += sizeof(lastSoValidity); + return true; +} + +bool LinkerCache::DumpMetaCl(std::stringstream &ss, BufferSlice &buf, CacheIndex cacheIndex) { + // 6. Read the hash from classloader. + MUID lastClValidity = *(reinterpret_cast(buf.Data())); + MUID rtClValidity = pClHash[cacheIndex]; + ss << "CL_HASH:\t" << lastClValidity.ToStr() << "\n"; + ss << "\t" << lastClValidity.ToStr() << " vs. " << rtClValidity.ToStr() << "\n"; + if (lastClValidity != rtClValidity) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), classloader validity checking failed, " << + rtClValidity.ToStr() << " vs. " << lastClValidity.ToStr(); + ss << "\t[FAILED]" << "\n"; + return false; + } + buf += sizeof(lastClValidity); + // 7. Read the .so count. + uint16_t lastClSoCnt = *(reinterpret_cast(buf.Data())); + uint16_t rtClSoCnt = pClSoCnt[cacheIndex]; + ss << "\tCL_nSO:\t" << lastClSoCnt << "\n"; + ss << "\t" << lastClSoCnt << " vs. " << rtClSoCnt << "\n"; + if (lastClSoCnt != rtClSoCnt) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), classloader so cnt checking failed, " << + rtClSoCnt << " vs. " << lastClSoCnt; + ss << "\t[FAILED]" << "\n"; + return false; + } + buf += sizeof(uint16_t); + return true; +} + +bool LinkerCache::DumpData(std::stringstream &ss, BufferSlice &buf, size_t mapSize) { + std::vector> soList; + for (size_t i = 0; i < mapSize; ++i) { + MUID hash; + uint16_t arrayIndex = 0; + // 9. Read the undef index. + uint32_t undefIndex = *(reinterpret_cast(buf.Data())); + ss << "\t*UNDEF_INX:\t" << undefIndex << "\n"; + buf += sizeof(undefIndex); + // 10. Read the name length firstly. 
+ uint16_t len = *(reinterpret_cast(buf.Data())); + ss << "\tLEN_FLAG:\t" << len << "\n"; + if (len > PATH_MAX) { + LINKER_LOG(ERROR) << "failed, length is too long:" << len << maple::endl; + return false; + } + buf += sizeof(len); + // 11. Read the name by its length. + if (len == 0) { // Name index. + arrayIndex = *(reinterpret_cast(buf.Data())); + buf += sizeof(arrayIndex); + std::pair pair = soList[arrayIndex]; + ss << "\tNAME:\t" << pair.first << "\n" << "\tHASH:\t" << pair.second.ToStr() << "\n"; + } else { // Name string, including INVALID_NAME. + std::string text(buf.Data(), len); + buf += len; + if (len == kLinkerInvalidNameLen) { + LINKER_LOG(ERROR) << "has not resolved symbol" << maple::endl; + ss << "\tNAME:\tX" << "\n" << "\tHASH:\t(nul)" << "\n"; + } else { + // 12. Read the hash code of .so + hash = *(reinterpret_cast(buf.Data())); + buf += sizeof(hash); + ss << "\tNAME:\t" << text << "\n" << "\tHASH:\t" << hash.ToStr() << "\n"; + auto soListIt = std::find_if (soList.begin(), soList.end(), + [&text](const std::pair &item){ return item.first == text; }); + if (soListIt == soList.end()) { + soList.push_back(std::make_pair(text, hash)); + } else { + LINKER_LOG(ERROR) << "bad format, has multiple name string" << maple::endl; + ss << "\t[FAILED]" << "\n"; + return false; + } + } + } + // 13. Read the index. + uint32_t idx = *(reinterpret_cast(buf.Data())); + ss << "\t*INDEX:\t" << idx << "\n"; + buf += sizeof(uint32_t); + } + return true; +} + +int LinkerCache::DumpMap(std::stringstream &ss, BufferSlice &buf) { + // 8. Read the map size and nBucket for resolving table. + uint32_t mapSize = *(reinterpret_cast(buf.Data())); + ss << "MAP_SIZE:\t" << mapSize << "\n"; + ss << "\t" << mapSize << " vs. 0" << "\n"; + if (mapSize == 0) { + ss << "\t[SUCC]" << "\n"; + return 0; + } + buf += sizeof(mapSize); + uint32_t mapBucketCnt = *(reinterpret_cast(buf.Data())); + ss << "nBUCKET:\t" << mapBucketCnt << "\n"; + ss << "\t" << mapBucketCnt << " vs. 0" << "\n"; + if (mapBucketCnt == 0) { + LINKER_LOG(ERROR) << "map nBucket is 0" << maple::endl; + ss << "\t[FAILED]" << "\n"; + return -1; + } + buf += sizeof(mapBucketCnt); + return mapSize; +} + +bool LinkerCache::DumpFile(std::ofstream &out, std::stringstream &ss) { + // 14. Dump the content. + std::string content = ss.str(); + if (!out.write(content.data(), content.length())) { + LINKER_LOG(ERROR) << "failed to write cache content:" << errno << maple::endl; + return false; + } + return true; +} + +// Remove FS cache. +bool LinkerCache::RemoveTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + LinkerCacheType cacheType = GetLinkerCacheType(cacheIndex); + std::string path; + if (!GetPath(mplInfo, path, cacheType, GetLoadState())) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to prepare folder for " << + path << ", " << errno << maple::endl; + return false; + } + if (std::remove(path.c_str())) { + if (errno != ENOENT) { + LINKER_LOG(ERROR) << "(" << cacheIndex << "), failed to remove " << path << ", " << errno << maple::endl; + } + return false; + } + LINKER_LOG(INFO) << "(" << cacheIndex << "), remove " << path << " successfully" << maple::endl; + return true; +} + +// Release the memory allocated. +// Notice: MUST only invoke ONCE after BOTH def&undef finished! 
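+// clear() alone would keep the hash map's bucket storage alive, so each case
+// below also swaps the member with a freshly constructed MplCacheMapT to give
+// the memory back, and drops the corresponding *CacheValid flag.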
+void LinkerCache::FreeTable(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + switch (cacheIndex) { + case kMethodUndefIndex: + mplInfo.rep.methodUndefCacheMap.clear(); + MplCacheMapT().swap(mplInfo.rep.methodUndefCacheMap); + mplInfo.SetFlag(kIsMethodUndefCacheValid, false); + break; + case kMethodDefIndex: + mplInfo.rep.methodDefCacheMap.clear(); + MplCacheMapT().swap(mplInfo.rep.methodDefCacheMap); + mplInfo.SetFlag(kIsMethodCacheValid, false); + break; + case kDataUndefIndex: + mplInfo.rep.dataUndefCacheMap.clear(); + MplCacheMapT().swap(mplInfo.rep.dataUndefCacheMap); + mplInfo.SetFlag(kIsDataUndefCacheValid, false); + break; + case kDataDefIndex: + mplInfo.rep.dataDefCacheMap.clear(); + MplCacheMapT().swap(mplInfo.rep.dataDefCacheMap); + mplInfo.SetFlag(kIsDataCacheValid, false); + break; + }; +} + +static constexpr uint32_t kLinkerMaxCacheNumPerType = 10; +bool LinkerCache::LoadInstallCache(LinkerMFileInfo &mplInfo, LinkerCacheType cacheType) { + std::string path; + if (!GetInstallCachePath(mplInfo, path, cacheType)) { + return false; + } + path += '.'; + for (uint32_t i = 0; i < kLinkerMaxCacheNumPerType; ++i) { + std::string file = path + static_cast('0' + i); + LINKER_DLOG(mpllinker) << "name:" << mplInfo.name << " get file:" << file << maple::endl; + if (access(file.c_str(), R_OK) != 0) { + LINKER_VLOG(mpllinker) << "name:" << mplInfo.name << " access file:" << file << " fail" << maple::endl; + break; + } + + bool ret = false; + switch (cacheType) { + case LinkerCacheType::kLinkerCacheMethodUndef: + ret = LoadTable(mplInfo, file, kMethodUndefIndex); + break; + case LinkerCacheType::kLinkerCacheMethodDef: + ret = LoadTable(mplInfo, file, kMethodDefIndex); + break; + case LinkerCacheType::kLinkerCacheDataUndef: + ret = LoadTable(mplInfo, file, kDataUndefIndex); + break; + case LinkerCacheType::kLinkerCacheDataDef: + ret = LoadTable(mplInfo, file, kDataDefIndex); + break; + default: + break; + } + if (ret) { + return true; + } + } + return false; +} + +void LinkerCache::UpdateProperty(LinkerMFileInfo &mplInfo, CacheIndex cacheIndex) { + LinkerMFileInfoListT mplFileList; + pInvoker.GetLinkerMFileInfos(mplInfo, mplFileList); + pClHash[cacheIndex] = mplFileList.Hash(false); + pClSoCnt[cacheIndex] = mplFileList.Size(); +} + +void LinkerCache::ResolveMethodSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice) { + bool saveRtCache = false; + if (mplInfo.rep.methodUndefCacheSize == -1) { + mplInfo.SetFlag(kIsMethodUndefCacheValid, LoadTable(mplInfo, kMethodUndefIndex)); + mplInfo.rep.methodUndefCacheSize = mplInfo.rep.methodUndefCacheMap.size(); + if (mplInfo.rep.methodUndefCacheMap.size() < addrSlice.Size()) { + mplInfo.SetFlag(kIsMethodUndefCacheValid, false); + } + } + if (mplInfo.IsFlag(kIsMethodUndefCacheValid)) { + saveRtCache = ProcessTable(mplInfo, addrSlice, muidSlice, methodUndef); + } else { + saveRtCache = LookUpUndefSymbol(mplInfo, addrSlice, muidSlice, methodUndef); + } + if (saveRtCache) { + SaveTable(mplInfo, kMethodUndefIndex); + } +} + +void LinkerCache::FreeMethodUndefTable(LinkerMFileInfo &mplInfo) { + FreeTable(mplInfo, kMethodUndefIndex); +} + +bool LinkerCache::LookUpDefAddr(LinkerMFileInfo &mplInfo, const MUID &muid, LinkerOffsetType &addr, + LinkerCacheTableItem &pItem, CacheInfo cacheInfo) { + LinkerMFileInfo *resInfo = nullptr; + LinkerOffsetType resAddr = 0; + size_t index = 0; + if (!pInvoker.ForEachLookUp(muid, &pInvoker, cacheInfo.lookUpSymbolAddress, mplInfo, + &resInfo, index, resAddr)) { + LINKER_LOG(ERROR) << "[RT/binary 
searching] failed to relocate MUID=" << + muid.ToStr() << " in " << mplInfo.name << maple::endl; + mplInfo.SetFlag(cacheInfo.notResolved, true); + } else { + addr = resAddr; + auto &inf = pMplCacheInf[cacheInfo.cacheIndex]; + uint16_t soid = inf.Append(resInfo->name, resInfo->hash); + pItem.SetIds(index, soid).SetFilled(); + return true; + } + return false; +} + +bool LinkerCache::LookUpDefSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice, + CacheInfo cacheInfo) { + auto &inf = pMplCacheInf[cacheInfo.cacheIndex]; + bool saveRtCache = false; + std::mutex mtx; + auto lookFunc = [&](size_t begSize, size_t endSize) { + LinkerOffsetType resAddr = 0; + LinkerMFileInfo *resInfo = nullptr; + size_t index = 0; + // Start binary search def from here. + for (size_t i = begSize; i < endSize; ++i) { + if (!pInvoker.ForEachLookUp(muidSlice[i].muid, &pInvoker, cacheInfo.lookUpSymbolAddress, mplInfo, + &resInfo, index, resAddr)) { // Not found + // Never reach here. + LINKER_LOG(ERROR) << "failed to relocate MUID=" << muidSlice[i].muid.ToStr() << + " in " << mplInfo.name << ", resAddr=" << resAddr << maple::endl; + mplInfo.SetFlag(cacheInfo.notResolved, true); + } else { // Found + if (mplInfo.name != resInfo->name) { // It's not in self SO. + std::lock_guard lck(mtx); + addrSlice[i].addr = resAddr; + uint16_t soid = inf.Append(resInfo->name, resInfo->hash); + LinkerCacheTableItem pItem(static_cast(index), soid); + pItem.SetFilled(); + auto &cacheMap = *GetCacheMap(mplInfo, cacheInfo.cacheIndex); + cacheMap[static_cast(i)] = pItem; + } + saveRtCache = true; + } + } + }; + const int threadNumber = 4; // half of the cpu number of phone. + ParallelLookUp(lookFunc, threadNumber, addrSlice.Size()); + mplInfo.SetFlag(cacheInfo.cacheValid, true); + return saveRtCache; +} + +template +void LinkerCache::ParallelLookUp(F const &lookFunc, int numThreads, size_t defSize) { + size_t blockStart; + size_t blockEnd; + size_t blockSize = defSize / numThreads; + std::vector threads; + for (int i = 0; i < numThreads - 1; ++i) { + blockStart = i * blockSize; + blockEnd = (i + 1) * blockSize; + threads.push_back(std::thread(lookFunc, blockStart, blockEnd)); + } + lookFunc((numThreads - 1) * blockSize, defSize); + for (auto &thr : threads) { + thr.join(); + } +} + +void LinkerCache::RelocateMethodSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice) { + bool saveRtCache = false; + if (mplInfo.rep.methodDefCacheSize == -1) { + mplInfo.SetFlag(kIsMethodCacheValid, LoadTable(mplInfo, kMethodDefIndex)); + mplInfo.rep.methodDefCacheSize = mplInfo.rep.methodDefCacheMap.size(); + } + if (mplInfo.IsFlag(kIsMethodCacheValid)) { + saveRtCache = ProcessTable(mplInfo, addrSlice, muidSlice, methodDef); + } else { + saveRtCache = LookUpDefSymbol(mplInfo, addrSlice, muidSlice, methodDef); + } + if (saveRtCache) { + SaveTable(mplInfo, kMethodDefIndex); + } +} + +void LinkerCache::FreeMethodDefTable(LinkerMFileInfo &mplInfo) { + FreeTable(mplInfo, kMethodDefIndex); +} + +bool LinkerCache::ProcessTable(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice, + CacheInfo cacheInfo) { + auto &inf = pMplCacheInf[cacheInfo.cacheIndex]; + auto &store = pMplStore[cacheInfo.cacheIndex]; + bool saveRtCache = false; + auto &cacheMap = *GetCacheMap(mplInfo, cacheInfo.cacheIndex); + for (auto it = cacheMap.begin(); it != cacheMap.end(); ++it) { + bool noRtCache = false; + uint32_t i = it->first; + LinkerCacheTableItem &pItem = it->second; + if (pItem.Filled()) { // Already done + continue; + } 
else if (pItem.LazyInvalidName() && cacheInfo.isUndef) { + continue; // Ignore the symbols not found from system(Boot class loader). + } else if (pItem.Valid()) { + LinkerMFileInfo *res = FindLinkerMFileInfo(pItem.SoId(), inf, store); + if (res != nullptr) { + // We suppose that dex(.so) list is always changed with increment in runtime. + // If so, we use dex(.so) list hash and its count to check the cache validity. + const MUID &hash = inf.GetHash(pItem.SoId()); + if (hash == res->hash) { + addrSlice[i].addr = GetAddr(*res, pItem, cacheInfo.isMethod); + pItem.SetFilled(); + } else { + noRtCache = true; + if (VLOG_IS_ON(mpllinker)) { + LINKER_VLOG(mpllinker) << "runtime cache is invalid(expired) {" << hash.ToStr() << " vs. " << + res->hash.ToStr() << "} for " << mplInfo.name << "->" << res->name << maple::endl; + } + } + } else { + mplInfo.SetFlag(cacheInfo.notResolved, true); // noRtCache is true; // Wait for next loading so. + } + } else if (cacheInfo.isUndef) { + noRtCache = true; + } else { + // Never reach here!! + LINKER_LOG(ERROR) << "no runtime cache so found? for " << mplInfo.name << maple::endl; + } + // If no runtime cached one found, start binary searching. + if (cacheInfo.isUndef) { + if (noRtCache && LookUpUndefAddr(mplInfo, muidSlice[i].muid, addrSlice[i].addr, pItem, dataUndef)) { + saveRtCache = true; + } + } else { + if (noRtCache && LookUpDefAddr(mplInfo, muidSlice[i].muid, addrSlice[i].addr, pItem, dataUndef)) { + saveRtCache = true; + } + } + } + return saveRtCache; +} + +bool LinkerCache::LookUpUndefAddr(LinkerMFileInfo &mplInfo, const MUID &muid, + LinkerOffsetType &addr, LinkerCacheTableItem &pItem, CacheInfo cacheInfo) { + auto &inf = pMplCacheInf[cacheInfo.cacheIndex]; + LinkerMFileInfo *resInfo = nullptr; + size_t index = 0; + LinkerOffsetType resAddr = 0; + if (!pInvoker.ForEachLookUp(muid, &pInvoker, + cacheInfo.lookUpSymbolAddress, mplInfo, &resInfo, index, resAddr)) { // Not found + // Ignore the symbols not found from system(Boot class loader) for lazy binding. + if (!mplInfo.IsFlag(kIsLazy)) { + mplInfo.SetFlag(cacheInfo.notResolved, true); + } + } else { // Found + // Ignore the symbols found not from system(Boot class loader). + if (mplInfo.IsFlag(kIsLazy) && !resInfo->IsFlag(kIsBoot) && + !pInvoker.IsSystemClassLoader(reinterpret_cast(resInfo->classLoader))) { + uint16_t soid = inf.Append(kLinkerLazyInvalidSoName, kInvalidHash); + pItem.SetLazyInvalidSoId(soid).SetFilled(); + } else { + addr = resAddr; + uint16_t soid = inf.Append(resInfo->name, resInfo->hash); + pItem.SetIds(index, soid).SetFilled(); + } + return true; + } + return false; +} + +bool LinkerCache::UndefSymbolFailHandler(LinkerMFileInfo &mplInfo, uint32_t idx, CacheInfo cacheInfo) { + auto &inf = pMplCacheInf[cacheInfo.cacheIndex]; + LinkerCacheTableItem pItem; + // Ignore the symbols not found from system(Boot class loader). 
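+  // For lazy binding we record a filled placeholder entry
+  // (kLinkerLazyInvalidSoName) so later passes skip the symbol silently;
+  // otherwise the item is only marked invalid and the not-resolved flag is
+  // raised, so resolution can be retried when another .so is loaded.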
+ if (mplInfo.IsFlag(kIsLazy)) { + uint16_t soid = inf.Append(kLinkerLazyInvalidSoName, kInvalidHash); + pItem.SetLazyInvalidSoId(soid).SetFilled(); + } else { + uint16_t soid = inf.Append(kLinkerInvalidName, kInvalidHash); + pItem.SetInvalidSoId(soid); + } + auto &cacheMap = *GetCacheMap(mplInfo, cacheInfo.cacheIndex); + cacheMap.insert(std::make_pair(idx, pItem)); + if (!mplInfo.IsFlag(kIsLazy)) { + mplInfo.SetFlag(cacheInfo.notResolved, true); + } + return true; +} + +bool LinkerCache::LookUpUndefSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice, + CacheInfo cacheInfo) { + auto &inf = pMplCacheInf[cacheInfo.cacheIndex]; + bool saveRtCache = false; + // Start binary search from here. + for (size_t i = 0; i < addrSlice.Size(); ++i) { + // To check pTable[i].addr == 0 later? + LinkerMFileInfo *resInfo = nullptr; + size_t index = 0; + LinkerOffsetType resAddr = 0; + if (!pInvoker.ForEachLookUp(muidSlice[i].muid, &pInvoker, + cacheInfo.lookUpSymbolAddress, mplInfo, &resInfo, index, resAddr)) { // Not found + saveRtCache = UndefSymbolFailHandler(mplInfo, static_cast(i), cacheInfo); + } else { // Found + LinkerCacheTableItem pItem; + // Ignore the symbols found not from system(Boot class loader). + if (mplInfo.IsFlag(kIsLazy) && !resInfo->IsFlag(kIsBoot) && + !pInvoker.IsSystemClassLoader(reinterpret_cast(resInfo->classLoader))) { + uint16_t soid = inf.Append(kLinkerLazyInvalidSoName, kInvalidHash); + pItem.SetLazyInvalidSoId(soid).SetFilled(); + } else { + addrSlice[i].addr = resAddr; + uint16_t soid = inf.Append(resInfo->name, resInfo->hash); + pItem.SetIds(index, soid).SetFilled(); + } + auto &cacheMap = *GetCacheMap(mplInfo, cacheInfo.cacheIndex); + cacheMap.insert(std::make_pair(i, pItem)); + saveRtCache = true; + } + } + mplInfo.SetFlag(cacheInfo.cacheValid, true); + return saveRtCache; +} + +void LinkerCache::ResolveDataSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice) { + bool saveRtCache = false; + if (mplInfo.rep.dataUndefCacheSize == -1) { + mplInfo.SetFlag(kIsDataUndefCacheValid, LoadTable(mplInfo, kDataUndefIndex)); + mplInfo.rep.dataUndefCacheSize = mplInfo.rep.dataUndefCacheMap.size(); + if (mplInfo.rep.dataUndefCacheMap.size() < addrSlice.Size()) { + mplInfo.SetFlag(kIsDataUndefCacheValid, false); + } + } + if (mplInfo.IsFlag(kIsDataUndefCacheValid)) { + saveRtCache = ProcessTable(mplInfo, addrSlice, muidSlice, dataUndef); + } else { + saveRtCache = LookUpUndefSymbol(mplInfo, addrSlice, muidSlice, dataUndef); + } + if (saveRtCache) { + SaveTable(mplInfo, kDataUndefIndex); + } +} + +void LinkerCache::FreeDataUndefTable(LinkerMFileInfo &mplInfo) { + FreeTable(mplInfo, kDataUndefIndex); +} + +void LinkerCache::RelocateDataSymbol(LinkerMFileInfo &mplInfo, AddrSlice &addrSlice, MuidSlice &muidSlice) { + bool saveRtCache = false; + if (mplInfo.rep.dataDefCacheSize == -1) { + mplInfo.SetFlag(kIsDataCacheValid, LoadTable(mplInfo, kDataDefIndex)); + mplInfo.rep.dataDefCacheSize = mplInfo.rep.dataDefCacheMap.size(); + } + if (mplInfo.IsFlag(kIsDataCacheValid)) { + saveRtCache = ProcessTable(mplInfo, addrSlice, muidSlice, dataDef); + } else { + saveRtCache = LookUpDefSymbol(mplInfo, addrSlice, muidSlice, dataDef); + } + if (saveRtCache) { + SaveTable(mplInfo, kDataDefIndex); + } +} + +void LinkerCache::FreeDataDefTable(LinkerMFileInfo &mplInfo) { + FreeTable(mplInfo, kDataDefIndex); +} + +LinkerOffsetType LinkerCache::GetAddr(LinkerMFileInfo &res, LinkerCacheTableItem &pItem, bool isMethod) { + if (isMethod) { +#ifdef USE_32BIT_REF + 
return pInvoker.AddrToUint32(pInvoker.GetMethodSymbolAddress(res, pItem.AddrId())); +#else + return reinterpret_cast(pInvoker.GetMethodSymbolAddress(res, pItem.AddrId())); +#endif // USE_32BIT_REF + } else { +#ifdef USE_32BIT_REF + return pInvoker.AddrToUint32(pInvoker.GetDataSymbolAddress(res, pItem.AddrId())); +#else + return reinterpret_cast(pInvoker.GetDataSymbolAddress(res, pItem.AddrId())); +#endif // USE_32BIT_REF + } +} + +#endif +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_debug.cpp b/src/mrt/compiler-rt/src/linker/linker_debug.cpp new file mode 100644 index 0000000000..84e4784d9f --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_debug.cpp @@ -0,0 +1,428 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "linker/linker_debug.h" + +#include "linker/linker_model.h" +#include "linker/linker_inline.h" +#include "file_layout.h" +#include "collector/cp_generator.h" +#include "interp_support.h" +using namespace maple; + +namespace maplert { +using namespace linkerutils; +FeatureName Debug::featureName = kFDebug; +// Dump all the section info of all loaded maple-so +void Debug::DumpAllMplSectionInfo(std::ostream &os) { + uint64_t sumMethodsHotSize = 0; + uint64_t sumMethodsColdSize = 0; + uint64_t sumMethodsCompactSize = 0; + uint64_t sumFieldsHotSize = 0; + uint64_t sumFieldsColdSize = 0; + uint64_t sumFieldsCompactSize = 0; + uint64_t sumVtbHotSize = 0; + uint64_t sumVtbColdSize = 0; + uint64_t sumItbHotSize = 0; + uint64_t sumItbColdSize = 0; + uint64_t sumSuperClassHotSize = 0; + uint64_t sumSuperClassColdSize = 0; + uint64_t sumClassMeta = 0; + uint64_t sumHotLiteralSize = 0; + uint64_t sumColdLiteralSize = 0; + uint64_t sumEhframeSize = 0; + uint64_t sumReflectionStrSize = 0; + uint64_t sumMuidTabSize = 0; + void *symAddr = nullptr; + auto handle = [&, this](LinkerMFileInfo &mplInfo) { + os << mplInfo.name << ":\n"; + void *javaStartAddr = mplInfo.GetTblBegin(kJavaText); + void *javaEndAddr = reinterpret_cast( + reinterpret_cast(javaStartAddr) + mplInfo.GetTblSize(kJavaText)); + os << "\tjava-text-start:" << std::hex << javaStartAddr << "\n"; + os << "\tjava-text-end:" << std::hex << javaEndAddr << "\n"; + os << "\tstrtab-start:" << std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetStrTabBegin", true) << "\n"; + os << "\tstrtab-end:" << std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetStrTabEnd", true) << "\n"; + os << "\tjnitab-start:" << std::hex << GetSymbolAddr(mplInfo.handle, "__MRT_GetJNITable_Begin", true) << "\n"; + os << "\tjnitab-end:" << std::hex << GetSymbolAddr(mplInfo.handle, "__MRT_GetJNITable_End", true) << "\n"; + os << "\tjnifunctab-start:" << std::hex << GetSymbolAddr(mplInfo.handle, "__MRT_GetJNIFuncTable_Begin", true) << + "\n"; + os << "\tjnifunctab-end:" << std::hex << GetSymbolAddr(mplInfo.handle, "__MRT_GetJNIFuncTable_End", true) << "\n"; + os << "\trangetab-start:" << std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetRangeTableBegin", true) << "\n"; + os << "\trangetab-end:" << 
std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetRangeTableEnd", true) << "\n"; + + os << "\treflectColdStr_begin:" << std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetColdStrTabBegin", true) << + "\n"; + uint64_t reflectionColdStrSize = + static_cast(static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetColdStrTabEnd", true)) - + static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetColdStrTabBegin", true))); + os << "\treflectStartHotStr_begin:" << std::hex << + GetSymbolAddr(mplInfo.handle, "MRT_GetStartHotStrTabBegin", true) << "\n"; + uint64_t reflectionStartHotStrSize = + static_cast(static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetStartHotStrTabEnd", true)) - + static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetStartHotStrTabBegin", true))); + os << "\treflectBothHotStr_begin:" << std::hex << + GetSymbolAddr(mplInfo.handle, "MRT_GetBothHotStrTabBegin", true) << "\n"; + uint64_t reflectionBothHotStrSize = + static_cast(static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetBothHotStrTabEnd", true)) - + static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetBothHotStrTabBegin", true))); + os << "\treflectRunHotStr_begin:" << std::hex << + GetSymbolAddr(mplInfo.handle, "MRT_GetRunHotStrTabBegin", true) << "\n"; + uint64_t reflectionRunHotStrSize = + static_cast(static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetRunHotStrTabEnd", true)) - + static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetRunHotStrTabBegin", true))); + sumReflectionStrSize += (reflectionColdStrSize + reflectionStartHotStrSize + + reflectionBothHotStrSize + reflectionRunHotStrSize); + os << "\treflectColdStr size:" << std::hex << reflectionColdStrSize << "\n"; + os << "\treflectStartHotStr size:" << std::hex << reflectionStartHotStrSize << "\n"; + os << "\treflectBothHotStr size:" << std::hex << reflectionBothHotStrSize << "\n"; + os << "\treflectRunHotStr size:" << std::hex << reflectionRunHotStrSize << "\n"; + + os << "\tmuidTab_begin:" << std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetMuidTabBegin", true) << "\n"; + uint64_t muidTabSize = + static_cast(static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetMuidTabEnd", true)) - + static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetMuidTabBegin", true))); + sumMuidTabSize += muidTabSize; + os << "\tmuidTab size:" << std::hex << muidTabSize << "\n"; + + os << "\tEhframe_begin:" << std::hex << GetSymbolAddr(mplInfo.handle, "MRT_GetEhframeStart", true) << "\n"; + uint64_t ehframeSize = + static_cast(static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetEhframeEnd", true)) - + static_cast(GetSymbolAddr(mplInfo.handle, "MRT_GetEhframeStart", true))); + sumEhframeSize += ehframeSize; + os << "\tEhframe size:" << std::hex << ehframeSize << "\n"; + for (uint32_t i = 0; i < static_cast(kLayoutTypeCount); ++i) { + std::string markerName = "__MBlock_" + GetLayoutTypeString(i) + "_func_start"; + symAddr = GetSymbolAddr(mplInfo.handle, markerName.c_str(), false); + if (symAddr != nullptr) { + os << "\t" << markerName << ":" << std::hex << *(reinterpret_cast(symAddr)) << "\n"; + } + } + if ((symAddr = GetSymbolAddr(mplInfo.handle, "__MBlock_globalVars_hot_begin", false))) { + os << "\t__MBlock_globalVars_hot_begin:" << std::hex << *(reinterpret_cast(symAddr)) << "\n"; + } + if ((symAddr = GetSymbolAddr(mplInfo.handle, "__MBlock_globalVars_cold_begin", false))) { + os << "\t__MBlock_globalVars_cold_begin:" << std::hex << *(reinterpret_cast(symAddr)) << "\n"; + } + if ((symAddr = GetSymbolAddr(mplInfo.handle, "__MBlock_globalVars_cold_end", false))) { + os << "\t__MBlock_globalVars_cold_end:" << std::hex << 
*(reinterpret_cast(symAddr)) << "\n"; + } + + sumHotLiteralSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock_literal_hot"); + sumColdLiteralSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock_literal_cold"); + sumClassMeta += mplInfo.GetTblSize(kTabClassMetadata); + + sumMethodsHotSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__methods_info__hot"); + sumMethodsColdSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__methods_info__cold"); + sumMethodsCompactSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__methods_infocompact__cold"); + + sumFieldsHotSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__fields_info__hot"); + sumFieldsColdSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__fields_info__cold"); + sumFieldsCompactSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__fields_infocompact__cold"); + + sumVtbHotSize += mplInfo.GetTblSize(kVTable); + sumVtbColdSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__vtb_cold"); + sumItbHotSize += mplInfo.GetTblSize(kITable); + sumItbColdSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__itb_cold"); + sumSuperClassHotSize += mplInfo.GetTblSize(kDataSuperClass); + sumSuperClassColdSize += DumpMetadataSectionSize(os, mplInfo.handle, "__MBlock__superclasses__cold"); + }; + pInvoker->mplInfoList.ForEach(handle); + os << std::dec; + os << "All maple*.so literal information: " << "\n"; + os << "\tsumLiteralHotSize: " << sumHotLiteralSize << "\n"; + os << "\tsumLiteralColdSize: " << sumColdLiteralSize << "\n"; + + os << "All maple*.so MetaData information: " << "\n"; + os << "\tsumClassMetaSize: " << sumClassMeta << "\n"; + os << "\tsumMethodsHotSize: " << sumMethodsHotSize << "\n"; + os << "\tsumMethodsColdSize: " << sumMethodsColdSize << "\n"; + os << "\tsumMethodsCompactSize: " << sumMethodsCompactSize << "\n"; + os << "\tsumFieldsHotSize: " << sumFieldsHotSize << "\n"; + os << "\tsumFieldsColdSize: " << sumFieldsColdSize << "\n"; + os << "\tsumFieldsCompactSize: " << sumFieldsCompactSize << "\n"; + os << "\tsumVtbHotSize: " << sumVtbHotSize << "\n"; + os << "\tsumVtbColdSize: " << sumVtbColdSize << "\n"; + os << "\tsumItbHotSize: " << sumItbHotSize << "\n"; + os << "\tsumItbColdSize: " << sumItbColdSize << "\n"; + os << "\tsumSuperClassHotSize: " << sumSuperClassHotSize << "\n"; + os << "\tsumSuperClassColdSize: " << sumSuperClassColdSize << "\n"; + os << "\tsumEhframeSize: " << sumEhframeSize << "\n"; + os << "\tsumMuidTabSize: " << sumMuidTabSize << "\n"; + os << "\tsumReflectionStrSize: " << sumReflectionStrSize << "\n"; +} + +// Dump all the function profile of all loaded maple-so +void Debug::DumpAllMplFuncProfile(std::unordered_map> &funcProfileRaw) { + size_t size; + LinkerFuncProfileTableItem *tableItem = nullptr; + auto handle = [&](LinkerMFileInfo &mplInfo) { + size = mplInfo.GetTblSize(kMethodProfile); + tableItem = mplInfo.GetTblBegin(kMethodProfile); + VLOG(profiler) << mplInfo.name << " size " << size << "\n"; + bool emptySize = (!size); + if (emptySize) { + return; + } + auto &profileList = funcProfileRaw[mplInfo.name]; + LinkerInfTableItem *pInfTable = mplInfo.GetTblBegin(kMethodInfo); + for (size_t i = 0; i < size; ++i) { + LinkerFuncProfileTableItem item = tableItem[i]; + if (item.callTimes) { + LinkerInfTableItem *infItem = reinterpret_cast(pInfTable + i); + std::string funcName = GetMethodSymbolByOffset(*infItem); + auto startupProfile = mplInfo.funProfileMap.find(static_cast(i)); + uint8_t layoutType = 
static_cast(kLayoutBootHot); + if (mplInfo.funProfileMap.empty()) { + layoutType = static_cast(kLayoutBootHot); + } else if (startupProfile == mplInfo.funProfileMap.end()) { + layoutType = static_cast(kLayoutRunHot); + } else if ((startupProfile->second).callTimes == item.callTimes) { + layoutType = static_cast(kLayoutBootHot); + } else { + layoutType = static_cast(kLayoutBothHot); + } + profileList.emplace_back(item.callTimes, layoutType, funcName); + } + } + }; + pInvoker->mplInfoList.ForEach(handle); +} + +// Dump the method undefine table of 'handle'. +bool Debug::DumpMethodUndefSymbol(LinkerMFileInfo &mplInfo) { + size_t size = mplInfo.GetTblSize(kMethodUndef); + if (size == 0) { + LINKER_DLOG(mpllinker) << "failed, size is zero in " << mplInfo.name << maple::endl; + return false; + } + LinkerAddrTableItem *pTable = mplInfo.GetTblBegin(kMethodUndef); + if (pTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pTable is null in " << mplInfo.name << maple::endl; + return false; + } + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kMethodUndefMuid); + if (pMuidTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pMuidTable is null in " << mplInfo.name << maple::endl; + return false; + } + for (size_t i = 0; i < size; ++i) { + LinkerAddrTableItem item = pTable[i]; + LinkerMuidTableItem muidItem = pMuidTable[i]; + if (item.addr == 0) { + LINKER_LOG(INFO) << "(" << i << "), \tMUID = " << muidItem.muid.ToStr() << " in " << mplInfo.name << maple::endl; + } + LINKER_DLOG(mpllinker) << "(" << i << "), \taddr = " << item.addr << ", MUID = " << muidItem.muid.ToStr() << + " in " << mplInfo.name << maple::endl; + } + return true; +} + +// Dump the method define table of 'handle'. +bool Debug::DumpMethodSymbol(LinkerMFileInfo &mplInfo) { + size_t size = mplInfo.GetTblSize(kMethodDef); + if (size == 0) { + LINKER_DLOG(mpllinker) << "failed, size is zero in " << mplInfo.name << maple::endl; + return false; + } + LinkerAddrTableItem *pTable = mplInfo.GetTblBegin(kMethodDefOrig); + if (pTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pTable is null in " << mplInfo.name << maple::endl; + return false; + } + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kMethodDefMuid); + if (pMuidTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pMuidTable is null in " << mplInfo.name << maple::endl; + return false; + } + LinkerInfTableItem *pInfTable = mplInfo.GetTblBegin(kMethodInfo); + if (pInfTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pInfTable is null in " << mplInfo.name << maple::endl; + return false; + } + + for (size_t i = 0; i < size; ++i) { + LinkerAddrTableItem *item = reinterpret_cast(pTable + i); + LinkerMuidTableItem *muidItem = reinterpret_cast(pMuidTable + i); + LinkerInfTableItem *infItem = reinterpret_cast(pInfTable + i); + LINKER_LOG(INFO) << "(" << i << "), \tMUID=" << muidItem->muid.ToStr() << " in " << mplInfo.name << maple::endl; + LINKER_LOG(INFO) << "(" << i << "), \taddr=" << item->addr << " in " << mplInfo.name << maple::endl; + LINKER_LOG(INFO) << "(" << i << "), \tsize=" << infItem->size << " in " << mplInfo.name << maple::endl; + LINKER_LOG(INFO) << "(" << i << "), \tsym=" << GetMethodSymbolByOffset(*infItem) << " in " << + mplInfo.name << maple::endl; + } + return true; +} + +// Dump the data undefine table of 'handle'. 
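+// Entries whose addr is still 0 have not been resolved yet; only those are
+// logged at INFO level below, while every entry is traced at debug level.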
+bool Debug::DumpDataUndefSymbol(LinkerMFileInfo &mplInfo) { + size_t size = mplInfo.GetTblSize(kDataUndef); + if (size == 0) { + LINKER_LOG(ERROR) << "failed, size is zero, in " << mplInfo.name << maple::endl; + return false; + } + LinkerAddrTableItem *pTable = mplInfo.GetTblBegin(kDataUndef); + if (pTable == nullptr) { + LINKER_LOG(ERROR) << "failed, pTable is null, in " << mplInfo.name << maple::endl; + return false; + } + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kDataUndefMuid); + if (pMuidTable == nullptr) { + LINKER_LOG(ERROR) << "failed, pMuidTable is null in " << mplInfo.name << maple::endl; + return false; + } + + for (size_t i = 0; i < size; ++i) { + LinkerAddrTableItem item = pTable[i]; + LinkerMuidTableItem muidItem = pMuidTable[i]; + if (item.addr == 0) { + LINKER_LOG(INFO) << "(" << i << "), \tMUID=" << muidItem.muid.ToStr() << " in " << mplInfo.name << maple::endl; + } + LINKER_DLOG(mpllinker) << "(" << i << "), \taddr=" << item.addr << ", MUID=" << muidItem.muid.ToStr() << " in " << + mplInfo.name << maple::endl; + } + return true; +} + +// Dump the data define table of 'handle'. +bool Debug::DumpDataSymbol(LinkerMFileInfo &mplInfo) { + size_t size = mplInfo.GetTblSize(kDataDef); + if (size == 0) { + LINKER_DLOG(mpllinker) << "failed, size is zero, in " << mplInfo.name << maple::endl; + return false; + } + LinkerAddrTableItem *pTable = mplInfo.GetTblBegin(kDataDefOrig); + if (pTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pTable is null, in " << mplInfo.name << maple::endl; + return false; + } + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kDataDefMuid); + if (pMuidTable == nullptr) { + LINKER_LOG(ERROR) << "failed, pMuidTable is null in " << mplInfo.name << maple::endl; + return false; + } + + for (size_t i = 0; i < size; ++i) { + LinkerAddrTableItem item = pTable[i]; + LinkerMuidTableItem muidItem = pMuidTable[i]; + LINKER_LOG(INFO) << "(" << i << "), \tMUID=" << muidItem.muid.ToStr() << " in " << mplInfo.name << maple::endl; + LINKER_LOG(INFO) << "(" << i << "), \taddr=" << item.addr << " in " << mplInfo.name << maple::endl; + } + return true; +} + +// Lazy-Binding routines start +void Debug::DumpStackInfoInLog() { + // We needn't care the performance of unwinding here. + std::vector uwContextStack; + // Unwind as many as possible till reaching the end. + MapleStack::FastRecordCurrentJavaStack(uwContextStack, MAPLE_STACK_UNWIND_STEP_MAX); + + for (auto &uwContext : uwContextStack) { + if (!UnwindContextInterpEx::TryDumpStackInfoInLog(*pInvoker, uwContext)) { + if (uwContext.IsCompiledContext()) { + std::string methodName; + uwContext.frame.GetJavaMethodFullName(methodName); + LinkerMFileInfo *mplInfo = + pInvoker->GetLinkerMFileInfo(kFromAddr, reinterpret_cast(uwContext.frame.ip)); + if (mplInfo != nullptr) { + LINKER_LOG(INFO) << methodName << ":" << uwContext.frame.ip << ":" << mplInfo->name << maple::endl; + } else { + LINKER_LOG(INFO) << methodName << ":" << uwContext.frame.ip << ":NO_SO_NAME" << maple::endl; + } + } + } + } +} + +uint64_t Debug::DumpMetadataSectionSize(std::ostream &os, void *handle, const std::string sectionName) { + std::ios::fmtflags f(os.flags()); + std::string hotOrCold = (sectionName.find("hot") != std::string::npos) ? 
"hot" : "cold"; + std::string startString = sectionName + "_begin"; + std::string endString = sectionName + "_end"; + void *end = GetSymbolAddr(handle, endString.c_str(), false); + void *start = GetSymbolAddr(handle, startString.c_str(), false); + uint64_t size = static_cast(reinterpret_cast(end) - reinterpret_cast(start)); + os << "\t" << startString << ": " << std::hex << start << "\n"; + os << "\t" << hotOrCold << " " << sectionName << " size: " << size << "\n"; + os.flags(f); + return size; +} + +void Debug::DumpBBProfileInfo(std::ostream &os) { + auto handle = [&os](const LinkerMFileInfo &mplInfo) { + void *bbProfileTabStart = GetSymbolAddr(mplInfo.handle, kBBProfileTabBegin, true); + void *bbProfileTabEnd = GetSymbolAddr(mplInfo.handle, kBBProfileTabEnd, true); + size_t itemSize = (reinterpret_cast(bbProfileTabEnd) - + reinterpret_cast(bbProfileTabStart)) / sizeof(uint32_t); + + char *bbProfileStrTabStart = static_cast(GetSymbolAddr(mplInfo.handle, kBBProfileStrTabBegin, true)); + char *bbProfileStrTabEnd = reinterpret_cast(GetSymbolAddr(mplInfo.handle, kBBProfileStrTabEnd, true)); + std::vector profileStr; + std::string str(bbProfileStrTabStart, bbProfileStrTabEnd - bbProfileStrTabStart); + std::stringstream ss; + ss.str(str); + std::string item; + while (std::getline(ss, item, '\0')) { + profileStr.emplace_back(item); + } + if (profileStr.size() != itemSize) { + LOG(INFO) << "profileStr size " << profileStr.size() << " doestn't match item size " << + itemSize << " in " << mplInfo.name << maple::endl; + LOG(INFO) << "profile Tab Start " << std::hex << bbProfileTabStart << " end " << + bbProfileTabEnd << std::dec << maple::endl; + return; + } + uint32_t *bbProfileTab = reinterpret_cast(bbProfileTabStart); + for (size_t i = 0; i < itemSize; ++i) { + os << profileStr[i] << ":" << bbProfileTab[i] << "\n"; + } + }; + pInvoker->mplInfoList.ForEach(handle); +} +// Dump all the func IR profile of all loaded maple-so +void Debug::DumpAllMplFuncIRProfile(std::unordered_map &funcProfileRaw) { + auto handle = [&](LinkerMFileInfo &mplInfo) { + auto counterSize = mplInfo.GetTblSize(kFuncIRProfCounter); + auto counterTable = mplInfo.GetTblBegin(kFuncIRProfCounter); + auto descSize = mplInfo.GetTblSize(kFuncIRProfDesc); + auto descTable = mplInfo.GetTblBegin(kFuncIRProfDesc); + VLOG(profiler) << mplInfo.name << " counter size " << counterSize << " desc size " << descSize << "\n"; + if (!counterSize || !descSize) { + return; + } + auto &profileList = funcProfileRaw[mplInfo.name]; + auto &descTab = profileList.descTab; + auto &counterTab = profileList.counterTab; + counterTab.reserve(counterSize); + LinkerInfTableItem *pInfTable = mplInfo.GetTblBegin(kMethodInfo); + for (size_t i = 0; i < descSize; ++i) { + LinkerFuncProfDescItem descItem = descTable[i]; + if ((descItem.start >= counterSize) || (descItem.end >= counterSize)) { + return; + } + LinkerInfTableItem *infItem = reinterpret_cast(pInfTable + i); + std::string funcName = GetMethodSymbolByOffset(*infItem); + VLOG(profiler) << funcName << " counter range [" << descItem.start << "," << descItem.end << "]" << "\n"; + descTab.emplace_back(funcName, descItem.hash, descItem.start, descItem.end); + } + + for (size_t i = 0; i < counterSize; ++i) { + LinkerFuncProfileTableItem counterItem = counterTable[i]; + counterTab.push_back(counterItem.callTimes); + } + }; + pInvoker->mplInfoList.ForEach(handle); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_gctib.cpp b/src/mrt/compiler-rt/src/linker/linker_gctib.cpp new file mode 
100644 index 0000000000..89eee09311 --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_gctib.cpp @@ -0,0 +1,150 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "linker/linker_gctib.h" + +#include +#include +#include "base/logging.h" +#include "collector/cp_generator.h" + +namespace maplert { +// Get all offset of reference field. +void RefCal(const MClass &klass, vector &refOffsets, vector &weakOffsets, + vector &unownedOffsets, uint64_t &maxRefOffset) { + if (&klass == WellKnown::GetMClassObject()) { + return; + } + FieldMeta *fields = klass.GetFieldMetas(); + uint32_t numOfField = klass.GetNumOfFields(); + for (uint32_t i = 0; i < numOfField; ++i) { + FieldMeta *field = &fields[i]; + if (field->IsPrimitive() || field->IsStatic()) { + continue; + } + uint64_t fieldOffset = field->GetOffset(); + if (fieldOffset == 0) { + // skip first meta + continue; + } + if (fieldOffset > maxRefOffset) { + maxRefOffset = fieldOffset; + } + size_t fieldModifier = field->GetMod(); + if (modifier::IsUnowned(fieldModifier)) { + unownedOffsets.push_back(fieldOffset); + } else if (modifier::IsWeakRef(fieldModifier)) { + weakOffsets.push_back(fieldOffset); + } else { + refOffsets.push_back(fieldOffset); + } + } + + if (klass.GetNumOfSuperClasses() == 0) { + return; + } + + if (klass.IsColdClass() && !klass.IsLazyBinding()) { + LinkerAPI::Instance().ResolveColdClassSymbol(klass.AsJclass()); + } + + MClass **superclassArray = klass.GetSuperClassArrayPtr(); + MClass *superClass = superclassArray[0]; + if (superClass != WellKnown::GetMClassObject() && !superClass->IsInterface()) { + RefCal(*superClass, refOffsets, weakOffsets, unownedOffsets, maxRefOffset); + } + return; +} + +void DumpGctib(const struct GCTibGCInfo &gcTib) { + ostringstream oss; + for (size_t i = 0; i != gcTib.nBitmapWords; ++i) { + oss << std::hex << gcTib.bitmapWords[i] << '\t'; + } + LOG(WARNING) << "[Gctib]headerProto:" << gcTib.headerProto << + ", nBitmapWords:" << gcTib.nBitmapWords << + ", bitmapWords:" << oss.str() << endl; +} + +static inline uint64_t OffsetToMapWordIndex(uint64_t offset) { + return (offset / sizeof(reffield_t)) / kRefWordPerMapWord; +} + +static inline uint64_t OffsetToInMapWordOffset(uint64_t offset) { + return ((offset / sizeof(reffield_t)) % kRefWordPerMapWord) * kBitsPerRefWord; +} + +static void FillBitmapWords(vector refOffsets, struct GCTibGCInfo *newGctibPtr, uint64_t refType) { + for (auto refOffset : refOffsets) { + uint64_t index = OffsetToMapWordIndex(refOffset); + uint64_t shift = OffsetToInMapWordOffset(refOffset); + newGctibPtr->bitmapWords[index] |= (refType << shift); + } +} + +bool ReGenGctib(ClassMetadata *classInfo, bool forceUpdate) { + vector refOffsets; + vector weakOffsets; + vector unownedOffsets; + uint64_t maxRefOffset = 0; + MClass *cls = reinterpret_cast(classInfo); + if (cls->IsInterface()) { + return false; + } + RefCal(*cls, refOffsets, weakOffsets, unownedOffsets, maxRefOffset); + + struct GCTibGCInfo *gcInfo = 
reinterpret_cast(cls->GetGctib()); + // construct new GCTIB + uint64_t nBitmapWords = 0; + if (maxRefOffset > 0) { + nBitmapWords = OffsetToMapWordIndex(maxRefOffset) + 1; + } + uint32_t gctibByteSize = static_cast((nBitmapWords + 1) * sizeof(uint64_t)); + struct GCTibGCInfo *newGctibPtr = reinterpret_cast(calloc(gctibByteSize, sizeof(char))); + if (newGctibPtr == nullptr) { + LOG(FATAL) << "malloc gctib failed" << endl; + return false; + } + + // fill bitmap + FillBitmapWords(refOffsets, newGctibPtr, kNormalRefBits); + FillBitmapWords(weakOffsets, newGctibPtr, kWeakRefBits); + FillBitmapWords(unownedOffsets, newGctibPtr, kUnownedRefBits); + newGctibPtr->nBitmapWords = static_cast(nBitmapWords); + if (gcInfo != nullptr) { + if (maxRefOffset > 0) { + newGctibPtr->headerProto = gcInfo->headerProto | kHasChildRef; + } else { + newGctibPtr->headerProto = gcInfo->headerProto & ~(kHasChildRef); + } + newGctibPtr->headerProto = newGctibPtr->headerProto & ~(kCyclePatternBit); + if ((!forceUpdate || VLOG_IS_ON(mpllinker)) && (gcInfo->nBitmapWords == nBitmapWords) && + (memcmp(gcInfo, newGctibPtr, gctibByteSize) == 0)) { + free(newGctibPtr); + return false; + } else if (VLOG_IS_ON(mpllinker)) { + LOG(WARNING) << "[Gctib] ResolveClassGctib:" << cls->GetName() << " mismatch" << endl; + DumpGctib(*gcInfo); + DumpGctib(*newGctibPtr); + } + } + classInfo->gctib.SetGctibRef(newGctibPtr); + if (gcInfo != nullptr && Collector::Instance().Type() == kNaiveRC && ClassCycleManager::HasDynamicLoadPattern(cls)) { + free(gcInfo); + // delete the cls from dynamic_loaded_cycle_classes_ + ClassCycleManager::DeleteDynamicLoadPattern(cls); + } + return true; +} +} diff --git a/src/mrt/compiler-rt/src/linker/linker_hotfix.cpp b/src/mrt/compiler-rt/src/linker/linker_hotfix.cpp new file mode 100644 index 0000000000..b5cb4beb4e --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_hotfix.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "linker/linker_hotfix.h" + +#include "linker/linker_model.h" +#include "linker/linker_inline.h" +#include "linker/linker_cache.h" +#include "linker/linker.h" + +namespace maplert { +using namespace linkerutils; +FeatureName Hotfix::featureName = kFHotfix; +bool Hotfix::ResetResolvedFlags(LinkerMFileInfo &mplInfo) { + LINKER_VLOG(mpllinker) << mplInfo.name << maple::endl; + mplInfo.SetFlag(kIsMethodDefResolved, false); + mplInfo.SetFlag(kIsVTabResolved, false); + mplInfo.SetFlag(kIsITabResolved, false); + mplInfo.SetFlag(kIsSuperClassResolved, false); + mplInfo.SetFlag(kIsGCRootListResolved, false); + mplInfo.SetFlag(kIsMethodRelocated, false); + mplInfo.SetFlag(kIsDataRelocated, false); + return true; +} + +bool Hotfix::ReleaseCaches(LinkerMFileInfo &mplInfo) { + LINKER_VLOG(mpllinker) << mplInfo.name << maple::endl; +#ifdef LINKER_RT_CACHE + pInvoker->Get()->RemoveAllTables(mplInfo); + pInvoker->Get()->FreeAllTables(mplInfo); +#endif // LINKER_RT_CACHE + return true; +} + +void Hotfix::SetClassLoaderParent(const MObject *classLoader, const MObject *newParent) { + LINKER_VLOG(hotfix) << classLoader << ", " << newParent << maple::endl; + pInvoker->ResetClassLoaderList(classLoader); + pInvoker->ResetClassLoaderList(newParent); + (void)(pInvoker->ForEachDoAction(this, &Hotfix::ResetResolvedFlags)); + (void)(pInvoker->ForEachDoAction(this, &Hotfix::ReleaseCaches)); + (void)(pInvoker->Get()->HandleMethodSymbol()); + (void)(pInvoker->Get()->HandleDataSymbol()); +} + +void Hotfix::InsertClassesFront(const MObject *classLoader, const LinkerMFileInfo &mplInfo, const std::string &path) { + LINKER_VLOG(hotfix) << "insert path[" << path << "] in ClassLoader[" << classLoader << "], isLazyBinding=" << + mplInfo.IsFlag(kIsLazy) << maple::endl; + if (!mplInfo.IsFlag(kIsLazy)) { + (void)(pInvoker->ForEachDoAction(this, &Hotfix::ResetResolvedFlags)); + (void)(pInvoker->ForEachDoAction(this, &Hotfix::ReleaseCaches)); + (void)(pInvoker->Get()->HandleMethodSymbol()); + (void)(pInvoker->Get()->HandleDataSymbol()); + } +} + +void Hotfix::SetPatchPath(const std::string &path, int32_t mode) { + LINKER_VLOG(hotfix) << "set patch path[" << path << "], mode =" << mode << maple::endl; + patchPath = path; + patchMode = mode; +} + +bool Hotfix::IsFrontPatchMode(const std::string &path) { + return (patchMode == 1 && path == patchPath) ? true : false; // 0 - classloader parent, 1 - dexpath list +} + +std::string Hotfix::GetPatchPath() { + return patchPath; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_info.cpp b/src/mrt/compiler-rt/src/linker/linker_info.cpp new file mode 100644 index 0000000000..5c216fb368 --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_info.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "linker/linker_info.h" + +namespace maplert { +MUID GetHash(const std::vector &mplInfos, bool getSolidHash) { + std::stringstream sstream; + for (auto mplInfo : mplInfos) { + MUID &hash = getSolidHash ? 
mplInfo->hashOfDecouple : mplInfo->hash; +#ifdef USE_64BIT_MUID + // 8 spaces to 64 bit + sstream << std::setfill('0') << std::setw(8) << std::hex << hash.data.words[1] << + std::setfill('0') << std::setw(8) << std::hex << hash.data.words[0]; +#else + // 16 spaces to 32 bit + sstream << std::setfill('0') << std::setw(16) << std::hex << hash.data.words[1] << + std::setfill('0') << std::setw(16) << std::hex << hash.data.words[0]; +#endif // USE_64BIT_MUID + } + auto str = sstream.str(); + MUID muid; + linkerutils::GenerateMUID(str.data(), str.size(), muid); + return muid; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_lazy_binding.cpp b/src/mrt/compiler-rt/src/linker/linker_lazy_binding.cpp new file mode 100644 index 0000000000..e7ae688a2f --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_lazy_binding.cpp @@ -0,0 +1,1536 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "linker/linker_lazy_binding.h" + +#include "linker/linker_model.h" +#include "linker/linker_inline.h" +#include "linker/linker_method_builder.h" +#ifdef LINKER_DECOUPLE +#include "linker/decouple/linker_decouple.h" +#include "linker/decouple/linker_field.h" +#include "linker/decouple/linker_gctib.h" +#endif +#include "loader_api.h" +#include "loader/object_loader.h" +#include "chelper.h" +#include "object_base.h" +#include "mrt_class_api.h" +#include "mrt_reflection_method.h" +#include "fieldmeta_inline.h" +#include "exception/mrt_exception.h" +#include "exception/stack_unwinder.h" +#include "mrt_well_known.h" + +using namespace maple; + +namespace maplert { +using namespace linkerutils; +FeatureName LazyBinding::featureName = kFLazyBinding; +// In most cases, decoupling routine don't call this function here. +// We just add this procedure in case. +bool LazyBinding::HandleSymbolForDecoupling(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, size_t index) { + BindingState state = GetAddrBindingState(addrSlice, index); + LINKER_VLOG(lazybinding) << "state=" << static_cast(state) << ", addr=" << addrSlice.Data() << ", index=" << + index << maple::endl; + return HandleSymbol(mplInfo, addrSlice.Data() + index, nullptr, state, true); +} + +bool LazyBinding::HandleSymbol(const void *offset, const void *pc, BindingState state, bool fromSignal) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromAddr, offset, true); + if (mplInfo == nullptr) { + pInvoker->DumpStackInfoInLog(); + LINKER_DLOG(lazybinding) << "--end--/" << offset << ", " << static_cast(state) << maple::endl; + LINKER_LOG(FATAL) << "failed, offset=" << offset << ", pc=" << pc << ", state=" << static_cast(state) << + ", mplInfo is null." 
<< maple::endl; + } + return HandleSymbol(*mplInfo, offset, pc, state, fromSignal); +} + +void LazyBinding::GetAddrAndMuidSlice(LinkerMFileInfo &mplInfo, + BindingState state, AddrSlice &addrSlice, MuidSlice &muidSlice, const void *offset) { + size_t tableSize = 0; + size_t muidSize = 0; + switch (state) { + case kBindingStateCinfUndef: + case kBindingStateDataUndef: + tableSize = mplInfo.GetTblSize(kDataUndef); + addrSlice = AddrSlice(mplInfo.GetTblBegin(kDataUndef), tableSize); + muidSize = mplInfo.GetTblSize(kDataUndefMuid); + muidSlice = MuidSlice(mplInfo.GetTblBegin(kDataUndefMuid), muidSize); + break; + case kBindingStateCinfDef: + case kBindingStateDataDef: + tableSize = mplInfo.GetTblSize(kDataDef); + addrSlice = AddrSlice(mplInfo.GetTblBegin(kDataDef), tableSize); + muidSize = mplInfo.GetTblSize(kDataDefMuid); + muidSlice = MuidSlice(mplInfo.GetTblBegin(kDataDefMuid), muidSize); + break; + case kBindingStateMethodUndef: + tableSize = mplInfo.GetTblSize(kMethodUndef); + addrSlice = AddrSlice(mplInfo.GetTblBegin(kMethodUndef), tableSize); + muidSize = mplInfo.GetTblSize(kMethodUndefMuid); + muidSlice = MuidSlice(mplInfo.GetTblBegin(kMethodUndefMuid), muidSize); + break; + case kBindingStateMethodDef: + tableSize = mplInfo.GetTblSize(kMethodDef); + addrSlice = AddrSlice(mplInfo.GetTblBegin(kMethodDef), tableSize); + muidSize = mplInfo.GetTblSize(kMethodDefMuid); + muidSlice = MuidSlice(mplInfo.GetTblBegin(kMethodDefMuid), muidSize); + break; + default: + LINKER_LOG(ERROR) << "failed, offset=" << offset << ", state=" << static_cast(state) << maple::endl; + return; + } +} + +void *LazyBinding::ResolveClassSymbolClassification( + LinkerMFileInfo &mplInfo, BindingState state, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + const size_t &index, MObject *candidateClassLoader, bool &fromUpper, const void *pc, const void *offset) { + void *data = nullptr; + switch (state) { + case kBindingStateCinfUndef: + if (mplInfo.IsFlag(kIsLazy) && mplInfo.classLoader == nullptr) { + candidateClassLoader = pInvoker->GetClassLoaderByAddress(mplInfo, pc); + LINKER_VLOG(lazybinding) << "candidateClassLoader=" << candidateClassLoader << maple::endl; + } + data = ResolveClassSymbol(mplInfo, addrSlice, muidSlice, index, candidateClassLoader, fromUpper, false, true); + break; + case kBindingStateCinfDef: + data = ResolveClassSymbol(mplInfo, addrSlice, muidSlice, index, candidateClassLoader, fromUpper, true, true); + break; + case kBindingStateDataUndef: + data = ResolveDataSymbol(mplInfo, addrSlice, muidSlice, index, false); + break; + case kBindingStateDataDef: + data = ResolveDataSymbol(mplInfo, addrSlice, muidSlice, index, true); + break; + case kBindingStateMethodUndef: + data = ResolveMethodSymbol(mplInfo, addrSlice, muidSlice, index, false, true); + break; + case kBindingStateMethodDef: + data = ResolveMethodSymbol(mplInfo, addrSlice, muidSlice, index, true, true); + break; + default: + // Never reach here. 
+ break; + } + + if (data == nullptr) { + LINKER_LOG(ERROR) << "failed, data returns null, offset=" << offset << ", " << mplInfo.name << maple::endl; + pInvoker->DumpStackInfoInLog(); + } + return data; +} + +bool LazyBinding::HandleSymbol( + LinkerMFileInfo &mplInfo, const void *offset, const void *pc, BindingState state, bool fromSignal) { + LINKER_DLOG(lazybinding) << "--start--/" << offset << ", " << state << ", " << fromSignal << maple::endl; + AddrSlice addrSlice; + MuidSlice muidSlice; + GetAddrAndMuidSlice(mplInfo, state, addrSlice, muidSlice, offset); + + if (pInvoker->GetMultiSoPendingCount() > 0) { + if (state == kBindingStateCinfUndef || state == kBindingStateDataUndef || state == kBindingStateMethodUndef) { + LINKER_LOG(ERROR) << "waiting for startup asynchronism procedure, " << "name=" << mplInfo.name << + ", multiSoPendingCount=" << pInvoker->GetMultiSoPendingCount() << maple::endl; + (void)MRT_EnterSaferegion(!fromSignal); + while (pInvoker->GetMultiSoPendingCount() > 0) { + constexpr int oneMillisecond = 1000; + (void)usleep(oneMillisecond); // Sleep 1ms + } + (void)MRT_LeaveSaferegion(); + } + } + + size_t index = GetAddrIndex(mplInfo, addrSlice, offset); + if (GetAddrBindingState(addrSlice, index, true) == kBindingStateResolved) { + LINKER_VLOG(lazybinding) << "resolved, state=" << state << ", index=" << index << mplInfo.name << maple::endl; + return true; + } + if (index == static_cast(-1)) { + pInvoker->DumpStackInfoInLog(); + LINKER_LOG(FATAL) << "out of range, offset=" << offset << "|" << state << "|" << mplInfo.name << maple::endl; + } + bool fromUpper = false; + MObject *candidateClassLoader = nullptr; + void *data = ResolveClassSymbolClassification( + mplInfo, state, addrSlice, muidSlice, index, candidateClassLoader, fromUpper, pc, offset); + + if (state == kBindingStateCinfUndef || state == kBindingStateCinfDef) { + if (!fromUpper) { + LinkerMFileInfo *tmpLinkerMFileInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, data); // Not only lazy .so. + if (tmpLinkerMFileInfo != nullptr) { + LinkClassInternal(*tmpLinkerMFileInfo, static_cast(data), candidateClassLoader, true); + } else { + // If the class is from the Interpreter, it can not be found in the LinkerMFileInfos list. + // It's allowed to return success in this case and not to link it any more. + LINKER_VLOG(lazybinding) << "class is lazy but no defined MFileInfo, from Interpreter, " << " klass=" << + data << ", name=" << reinterpret_cast(data)->GetName() << maple::endl; + } + } + } + if (SetAddrInAddrTable(addrSlice, index, reinterpret_cast(data)) == nullptr) { + LINKER_LOG(ERROR) << "set addr failed, addr = " << data << maple::endl; + } + LINKER_VLOG(lazybinding) << "resolved " << data << ", state=" << state << ", index=" << index << + ", successfully in " << mplInfo.name << maple::endl; + return true; +} + +size_t LazyBinding::GetAddrIndex( + const LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const void *offset) { + if (addrSlice.Data() > offset) { + LINKER_LOG(ERROR) << "failed, pTable=" << addrSlice.Data() << ", offset=" << offset << " in " << mplInfo.name << + maple::endl; + return static_cast(-1); // Maximum value of size_t means failure. 
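// A condensed sketch of the two recoveries this lazy-binding path relies on (Slot,
// PendingState and SlotIndex are illustrative names, not this patch's API): an
// unresolved table slot holds __BindingProtectRegion__ plus a small BindingState
// value, so a load through it faults, the faulting value identifies which kind of
// symbol is pending, and the slot index is a plain pointer difference from the table
// base, as GetAddrIndex computes right after this range check.
struct Slot { uintptr_t addr; };
static inline long PendingState(uintptr_t slotValue, uintptr_t regionBase,
                                long firstState, long resolvedState) {
  long diff = static_cast<long>(slotValue - regionBase);
  return (diff >= firstState && diff < resolvedState) ? diff : -1;  // -1: already a real address
}
static inline size_t SlotIndex(const Slot *tableBegin, const void *faultingSlot) {
  return static_cast<size_t>(static_cast<const Slot*>(faultingSlot) - tableBegin);
}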
+ } + + const LinkerAddrTableItem *pAddr = static_cast(const_cast(offset)); + size_t index = static_cast(pAddr - addrSlice.Data()); + LINKER_VLOG(lazybinding) << "index=" << index << maple::endl; + return index; +} + +BindingState LazyBinding::GetAddrBindingState(LinkerVoidType addr) { + int difference = static_cast(reinterpret_cast(addr) - __BindingProtectRegion__); + if (difference >= static_cast(kBindingStateCinfUndef) && + difference < static_cast(kBindingStateResolved)) { + return static_cast(difference); + } else { + return kBindingStateResolved; + } +} + +BindingState LazyBinding::GetAddrBindingState(const AddrSlice &addrSlice, size_t index, bool isAtomic) { + if (isAtomic) { + LinkerOffsetType *tmp = const_cast(&addrSlice[index].addr); + LinkerVoidType addr = __atomic_load_n(tmp, __ATOMIC_ACQUIRE); + return GetAddrBindingState(addr); + } else { + return GetAddrBindingState(static_cast(addrSlice[index].addr)); + } +} + +BindingState LazyBinding::GetAddrBindingState( + const LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const void *offset) { + size_t index = GetAddrIndex(mplInfo, addrSlice, offset); + BindingState state = GetAddrBindingState(addrSlice, index); + return state; +} + +// Set the 'addrSlice[index].data()' as 'klass' +// addrSlice is data def|undef table, but not def original table. +// Returns the value of 'klass' in void *type. +inline void *LazyBinding::SetAddrInAddrTable(AddrSlice &addrSlice, size_t index, const MClass *addr) { + if (addr != nullptr) { +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + __atomic_store_n(&addrSlice[index].addr, pInvoker->AddrToUint32(addr), __ATOMIC_RELEASE); +#else + __atomic_store_n(&addrSlice[index].addr, reinterpret_cast(addr), __ATOMIC_RELEASE); +#endif // USE_32BIT_REF + return addrSlice[index].Address(); + } + return nullptr; +} + +void *LazyBinding::SetClassInDefAddrTable(size_t index, const MClass *klass) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, klass, true); + size_t dataDefSize = mplInfo->GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo->GetTblBegin(kDataDef), dataDefSize); + return SetAddrInAddrTable(dataDefSlice, index, klass); +} + +void *LazyBinding::SearchInUndef(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, MObject *candidateClassLoader, bool &fromUpper, bool isDef, std::string className) { + void *res = nullptr; + LinkerMFileInfo *tmpLinkerMFileInfo = nullptr; + void *addr = LookUpDataSymbolAddressLazily(nullptr, muidSlice[index].muid, &tmpLinkerMFileInfo, true); + LINKER_LOG(WARNING) << "(" << (isDef ? "DEF" : "UNDEF") << "), double resolve MUID=" << + muidSlice[index].muid.ToStr() << ", addr=" << addr << ", candidateClassLoader=" << candidateClassLoader << + ", in " << mplInfo.name << maple::endl; + if (addr != nullptr) { // To check candidateClassLoader != nullptr? + if (className.empty()) { + className = reinterpret_cast(addr)->GetName(); + } + res = ResolveClassSymbolInternal( + mplInfo, addrSlice, muidSlice, index, candidateClassLoader, fromUpper, isDef, className); + if (res == nullptr) { + LINKER_LOG(WARNING) << "(" << (isDef ? 
"DEF" : "UNDEF") << "), failed to double resolve MUID=" << + muidSlice[index].muid.ToStr() << ", use addr=" << addr << ", candidateClassLoader=" << + candidateClassLoader << ", in " << mplInfo.name << maple::endl; + MClass *pseudo = reinterpret_cast(addr); + res = reinterpret_cast(pseudo); + if (pseudo != nullptr && !MRT_IsClassInitialized(*pseudo)) { + LoaderAPI::Instance().SetClassCL(*pseudo, tmpLinkerMFileInfo->classLoader); + } + fromUpper = false; + } + } + return res; +} + +void *LazyBinding::ResolveClassSymbolForAPK(LinkerMFileInfo &mplInfo, size_t index, bool &fromUpper, bool isDef) { + void *res = nullptr; + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + MClass *klass = reinterpret_cast(GetDefTableAddress(mplInfo, dataDefSlice, + static_cast(index), false)); + res = reinterpret_cast(klass); + if (klass != nullptr && !MRT_IsClassInitialized(*klass)) { + LoaderAPI::Instance().SetClassCL(*klass, mplInfo.classLoader); + LINKER_VLOG(lazybinding) << isDef << "for apk loading, klass=" << klass->GetName() << "in" << + mplInfo.name.c_str() << maple::endl; + } + fromUpper = false; + return res; +} + +// Just resolve without checking. +void *LazyBinding::ResolveClassSymbol(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, MObject *candidateClassLoader, bool &fromUpper, bool isDef, bool clinit) { + void *res = nullptr; + // Optimize for app loading. + // WARNING: On the premise of that we've filtered out the class in APK, + // which is already defined in boot class loader jar path. + if (isDef && GetAppLoadState() == kAppLoadBaseOnly) { + res = ResolveClassSymbolForAPK(mplInfo, index, fromUpper, isDef); + return res; + } + + // Building class loader hierarchy. + ClassLoaderListT classLoaderList; + pInvoker->GetClassLoaderList(mplInfo, classLoaderList); + // Look up the address with class loader. + string className; + for (jobject &loader : classLoaderList) { + MClass *pseudo = nullptr; + MObject *classLoader = reinterpret_cast(loader); + if (isDef) { + // Check the real def table directly. + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + pseudo = reinterpret_cast(GetDefTableAddress(mplInfo, dataDefSlice, + static_cast(index), false)); + } else { + // We must find the real cinfo firstly with its muid. + pseudo = reinterpret_cast(LookUpDataSymbolAddressLazily(classLoader, muidSlice[index].muid)); + } + if (pseudo == nullptr) { + LINKER_VLOG(lazybinding) << isDef << ", pseudo=nil, not resolved MUID=" << muidSlice[index].muid.ToStr() << + ", with classloader=" << classLoader << " in " << mplInfo.name << maple::endl; + continue; + } + className = pseudo->GetName(); + void *tempRes = ResolveClassSymbolInternal( + mplInfo, addrSlice, muidSlice, index, classLoader, fromUpper, isDef, className); + if (tempRes != nullptr) { + res = tempRes; + } + } + + // Add more searching without classloader for UNDEF. 
+ if (!isDef && res == nullptr) { + res = SearchInUndef(mplInfo, addrSlice, muidSlice, index, candidateClassLoader, fromUpper, isDef, className); + } + + if (res == nullptr) { + LINKER_LOG(ERROR) << "failed, className = " << className.c_str() << maple::endl; + char msg[256] = { 0 }; // 256 is maxBuffSize + (void)sprintf_s(msg, sizeof(msg), "No class definition found"); + MRT_ThrowNoClassDefFoundErrorUnw(msg); + return nullptr; + } + // We will check if we can open (void)MRT_TryInitClass(res, false); + if (clinit && (static_cast(res)->IsLazyBinding())) { + LINKER_DLOG(lazybinding) << isDef << ", CLINIT for " << res << ", in " << mplInfo.name << maple::endl; + } + LINKER_VLOG(lazybinding) << isDef << ", success, res=" << res << " in " << mplInfo.name << maple::endl; + return res; +} + +void *LazyBinding::ResolveClassSymbolInternal( + LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, const MuidSlice &muidSlice, + size_t index, MObject *classLoader, bool &fromUpper, bool isDef, const std::string &className) { + void *res = nullptr; + MClass *klass = nullptr; + LoaderAPI *loader = const_cast(pInvoker->GetLoader()); + klass = reinterpret_cast(loader->FindClass(className, SearchFilter(reinterpret_cast(classLoader)))); + if (klass != nullptr) { + LINKER_VLOG(lazybinding) << "(" << (isDef ? "DEF" : "UNDEF") << "), delegation, resolved MUID=" << + muidSlice[index].muid.ToStr() << ", name=" << klass->GetName() << ", addr={" << + addrSlice[index].Address() << "->" << klass << "}, with classloader=" << classLoader << " in " << + mplInfo.name << ", cold=" << klass->IsColdClass() << ", lazy=" << klass->IsLazyBinding() << ", init=" << + MRT_IsClassInitialized(*klass) << maple::endl; + res = reinterpret_cast(klass); + fromUpper = false; + return res; + } else { + MObject *bootClassLoader = reinterpret_cast(loader->GetBootClassLoaderInstance()); + // Ignore lower classloaders + if (classLoader != nullptr && classLoader != bootClassLoader && !pInvoker->IsSystemClassLoader(classLoader)) { + klass = reinterpret_cast(pInvoker->InvokeClassLoaderLoadClass(*classLoader, className)); + if (klass != nullptr) { + LINKER_VLOG(lazybinding) << "(" << (isDef ? "DEF" : "UNDEF") << "), route, resolved MUID=" << + muidSlice[index].muid.ToStr() << ", name=" << klass->GetName() << ", addr={" << + addrSlice[index].Address() << "->" << klass << "}, with classloader=" << classLoader << "/" << + bootClassLoader << "/" << loader->GetSystemClassLoader() << " in " << mplInfo.name << ", cold=" << + klass->IsColdClass() << ", lazy=" << klass->IsLazyBinding() << ", init=" << + MRT_IsClassInitialized(*klass) << maple::endl; + LinkStaticSymbol(mplInfo, klass); + res = reinterpret_cast(klass); + fromUpper = true; + return res; + } else { + LINKER_VLOG(lazybinding) << "(" << (isDef ? "DEF" : "UNDEF") << "), route, not resolved MUID=" << + muidSlice[index].muid.ToStr() << ", name=" << className << ", with classloader=" << classLoader << + "/" << bootClassLoader << "/" << loader->GetSystemClassLoader() << " in " << mplInfo.name << maple::endl; + } + } + } + return nullptr; +} + +void *LazyBinding::ResolveDataSymbol(const AddrSlice &addrSlice, const MuidSlice &muidSlice, size_t index, bool isDef) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromAddr, &addrSlice, true); + if (mplInfo == nullptr) { + LINKER_LOG(ERROR) << "failed, " << addrSlice.Data() << ", mplInfo is null." 
<< maple::endl; + return nullptr; + } + return ResolveDataSymbol(*mplInfo, addrSlice, muidSlice, index, isDef); +} + +void *LazyBinding::ResolveDataSymbol(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, + const MuidSlice &muidSlice, size_t index, bool isDef) { + void *res = nullptr; + // Optimize for app loading. + // WARNING: On the premise of that we've filtered out the class in APK, + // which is already defined in boot class loader jar path. + if (isDef) { + if (GetAppLoadState() == kAppLoadBaseOnly) { + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + res = GetDefTableAddress(mplInfo, dataDefSlice, static_cast(index), false); + LINKER_DLOG(lazybinding) << isDef << ", optimized for App, resolved MUID=" << muidSlice[index].muid.ToStr() << + ", addr={" << addrSlice[index].Address() << "->" << res << "} in " << mplInfo.name << maple::endl; + return res; + } + } + + // Building class loader hierarchy. + ClassLoaderListT classLoaderList; + pInvoker->GetClassLoaderList(mplInfo, classLoaderList); + for (jobject &classLoader : classLoaderList) { // Look up the address. + void *addr = LookUpDataSymbolAddressLazily(reinterpret_cast(classLoader), muidSlice[index].muid); + if (addr != nullptr) { + LINKER_VLOG(lazybinding) << isDef << ", resolved MUID=" << muidSlice[index].muid.ToStr() << ", addr={" << + addrSlice[index].Address() << "->" << addr << "}, in " << mplInfo.name << maple::endl; + res = addr; + } + } + if (!isDef && res == nullptr) { // Add more searching without classloader for UNDEF. + LinkerMFileInfo *tmpLinkerMFileInfo = nullptr; + void *addr = LookUpDataSymbolAddressLazily(nullptr, muidSlice[index].muid, &tmpLinkerMFileInfo, true); + if (addr != nullptr) { + LINKER_LOG(WARNING) << isDef << ", double resolve MUID=" << muidSlice[index].muid.ToStr() << ", addr=" << addr << + ", in " << mplInfo.name << ", from " << tmpLinkerMFileInfo->name << maple::endl; + res = addr; + } + } + + if (res == nullptr) { + LINKER_LOG(ERROR) << isDef << ", failed to resolve MUID=" << muidSlice[index].muid.ToStr() << " in " << + mplInfo.name << maple::endl; + char msg[256] = { 0 }; // 256 is maxBuffSize + (void)sprintf_s(msg, sizeof(msg), "No static field MUID:%s found in the class", + muidSlice[index].muid.ToStr().c_str()); + MRT_ThrowNoSuchFieldErrorUnw(msg); + } + return res; +} + +void *LazyBinding::ResolveMethodSymbol( + const AddrSlice &addrSlice, const MuidSlice &muidSlice, size_t index, bool isDef) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromAddr, &addrSlice, true); + if (mplInfo == nullptr) { + LINKER_LOG(ERROR) << "failed, " << addrSlice.Data() << ", mplInfo is null." << maple::endl; + return nullptr; + } + return ResolveMethodSymbol(*mplInfo, addrSlice, muidSlice, index, isDef, true); +} + +void *LazyBinding::ResolveMethodSymbol(LinkerMFileInfo &mplInfo, const AddrSlice &addrSlice, + const MuidSlice &muidSlice, size_t index, bool isDef, bool forClass) { + void *res = nullptr; + // Optimize for app loading. + // WARNING: On the premise of that we've filtered out the class in APK, + // which is already defined in boot class loader jar path. 
+ if (isDef) { + if (GetAppLoadState() == kAppLoadBaseOnly) { + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kMethodDefOrig), dataDefSize); + res = GetDefTableAddress(mplInfo, dataDefSlice, static_cast(index), true); + LINKER_DLOG(lazybinding) << isDef << forClass << ", MUID=" << muidSlice[index].muid.ToStr() << + ", addr={" << addrSlice[index].Address() << "->" << res << "} in " << mplInfo.name << maple::endl; +#ifdef LINKER_LAZY_BINDING_METHOD_TO_CLASS + if (forClass) { + ResolveMethodsClassSymbol(mplInfo, isDef, res); + } +#endif // LINKER_LAZY_BINDING_METHOD_TO_CLASS + return res; + } + } + + ClassLoaderListT classLoaderList; // Building class loader hierarchy. + pInvoker->GetClassLoaderList(mplInfo, classLoaderList); + for (jobject &classLoader : classLoaderList) { // Look up the address with class loader. + void *tempRes = LookUpMethodSymbolAddressLazily(reinterpret_cast(classLoader), muidSlice[index].muid); + if (tempRes != nullptr) { + LINKER_VLOG(lazybinding) << isDef << ", MUID=" << muidSlice[index].muid.ToStr() << ", addr={" << + addrSlice[index].Address() << "->" << tempRes << "}, in " << mplInfo.name << maple::endl; +#ifdef LINKER_LAZY_BINDING_METHOD_TO_CLASS + if (forClass) { + ResolveMethodsClassSymbol(mplInfo, isDef, tempRes); + } +#endif // LINKER_LAZY_BINDING_METHOD_TO_CLASS + res = tempRes; + } + } + + if (!isDef && res == nullptr) { // Add more searching without classloader for UNDEF. + res = LookUpMethodSymbolAddressLazily(nullptr, muidSlice[index].muid, true); + LINKER_LOG(WARNING) << isDef << ", double resolve MUID=" << muidSlice[index].muid.ToStr() << ", addr=" << res << + " in " << mplInfo.name << maple::endl; +#ifdef LINKER_LAZY_BINDING_METHOD_TO_CLASS + if (res != nullptr && forClass) { + ResolveMethodsClassSymbol(mplInfo, isDef, res); + } +#endif // LINKER_LAZY_BINDING_METHOD_TO_CLASS + } + + if (res == nullptr) { + char msg[256] = { 0 }; // 256 is maxBuffSize + (void)sprintf_s(msg, sizeof(msg), "No method MUID:%s found in the class", muidSlice[index].muid.ToStr().c_str()); + MRT_ThrowNoSuchMethodErrorUnw(msg); + } + return res; +} + +MClass *LazyBinding::GetUnresolvedClass(MClass *klass, bool &fromUpper, bool isDef) { + // The klass must be DEF, of course. + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, static_cast(klass), true); + if (mplInfo == nullptr) { + LINKER_LOG(FATAL) << "LinkerMFileInfo not found in lazy list, klass=" << klass << "name = " << + klass->GetName()<< maple::endl; + return nullptr; + } + size_t dataDefSize = mplInfo->GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo->GetTblBegin(kDataDef), dataDefSize); + size_t muidSize = mplInfo->GetTblSize(kDataDefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo->GetTblBegin(kDataDefMuid), muidSize); + + // To get the index in the pTable + MClass *indexClass = klass->GetClass(); + uint32_t defIndex = static_cast(reinterpret_cast(indexClass)); + if (defIndex == 0) { // It's not lazy, we use 0 as flag of not lazy. 
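// A sketch of the 'shadow' multiplexing relied on here (EncodeDefIndex/DecodeDefIndex
// are illustrative helpers, not part of this patch): while a class is still lazy, its
// shadow slot stores defIndex + 1 so that 0 can keep meaning "not lazy"; once the class
// is linked, the shadow is restored to the real java.lang.Class metadata.
static inline uintptr_t EncodeDefIndex(uint32_t defIndex) {
  return static_cast<uintptr_t>(defIndex) + 1;        // 0 stays reserved for "not lazy"
}
static inline bool DecodeDefIndex(uintptr_t shadow, uint32_t &defIndex) {
  if (shadow == 0) {
    return false;                                     // not a lazy class, nothing encoded
  }
  defIndex = static_cast<uint32_t>(shadow) - 1;       // matches the --defIndex below
  return true;
}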
+ LINKER_LOG(FATAL) << "class is lazy, but no def index?klass=" << klass << "name=" << klass->GetName()<< maple::endl; + return nullptr; + } else { + if (indexClass == WellKnown::GetMClassClass()) { + return nullptr; + } + --defIndex; + } + + if (GetAddrBindingState(dataDefSlice, defIndex, true) == kBindingStateResolved) { + return nullptr; + } + return reinterpret_cast(ResolveClassSymbol( + *mplInfo, dataDefSlice, muidSlice, defIndex, nullptr, fromUpper, isDef, true)); +} + +void LazyBinding::ResolveMethodsClassSymbol(const LinkerMFileInfo &mplInfo, bool isDef, const void *addr) { + // To get the declaring class of the method. + void *md = JavaFrame::GetMethodMetadata(static_cast(addr)); + if (md != nullptr) { + const MethodMetaBase *methodMeta = reinterpret_cast(md); + void *classAddr = methodMeta->GetDeclaringClass(); + MClass *klass = static_cast(classAddr); + if (klass == nullptr) { + LINKER_LOG(FATAL) << "klass is nullptr" << maple::endl; + } + LINKER_VLOG(lazybinding) << "(" << (isDef ? "DEF" : "UNDEF") << "), to resolve klass=" << klass << ", name=" << + klass->GetName() << ", prim=" << klass->IsPrimitiveClass() << ", array=" << klass->IsArray() << ", cold=" << + modifier::IsColdClass(klass->GetFlag()) << ", lazy=" << klass->IsLazyBinding() << + ", in " << mplInfo.name << maple::endl; + if (!klass->IsPrimitiveClass() && !klass->IsArrayClass() && klass->IsLazyBinding()) { + bool fromUpper = false; + MClass *unresolvedClass = GetUnresolvedClass(klass, fromUpper, isDef); + if (unresolvedClass == nullptr) { + LINKER_LOG(ERROR) << "(" << (isDef ? "DEF" : "UNDEF") << "), class is lazy, but no def index? klass=" << + klass << ", name=" << klass->GetName() << maple::endl; + // Should not reach here? + return; + } else { + if (!fromUpper) { + LINKER_VLOG(lazybinding) << "(" << (isDef ? "DEF" : "UNDEF") << "), found class, unresolved class=" << + unresolvedClass << ", class name=" << unresolvedClass->GetName() << maple::endl; + // Not only lazy .so. + LinkerMFileInfo *tmpLinkerMFileInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, unresolvedClass); + if (tmpLinkerMFileInfo != nullptr) { + LinkClassInternal(*tmpLinkerMFileInfo, unresolvedClass, nullptr, true); + } else { + LINKER_LOG(FATAL) << "class is lazy, but no lazy LinkerMFileInfo?" << " klass=" << unresolvedClass << + ", name=" << unresolvedClass->GetName() << maple::endl; + } + return; + } else { + // We don't set the method address any more. + LINKER_VLOG(lazybinding) << "(" << (isDef ? "DEF" : "UNDEF") << "), not set the method address for klass=" << + klass << ", name=" << klass->GetName() << maple::endl; + } + } + } else { + // Already resolved the class before, just set address for the method. + return; + } + } else { + LINKER_LOG(FATAL) << "(" << (isDef ? 
"DEF" : "UNDEF") << "), md is null, addr=" << addr << maple::endl; + } + return; +} + +void LazyBinding::LinkStaticSymbol(LinkerMFileInfo &mplInfo, const MClass *target) { + LazyBinding::LinkStaticMethod(mplInfo, target); + LazyBinding::LinkStaticField(mplInfo, target); +} + +void LazyBinding::LinkStaticMethodSlow(MethodMeta &meta, std::string name, + LinkerMFileInfo &mplInfo, LinkerMFileInfo &oldMplInfo, AddrSlice &srcTable, AddrSlice &dstTable) { + MethodMeta *method = &meta; + LinkerVoidType dstAddr = 0; + size_t iSrcIndex = 0; + size_t iDstIndex = 0; + std::string methodName = name; + methodName += "|"; + methodName += method->GetName(); + methodName += "|"; + methodName += method->GetSignature(); + std::string encodedMethodName = namemangler::EncodeName(methodName); + MUID methodMuid = GetMUID(encodedMethodName, true); + LinkerVoidType srcAddr = pInvoker->LookUpMethodSymbolAddress(oldMplInfo, methodMuid, iSrcIndex); + if (UNLIKELY(&oldMplInfo != &mplInfo)) { + dstAddr = pInvoker->LookUpMethodSymbolAddress(mplInfo, methodMuid, iDstIndex); + } else { + dstAddr = srcAddr; + iDstIndex = iSrcIndex; + } + if (srcAddr != 0) { + void *addr = reinterpret_cast(srcAddr); + LINKER_VLOG(lazybinding) << "resolved MUID=" << methodMuid.ToStr() << ", Address=" << dstAddr << + ", copy form srcAddr=" << srcTable[iSrcIndex].Address() << ", srcIndex=" << iSrcIndex << " to dstAddr=" << + dstTable[iDstIndex].Address() << ", dstIndex=" << iDstIndex << ", methodName=" << methodName << + ", form mplInfo=" << oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + if (SetAddrInAddrTable(dstTable, iDstIndex, reinterpret_cast(addr)) == nullptr) { + LINKER_LOG(ERROR) << "set addr failed, addr = " << addr << maple::endl; + } + } else { + LINKER_LOG(ERROR) << "failed to resolve MUID=" << methodMuid.ToStr() << ", Address=" << dstAddr << + ", encodedMethodName=" << encodedMethodName << ", srcAddr=" << srcTable[iSrcIndex].Address() << + ", srcIndex" << iSrcIndex << ", methodName=" << methodName.c_str() << ", from mplInfo=" << + oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + } +} + +void LazyBinding::LinkStaticMethodFast(MethodMeta &meta, const std::string name, size_t index, + LinkerMFileInfo &mplInfo, const LinkerMFileInfo &oldMplInfo, AddrSlice &srcTable, AddrSlice &dstTable) { + MethodMeta *method = &meta; + LinkerVoidType dstAddr = 0; + size_t iSrcIndex = index; + size_t iDstIndex = 0; + std::string methodName = name; +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + LinkerVoidType methodSrcAddr = pInvoker->AddrToUint32(GetDefTableAddress(mplInfo, srcTable, iSrcIndex, true)); +#else + LinkerVoidType methodSrcAddr = + reinterpret_cast(GetDefTableAddress(mplInfo, srcTable, iSrcIndex, true)); +#endif // USE_32BIT_REF + if (methodSrcAddr != 0) { + method->SetAddress(methodSrcAddr); + dstAddr = methodSrcAddr; + if (UNLIKELY(&oldMplInfo != &mplInfo)) { + methodName += "|"; + methodName += method->GetName(); + methodName += "|"; + methodName += method->GetSignature(); + std::string encodedMethodName = namemangler::EncodeName(methodName); + MUID methodMuid = GetMUID(encodedMethodName, true); + dstAddr = pInvoker->LookUpMethodSymbolAddress(mplInfo, methodMuid, iDstIndex); + if (SetAddrInAddrTable(dstTable, iDstIndex, reinterpret_cast(methodSrcAddr)) == nullptr) { + LINKER_LOG(ERROR) << "set addr failed, addr = " << methodSrcAddr << maple::endl; + } + } else { + iDstIndex = iSrcIndex; + if (SetAddrInAddrTable(dstTable, iDstIndex, reinterpret_cast(methodSrcAddr)) == nullptr) { + LINKER_LOG(ERROR) << 
"set addr failed, addr = " << methodSrcAddr << maple::endl; + } + } + LINKER_VLOG(lazybinding) << "resolved " << ", Address=" << dstAddr << ", copy form methodSrcAddr=" << + srcTable[iSrcIndex].Address() << ", srcIndex=" << iSrcIndex << " to dstAddr=" << + dstTable[iDstIndex].Address() << ", dstIndex=" << iDstIndex << ", methodName=" << + methodName << ", form mplInfo=" << oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + } else { + LINKER_LOG(ERROR) << "failed to resolve " << ", Address=" << methodSrcAddr << ", methodSrcAddr=" << + srcTable[iSrcIndex].Address() << ", srcIndex" << iSrcIndex << ", methodName=" << methodName << + ", from mplInfo=" << oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + } +} + +// In maple, actually all methods are linked to static symbols. +void LazyBinding::LinkStaticMethod(LinkerMFileInfo &mplInfo, const MClass *target) { + if (UNLIKELY(target == nullptr)) { + LINKER_DLOG(lazybinding) << "failed, target is null!" << maple::endl; + return; + } + LinkerMFileInfo *oldMplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, target); + size_t iTableSize = 0; + AddrSlice srcTable; + AddrSlice dstTable; + if (oldMplInfo == nullptr) { + oldMplInfo = &mplInfo; + } + if (UNLIKELY(oldMplInfo != &mplInfo)) { + iTableSize = oldMplInfo->GetTblSize(kMethodDefOrig); + size_t dstTableSize = mplInfo.GetTblSize(kMethodDef); + srcTable = AddrSlice(oldMplInfo->GetTblBegin(kMethodDefOrig), iTableSize); + dstTable = AddrSlice(mplInfo.GetTblBegin(kMethodDef), dstTableSize); + } else { + iTableSize = mplInfo.GetTblSize(kMethodDefOrig); + srcTable = AddrSlice(mplInfo.GetTblBegin(kMethodDefOrig), iTableSize); + dstTable = AddrSlice(mplInfo.GetTblBegin(kMethodDef), iTableSize); + } + if (dstTable.Empty() || srcTable.Empty()) { + LINKER_DLOG(lazybinding) << "failed, table is null" << maple::endl; + return; + } + MethodMeta *methods = target->GetMethodMetas(); + uint32_t numOfMethods = target->GetNumOfMethods(); + for (uint32_t i = 0; i < numOfMethods; ++i) { + MethodMeta *method = methods + i; + // Consider as the case of default methods, we don't check if (__MRT_Class_isInterface(target)). 
+ if (method == nullptr || UNLIKELY(method->IsAbstract())) { + continue; + } + std::string methodName = target->GetName(); + int32_t index = method->GetDefTabIndex(); + if (index != -1) { + size_t offset = static_cast(index); + if (offset >= iTableSize) { + continue; + } + LinkStaticMethodFast(*method, methodName, offset, mplInfo, *oldMplInfo, srcTable, dstTable); + } else { + LinkStaticMethodSlow(*method, methodName, mplInfo, *oldMplInfo, srcTable, dstTable); + } + } +} + +void LazyBinding::LinkStaticFieldSlow(FieldMeta &meta, std::string name, + LinkerMFileInfo &mplInfo, LinkerMFileInfo &oldMplInfo, AddrSlice &srcTable, AddrSlice &dstTable) { + FieldMeta *field = &meta; + LinkerVoidType dstAddr = 0; + size_t iSrcIndex = 0; + size_t iDstIndex = 0; + std::string fieldName = name; + fieldName += "|"; + fieldName += field->GetName(); + std::string encodedFieldName = namemangler::EncodeName(fieldName); + MUID fieldMuid = GetMUID(encodedFieldName, true); + LinkerVoidType srcAddr = pInvoker->LookUpDataSymbolAddress(oldMplInfo, fieldMuid, iSrcIndex); + if (UNLIKELY(&oldMplInfo != &mplInfo)) { + dstAddr = pInvoker->LookUpDataSymbolAddress(mplInfo, fieldMuid, iDstIndex); + } else { + dstAddr = srcAddr; + iDstIndex = iSrcIndex; + } + if (srcAddr != 0) { + void *addr = reinterpret_cast(srcAddr); + LINKER_VLOG(lazybinding) << "resolved MUID=" << fieldMuid.ToStr() << ", Address=" << dstAddr << + ", copy form srcAddr=" << srcTable[iSrcIndex].Address() << ", srcIndex=" << iSrcIndex << " to dstAddr=" << + dstTable[iDstIndex].Address() << ", dstIndex=" << iDstIndex << ", fieldName=" << fieldName << + ", form mplInfo=" << oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + if (SetAddrInAddrTable(dstTable, iDstIndex, reinterpret_cast(addr)) == nullptr) { + LINKER_LOG(ERROR) << "set addr failed, addr = " << addr << maple::endl; + } + } else { + LINKER_LOG(ERROR) << "failed to resolve MUID=" << fieldMuid.ToStr() << ", Address=" << dstAddr << + ", encodedFieldName=" << encodedFieldName << ", srcAddr=" << srcTable[iSrcIndex].Address() << ", srcIndex" << + iSrcIndex << ", fieldName=" << fieldName.c_str() << ", from mplInfo=" << oldMplInfo.name << + " to mplInfo=" << mplInfo.name << maple::endl; + } +} + +void LazyBinding::LinkStaticFieldFast(FieldMeta &meta, const std::string name, size_t index, + LinkerMFileInfo &mplInfo, const LinkerMFileInfo &oldMplInfo, AddrSlice &srcTable, AddrSlice &dstTable) { + FieldMeta *field = &meta; + LinkerVoidType dstAddr = 0; + size_t iSrcIndex = index; + size_t iDstIndex = 0; + std::string fieldName = name; +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + LinkerVoidType fieldSrcAddr = pInvoker->AddrToUint32(GetDefTableAddress(mplInfo, srcTable, iSrcIndex, false)); +#else + LinkerVoidType fieldSrcAddr = + reinterpret_cast(GetDefTableAddress(mplInfo, srcTable, iSrcIndex, false)); +#endif // USE_32BIT_REF + if (fieldSrcAddr != 0) { + field->SetStaticAddr(fieldSrcAddr); + dstAddr = fieldSrcAddr; + if (UNLIKELY(&oldMplInfo != &mplInfo)) { + fieldName += "|"; + fieldName += field->GetName(); + std::string encodedFieldName = namemangler::EncodeName(fieldName); + MUID fieldMuid = GetMUID(encodedFieldName, true); + dstAddr = pInvoker->LookUpDataSymbolAddress(mplInfo, fieldMuid, iDstIndex); + if (SetAddrInAddrTable(dstTable, iDstIndex, reinterpret_cast(fieldSrcAddr)) == nullptr) { + LINKER_LOG(ERROR) << "set addr failed, addr = " << fieldSrcAddr << maple::endl; + } + } else { + iDstIndex = iSrcIndex; + if (SetAddrInAddrTable(dstTable, iDstIndex, reinterpret_cast(fieldSrcAddr)) == 
nullptr) { + LINKER_LOG(ERROR) << "set addr failed, addr = " << fieldSrcAddr << maple::endl; + } + } + LINKER_VLOG(lazybinding) << "resolved " << ", Address=" << dstAddr << ", copy form fieldSrcAddr=" << + srcTable[iSrcIndex].Address() << ", srcIndex=" << iSrcIndex << " to dstAddr=" << + dstTable[iDstIndex].Address() << ", dstIndex=" << iDstIndex << ", fieldName=" << + fieldName << ", form mplInfo=" << oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + } else { + LINKER_LOG(ERROR) << "failed to resolve " << ", Address=" << fieldSrcAddr << ", fieldSrcAddr=" << + srcTable[iSrcIndex].Address() << ", srcIndex" << iSrcIndex << ", fieldName=" << fieldName << + ", from mplInfo=" << oldMplInfo.name << " to mplInfo=" << mplInfo.name << maple::endl; + } +} + +void LazyBinding::LinkStaticField(LinkerMFileInfo &mplInfo, const MClass *target) { + if (UNLIKELY(target == nullptr)) { + LINKER_DLOG(lazybinding) << "failed, target is null!" << maple::endl; + return; + } + size_t iTableSize = 0; + AddrSlice srcTable; + AddrSlice dstTable; + LinkerMFileInfo *oldMplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, target); + if (oldMplInfo == nullptr) { + oldMplInfo = &mplInfo; + } + if (UNLIKELY(oldMplInfo != &mplInfo)) { + iTableSize = oldMplInfo->GetTblSize(kDataDefOrig); + size_t dstTableSize = mplInfo.GetTblSize(kDataDef); + srcTable = AddrSlice(oldMplInfo->GetTblBegin(kDataDefOrig), iTableSize); + dstTable = AddrSlice(mplInfo.GetTblBegin(kDataDef), dstTableSize); + } else { + iTableSize = mplInfo.GetTblSize(kDataDefOrig); + srcTable = AddrSlice(mplInfo.GetTblBegin(kDataDefOrig), iTableSize); + dstTable = AddrSlice(mplInfo.GetTblBegin(kDataDef), iTableSize); + } + if (dstTable.Empty() || srcTable.Empty()) { + LINKER_DLOG(lazybinding) << "failed, table is null" << maple::endl; + return; + } + FieldMeta *fields = target->GetFieldMetas(); + uint32_t numOfFields = target->GetNumOfFields(); + for (uint32_t i = 0; i < numOfFields; ++i) { + FieldMeta *field = fields + i; + // Not check !field->IsPublic() any more. 
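// Dispatch pattern shared by the method and field linkers, sketched with illustrative
// names (kNoDefTabIndex, LinkOne, and the callables are not part of this patch):
// metadata carrying a valid def-table index is linked by direct slot access, metadata
// without an index falls back to the MUID lookup, and out-of-range indexes are skipped.
constexpr int32_t kNoDefTabIndex = -1;
template <typename FastFn, typename SlowFn>
static void LinkOne(int32_t defTabIndex, size_t tableSize, FastFn fast, SlowFn slow) {
  if (defTabIndex == kNoDefTabIndex) {
    slow();                                    // rebuild the name key and hash it to a MUID
  } else if (static_cast<size_t>(defTabIndex) < tableSize) {
    fast(static_cast<size_t>(defTabIndex));    // copy straight from the original def table slot
  }                                            // otherwise: skip, as the loops above and below do
}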
+ if (field == nullptr || !field->IsStatic()) { + continue; + } + std::string fieldName = target->GetName(); + int32_t index = field->GetDefTabIndex(); + if (index != -1) { + size_t offset = static_cast(index); + if (offset >= iTableSize) { + continue; + } + LinkStaticFieldFast(*field, fieldName, offset, mplInfo, *oldMplInfo, srcTable, dstTable); + } else { + LinkStaticFieldSlow(*field, fieldName, mplInfo, *oldMplInfo, srcTable, dstTable); + } + } +} + +void LazyBinding::LinkSuperClassAndInterfaces( + const MClass *klass, MObject *candidateClassLoader, bool recursive, bool forCold) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, klass, !forCold); + if (mplInfo != nullptr && (forCold || mplInfo->IsFlag(kIsLazy))) { + LinkSuperClassAndInterfaces(*mplInfo, klass, candidateClassLoader, recursive, forCold); + } +} + +MClass *LazyBinding::LinkDataUndefSuperClassAndInterfaces( + LinkerMFileInfo &mplInfo, MObject *candidateClassLoader, size_t index, AddrSlice dataUndefSlice, + MClass *superClassesItem, IndexSlice superTableSlice, uint32_t i, bool fromUndef) { + size_t muidSize = mplInfo.GetTblSize(kDataUndefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo.GetTblBegin(kDataUndefMuid), muidSize); + if (GetAddrBindingState(dataUndefSlice, index, true) != kBindingStateResolved) { + bool fromUpper = false; + superClassesItem = reinterpret_cast(ResolveClassSymbol( + mplInfo, dataUndefSlice, muidSlice, index, candidateClassLoader, fromUpper, false, true)); + if (superClassesItem != nullptr) { + superTableSlice[i].index = reinterpret_cast(superClassesItem); + LINKER_VLOG(lazybinding) << "fromUpper:"<< fromUpper << "resolved UNDEF lazily, " << fromUndef << + ", addr=" << dataUndefSlice[index].Address() << ", class=" << superClassesItem->GetName() << + " in " << mplInfo.name << maple::endl; + } else { + __MRT_ASSERT(0, "LazyBinding::LinkSuperClassAndInterfaces(), not resolved UNDEF lazily\n"); + } + } else { + LINKER_VLOG(lazybinding) << "already resolved UNDEF, just use, " << fromUndef << ", addr=" << + dataUndefSlice[index].Address() << ", class=" << + reinterpret_cast(dataUndefSlice[index].Address())->GetName() << " in " << mplInfo.name << + maple::endl; + superTableSlice[i].index = reinterpret_cast(dataUndefSlice[index].Address()); + + // To link lazy class even if it's resolved before. + MClass *resolvedClass = reinterpret_cast(dataUndefSlice[index].Address()); + if (resolvedClass->IsLazyBinding()) { + superClassesItem = resolvedClass; + } + } + return superClassesItem; +} + +MClass *LazyBinding::LinkDataDefSuperClassAndInterfaces( + LinkerMFileInfo &mplInfo, size_t index, AddrSlice dataDefSlice, + MClass *superClassesItem, IndexSlice superTableSlice, uint32_t i, bool fromUndef) { + size_t muidSize = mplInfo.GetTblSize(kDataDefMuid); + MuidSlice muidSlice = MuidSlice(mplInfo.GetTblBegin(kDataDefMuid), muidSize); + // We must don't check resolved state for DEF table. 
+ // Not check if (GetAddrBindingState(dataDefSlice, index, true) == kBindingStateResolved) + if (GetAddrBindingState(dataDefSlice, index, true) == kBindingStateResolved) { + LINKER_VLOG(lazybinding) << "already resolved DEF before, " << fromUndef << ", addr=" << + dataDefSlice[index].Address() << ", class=" << + reinterpret_cast(dataDefSlice[index].Address())->GetName() << ", in " << mplInfo.name << + maple::endl; + } + bool fromUpper = false; + superClassesItem = reinterpret_cast(ResolveClassSymbol( + mplInfo, dataDefSlice, muidSlice, index, nullptr, fromUpper, true, true)); + if (superClassesItem != nullptr) { + superTableSlice[i].index = reinterpret_cast(superClassesItem); + LINKER_VLOG(lazybinding) << "fromUpper:" << fromUpper <<"resolved DEF lazily, " << fromUndef << ", addr=" << + dataDefSlice[index].Address() << ", class=" << superClassesItem->GetName() << " in " << mplInfo.name << + maple::endl; + } else { + __MRT_ASSERT(0, "LazyBinding::LinkSuperClassAndInterfaces(), not resolved DEF lazily\n"); + } + return superClassesItem; +} + +void LazyBinding::LinkSuperClassAndInterfaces( + LinkerMFileInfo &mplInfo, const MClass *klass, MObject *candidateClassLoader, bool recursive, bool forCold) { + uint32_t superClassSize = klass->GetNumOfSuperClasses(); + IndexSlice superTableSlice = IndexSlice( + reinterpret_cast(klass->GetSuperClassArrayPtr()), superClassSize); + if (superTableSlice.Empty()) { + return; + } + + size_t dataUndefSize = mplInfo.GetTblSize(kDataUndef); + AddrSlice dataUndefSlice(mplInfo.GetTblBegin(kDataUndef), dataUndefSize); + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDef), dataDefSize); + for (uint32_t i = 0; i < superClassSize; ++i) { + LinkerRef ref(superTableSlice[i].index); + MClass *superClassesItem = nullptr; + LINKER_VLOG(lazybinding) << "super[" << i << "]=" << superTableSlice[i].index << ", defSize=" << dataDefSize << + ", undefSize=" << dataUndefSize << ", for " << klass->GetName() << ", in " << mplInfo.name << maple::endl; + if (ref.IsIndex()) { // Def or Undef Index + bool fromUndef = ref.IsFromUndef(); + size_t index = ref.GetIndex(); + if (fromUndef && index < dataUndefSize && !dataUndefSlice.Empty()) { + superClassesItem = LinkDataUndefSuperClassAndInterfaces(mplInfo, candidateClassLoader, index, dataUndefSlice, + superClassesItem, superTableSlice, i, fromUndef); + } else if (!fromUndef && index < dataDefSize && !dataDefSlice.Empty()) { + superClassesItem = LinkDataDefSuperClassAndInterfaces(mplInfo, index, dataDefSlice, superClassesItem, + superTableSlice, i, fromUndef); + } else { + __MRT_ASSERT(0, "LazyBinding::LinkSuperClassAndInterfaces(), must be DEF or UNDEF\n"); + } + } else { // Address. + // Already resolved. + if (!ref.IsEmpty() && forCold) { + superClassesItem = ref.GetDataRef(); + LINKER_VLOG(lazybinding) << "already resolved, super[" << i << "]=" << superClassesItem << ", defSize=" << + dataDefSize << ", undefSize=" << dataUndefSize << ", for " << superClassesItem->GetName() << + ", in " << mplInfo.name << maple::endl; + } + // Allow re-link. + } + + if (recursive) { + if (superClassesItem == nullptr) { + LINKER_VLOG(lazybinding) << "ignore one item of super in " << klass->GetName() << maple::endl; + // We allow super class or interfaces as null. 
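// Each super-class slot handled here is consumed in one of three ways; a compact
// classifier sketch (SlotKind and ClassifySuperSlot are illustrative, the Is* accessors
// are LinkerRef's own; the real code asserts rather than skipping a bad index):
enum class SlotKind { kUndefIndex, kDefIndex, kResolvedAddress, kEmpty };
template <typename Ref>
static SlotKind ClassifySuperSlot(const Ref &ref) {
  if (ref.IsIndex()) {                                  // still an encoded table index
    return ref.IsFromUndef() ? SlotKind::kUndefIndex    // resolve via the data undef table
                             : SlotKind::kDefIndex;     // resolve via the data def table
  }
  return ref.IsEmpty() ? SlotKind::kEmpty               // nothing to link for this slot
                       : SlotKind::kResolvedAddress;    // already a class pointer
}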
+ continue; + } + if (forCold) { + LinkSuperClassAndInterfaces(superClassesItem, candidateClassLoader, true, true); + } else { + LinkClassInternal(superClassesItem, candidateClassLoader); + } + } + } +} + +void LazyBinding::LinkClass(MClass *klass) { + LINKER_DLOG(lazybinding) << "klass=" << klass << ", name=" << klass->GetName() << ", lazy=" << + klass->IsLazyBinding() << ", cold=" << klass->IsColdClass() << maple::endl; + // If the array's component is not ready, to link the component class for array class. + if (klass->IsArrayClass()) { + MClass *componentClass = klass->GetComponentClass(); + if (componentClass == nullptr) { + LINKER_LOG(FATAL) << "component class is null, klass=" << klass << ", name=" << klass->GetName() << maple::endl; + return; + } + if (componentClass->IsLazyBinding()) { + LinkClass(componentClass); + } + return; + } + + // We use the field of shadow to store def table index. + if (klass->IsPrimitiveClass() || !klass->IsLazyBinding()) { + return; + } + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, klass, true); + if (mplInfo != nullptr) { + // To get the index in the pTable + MClass *indexClass = klass->GetClass(); + uint32_t defIndex = static_cast(reinterpret_cast(indexClass)); + if (defIndex == 0) { // It's not lazy, we use 0 as flag of not lazy. + LINKER_LOG(FATAL) << "class has no def index? klass=" << klass << ", name=" << klass->GetName() << maple::endl; + return; + } else { + if (indexClass == WellKnown::GetMClassClass()) { + return; + } + --defIndex; + } + size_t dataDefSize = mplInfo->GetTblSize(kDataDef); + AddrSlice dataTable(mplInfo->GetTblBegin(kDataDef), dataDefSize); + if (GetAddrBindingState(dataTable, defIndex, true) == kBindingStateResolved) { + return; + } + if (!MRT_IsClassInitialized(*klass)) { + LoaderAPI::Instance().SetClassCL(*klass, mplInfo->classLoader); + LinkClassInternal(*mplInfo, klass, reinterpret_cast(mplInfo->classLoader), true); + } else { + LinkClassInternal(*mplInfo, klass, reinterpret_cast(MRT_GetClassLoader(*klass)), true); + } + } else { + LINKER_LOG(FATAL) << "lazy class has no MFileInfo? klass=" << klass << ", name=" << klass->GetName() << maple::endl; + } +} + +// Only used in LinkSuperClassAndInterfaces(). +void LazyBinding::LinkClassInternal(MClass *klass, MObject *candidateClassLoader) { + LinkerMFileInfo *mplInfo = pInvoker->GetLinkerMFileInfo(kFromMeta, klass, true); + if (mplInfo != nullptr) { + LinkClassInternal(*mplInfo, klass, candidateClassLoader, true); + } +} + +void LazyBinding::LinkClassInternal( + LinkerMFileInfo &mplInfo, MClass *klass, MObject *candidateClassLoader, bool first) { + LINKER_VLOG(lazybinding) << "klass=" << klass << ", name=" << klass->GetName() << ", lazy=" << + klass->IsLazyBinding() << ", cold=" << klass->IsColdClass() << ", decouple=" << + klass->IsDecouple() << ", in " << mplInfo.name << maple::endl; + if (!klass->IsLazyBinding() || *klass == MRT_GetClassObject()) { + return; + } + if (maple::ObjectBase::MonitorEnter(reinterpret_cast(klass)) == JNI_ERR) { + LINKER_LOG(ERROR) << ", maple::ObjectBase::MonitorEnter() failed" << maple::endl; + } + + // Check if we link it before, or no need linking. + // (Notice:Lazy binding class must also be cold class, + // but lazy-binding is optional, lazy binding class maybe not cold.) + // To prevent deadlock, we will checking klass status after got MonitorEnter lock. 
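// The locking discipline described in the comment above, as a minimal sketch
// (alreadyLinked, lock, unlock and doLink are hypothetical callables): a cheap check
// without the monitor, MonitorEnter on the class object, a second check under the
// lock, and only then the actual linking, so a class is linked at most once even when
// several threads hit it concurrently.
template <typename Pred, typename Lock, typename Unlock, typename Fn>
static void LinkOnce(Pred alreadyLinked, Lock lock, Unlock unlock, Fn doLink) {
  if (alreadyLinked()) {
    return;                    // pre-check before taking the monitor
  }
  lock();                      // maple::ObjectBase::MonitorEnter on the class object
  if (!alreadyLinked()) {      // re-check after acquiring the lock, as the code below does
    doLink();
  }
  unlock();                    // maple::ObjectBase::MonitorExit
}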
+ if (!klass->IsLazyBinding() || *klass == MRT_GetClassObject()) { + if (maple::ObjectBase::MonitorExit(reinterpret_cast(klass)) == JNI_ERR) { + LINKER_LOG(ERROR) << "maple::ObjectBase::MonitorExit() failed" << maple::endl; + } + return; + } + + LinkSuperClassAndInterfaces(mplInfo, klass, candidateClassLoader, true); + + // Set the def index all the time. + MClass *indexClass = klass->GetClass(); + MetaRef index = static_cast(reinterpret_cast(indexClass)); + if (index == 0) { // It's not lazy, we use 0 as flag of not lazy. + LINKER_LOG(FATAL) << "class is lazy, but no def index? klass=" << klass << ", name=" << + klass->GetName() << maple::endl; + } else { + if (indexClass == WellKnown::GetMClassClass()) { + if (maple::ObjectBase::MonitorExit(reinterpret_cast(klass)) == JNI_ERR) { + LINKER_LOG(ERROR) << "maple::ObjectBase::MonitorExit() failed" << maple::endl; + } + return; + } + --index; + } + + LinkStaticSymbol(mplInfo, klass); + // To handle decoupling routine. + LinkMethods(mplInfo, klass, first, index); + LinkFields(mplInfo, klass); + + // The same as MRTSetMetadataShadow(reinterpret_cast(klass), WellKnown::GetMClassClass()) + // If first loaded, we clear the def index in 'shadow' field which multiplex used for lazy binding. + // Set shadow to CLASS. We multiplex use the 'shadow' before class's being intialized, + MRTSetMetadataShadow(reinterpret_cast(klass), WellKnown::GetMClassClass()); + if (SetClassInDefAddrTable(index, klass) == nullptr) { + LINKER_LOG(ERROR) << "SetClassInDefAddrTable failed" << maple::endl; + } + + // Set cold class as hot. + klass->SetHotClass(); + // Clear lazy binding flag. + klass->ReSetFlag(0xDFFF); // 0xDFFF is LazyBinding flag + klass->SetFlag(modifier::kClassLazyBoundClass); + + if (maple::ObjectBase::MonitorExit(reinterpret_cast(klass)) == JNI_ERR) { + LINKER_LOG(ERROR) << "maple::ObjectBase::MonitorExit() failed" << maple::endl; + } +} + + +inline std::string LazyBinding::ResolveClassMethod(MClass *klass) { + return pMethodBuilder->BuildMethod(klass); +} + +#if defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) +void LazyBinding::PreLinkLazyMethod(LinkerMFileInfo &mplInfo) { + if (mplInfo.IsFlag(kIsLazy)) { + std::string cachingStr; + if (!LoadLazyCache(&mplInfo, cachingStr)) { + return; + } + mplInfo.methodCachingStr = cachingStr; + + std::vector cachingIndexes; + std::istringstream issCaching(cachingStr); + std::string tmp; + while (getline(issCaching, tmp, ';')) { + cachingIndexes.push_back(tmp); + } + LINKER_VLOG(lazybinding) << "klass num=" << cachingIndexes.size() << ", in " << mplInfo.name << maple::endl; + + LinkerAddrTableItem *pDefRealTable = mplInfo.GetTblBegin(kDataDefOrig); + for (size_t i = 0; i < cachingIndexes.size(); ++i) { + std::string indexStr = cachingIndexes[i]; + std::string::size_type pos = indexStr.find(':'); + if (pos == std::string::npos) { // Error + continue; + } + constexpr int decimalBase = 10; + auto index = std::strtol(indexStr.c_str(), nullptr, decimalBase); + MClass *klass = reinterpret_cast(GetDefTableAddress(mplInfo, *pDefRealTable, index, false)); + LinkStaticSymbol(mplInfo, klass); + LinkSuperClassAndInterfaces(mplInfo, klass, nullptr, true, true); + std::string cachingIndex = indexStr.substr(pos + 1); + pMethodBuilder->BuildMethodByCachingIndex(klass, cachingIndex); + + // To handle decoupling routine. 
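// The caching string consumed at the top of this loop is a ';'-separated list of
// "defIndex:cachingIndex" entries; a minimal parsing sketch (ForEachCachedClass and the
// visit callable are illustrative, the delimiters and strtol base match the code above):
template <typename Visit>
static void ForEachCachedClass(const std::string &cachingStr, Visit visit) {
  std::istringstream iss(cachingStr);
  std::string entry;
  while (std::getline(iss, entry, ';')) {
    std::string::size_type pos = entry.find(':');
    if (pos == std::string::npos) {
      continue;                                         // malformed entry, skipped as above
    }
    constexpr int decimalBase = 10;
    long defIndex = std::strtol(entry.c_str(), nullptr, decimalBase);
    visit(defIndex, entry.substr(pos + 1));             // def-table index, per-class method cache
  }
}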
+ LinkFields(mplInfo, klass); + + // The same as MRTSetMetadataShadow(reinterpret_cast(klass), WellKnown::GetMClassClass()) + // If first loaded, we clear the def index in 'shadow' field which multiplex used for lazy binding. + // Set shadow to CLASS. + MRTSetMetadataShadow(reinterpret_cast(klass), WellKnown::GetMClassClass()); + (void)SetClassInDefAddrTable(index, klass); + + // Set Classloader for the class. + LoaderAPI::Instance().SetClassCL(*klass, mplInfo.classLoader); + // Set cold class as hot. + klass->SetHotClass(); + // Clear lazy binding flag. + klass->ReSetFlag(0xDFFF); // 0xDFFF is LazyBinding flag + klass->SetFlag(modifier::kClassLazyBoundClass); + + ++(mplInfo.methodCachingNum); + } + } +} +#endif // defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + +void LazyBinding::LinkMethods(LinkerMFileInfo &mplInfo, MClass *klass, bool first, MetaRef index) { + LINKER_VLOG(lazybinding) << "(" << &mplInfo << ", " << klass->GetName() << "), first=" << first << ", index=" << + index << maple::endl; +#ifdef LINKER_DECOUPLE + if (mplInfo.IsFlag(kIsLazy) && mplInfo.GetDecoupleLevel() != 0) { + // Inteface has no vtable or itable. + if (klass->IsInterface() || !first) { + return; + } + if (klass->IsDecouple()) { + if (reinterpret_cast(klass)->vTable.refVal != 0) { + LINKER_LOG(ERROR) << "(" << klass->GetName() << "), already set, index=" << index << maple::endl; + return; + } +#if defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + if (mplInfo.methodCachingNum < 2000) { // We don't need save all classes, here we use 2000 as upper threshold. + std::lock_guard autoLock(mplInfo->methodCachingLock); + std::string cachingStr = ResolveClassMethod(klass); + if (!cachingStr.empty()) { + mplInfo.methodCachingStr += std::to_string(index) + ':'; + mplInfo.methodCachingStr += cachingStr; + mplInfo.methodCachingStr += ';'; + ++(mplInfo.methodCachingNum); + } + + if (mplInfo.methodCachingNum % 500 == 0) { // We use 500 as upper threshold, to reduce the times of saving. 
+ SaveLazyCache(mplInfo, mplInfo.methodCachingStr); + } + } else { + (void)ResolveClassMethod(klass); + } +#else // defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + (void)ResolveClassMethod(klass); +#endif // defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) + } + } else if (klass->IsColdClass()) { +#else + if (klass->IsColdClass()) { +#endif + LINKER_VLOG(lazybinding) << "for non-decouple class, name=" << klass->GetName() << ", index=" << index << + ", lazy=" << klass->IsLazyBinding() << ", cold=" << klass->IsColdClass() << ", decouple=" << + klass->IsDecouple() << ", in " << mplInfo.name << maple::endl; + pInvoker->ResolveVTableSymbolByClass(mplInfo, klass, true); + pInvoker->ResolveVTableSymbolByClass(mplInfo, klass, false); + } +} + +inline void LazyBinding::LinkFields(const LinkerMFileInfo &mplInfo, MClass *klass) { + LINKER_VLOG(lazybinding) << "(" << &mplInfo << ", " << klass->GetName() << ")" << maple::endl; +#ifdef LINKER_DECOUPLE + if (mplInfo.IsFlag(kIsLazy) && mplInfo.GetDecoupleLevel() != 0) { + Decouple *decouple = pInvoker->Get(); + MplFieldDecouple *fieldDecouple = &decouple->GetFieldResolver(); + MplGctibAnalysis *gctibResolver = &decouple->GetGctibResolver(); + fieldDecouple->ResolveFieldOffsetAndObjSizeLazily(*(reinterpret_cast(klass))); + (void)gctibResolver->ReGenGctib4Class(reinterpret_cast(klass)); + } +#endif +} + +#if defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE) +inline bool LazyBinding::SaveLazyCacheInternal(std::string &path, int &fd, std::vector &bufVector) { + char *buf = nullptr; + MUID cacheValidity; + uint32_t eof = static_cast(EOF); + + // Ready to write all the data in buffer. + buf = reinterpret_cast(bufVector.data()); + std::streamsize byteCount = bufVector.size(); + // 1. Write for validity. + GenerateMUID(buf, byteCount, cacheValidity); + if (write(fd, reinterpret_cast(&cacheValidity.data), sizeof(cacheValidity.data)) < 0) { + LINKER_LOG(ERROR) << "..validity, " << path << ", " << errno << maple::endl; + return false; + } + // 2. Write for the content length. + uint32_t contentSize = static_cast(byteCount); + if (write(fd, reinterpret_cast(&contentSize), sizeof(contentSize)) < 0) { + LINKER_LOG(ERROR) << "..content length, " << path << ", " << errno << maple::endl; + return false; + } + // 3. Write for the content. + if (write(fd, buf, byteCount) < 0) { + LINKER_LOG(ERROR) << "..content, " << path << ", " << errno << maple::endl; + return false; + } + // 4. Write EOF. + if (write(fd, reinterpret_cast(&eof), sizeof(eof)) < 0) { + LINKER_LOG(ERROR) << "write EOF error, " << path << ", " << errno << maple::endl; + return false; + } + return true; +} + +bool LazyBinding::SaveLazyCache(LinkerMFileInfo &mplInfo, const std::string &cachingIndex) { + bool res = true; + std::vector bufVector; + char *str = nullptr; + std::string path; + LinkerCacheType cacheType = LinkerCacheType::kLinkerCacheLazy; + + if (!pInvoker->GetCachePath(mplInfo, path, cacheType)) { + LINKER_LOG(ERROR) << "fail to prepare " << path << ", " << errno << maple::endl; + return false; + } + int fd = open(path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644); // 0644 means file's permissions + if (fd == -1) { + LINKER_LOG(ERROR) << "fail to open " << path << ", " << errno << maple::endl; + res = false; + goto END; + } + + // Prepare all the data in buffer firstly. + // 1. Insert maximum version into content. + int32_t maxVersion = GetMaxVersion(); + str = reinterpret_cast(&maxVersion); + bufVector.insert(bufVector.end(), str, str + sizeof(maxVersion)); + // 2. 
Insert minimum version into content. + int32_t minVersion = GetMinVersion(); + str = reinterpret_cast(&minVersion); + bufVector.insert(bufVector.end(), str, str + sizeof(minVersion)); + // 3. Insert the hash from .so into content. + MUID soValidity = pInvoker->GetValidityCode(mplInfo); + str = reinterpret_cast(&soValidity); + bufVector.insert(bufVector.end(), str, str + sizeof(soValidity)); + + // 4. Insert the string length firstly into content. + uint32_t len = cachingIndex.size(); + str = reinterpret_cast(&len); + bufVector.insert(bufVector.end(), str, str + sizeof(len)); + + // 5. Insert the caching index info. into content. + bufVector.insert(bufVector.end(), cachingIndex.begin(), cachingIndex.end()); + + if (!SaveLazyCacheInternal(path, fd, bufVector)) { + res = false; + } + +END: + close(fd); + return res; +} + +inline bool LazyBinding::LoadLazyCacheInternal( + LinkerMFileInfo &mplInfo, std::string &path, std::string &cachingIndex, + char *buf, std::streamsize &index, std::streamsize &byteCount) { + MUID cacheValidity; + + // 1. Read the validity. + MUID lastCacheValidity = *(reinterpret_cast(&buf[index])); + index += sizeof(lastCacheValidity); + // 2. Read the length of content. + uint32_t contentSize = *(reinterpret_cast(&buf[index])); + index += sizeof(contentSize); + if (contentSize != static_cast(byteCount - index)) { + LINKER_LOG(ERROR) << "wrong cache length, " << path << ", " << contentSize << " vs. " << + static_cast(byteCount - index) << maple::endl; + return false; + } + + // Generate the digest for validity, excluding the length of content. + GenerateMUID(&buf[index], byteCount - index, cacheValidity); + if (lastCacheValidity != cacheValidity) { + LINKER_LOG(ERROR) << "cache validity checking failed, " << path << maple::endl; + return false; + } + + // 3. Read maximum version. + int32_t maxVersion = *(reinterpret_cast(&buf[index])); + if (maxVersion != GetMaxVersion()) { + LINKER_LOG(ERROR) << "wrong max version, " << path << ", " << maxVersion << maple::endl; + return false; + } + index += sizeof(maxVersion); + + // 4. Read minimum version. + int32_t minVersion = *(reinterpret_cast(&buf[index])); + if (minVersion != GetMinVersion()) { + LINKER_LOG(ERROR) << "wrong min version, " << path << ", " << minVersion << maple::endl; + return false; + } + index += sizeof(minVersion); + + // 5. Read the hash from cache file, comparing with .so. + MUID lastSoValidity = *(reinterpret_cast(&buf[index])); + MUID soValidity = pInvoker->GetValidityCode(mplInfo); + if (lastSoValidity != soValidity) { + LINKER_LOG(ERROR) << "wrong validity, " << soValidity.ToStr() << " vs. " << lastSoValidity.ToStr() << " in " << + mplInfo.name << maple::endl; + return false; + } + index += sizeof(lastSoValidity); + + // 6. Read the content length firstly. + uint32_t len = *(reinterpret_cast(&buf[index])); + index += sizeof(len); + + // 7. Read the content. 
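+  //    The content is the caching index string previously written by SaveLazyCache; it is
+  //    handed back to the caller through 'cachingIndex'.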
+  cachingIndex.resize(len + 1);
+  cachingIndex.assign(&buf[index], len);
+  index += len;
+  return true;
+}
+
+bool LazyBinding::LoadLazyCache(LinkerMFileInfo &mplInfo, std::string &cachingIndex) {
+  bool res = false;
+  std::ifstream in;
+  std::streamsize index = 0;
+  std::vector bufVector;
+  char *buf = nullptr;
+  std::string path;
+
+  if (!pInvoker->GetCachePath(mplInfo, path, LinkerCacheType::kLinkerCacheLazy)) {
+    LINKER_LOG(ERROR) << "fail to prepare " << path << ", " << errno << maple::endl;
+    return res;
+  }
+  in.open(path, std::ios::binary);
+  bool failIn = (!in);
+  if (failIn) {
+    LINKER_LOG(ERROR) << "fail to open " << path << ", " << errno << maple::endl;
+    goto END;
+  }
+
+  // Read all the data into the buffer first.
+  in.seekg(0, std::ios::end);
+  std::streamsize byteCount = in.tellg();
+  in.seekg(0, std::ios::beg);
+  bufVector.resize(byteCount);
+  buf = reinterpret_cast(bufVector.data());
+  if (!in.read(buf, byteCount)) {
+    LINKER_LOG(ERROR) << "read data error, " << path << ", " << errno << maple::endl;
+    goto END;
+  }
+  // 0. Read EOF.
+  uint32_t eof = *(reinterpret_cast(&buf[byteCount - sizeof(eof)]));
+  if (eof != static_cast(EOF)) {
+    LINKER_LOG(ERROR) << "wrong EOF, " << path << ", eof=" << std::hex << eof << maple::endl;
+    goto END;
+  }
+  byteCount -= sizeof(eof);
+  if (!LoadLazyCacheInternal(mplInfo, path, cachingIndex, buf, index, byteCount)) {
+    goto END;
+  }
+  if (index == byteCount) {
+    res = true;
+  }
+
+END:
+  if (in.is_open()) {
+    in.close();
+  }
+  return res;
+}
+#endif // defined(LINKER_RT_CACHE) && defined(LINKER_RT_LAZY_CACHE)
+
+void *LazyBinding::GetClassMetadata(LinkerMFileInfo &mplInfo, size_t classIndex) {
+  LinkerRef ref(classIndex);
+  void *addr = nullptr;
+  if (ref.IsIndex()) { // Index
+    size_t index = ref.GetIndex();
+    bool fromUndef = ref.IsFromUndef();
+    if (fromUndef && index < mplInfo.GetTblSize(kDataUndef)) {
+      LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kDataUndefMuid);
+      size_t dataUndefSize = mplInfo.GetTblSize(kDataUndef);
+      AddrSlice dataUndefTable(mplInfo.GetTblBegin(kDataUndef), dataUndefSize);
+      if (mplInfo.IsFlag(kIsLazy) && GetAddrBindingState(dataUndefTable, index) != kBindingStateResolved) {
+        if (HandleSymbolForDecoupling(mplInfo, dataUndefTable, index)) {
+          addr = dataUndefTable[index].Address();
+          LINKER_VLOG(lazybinding) << "(UNDEF), resolved lazily, index=" << index << ", name=" <<
+              reinterpret_cast(addr)->GetName() << " for " << mplInfo.name << maple::endl;
+        } else {
+          addr = dataUndefTable[index].Address();
+          LINKER_LOG(ERROR) << "(UNDEF), resolve class error, index=" << index << ", muid=" <<
+              pMuidTable[index].muid.ToStr() << ", addr=" << addr << ", resolved=" <<
+              (GetAddrBindingState(dataUndefTable, index) == kBindingStateResolved) <<
+              " for " << mplInfo.name << maple::endl;
+        }
+      } else {
+        addr = dataUndefTable[index].Address();
+      }
+    } else if (!fromUndef && index < mplInfo.GetTblSize(kDataDef)) {
+      size_t dataDefSize = mplInfo.GetTblSize(kDataDef);
+      AddrSlice dataDefTable(mplInfo.GetTblBegin(kDataDef), dataDefSize);
+      if (mplInfo.IsFlag(kIsLazy) && GetAddrBindingState(dataDefTable, index) != kBindingStateResolved) {
+        if (HandleSymbolForDecoupling(mplInfo, dataDefTable, index)) {
+          addr = dataDefTable[index].Address();
+          LINKER_VLOG(lazybinding) << "(DEF), resolved lazily, index=" << index << ", name=" <<
+              reinterpret_cast(addr)->GetName() << " for " << mplInfo.name << maple::endl;
+        } else {
+          addr = GetDefTableAddress(mplInfo, dataDefTable, static_cast(index), false);
+          LinkerMuidTableItem 
*dataDefMuidTable = mplInfo.GetTblBegin(kDataDefMuid); + LINKER_LOG(ERROR) << "(DEF), resolve class error, index=" << index << ", muid=" << + dataDefMuidTable[index].muid.ToStr() << ", addr=" << addr << ", resolved=" << + (GetAddrBindingState(dataDefTable, index) == kBindingStateResolved) << + " for " << mplInfo.name << maple::endl; + } + } else { + addr = dataDefTable[index].Address(); + } + } + } + return addr; +} + +void *LazyBinding::LookUpDataSymbolAddressLazily( + const MObject *classLoader, const MUID &muid, LinkerMFileInfo **outLinkerMFileInfo, bool ignoreClassLoader) { + void *res = nullptr; + if (!ignoreClassLoader) { + auto handle = [this, &muid, &res](LinkerMFileInfo &mplInfo)->bool { + size_t tmp = 0; + LinkerVoidType addr = this->pInvoker->LookUpDataSymbolAddress(mplInfo, muid, tmp); + if (addr != 0) { + res = reinterpret_cast(addr); + return true; + } + return false; + }; + (void)pInvoker->mplInfoListCLMap.FindIf(classLoader, handle); + } else { + auto handle = [this, &muid, outLinkerMFileInfo, &res](LinkerMFileInfo &mplInfo)->bool { + size_t tmp = 0; + LinkerVoidType addr = this->pInvoker->LookUpDataSymbolAddress(mplInfo, muid, tmp); + if (addr != 0) { + if (outLinkerMFileInfo != nullptr) { + *outLinkerMFileInfo = &mplInfo; + } + res = reinterpret_cast(addr); + return true; + } + return false; + }; + (void)pInvoker->mplInfoList.FindIf(handle); + } + return res; +} + +void *LazyBinding::LookUpMethodSymbolAddressLazily( + const MObject *classLoader, const MUID &muid, bool ignoreClassLoader) { + void *res = nullptr; + auto handle = [this, &muid, &res](LinkerMFileInfo &mplInfo)->bool { + size_t tmp = 0; + LinkerVoidType addr = this->pInvoker->LookUpMethodSymbolAddress(mplInfo, muid, tmp); + if (addr != 0) { + res = reinterpret_cast(addr); + return true; + } + return false; + }; + if (!ignoreClassLoader) { + (void)pInvoker->mplInfoListCLMap.FindIf(classLoader, handle); + } else { + (void)pInvoker->mplInfoList.FindIf(handle); + } + return res; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_method_builder.cpp b/src/mrt/compiler-rt/src/linker/linker_method_builder.cpp new file mode 100644 index 0000000000..2a5e510ec0 --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_method_builder.cpp @@ -0,0 +1,791 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "linker/linker_method_builder.h" + +#include + +#include "linker/linker_model.h" +#include "itab_util.h" +#include "cinterface.h" +#include "mrt_object.h" +#include "mclass_inline.h" +#include "mrt_reflection_class.h" +#include "modifier.h" +#include "mrt_reflection_method.h" +#include "mrt_class_api.h" + +namespace maplert { +using namespace linkerutils; +FeatureName MethodBuilder::featureName = kFMethodBuilder; +inline unsigned int MethodBuilder::GetHashIndex(const char *name) { + unsigned int hashcode = maple::DJBHash(name); + return (hashcode % maple::kHashSize); +} + +inline bool MethodBuilder::IsValidMethod(const MethodMetaBase &method) const { + uint32_t methodFlag = method.GetFlag(); + uint32_t methodModifier = method.GetMod(); + return !modifier::IsNotvirtualMethod(methodFlag) && + !modifier::IsStatic(methodModifier) && !modifier::IsPrivate(methodModifier); +} + +// Check the method and sort them with vtab index, +// then check if it's compact. +inline bool MethodBuilder::CheckMethodMetaNoSort( + const MClass *klass, std::vector> &methods) { + uint32_t methodNum = klass->GetNumOfMethods(); + MethodMeta *methodMetas = klass->GetRawMethodMetas(); + if (UNLIKELY(methodNum == 0 || methodMetas == nullptr)) { + return false; + } + bool isCompactMeta = reinterpret_cast(klass)->IsCompactMetaMethods(); + + // We should create the method metadata list sorted by its index in default, + // then we can avoid sorting them once here. + bool isInterface = klass->IsInterface(); + for (uint32_t i = 0; i < methodNum; ++i) { + MethodMetaBase *method = nullptr; + if (!isCompactMeta) { + method = reinterpret_cast(&methodMetas[i]); + } else { + MethodMetaCompact *compact = MethodMetaCompact::GetMethodMetaCompact(*klass, i); + method = reinterpret_cast(compact); + } + if (!isInterface && !IsValidMethod(*method)) { + continue; + } + methods.push_back(std::make_pair(method, i)); // Push back the method and its position. + } + return true; +} + +inline bool MethodBuilder::CheckMethodMeta( + const MClass *klass, std::vector> &methods) { + bool res = CheckMethodMetaNoSort(klass, methods); + std::sort(methods.begin(), methods.end(), [](auto &lhs, auto &rhs) { + return lhs.first->GetVtabIndex() < rhs.first->GetVtabIndex(); + }); + return res; +} + +inline void MethodBuilder::BuildAdjMethodList( + std::vector &virtualMethods, MplLazyBindingVTableMapT &adjMethods) { + uint16_t size = virtualMethods.size(); + if (UNLIKELY(size == 0)) { + return; + } + adjMethods.resize(size); + adjMethods.assign(size, AdjItem()); + for (uint16_t i = 0; i < size; ++i) { // Travel across the methods one by one. + uint16_t hash = virtualMethods[i].hash; + uint16_t pos = hash % size; + adjMethods[i].next = adjMethods[pos].first; + adjMethods[pos].first = i; + adjMethods[i].methodMeta = virtualMethods[i].methodMeta; + } +} + +void MethodBuilder::CollectClassMethods(const MClass *klass, const bool &isDecouple, const std::vector &depth, + std::vector &virtualMethods, MplLazyBindingVTableMapT &adjMethods) { + bool isInterface = klass->IsInterface(); + if (isDecouple || isInterface) { + std::vector> methods; + if (!CheckMethodMeta(klass, methods)) { + return; + } + CollectClassMethodsSlow(klass, depth, virtualMethods, adjMethods, methods); + } else { + std::vector> methods; + if (!CheckMethodMetaNoSort(klass, methods)) { + return; + } + CollectClassMethodsFast(klass, depth, virtualMethods, methods); + } + + // If we want to save the mapping info. in memory, so we shouldn't check klass != targetClass. 
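+  // BuildAdjMethodList indexes 'virtualMethods' by hash: adjMethods[hash % size].first holds the
+  // bucket head and adjMethods[k].next chains colliding entries, so GetVTableItemIndex can walk
+  // one short chain instead of scanning the whole method vector.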
+ BuildAdjMethodList(virtualMethods, adjMethods); +} + +void MethodBuilder::OverrideVTabMethod(std::vector &virtualMethods, uint16_t vtabIndex, + const MethodMetaBase &method, const std::vector &depth, uint16_t index, bool isInterface) { + virtualMethods[vtabIndex].SetMethod(method); +#ifdef LINKER_RT_LAZY_CACHE + virtualMethods[vtabIndex].depth = (depth.size() > 0) ? depth : virtualMethods[vtabIndex].depth; + virtualMethods[vtabIndex].index = index; +#endif // LINKER_RT_LAZY_CACHE + virtualMethods[vtabIndex].isInterface = isInterface || virtualMethods[vtabIndex].isInterface; + LINKER_DLOG(lazybinding) << "method=" << GetMethodFullName(method) << ", vtabIndex=" << vtabIndex << + ", depth size=" << depth.size() << ", index" << index << + ", isInterface=" << virtualMethods[vtabIndex].isInterface << maple::endl; +} + +void MethodBuilder::AppendVTabMethod(std::vector &virtualMethods, int16_t vtabIndex, + const MethodMetaBase &method, const std::vector &depth, uint16_t index, bool isInterface, + uint16_t hash) { + MethodItem methodItem; + methodItem.SetMethod(method); +#ifdef LINKER_RT_LAZY_CACHE + methodItem.depth = (depth.size() > 0) ? depth : methodItem.depth; + methodItem.index = index; +#endif // LINKER_RT_LAZY_CACHE + methodItem.isInterface = isInterface; + methodItem.hash = hash; + if (vtabIndex < 0) { + virtualMethods.push_back(std::move(methodItem)); + } else { + virtualMethods[vtabIndex] = std::move(methodItem); + } + LINKER_DLOG(lazybinding) << "method=" << GetMethodFullName(method) << ", depth size=" << depth.size() << + ", index=" << index << ", isInterface=" << isInterface << ", hash=" << hash << maple::endl; +} + +void MethodBuilder::CollectClassMethodsSlow(const MClass *klass, const std::vector &depth, + std::vector &virtualMethods, const MplLazyBindingVTableMapT &adjMethods, + const std::vector> &methods) { + bool isInterface = reinterpret_cast(klass)->IsInterface(); + for (size_t i = 0; i < methods.size(); ++i) { // Travel across the methods one by one. + auto methodPair = methods[i]; + MethodMetaBase *method = methodPair.first; + uint16_t index = methodPair.second; + uint16_t hash = GetMethodMetaHash(*method); + uint16_t k = kAdjacencyInvalidValue; + size_t size = adjMethods.size(); + if (LIKELY(size > 0)) { + for (k = adjMethods[hash % size].first; k != kAdjacencyInvalidValue; k = adjMethods[k].next) { + if (EqualMethod(*(virtualMethods[k].GetMethod()), *method)) { // Found. + break; + } + } + } + LINKER_VLOG(lazybinding) << "method=" << GetMethodFullName(*method) << ", vtabIndex=" << method->GetVtabIndex() << + "->" << k << ", depth/index=" << depth.size() << "/" << index << ", interface=" << isInterface << + ", isAbstract=" << modifier::IsAbstract(method->GetMod()) << ", ofClass=" << klass->GetName() << ", hash=" << + hash << ", flag=" << method->GetFlag() << ", modifier=" << method->GetMod() << maple::endl; + if (k != kAdjacencyInvalidValue) { // Found, 'k' is the position. + // Class, and not abstract. Should we check CanAccess for class type. + // or Interface, and old method is also interface. 
+ bool isAbstract = modifier::IsAbstract(method->GetMod()); + bool canAccess = CanAccess(*(virtualMethods[k].GetMethod()), *method); + MClass *virtualClass = virtualMethods[k].GetMethod()->GetDeclaringClass(); + if (virtualClass == nullptr) { + LINKER_LOG(FATAL) << "virtualClass is nullptr" << maple::endl; + } + if (!isAbstract && ((!isInterface && canAccess) || (isInterface && virtualClass->IsInterface()))) { + OverrideVTabMethod(virtualMethods, k, *method, depth, index, isInterface); + } else if (!isAbstract && !isInterface && !canAccess) { + // -1 means invalid vtab index, push_back directly. + AppendVTabMethod(virtualMethods, -1, *method, depth, index, isInterface, hash); + } else { + virtualMethods[k].isInterface = isInterface || virtualMethods[k].isInterface; + } + } else { // Not found. + // -1 means invalid vtab index, push_back directly. + AppendVTabMethod(virtualMethods, -1, *method, depth, index, isInterface, hash); + } + } +} + +// Only for no decoupled classes. +void MethodBuilder::CollectClassMethodsFast(const MClass *klass, const std::vector &depth, + std::vector &virtualMethods, const std::vector> &methods) { + bool isInterface = klass->IsInterface(); + size_t methodNum = methods.size(); + size_t size = virtualMethods.size(); + virtualMethods.resize(size + methodNum); + int16_t ignoredNum = 0; + for (size_t i = 0; i < methodNum; ++i) { // Travel across the methods one by one. + auto methodPair = methods[i]; + MethodMetaBase *method = methodPair.first; + uint16_t index = methodPair.second; + int16_t vtabIndex = method->GetVtabIndex(); + uint16_t hash = GetMethodMetaHash(*method); + if (vtabIndex >= 0 && !isInterface) { // Class. + if (UNLIKELY(static_cast(vtabIndex) < size && virtualMethods[vtabIndex].GetMethod() == nullptr)) { + LINKER_LOG(ERROR) << "null item" << ", method=" << GetMethodFullName(*method) << ", depth/index=" << + depth.size() << "/" << index << ", hash=" << hash << "/" << virtualMethods[vtabIndex].hash << + ", vtabIndex=" << vtabIndex << "/" << size << ", isAbstract=" << modifier::IsAbstract(method->GetMod()) << + ", isInterface=" << isInterface << ", ofClass=" << klass->GetName() << maple::endl; + } + if (static_cast(vtabIndex) < size && + (virtualMethods[vtabIndex].GetMethod() == nullptr || + CanAccess(*(virtualMethods[vtabIndex].GetMethod()), *method))) { + OverrideVTabMethod(virtualMethods, vtabIndex, *method, depth, index, isInterface); + ignoredNum++; + continue; + } else { + AppendVTabMethod(virtualMethods, vtabIndex, *method, depth, index, isInterface, hash); + continue; + } + } + + LINKER_LOG(FATAL) << "negative vtab index, or interface, " << ", method=" << GetMethodFullName(*method) << + ", hash=" << hash << ", vtabIndex=" << method->GetVtabIndex() << "->(" << size << "-" << ignoredNum << + "), ofClass=" << klass->GetName() << maple::endl; + } + virtualMethods.resize(virtualMethods.size() - ignoredNum); +} + +void MethodBuilder::CollectClassMethodsRecursive(MClass *klass, bool &isDecouple, std::set &checkedClasses, + std::vector &depth, uint16_t superNum, + std::vector &virtualMethods, MplLazyBindingVTableMapT &adjMethods) { + if (klass->IsColdClass() && !klass->IsLazyBinding()) { + LINKER_VLOG(lazybinding) << "cold class name=" << klass->GetName() << maple::endl; + pInvoker->ResolveColdClassSymbol(reinterpret_cast(klass)); + } + +#ifdef LINKER_RT_LAZY_CACHE + // superNum == 0xFFFF, means not to record depth. 
+ if (superNum != static_cast(-1)) { + depth.push_back(superNum); + } +#endif // LINKER_RT_LAZY_CACHE + + uint32_t superClassNum = klass->GetNumOfSuperClasses(); + MClass **superClassArray = klass->GetSuperClassArrayPtr(); + if (superClassNum != 0) { + // Check the supers of 'klass'. + for (uint32_t i = 0; i < superClassNum; ++i) { + MClass *superClass = superClassArray[i]; + uint16_t num = 0; +#ifdef LINKER_RT_LAZY_CACHE + if (superNum != static_cast(-1)) { + num = i; + } else { + num = static_cast(-1); + } +#endif // LINKER_RT_LAZY_CACHE + CollectClassMethodsRecursive(superClass, isDecouple, checkedClasses, depth, num, virtualMethods, adjMethods); + } + } + + // Check the 'klass'. + LINKER_VLOG(lazybinding) << "class=" << klass->GetName() << ", " << superNum << maple::endl; + if (!isDecouple) { + // Mark the class as not decoupled when its only superclass is object + isDecouple = (klass->IsDecouple() && (superClassNum != 1 || *superClassArray[0] != MRT_GetClassObject())); + } + bool isInterface = klass->IsInterface(); + if (LIKELY(!isInterface || checkedClasses.count(klass) == 0)) { + CollectClassMethods(klass, isDecouple, depth, virtualMethods, adjMethods); + } + if (isInterface) { + checkedClasses.insert(klass); + } + +#ifdef LINKER_RT_LAZY_CACHE + if (superNum != static_cast(-1)) { + depth.erase(depth.end() - 1); + } +#endif // LINKER_RT_LAZY_CACHE +} + +void MethodBuilder::GenerateAndAttachClassVTable(MClass *klass, std::vector &virtualMethods) { + if (UNLIKELY(virtualMethods.size() == 0)) { + return; + } + + LinkerVoidType *vtab = reinterpret_cast( + MRT_AllocFromDecouple(sizeof(LinkerVTableItem) * virtualMethods.size(), kVTabArray)); + if (UNLIKELY(vtab == nullptr)) { + LINKER_LOG(FATAL) << "returns null" << maple::endl; + return; + } + + LinkerVTableItem *item = reinterpret_cast(vtab); + for (uint32_t i = 0; i < virtualMethods.size(); ++i) { + item->index = reinterpret_cast(virtualMethods[i].GetMethod()->GetFuncAddress()); + item++; + } + reinterpret_cast(klass)->vTable.SetDataRef(vtab); +} + +inline uint16_t MethodBuilder::GetMethodMetaHash(const MethodMetaBase &method) { + uint16_t hash = kMethodHashMask; + if (!method.IsMethodMetaCompact()) { + hash = reinterpret_cast(const_cast(&method))->GetHashCode(); + } + if (hash == kMethodHashMask) { + hash = MClass::GetMethodFieldHash(method.GetName(), GetMethodSignature(method).c_str(), true); + } + return hash; +} + +std::string MethodBuilder::GetMethodFullName(const MethodMetaBase &method) { + const char *methodName = method.GetName(); + std::string name = methodName; + name += '|'; + if (!method.IsMethodMetaCompact()) { + name += reinterpret_cast(&method)->GetSignature(); + } else { + std::string signature; + method.GetSignature(signature); + name += signature; + } + return name; +} + +inline std::string MethodBuilder::GetMethodSignature(const MethodMetaBase &method) const { + if (!method.IsMethodMetaCompact()) { + return std::string(reinterpret_cast(&method)->GetSignature()); + } else { + std::string signature; + method.GetSignature(signature); + return signature; + } +} + +// If the method name and signature are equal. 
+bool MethodBuilder::EqualMethod(const MethodMetaBase &method1, const MethodMetaBase &method2) { + if (strcmp(method1.GetName(), method2.GetName()) != 0) { + return false; + } + + std::string signature1; + const char *method1Signature; + if (!method1.IsMethodMetaCompact()) { + method1Signature = reinterpret_cast(&method1)->GetSignature(); + } else { + method1.GetSignature(signature1); + method1Signature = signature1.c_str(); + } + std::string signature2; + const char *method2Signature; + if (!method2.IsMethodMetaCompact()) { + method2Signature = reinterpret_cast(&method2)->GetSignature(); + } else { + method2.GetSignature(signature2); + method2Signature = signature2.c_str(); + } + return strcmp(method1Signature, method2Signature) == 0; +} + +inline bool MethodBuilder::EqualMethod( + const MethodMetaBase *method, const char *methodName, const char *methodSignature) { + if (strcmp(method->GetName(), methodName) != 0) { + return false; + } + + std::string tmp; + const char *signature; + if (!method->IsMethodMetaCompact()) { + signature = reinterpret_cast(method)->GetSignature(); + } else { + method->GetSignature(tmp); + signature = tmp.c_str(); + } + return strcmp(signature, methodSignature) == 0; +} + +// Also see VtableAnalysis::CheckOverrideForCrossPackage at compiler side. +// Return true if virtual functions can be set override relationship. +// We just check if the base method is visibile for current method. +inline bool MethodBuilder::CanAccess( + const MethodMetaBase &baseMethod, const MethodMetaBase ¤tMethod) { + // For the corss package inheritance, only if the base func is declared + // as either 'public' or 'protected', we shall set override relationship. + if (modifier::IsPublic(baseMethod.GetMod()) || modifier::IsProtected(baseMethod.GetMod())) { + return true; + } + MClass *basePackageClass = baseMethod.GetDeclaringClass(); + MClass *currentPackageClass = currentMethod.GetDeclaringClass(); + if (basePackageClass == nullptr || currentPackageClass == nullptr) { + LINKER_LOG(FATAL) << "class is null" << maple::endl; + } + char *basePackageName = basePackageClass->GetName(); + char *currentPackageName = currentPackageClass->GetName(); + char *basePos = strrchr(basePackageName, '/'); // Reverse find the end of package + char *currentPos = strrchr(currentPackageName, '/'); // Reverse find the end of package + uint32_t basePackageLen = (basePos == nullptr) ? 0 : static_cast(basePos - basePackageName); + uint32_t currentPackageLen = (currentPos == nullptr) ? 0 : static_cast(currentPos - currentPackageName); + if (basePackageLen == currentPackageLen && strncmp(basePackageName, currentPackageName, basePackageLen) == 0) { + return true; + } + + LINKER_VLOG(lazybinding) << "return false, method=" << GetMethodFullName(currentMethod) << ", baseClass=" << + basePackageName << ", currentClass=" << currentPackageName << maple::endl; + return false; +} + +// virtualMethods: In +// firstITabVector: Out +// firstITabConflictVector: Out +// maxFirstITabIndex: Out +inline uint32_t MethodBuilder::ProcessITableFirstTable(std::vector &virtualMethods, + std::vector &firstITabVector, std::vector &firstITabConflictVector, + uint32_t &maxFirstITabIndex) { + uint32_t num = 0; + std::vector firstITabConflictFlagVector(maple::kItabFirstHashSize, false); + for (size_t i = 0; i < virtualMethods.size(); ++i) { + MethodItem methodItem = virtualMethods[i]; + if (!methodItem.isInterface) { + continue; + } + ++num; + + // Should check if we can directly use GetMethodMetaHash, and mod maple::kHashSize. 
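+    // First-level itable bucketing: hash the full method name into firstITabVector; on the first
+    // collision the slot is cleared and both methods move to firstITabConflictVector, which the
+    // second-level table resolves later.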
+ auto name = GetMethodFullName(*(methodItem.GetMethod())); + uint32_t hash = GetHashIndex(name.c_str()); + if (hash > maxFirstITabIndex) { + maxFirstITabIndex = hash; + } + if (firstITabVector[hash] == nullptr && !firstITabConflictFlagVector[hash]) { // First insertion. + firstITabVector[hash] = methodItem.GetMethod(); + } else { // Conflict + if (!firstITabConflictFlagVector[hash]) { // First conflict. + firstITabConflictVector.push_back(firstITabVector[hash]); + firstITabConflictVector.push_back(methodItem.GetMethod()); + firstITabVector[hash] = nullptr; + firstITabConflictFlagVector[hash] = true; + } else { // Successive conflicts. + firstITabConflictVector.push_back(methodItem.GetMethod()); + } + } + } + return num; +} + +// firstITabConflictVector: In +// secondITableMap: Out +// secondITabConflictVector: Out +inline size_t MethodBuilder::ProcessITableSecondTable(std::vector &firstITabConflictVector, + std::map &secondITableMap, std::vector &secondITabConflictVector) { + size_t sizeOfMethodsNames = 0; + for (size_t i = 0; i < firstITabConflictVector.size(); ++i) { // Travel across the methods one by one. + MethodMetaBase *method = firstITabConflictVector[i]; + auto name = GetMethodFullName(*method); + sizeOfMethodsNames += name.size() + 1; + uint32_t hash = maple::GetSecondHashIndex(name.c_str()); + if (secondITableMap.find(hash) == secondITableMap.end()) { // First insertion. + secondITableMap[hash] = method; + } else { // Conflict. + if (secondITableMap[hash] != nullptr) { // First conflict. + secondITabConflictVector.push_back(secondITableMap[hash]); + secondITabConflictVector.push_back(method); + secondITableMap[hash] = nullptr; // Set null as conflict flag temporarily. + } else { // Successive conflicts. + secondITabConflictVector.push_back(method); + } + } + } + return sizeOfMethodsNames; +} + +inline void MethodBuilder::GenerateITableFirstTable(LinkerVoidType &itab, + const std::vector &firstITabVector, + const std::vector &firstITabConflictVector, uint32_t maxFirstITabIndex) const { + // ITable's first table + size_t firstITabVectorSize = firstITabConflictVector.empty() ? maxFirstITabIndex + 1 : firstITabVector.size(); + for (size_t i = 0; i < firstITabVectorSize; ++i) { + auto method = firstITabVector[i]; + if (LIKELY(method != nullptr)) { + (&itab)[i] = reinterpret_cast(method->GetFuncAddress()); + } + } +} + +inline void MethodBuilder::GenerateITableSecondTable( + LinkerVoidType &itab, std::map &secondITableMap, + std::vector &secondITabConflictVector, size_t sizeOfITabNoNames) { + // kHeadSizeOfSecondHash:3 + // The last one points to the second level table + (&itab)[maple::kHashSize] = reinterpret_cast(&itab + maple::kHashSize + 1); + int pos = maple::kHashSize + 1; + /// Hash table: size + align +#ifdef USE_32BIT_REF + uint64_t shiftCountBit = 16; // 16 is 4 x 4 +#else + uint64_t shiftCountBit = 32; // 32 is 8 x 4 +#endif + (&itab)[pos++] = ((secondITabConflictVector.size() | (1UL << (shiftCountBit - 1))) << shiftCountBit) + + secondITableMap.size(); + (&itab)[pos++] = maple::kFlagAgInHeadOfSecondHash; + + /// Hash table: hash + method address + for (auto &item : secondITableMap) { + auto method = item.second; + (&itab)[pos++] = item.first; + (&itab)[pos++] = method != nullptr ? 
+ reinterpret_cast(method->GetFuncAddress()) : maple::kFlagSecondHashConflict; + } + + // Conflict table: method string + address + char *methodsNames = reinterpret_cast(&itab + sizeOfITabNoNames); + int namePos = 0; + for (size_t i = 0; i < secondITabConflictVector.size(); i++) { // Travel across the methods one by one. + auto method = secondITabConflictVector[i]; + // Copy the method's name firstly. + std::string name = GetMethodFullName(*method); + size_t len = name.size(); + if (UNLIKELY(strcpy_s(methodsNames + namePos, len + 1, name.c_str()) != EOK)) { + free(&itab); + LINKER_LOG(FATAL) << "strcpy_s() failed" << maple::endl; + } + (methodsNames + namePos)[len] = '\0'; + + LINKER_DLOG(lazybinding) << "conflict name[" << pos << "][" << namePos << "]: " << + reinterpret_cast(method->GetFuncAddress()) << "/" << (methodsNames + namePos) << maple::endl; + // Set the name and address. + (&itab)[pos++] = reinterpret_cast(methodsNames + namePos); + (&itab)[pos++] = reinterpret_cast(method->GetFuncAddress()); + + // Next method's name offset. + namePos += len + 1; + } +} + +void MethodBuilder::GenerateAndAttachClassITable(MClass *klass, std::vector &virtualMethods) { + if (UNLIKELY(klass->IsAbstract())) { + return; + } + // Filter the methods for the first level table of itable. + uint32_t maxFirstITabIndex = 0; + std::vector firstITabVector(maple::kItabFirstHashSize, nullptr); + std::vector firstITabConflictVector; + if (ProcessITableFirstTable( + virtualMethods, firstITabVector, firstITabConflictVector, maxFirstITabIndex) == 0) { + reinterpret_cast(klass)->iTable.SetDataRef(nullptr); + return; + } + // Collect the second level table of itable. + std::map secondITableMap; + std::vector secondITabConflictVector; + size_t sizeOfMethodsNames = ProcessITableSecondTable( + firstITabConflictVector, secondITableMap, secondITabConflictVector); + size_t sizeOfITab = 0; + size_t sizeOfITabNoNames = 0; + if (firstITabConflictVector.empty()) { // No conflict in first level table. + sizeOfITab += maxFirstITabIndex + 1; + sizeOfITab = sizeOfITab * sizeof(LinkerVoidType); + } else { + sizeOfITabNoNames = maple::kHashSize + maple::kHeadSizeOfSecondHash + + secondITableMap.size() * 2 + secondITabConflictVector.size() * 2; // 2 is to skit length of table + sizeOfITab += sizeOfITabNoNames + sizeOfMethodsNames; + sizeOfITab = sizeOfITab * sizeof(LinkerVoidType); + } + LINKER_DLOG(lazybinding) << "size of itab :" << sizeOfITab << maple::endl; + LinkerVoidType *itab = reinterpret_cast(MRT_AllocFromDecouple(sizeOfITab, kITabAggregate)); + if (UNLIKELY(itab == nullptr)) { + LINKER_LOG(FATAL) << "MRT_NewPermObj returns null" << maple::endl; + return; + } + if (UNLIKELY(memset_s(itab, sizeOfITab, 0, sizeOfITab) != EOK)) { + LINKER_LOG(ERROR) << "memset_s() failed" << maple::endl; + return; + } + GenerateITableFirstTable(*itab, firstITabVector, firstITabConflictVector, maxFirstITabIndex); + if (firstITabConflictVector.empty()) { // No conflict in first level table. 
+    reinterpret_cast(klass)->iTable.SetDataRef(itab);
+    return;
+  }
+  GenerateITableSecondTable(*itab, secondITableMap, secondITabConflictVector, sizeOfITabNoNames);
+  reinterpret_cast(klass)->iTable.SetDataRef(itab);
+}
+
+inline int32_t MethodBuilder::GetVTableItemIndex(
+    const MClass *klass, MplLazyBindingVTableMapT &adjMethods, const char *methodName, const char *methodSignature) {
+  size_t size = adjMethods.size();
+  if (LIKELY(size > 0)) {
+    uint16_t hash = MClass::GetMethodFieldHash(methodName, methodSignature, true);
+    uint16_t pos = hash % size;
+    uint16_t k = kAdjacencyInvalidValue;
+    for (k = adjMethods[pos].first; k != kAdjacencyInvalidValue; k = adjMethods[k].next) {
+      MethodMetaBase *method = adjMethods[k].GetMethod();
+      if (EqualMethod(method, methodName, methodSignature)) { // Found.
+        return k;
+      }
+    }
+  }
+  LINKER_LOG(ERROR) << "failed to find method=" << methodName << "|" << methodSignature << ", " << size <<
+      ", in class=" << klass->GetName() << maple::endl;
+  return -1;
+}
+
+int32_t MethodBuilder::UpdateOffsetTableByVTable(const MClass *klass, MplLazyBindingVTableMapT &adjMethods,
+    LinkerVTableOffsetItem &vtableOffsetItem, LinkerOffsetValItem &offsetTableItem) {
+  DataRefOffset32 *dataMethodName = reinterpret_cast(&vtableOffsetItem.methodName);
+  const char *method = dataMethodName->GetDataRef();
+  DataRefOffset32 *dataSignatureName = reinterpret_cast(&vtableOffsetItem.signatureName);
+  const char *signature = dataSignatureName->GetDataRef();
+  int32_t index = GetVTableItemIndex(klass, adjMethods, method, signature);
+  if (LIKELY(index >= 0)) {
+    offsetTableItem.offset = index * sizeof(LinkerVTableItem);
+  }
+  return index;
+}
+
+int32_t MethodBuilder::UpdateOffsetTableByVTable(const MClass *klass, MplLazyBindingVTableMapT &adjMethods,
+    LinkerVTableOffsetItem &vtableOffsetItem, LinkerOffsetValItemLazyLoad &offsetTableItem) {
+  DataRefOffset32 *klassMethod = reinterpret_cast(&vtableOffsetItem.methodName);
+  const char *method = klassMethod->GetDataRef();
+  DataRefOffset32 *klassSignature = reinterpret_cast(&vtableOffsetItem.signatureName);
+  const char *signature = klassSignature->GetDataRef();
+  int32_t index = GetVTableItemIndex(klass, adjMethods, method, signature);
+  if (LIKELY(index >= 0)) {
+    offsetTableItem.offset = index * sizeof(LinkerVTableItem);
+    offsetTableItem.offsetAddr = AddressToRefField(reinterpret_cast(&offsetTableItem.offset));
+  }
+  return index;
+}
+
+inline void MethodBuilder::BuildMethodVTableITable(MClass *klass, std::vector &virtualMethods) {
+  if (reinterpret_cast(klass)->vTable.refVal == 0) {
+    GenerateAndAttachClassVTable(klass, virtualMethods);
+  }
+  if (reinterpret_cast(klass)->iTable.refVal == 0) {
+    GenerateAndAttachClassITable(klass, virtualMethods);
+  }
+}
+
+#ifdef LINKER_RT_LAZY_CACHE
+MethodMetaBase* MethodBuilder::GetMethodByIndex(MClass *klass, uint16_t index) {
+  uint32_t methodNum = klass->GetNumOfMethods();
+  MethodMeta *methodMetas = klass->GetRawMethodMetas();
+  if (UNLIKELY(methodNum == 0 || methodMetas == nullptr)) {
+    LINKER_LOG(WARNING) << "invalid method, " << klass << "/" << klass->GetName() <<
+        ", " << methodNum << ", " << methodMetas << maple::endl;
+    return nullptr;
+  }
+  bool isCompactMeta = __MRT_IsCompactMeta(reinterpret_cast(methodMetas));
+  MethodMetaBase *method = nullptr; // Declare the result before the branches below assign it.
+  if (!isCompactMeta) {
+    method = reinterpret_cast(&methodMetas[index]);
+  } else {
+    MethodMetaCompact *compact = MethodMetaCompact::GetMethodMetaCompact(*klass, index);
+    method = reinterpret_cast(compact);
+  }
+  return method;
+}
+
+void 
MethodBuilder::BuildMethodByCachingIndex(MClass *klass, const std::string &cachingIndex) {
+  std::vector virtualMethods;
+  std::string::size_type startPos = 0;
+  while (true) {
+    std::string::size_type endPos = cachingIndex.find(',', startPos);
+    if (endPos == std::string::npos) {
+      break;
+    }
+
+    bool isInterface = false;
+    std::string::size_type indexPos = cachingIndex.find('#', startPos);
+    if (indexPos == std::string::npos || indexPos > endPos) {
+      indexPos = cachingIndex.find('!', startPos);
+      isInterface = true;
+    }
+    constexpr int decimalBase = 10;
+    auto index = std::strtol(cachingIndex.c_str() + indexPos + 1, nullptr, decimalBase);
+
+    // There are 3 types of depth info.:
+    // 1. If the depth is not like 0/0/0/, for example 0/1/, we should record the whole depth info.
+    // 2. If the depth is like 0/0/0/, we could only record the depth number(2).
+    // 3. Further, if the depth is 0, we could ignore it as the default.
+    std::vector depth;
+    char *depthStr = nullptr;
+    auto tmp = std::strtol(cachingIndex.c_str() + startPos, &depthStr, decimalBase);
+    if (tmp == 0 && depthStr == cachingIndex.c_str() + startPos) { // Type 3, needn't push anything.
+    } else if (*depthStr != '/') { // Type 2.
+      depth.resize(tmp, 0);
+    } else { // Type 1.
+      // Ignore the first depth.
+      depthStr++;
+      while (depthStr != cachingIndex.c_str() + indexPos) {
+        tmp = std::strtol(depthStr, &depthStr, decimalBase);
+        depth.push_back(tmp);
+        depthStr++;
+      }
+    }
+
+    MClass *superClass = klass;
+    for (uint32_t i = 0; i < depth.size(); ++i) {
+      uint16_t superNum = depth[i];
+      MClass **superClassArray = klass->GetSuperClassArrayPtr();
+      // We trust the caching info. and don't check __MRT_Class_getNumofSuperClasses() here.
+      superClass = superClassArray[superNum];
+    }
+
+    auto method = GetMethodByIndex(superClass, index);
+    uint16_t hash = GetMethodMetaHash(*method);
+    MethodItem methodItem;
+    methodItem.SetMethod(*method);
+    methodItem.isInterface = isInterface;
+    methodItem.hash = hash;
+    virtualMethods.push_back(std::move(methodItem));
+    startPos = endPos + 1;
+  }
+
+  BuildMethodVTableITable(klass, virtualMethods);
+}
+
+std::string MethodBuilder::GetMethodCachingIndexString(MClass *klass, std::vector &virtualMethods) {
+  std::string vtabIndexStr;
+  for (uint32_t i = 0; i < virtualMethods.size(); ++i) {
+    bool mainLine = true;
+    std::string depthStr;
+    for (uint32_t j = 0; j < virtualMethods[i].depth.size(); ++j) {
+      if (virtualMethods[i].depth[j] != 0) {
+        mainLine = false;
+      }
+      depthStr += std::to_string(virtualMethods[i].depth[j]);
+      depthStr += '/';
+    }
+
+    // There are 3 types of depth info.:
+    // 1. If the depth is not like 0/0/0/, for example 0/1/, we should record the whole depth info.
+    // 2. If the depth is like 0/0/0/, we could only record the depth number(2).
+    // 3. Further, if the depth is 0, we could ignore it as the default.
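+    // Examples (hypothetical indices): type 1 emits "0/1/#12,", type 2 emits "2#12,", and
+    // type 3 emits "#12," (with '!' in place of '#' when the method comes from an interface).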
+ if (!mainLine) { + vtabIndexStr += depthStr; + } else { + if (virtualMethods[i].depth.size() > 1) { + vtabIndexStr += std::to_string(virtualMethods[i].depth.size() - 1); + } + } + if (virtualMethods[i].isInterface) { + vtabIndexStr += '!'; + } else { + vtabIndexStr += '#'; + } + vtabIndexStr += std::to_string(virtualMethods[i].index); + vtabIndexStr += ','; + } + LINKER_DLOG(lazybinding) << "vtabIndexStr=" << vtabIndexStr << ", for " << klass->GetName() << maple::endl; + return vtabIndexStr; +} +#endif // LINKER_RT_LAZY_CACHE + +MplLazyBindingVTableMapT MethodBuilder::GetMethodVTableMap(MClass *klass) { + std::set checkedClasses; + std::vector virtualMethods; + MplLazyBindingVTableMapT adjMethods; + bool isDecouple = false; + std::vector depth; + CollectClassMethodsRecursive( + klass, isDecouple, checkedClasses, depth, static_cast(-1), virtualMethods, adjMethods); + return adjMethods; +} + +std::string MethodBuilder::BuildMethod(MClass *klass) { + std::set checkedClasses; + std::vector virtualMethods; + MplLazyBindingVTableMapT adjMethods; + bool isDecouple = false; + std::vector depth; + CollectClassMethodsRecursive(klass, isDecouple, checkedClasses, depth, 0, virtualMethods, adjMethods); + BuildMethodVTableITable(klass, virtualMethods); + std::string res; +#ifdef LINKER_RT_LAZY_CACHE + res = GetMethodCachingIndexString(klass, virtualMethods); +#endif // LINKER_RT_LAZY_CACHE + return res; +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_model.cpp b/src/mrt/compiler-rt/src/linker/linker_model.cpp new file mode 100644 index 0000000000..a342b2c00a --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_model.cpp @@ -0,0 +1,659 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "linker/linker_model.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "file_system.h" +#include "linker/linker_inline.h" +#ifdef LINKER_DECOUPLE +#include "linker/decouple/linker_decouple.h" +#endif +#include "linker/linker_cache.h" +#include "linker/linker_debug.h" +#include "linker/linker_hotfix.h" +#include "linker/linker_lazy_binding.h" +#include "utils/name_utils.h" +#include "file_layout.h" +#include "collector/cp_generator.h" + +using namespace maple; +namespace maplert { +using namespace linkerutils; +bool LinkerMFileInfo::BelongsToApp() { + LinkerInvoker &invoker = LinkerAPI::As(); + return (!IsFlag(kIsBoot) && !invoker.IsSystemClassLoader(reinterpret_cast(this->classLoader))); +} + +void LinkerMFileInfo::ReleaseReadOnlyMemory() { + LINKER_VLOG(mpllinker) << "release file-mapped readonly memory for " << name << maple::endl; + + // release .refl_strtab + ReleaseMemory(this->coldStrTab, this->coldStrTabEnd); + + // release .rometadata for java methods + ReleaseMemory(this->rometadataMethodStart, this->rometadataMethodEnd); + + // release .rometadata for java fields + ReleaseMemory(this->rometadataFieldStart, this->rometadataFieldEnd); + + // release .romuidtab + ReleaseMemory(this->romuidtabStart, this->romuidtabEnd); +} + +LinkerInvoker::LinkerInvoker() { + InitProtectedRegion(); +} +LinkerInvoker::~LinkerInvoker() { + features.clear(); + ClearLinkerMFileInfo(); + pLoader = nullptr; +} + +// Fetch the method address by index directly, in the define table of 'handle'. +void *LinkerInvoker::GetMethodSymbolAddress(LinkerMFileInfo &mplInfo, size_t index) { + size_t methodDefSize = mplInfo.GetTblSize(kMethodDef); + if (methodDefSize == 0) { + LINKER_LOG(ERROR) << "failed, the size of methodDef is zero in " << mplInfo.name << maple::endl; + return nullptr; + } + AddrSlice methodDefSlice(mplInfo.GetTblBegin(kMethodDefOrig), methodDefSize); + if (index < methodDefSize) { + return GetDefTableAddress(mplInfo, methodDefSlice, index, true); + } + return nullptr; +} + +// Fetch the data address by index directly, in the define table of 'handle'. +void *LinkerInvoker::GetDataSymbolAddress(LinkerMFileInfo &mplInfo, size_t index) { + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + if (dataDefSize == 0) { + LINKER_LOG(ERROR) << "failed, the size of dataDef is zero in " << mplInfo.name << maple::endl; + return nullptr; + } + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + if (index < dataDefSize) { + return GetDefTableAddress(mplInfo, dataDefSlice, index, false); + } + return nullptr; +} + +bool LinkerInvoker::DoLocateAddress(const LinkerMFileInfo &mplInfo, LinkerLocInfo &info, const void *addr, + const AddrSlice &addrSlice, const InfTableSlice &infTableSlice, int64_t pos, bool getName) { + if (pos != -1) { + info.size = infTableSlice[static_cast(pos)].size; + info.addr = GetDefTableAddress(mplInfo, addrSlice, static_cast(pos), true); + if (getName) { + info.sym = GetMethodSymbolByOffset(infTableSlice[static_cast(pos)]); + // Get library name. + info.path = mplInfo.name; + } else { + info.sym = ""; + info.path = ""; + } + return true; + } else { + LINKER_VLOG(mpllinker) << "failed, not found addr=" << addr << " in " << mplInfo.name << maple::endl; + } + return false; +} + +// Locate the address for method, which defined in the .so of 'handle'. 
+// Also see int dladdr(void *addr, Dl_info *info) +bool LinkerInvoker::LocateAddress(LinkerMFileInfo &mplInfo, const void *addr, LinkerLocInfo &info, bool getName) { + size_t methodDefSize = mplInfo.GetTblSize(kMethodDef); + if (methodDefSize == 0) { + LINKER_DLOG(mpllinker) << "failed, size is zero, in " << mplInfo.name << maple::endl; + return false; + } + AddrSlice methodDefSlice(mplInfo.GetTblBegin(kMethodDefOrig), methodDefSize); + size_t indexSize = mplInfo.GetTblSize(kMethodMuidIndex); + if (indexSize == 0) { + LINKER_LOG(ERROR) << "failed, muidIndexTable size is zero" << " in " << mplInfo.name << maple::endl; + return false; + } + MuidIndexSlice muidIndexSlice = MuidIndexSlice( + mplInfo.GetTblBegin(kMethodMuidIndex), indexSize); + size_t infSize = mplInfo.GetTblSize(kMethodInfo); + if (infSize == 0 || infSize != methodDefSize) { + LINKER_LOG(ERROR) << "failed, infTable size is 0, or tables size are not equal, " << infSize << " vs. " << + methodDefSize << " in " << mplInfo.name << maple::endl; + return false; + } + InfTableSlice infTableSlice = InfTableSlice(mplInfo.GetTblBegin(kMethodInfo), infSize); + + const size_t start = 0; + size_t end = indexSize - 1; + size_t scopeStartAddr = mplInfo.GetTblBegin(kJavaText); + size_t scopeEndAddr = scopeStartAddr + mplInfo.GetTblSize(kJavaText); + int64_t pos = BinarySearchIndex(mplInfo, methodDefSlice, start, end, + reinterpret_cast(addr), muidIndexSlice, infTableSlice, scopeStartAddr, scopeEndAddr); + return DoLocateAddress(mplInfo, info, addr, methodDefSlice, infTableSlice, pos, getName); +} + +// Locate the address for method, by traversal all open maple .so files. +// Also see int dladdr(void *addr, Dl_info *info) +bool LinkerInvoker::LocateAddress(const void *addr, LinkerLocInfo &info, bool getName) { + if (UNLIKELY(addr == nullptr)) { + return false; + } + LinkerMFileInfo *mplInfo = GetLinkerMFileInfo(kFromPC, addr); + if (mplInfo != nullptr) { + if (LocateAddress(*mplInfo, addr, info, getName)) { + return true; + } + LINKER_VLOG(mpllinker) << "failed, not found " << addr << maple::endl; + } else { + LINKER_VLOG(mpllinker) << "failed, not found in JAVA section for " << addr << maple::endl; + } + return false; +} + +// Return the LinkerMFileInfo pointer, null if not found. +LinkerMFileInfo *LinkerInvoker::SearchAddress(const void *pc, AddressRangeType type, bool isLazyBinding) { + if (type == kTypeText) { + return mplInfoElfAddrSet.SearchJavaText(pc); + } + if (isLazyBinding) { + return mplInfoElfAddrLazyBindingSet.Search(pc, type); + } else { + return mplInfoElfAddrSet.Search(pc, type); + } +} + +// Get offset vtable origin address. +void *LinkerInvoker::GetMplOffsetValue(LinkerMFileInfo &mplInfo) { + void *tableAddr = mplInfo.GetTblBegin(kValueOffset); + return reinterpret_cast(tableAddr) + sizeof(LinkerOffsetKeyTableInfo); +} + +// Get mpl linker validity code from range table. +MUID LinkerInvoker::GetValidityCode(LinkerMFileInfo &mplInfo) const { + if (mplInfo.hash.data.words[0] == 0 && mplInfo.hash.data.words[1] == 0) { + mplInfo.hash.data.words[0] = mplInfo.GetTblBegin(kRange); + mplInfo.hash.data.words[1] = mplInfo.GetTblEnd(kRange); + } + return mplInfo.hash; +} + +// Get decouple validity code from range table. 
+MUID LinkerInvoker::GetValidityCodeForDecouple(LinkerMFileInfo &mplInfo) const { + if (mplInfo.hashOfDecouple.data.words[0] == 0 && mplInfo.hashOfDecouple.data.words[1] == 0) { + mplInfo.hashOfDecouple.data.words[0] = mplInfo.GetTblBegin(kDecouple); + mplInfo.hashOfDecouple.data.words[1] = mplInfo.GetTblEnd(kDecouple); + } + return mplInfo.hashOfDecouple; +} + +// see if class's super symbol has been resolved +#ifdef LINKER_DECOUPLE +bool LinkerInvoker::IsClassComplete(const MClass &classInfo) { + MClass **superTable = classInfo.GetSuperClassArrayPtr(); + uint32_t superSize = classInfo.GetNumOfSuperClasses(); + if (superTable != nullptr && superSize > 0) { + if (!classInfo.IsColdClass()) { + for (uint32_t i = 0; i < superSize; ++i) { + LinkerRef ref(superTable[i]); + if (ref.IsEmpty() || ref.IsIndex() || !IsClassComplete(*superTable[i])) { + return false; + } + } + } else { + DecoupleMFileInfo *mplInfo = reinterpret_cast(SearchAddress(&classInfo, kTypeClass)); + if (mplInfo == nullptr) { + LOG(ERROR) << "Decouple::IsClassComplete: not find so for " << classInfo.GetName() << maple::endl; + return false; + } + for (uint32_t i = 0; i < superSize; ++i) { + MClass *superClass = superTable[i]; + LinkerRef ref(superClass); + if (ref.IsIndex()) { + size_t index = ref.GetIndex(); + bool fromUndef = ref.IsFromUndef(); + if (fromUndef && index < mplInfo->dataUndefSlice.Size() && !mplInfo->dataUndefSlice.Empty()) { + superClass = static_cast(mplInfo->dataUndefSlice[index].Address()); + } else if (!fromUndef && index < mplInfo->dataDefSlice.Size() && !mplInfo->dataDefSlice.Empty()) { + superClass = static_cast(GetDefTableAddress(*mplInfo, mplInfo->dataDefSlice, index, false)); + } + } + if (superClass == nullptr || !IsClassComplete(*superClass)) { + return false; + } + } + } + } + return true; +} +#endif + +void *LinkerInvoker::GetClassMetadataLazily(const void *offsetTable, size_t classIndex) { + LinkerMFileInfo *mplInfo = GetLinkerMFileInfo(kFromAddr, offsetTable, true); + if (mplInfo == nullptr) { + LINKER_LOG(FATAL) << "failed to find mplInfo for " << offsetTable << maple::endl; + } + return GetClassMetadataLazily(*mplInfo, classIndex); +} + +BindingState LinkerInvoker::GetAddrBindingState(LinkerVoidType addr) { + return Get()->GetAddrBindingState(addr); +} +BindingState LinkerInvoker::GetAddrBindingState(const AddrSlice &addrSlice, size_t index, bool isAtomic) { + return Get()->GetAddrBindingState(addrSlice, index, isAtomic); +} +void LinkerInvoker::DumpStackInfoInLog() { + Get()->DumpStackInfoInLog(); +} +void *LinkerInvoker::GetClassMetadataLazily(LinkerMFileInfo &mplInfo, size_t classIndex) { + return Get()->GetClassMetadata(mplInfo, classIndex); +} + +MObject *LinkerInvoker::GetClassLoaderByAddress(LinkerMFileInfo &mplInfo, const void *addr) { + if (addr != nullptr) { + LINKER_VLOG(lazybinding) << "addr=" << addr << ", in " << mplInfo.name << maple::endl; + LinkerLocInfo info; + if (LocateAddress(mplInfo, addr, info, false) == false) { + LINKER_LOG(ERROR) << "failed to locate, addr=" << addr << maple::endl; + return nullptr; + } + uint32_t *method = static_cast(const_cast(info.addr)); + // To get the declaring class of the method. + void *md = JavaFrame::GetMethodMetadata(method); + if (md != nullptr) { + const MethodMetaBase *methodMeta = reinterpret_cast(md); + return reinterpret_cast(MRT_GetClassLoader(*methodMeta->GetDeclaringClass())); + } + } + return nullptr; +} + +// Look up the method address with MUID, in the define table of 'handle'. 
+// Also see LookUpSymbol() +LinkerVoidType LinkerInvoker::LookUpMethodSymbolAddress(LinkerMFileInfo &mplInfo, const MUID &muid, size_t &index) { + index = 0; + size_t methodDefSize = mplInfo.GetTblSize(kMethodDef); + if (methodDefSize == 0) { + LINKER_DLOG(mpllinker) << "failed, the size of methodDef table is zero, " << maple::endl; + return 0; + } + AddrSlice methodDefSlice(mplInfo.GetTblBegin(kMethodDefOrig), methodDefSize); + if (methodDefSlice.Empty()) { + LINKER_DLOG(mpllinker) << "failed, methodDef table is null" << maple::endl; + return 0; + } + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kMethodDefMuid); + if (pMuidTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pMuidTable is null in " << mplInfo.name << maple::endl; + return 0; + } + + // Because method define table is sorted by address ascending order, + // we must add an index side-table for binary searching address with muid. + LinkerMuidIndexTableItem *pMuidIndexTable = mplInfo.GetTblBegin(kMethodMuidIndex); + size_t indexSize = mplInfo.GetTblSize(kMethodMuidIndex); + if (pMuidIndexTable == nullptr || indexSize == 0) { + LINKER_DLOG(mpllinker) << "failed, pMuidIndexTable is null, or size is zero" << maple::endl; + return 0; + } + + const int64_t start = 0; + int64_t end = static_cast(methodDefSize) - 1; + int64_t pos = Get()->BinarySearch( + *pMuidTable, *pMuidIndexTable, start, end, muid); + if (pos != -1) { + index = static_cast(pos); +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + return AddrToUint32(GetDefTableAddress(mplInfo, methodDefSlice, index, true)); +#else + return reinterpret_cast(GetDefTableAddress(mplInfo, methodDefSlice, index, true)); +#endif // USE_32BIT_REF + } else { + LINKER_DLOG(mpllinker) << "failed, not found MUID=" << muid.ToStr() << maple::endl; + } + return 0; +} + +// Look up the data with MUID, in the data define table of 'handle'. +// Also see LookUpSymbol() +LinkerVoidType LinkerInvoker::LookUpDataSymbolAddress(LinkerMFileInfo &mplInfo, const MUID &muid, size_t &index) { + size_t dataDefSize = mplInfo.GetTblSize(kDataDef); + if (dataDefSize == 0) { + LINKER_DLOG(mpllinker) << "failed, the size of dataDef table is zero" << maple::endl; + return 0; + } + AddrSlice dataDefSlice(mplInfo.GetTblBegin(kDataDefOrig), dataDefSize); + if (dataDefSlice.Empty()) { + LINKER_DLOG(mpllinker) << "failed, dataDef table is null" << maple::endl; + return 0; + } + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kDataDefMuid); + if (pMuidTable == nullptr) { + LINKER_DLOG(mpllinker) << "failed, pMuidTable is null in " << mplInfo.name << maple::endl; + return 0; + } + + const int64_t start = 0; + int64_t end = static_cast(dataDefSize) - 1; + int64_t pos = Get()->BinarySearch(*pMuidTable, start, end, muid); + if (pos != -1) { + index = static_cast(pos); +#ifdef LINKER_32BIT_REF_FOR_DEF_UNDEF + return AddrToUint32(GetDefTableAddress(mplInfo, dataDefSlice, index, false)); +#else + return reinterpret_cast(GetDefTableAddress(mplInfo, dataDefSlice, index, false)); +#endif // USE_32BIT_REF + } else { + LINKER_DLOG(mpllinker) << "failed, not found MUID=" << muid.ToStr() << maple::endl; + return 0; + } +} + +void LinkerInvoker::ResolveVTableSymbolByClass(LinkerMFileInfo &mplInfo, const MClass *klass, bool forVtab) { + LinkerVTableItem *pVTable = forVtab ? 
reinterpret_cast( + klass->GetVtab()) : reinterpret_cast(klass->GetItab()); + if (pVTable == nullptr) { + return; + } + bool hasNotResolved = false; + uint32_t i = 0; + bool endFlag = false; + while (!endFlag) { + size_t vtabIndexOrg = pVTable[i].index; + LinkerRef vRef(vtabIndexOrg & (~kGoldSymbolTableEndFlag)); + endFlag = static_cast(vtabIndexOrg & kGoldSymbolTableEndFlag); + if (vRef.IsVTabIndex()) { // Index of undefine table. + hasNotResolved = ResolveSymbolByClass(mplInfo, pVTable[i], + vRef.GetTabIndex(), vRef.GetRawValue(), vRef.IsTabUndef()); + } else if (pVTable[i].index & kNegativeNum) { // Offset of address. + DataRefOffset *klassIndex = reinterpret_cast(&pVTable[i].index); +#ifdef USE_32BIT_REF + // To allow re-parse Vtable by the proper way. + if (!(static_cast(pVTable[i].index) < kDsoLoadedAddressEnd && + static_cast(pVTable[i].index) >= kDsoLoadedAddressStart)) { + pVTable[i].index = klassIndex->GetDataRef(); + } else { + LINKER_VLOG(mpllinker) << "(" << forVtab << "), not re-parse for patch, " << std::hex << "addr=" << + pVTable[i].index << ">X>" << klassIndex->GetDataRef() << " in " << mplInfo.name << maple::endl; + } +#else + pVTable[i].index = klassIndex->GetDataRef(); +#endif + } else if (!forVtab) { + MByteRef32 *ref = reinterpret_cast(&(pVTable[i].index)); + if (ref->IsOffset()) { + std::abort(); + } + } + ++i; + } + if (!forVtab && i == kItabHashSize + 1) { + ResolveITableSymbolByClass(mplInfo, klass); + } + if (hasNotResolved) { + char *className = klass->GetName(); + LINKER_LOG(ERROR) << "failed, " << "mplInfo=" << mplInfo.name << ", class=" << className << maple::endl; + return; + } +} + +void LinkerInvoker::ResolveITableSymbolByClass(LinkerMFileInfo &mplInfo, const MClass *klass) { +#ifdef USE_32BIT_REF + int32_t *address = reinterpret_cast(klass->GetItab()) + kItabHashSize; + LinkerITableItem *pITable = reinterpret_cast(static_cast(*address) & 0xffffffff); + size_t noConflictItabSize = (pITable != nullptr) ? pITable[0].index & 0xffff : 0; +#else + LinkerITableItem *pITable = reinterpret_cast(*reinterpret_cast( + reinterpret_cast(klass->GetItab()) + kItabHashSize)); + size_t noConflictItabSize = (pITable != nullptr) ? pITable[0].index & 0xffffffff : 0; +#endif + if (pITable == nullptr) { + return; + } + size_t conflictItabStart = noConflictItabSize * 2 + 2; // 2 is to count conflictItabStart. + size_t i = 2; + bool endFlag = false; + while (!endFlag) { + if (i % 2 == 1) { // i % 2 == 1 means the number is odd. + size_t itabIndexOrg = pITable[i].index; + LinkerRef ref(itabIndexOrg & (~kGoldSymbolTableEndFlag)); + endFlag = ((itabIndexOrg & kGoldSymbolTableEndFlag) != 0); + if (ref.IsITabIndex()) { // Index of undefine table. 
+ (void)ResolveSymbolByClass(mplInfo, pITable[i], + ref.GetTabIndex(), ref.GetRawValue(), ref.IsTabUndef()); + } else if (ref.GetRawValue() != 1) { + LOG(FATAL) << "ResolveITableSymbolByClass failed, pITableIndex isn't 1" << maple::endl; + } + } else if (conflictItabStart <= i) { +#ifdef USE_32BIT_REF + MByteRef32 *ref = reinterpret_cast(&(pITable[i].index)); +#else + MByteRef *ref = reinterpret_cast(&(pITable[i].index)); +#endif + void *addr = ref->GetRef(); + ref->SetRef(addr); + } + ++i; + } +} + +void LinkerInvoker::ResolveSuperClassSymbolByClass(LinkerMFileInfo &mplInfo, const MClass *klass) { + bool hasNotResolved = false; + uint32_t superSize = klass->GetNumOfSuperClasses(); + if (superSize == 0) { + return; + } + LinkerSuperClassTableItem *pSuperTable = reinterpret_cast(klass->GetSuperClassArrayPtr()); + if (pSuperTable == nullptr) { + return; + } + + for (uint32_t i = 0; i < superSize; ++i) { + LinkerRef ref(pSuperTable[i].index); + if (ref.IsIndex()) { + size_t index = ref.GetIndex(); + bool fromUndef = ref.IsFromUndef(); + hasNotResolved = ResolveSymbolByClass( + mplInfo, pSuperTable[i], index, 0, fromUndef); + } + } + if (hasNotResolved) { + char *className = klass->GetName(); + LINKER_LOG(ERROR) << "failed, " << "mplInfo=" << mplInfo.name << ", class=" << className << maple::endl; + return; + } +} + +LinkerMFileInfo *LinkerInvoker::GetLinkerMFileInfo(MFileInfoSource source, const void *key, bool isLazyBinding) { + if (source == kFromPC) { + return SearchAddress(key, kTypeText, isLazyBinding); + } else if (source == kFromAddr) { + return SearchAddress(key, kTypeWhole, isLazyBinding); + } else if (source == kFromMeta) { + return SearchAddress(key, kTypeClass, isLazyBinding); + } else if (source == kFromHandle) { + LinkerMFileInfo *mplInfo = nullptr; + (void)mplInfoHandleMap.Find(key, mplInfo); + return mplInfo; + } else if (source == kFromName) { + const std::string name = reinterpret_cast(key); + LinkerMFileInfo *mplInfo = nullptr; + (void)mplInfoNameMap.Find(name, mplInfo); + return mplInfo; + } else if (source == kFromClass) { + return SearchAddress(key, kTypeClass); + } + return nullptr; +} + +std::string LinkerInvoker::GetMFileName(MFileInfoSource source, const void *key, bool isLazyBinding) { + LinkerMFileInfo *mplInfo = GetLinkerMFileInfo(source, key, isLazyBinding); + return (mplInfo != nullptr) ? 
mplInfo->name : ""; +} + +bool LinkerInvoker::UpdateMethodSymbolAddressDef(const MClass *klass, const MUID &symbolId, const uintptr_t newAddr) { + // update symbol table (defining library only) + // get the defining library + LinkerMFileInfo *mplInfo = GetLinkerMFileInfo(kFromMeta, klass); + if (mplInfo == nullptr) { // it should be a dynamic-created class + LINKER_LOG(ERROR) << "No defining library of " << klass->GetName() << maple::endl; + return false; + } + LinkerMuidIndexTableItem *pIndexTable = mplInfo->GetTblBegin(kMethodMuidIndex); + size_t size = mplInfo->GetTblSize(kMethodDef); + LinkerAddrTableItem *pTable = mplInfo->GetTblBegin(kMethodDef); + LinkerMuidTableItem *pMuidTable = mplInfo->GetTblBegin(kMethodDefMuid); + // search by unique id + const int64_t start = 0; + int64_t end = static_cast(size) - 1; + int64_t pos = Get()->BinarySearch( + *pMuidTable, *pIndexTable, start, end, symbolId); + if (pos == -1) { // no idea about it + LINKER_LOG(ERROR) << "No symbol " << klass->GetName() << " in " << mplInfo->name << maple::endl; + return false; + } + LINKER_VLOG(mpllinker) << "Update " << mplInfo->name << ":DefTab#" << pos << ":" << pTable[pos].addr << maple::endl; + pTable[pos].addr = static_cast(newAddr); + return true; +} + +bool LinkerInvoker::UpdateMethodSymbolAddressUndef(LinkerMFileInfo &mplInfo, const UpdateNode &node) { + size_t methodUndefSize = mplInfo.GetTblSize(kMethodUndef); + AddrSlice methodUndefSlice(mplInfo.GetTblBegin(kMethodUndef), methodUndefSize); + LinkerMuidTableItem *pMuidTable = mplInfo.GetTblBegin(kMethodUndefMuid); + if (methodUndefSize != 0 && !methodUndefSlice.Empty() && pMuidTable != nullptr) { + const int64_t start = 0; + int64_t end = static_cast(methodUndefSize) - 1; + int64_t pos = Get()->BinarySearch(*pMuidTable, start, end, node.symbolId); + if (pos != -1 && (!mplInfo.IsFlag(kIsLazy) || + GetAddrBindingState(methodUndefSlice, static_cast(pos)) == kBindingStateResolved)) { + LINKER_VLOG(mpllinker) << "Update " << mplInfo.name << ":UndefTab#" << pos << ":" << + methodUndefSlice[static_cast(pos)].addr << maple::endl; + methodUndefSlice[static_cast(pos)].addr = static_cast(node.newAddr); + } + } + return true; +} + +bool LinkerInvoker::UpdateMethodSymbolAddressDecouple(LinkerMFileInfo &mplInfo, const UpdateNode &node) { + { // update vtable + size_t size = mplInfo.GetTblSize(kVTable); + LinkerVTableItem *pTable = mplInfo.GetTblBegin(kVTable); + if (size != 0 && pTable != nullptr) { + for (size_t i = 0; i < size; ++i) { + if (static_cast(pTable[i].index) == node.oldAddr) { // compare by address + LINKER_VLOG(mpllinker) << "Update " << mplInfo.name << + ":Vtable#" << i << ":" << node.oldAddr << maple::endl; + pTable[i].index = reinterpret_cast(node.newAddr); + } + } + } + } + + if (node.klass->IsInterface()) { + // interface has no itable + return true; + } + + { // update itable + size_t size = mplInfo.GetTblSize(kITable); + LinkerITableItem *pTable = mplInfo.GetTblBegin(kITable); + if (size != 0 && pTable != nullptr) { + for (size_t i = 0; i < size; ++i) { + if (static_cast(pTable[i].index) == node.oldAddr) { // compare by address + LINKER_VLOG(mpllinker) << "Update " << mplInfo.name << + ":Itable#" << i << ":" << node.oldAddr << maple::endl; + pTable[i].index = reinterpret_cast(node.newAddr); + } + } + } + } + return true; // next library +} + +// Get class loader hierarchy list +void LinkerInvoker::GetClassLoaderList(const LinkerMFileInfo &mplInfo, ClassLoaderListT &clList, bool isNewList) { + if (!isNewList || !clList.empty()) { + return; + } + 
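+  // Build the lookup order: the system class loader may be pushed first, then this file's own
+  // class loader and its parents; a non-null BootClassLoader instance is skipped (nullptr stands in for it).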
MObject *parent = reinterpret_cast(mplInfo.classLoader); + MObject *system = reinterpret_cast(pLoader->GetSystemClassLoader()); + // first insert system class loader will cause last search + if (parent != system && system != nullptr && mplInfo.name != Get()->GetPatchPath()) { + LINKER_VLOG(mpllinker) << "push SystemClassLoader(" << system << ")" << maple::endl; + clList.push_back(reinterpret_cast(system)); + } + LINKER_VLOG(mpllinker) << ">>> building hierarchy for " << mplInfo.name << maple::endl; + for (;;) { + // Skip if it's boot class loader instance. + if (parent == nullptr || !IsBootClassLoader(parent)) { + LINKER_VLOG(mpllinker) << "push " << parent << maple::endl; + clList.push_back(reinterpret_cast(parent)); + } else { + LINKER_VLOG(mpllinker) << "skip BootClassLoader(" << parent << ")" << maple::endl; + } + if (parent == nullptr) { // 'parent is null' means it reaches the boot class loader. + LINKER_VLOG(mpllinker) << "<<< building hierarchy for " << mplInfo.name << maple::endl; + break; + } + parent = reinterpret_cast(pLoader->GetCLParent(reinterpret_cast(parent))); + } +} + +void LinkerInvoker::ResetClassLoaderList(const MObject *classLoader) { + auto handle = [this](LinkerMFileInfo &mplInfo) { + this->ResetMplInfoClList(mplInfo); + }; + (void)mplInfoListCLMap.ForEach(classLoader, handle); +} + +void LinkerInvoker::GetLinkerMFileInfos(LinkerMFileInfo &mplInfo, LinkerMFileInfoListT &fileList, bool isNewList) { + // Lazy building class loader hierarchy. + GetClassLoaderList(mplInfo, mplInfo.clList, isNewList); + ClassLoaderListT &classLoaderList = mplInfo.clList; + if (classLoaderList.empty()) { + LINKER_LOG(ERROR) << "clList null for " << mplInfo.name << maple::endl; + return; + } + + // Iteration from parent to child. + for (ClassLoaderListRevItT it = classLoaderList.rbegin(); it != classLoaderList.rend(); ++it) { + mplInfoListCLMap.FindToExport(reinterpret_cast(*it), fileList); + } +} + +void LinkerInvoker::ClearLinkerMFileInfo() { + auto handle = [this](LinkerMFileInfo &item) { + (void)this; +#ifdef LINKER_RT_CACHE + this->Get()->FreeAllTables(item); +#endif // LINKER_RT_CACHE + delete (&item); + }; + mplInfoList.ForEach(handle); + mplInfoList.Clear(); + mplInfoNameMap.Clear(); + mplInfoHandleMap.Clear(); + mplInfoElfAddrSet.Clear(); + mplInfoElfAddrLazyBindingSet.Clear(); + mplInfoListCLMap.Clear(); + ClearAppInfo(); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/linker/linker_utils.cpp b/src/mrt/compiler-rt/src/linker/linker_utils.cpp new file mode 100644 index 0000000000..709affa7ae --- /dev/null +++ b/src/mrt/compiler-rt/src/linker/linker_utils.cpp @@ -0,0 +1,375 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "linker/linker_utils.h"
+
+#include
+#include
+#include
+#include
+
+#include "mrt_object.h"
+#include "mclass_inline.h"
+#include "modifier.h"
+
+namespace maplert {
+namespace linkerutils {
+static int64_t kGlobalVersionCode = -1;
+static LoadStateType kGlobalLoadState = kLoadStateNone;
+static AppLoadState kGlobalAppLoadState = kAppLoadNone;
+static std::string kGlobalAppDataDir;
+static std::string kGlobalAppBaseStr;
+// Release the (physical) memory to kernel. Note: only for RO sections
+void ReleaseMemory(void *start, void *end) {
+  constexpr int maxPageSize = 15;
+  uintptr_t startAddr = reinterpret_cast<uintptr_t>(start);
+  uintptr_t endAddr = reinterpret_cast<uintptr_t>(end);
+  if (startAddr == 0 || endAddr == 0 || startAddr >= endAddr) {
+    return;
+  }
+
+  // round up start to page size: assuming 4K page size, 4K=2^12
+  startAddr = (startAddr + (1 << 12) - 1) & (~(static_cast<uintptr_t>((1 << 12) - 1)));
+  if (endAddr < (startAddr + (1 << maxPageSize))) {
+    return; // the region is not big enough, just skip
+  }
+  // calculate section size, and round it down to 4K page size, 4K=2^12
+  size_t length = (endAddr - startAddr) & (~(static_cast<uintptr_t>((1 << 12) - 1)));
+
+  // doing madvise, ignore its return status, it should be safe for text/ro sections.
+  (void)(madvise(reinterpret_cast<void*>(startAddr), length, MADV_DONTNEED));
+}
+
+// For un|define tables' address.
+void *GetSymbolAddr(void *handle, const char *symbol, bool isFunction) {
+  if (handle == nullptr) {
+    LINKER_VLOG(mpllinker) << "failed, handle is null" << maple::endl;
+    return nullptr;
+  }
+
+  PtrFuncType func = reinterpret_cast<PtrFuncType>(dlsym(handle, symbol));
+  if (func == nullptr) {
+    LINKER_VLOG(mpllinker) << "failed, function " << symbol << " is null" << maple::endl;
+    return nullptr;
+  }
+  return isFunction ? func() : reinterpret_cast<void*>(func);
+}
+
+// Get the maximum version from the range table.
+int32_t GetMaxVersion() {
+  constexpr int maxSize = 32;
+  if (kGlobalLoadState == kLoadStateApk) {
+    return static_cast<int32_t>(static_cast<uint64_t>(kGlobalVersionCode) >> maxSize);
+  }
+  return 0;
+}
+
+// Get the minimum version from the range table.
+int32_t GetMinVersion() { + if (kGlobalLoadState == kLoadStateApk) { + return static_cast(kGlobalVersionCode); + } + return 0; +} + +void GetMplVersion(const LinkerMFileInfo &mplInfo, MapleVersionT &version) { + void *begin = static_cast(GetSymbolAddr(mplInfo.handle, kMapleVersionBegin, true)); + void *end = static_cast(GetSymbolAddr(mplInfo.handle, kMapleVersionEnd, true)); + if (begin == end) { + LINKER_VLOG(mpllinker) << mplInfo.name << " has no version number!\n" << maple::endl; + return; + } + MapleVersionT *item = static_cast(begin); + version.mplMajorVersion = item->mplMajorVersion; + version.compilerMinorVersion = item->compilerMinorVersion; +} + +void GetMplCompilerStatus(const LinkerMFileInfo &mplInfo, uint32_t &status) { + void *begin = static_cast(GetSymbolAddr(mplInfo.handle, kMapleCompileStatusBegin, true)); + void *end = static_cast(GetSymbolAddr(mplInfo.handle, kMapleCompileStatusEnd, true)); + if (begin == end) { + status = 0; + return; + } + status = *(static_cast(begin)); +} + +std::string GetAppPackageName() { + size_t pos = kGlobalAppDataDir.find_last_of('/'); + std::string packageName = kGlobalAppDataDir.substr(pos + 1); + return packageName; +} + +const std::string &GetAppInfo() { + return kGlobalAppDataDir; +} +void ClearAppInfo() { + kGlobalAppDataDir.clear(); +} +const LoadStateType &GetLoadState() { + return kGlobalLoadState; +} +void SetLoadState(const LoadStateType state) { + kGlobalLoadState = state; +} +const AppLoadState &GetAppLoadState() { + return kGlobalAppLoadState; +} +void SetAppLoadState(const AppLoadState state) { + kGlobalAppLoadState = state; +} +const std::string &GetAppBaseStr() { + return kGlobalAppBaseStr; +} +void SetAppBaseStr(const std::string &str) { + kGlobalAppBaseStr = str; +} +// Set running app's state, if data path and version code are valid. +void SetAppInfo(const char *dataPath, int64_t appVersionCode) { + if (kGlobalLoadState == kLoadStateApk) { + LINKER_LOG(ERROR) << "(" << dataPath << ", " << appVersionCode << "), failed, already set before? appDataDir=" << + kGlobalAppDataDir << ", versionCode=" << kGlobalVersionCode << maple::endl; + return; + } + + kGlobalAppDataDir = std::string(dataPath); + kGlobalVersionCode = appVersionCode; + if (!kGlobalAppDataDir.empty() && kGlobalVersionCode > 0) { + kGlobalLoadState = kLoadStateApk; + kGlobalAppLoadState = kAppLoadBaseOnlyReady; + } else { + LINKER_LOG(ERROR) << "(" << dataPath << ", " << appVersionCode << "), failed, appDataDir=" << kGlobalAppDataDir << + ", versionCode=" << kGlobalVersionCode << maple::endl; + kGlobalLoadState = kLoadStateNone; + } +} + +bool NeedRelocateSymbol(const std::string &name) { + // 900,4088... is VersionCode. 
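+  // Relocation is skipped only for the package-name / version-code pairs listed below.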
+ static std::map apps = { + { "com.ss.android.article.news", 900 }, + { "com.sina.weibolite", 4088 }, + { "com.xunmeng.pinduoduo", 46600 }, + { "com.tencent.kg.android.lite", 282 }, + { "com.Qunar", 198 }, + { "com.wuba", 82400 } + }; + for (auto &app : apps) { + if (kGlobalVersionCode == app.second) { + if (name.find(app.first) != std::string::npos) { + return false; + } + } + } + return true; +} + +void *GetDefTableAddress(const LinkerMFileInfo &mplInfo, const AddrSlice &defSlice, size_t index, bool forMethod) { + if (mplInfo.IsFlag(kIsLazy) || (!mplInfo.IsFlag(kIsRelDataOnce) && !forMethod) || + (!mplInfo.IsFlag(kIsRelMethodOnce) && forMethod)) { + return defSlice[index].AddressFromOffset(); + } else { + return defSlice[index].Address(); + } +} + +const char *GetMuidDefFuncTableFuncname(const LinkerInfTableItem &pInfTable) { + const DataRefOffset32 *data = reinterpret_cast(&pInfTable.funcNameOffset); + MethodMetaBase *methodInfo = data->GetDataRef(); + return methodInfo->GetName(); +} + +std::string GetMuidDefFuncTableSigname(const LinkerInfTableItem &pInfTable) { + const DataRefOffset32 *data = reinterpret_cast(&pInfTable.funcNameOffset); + MethodMetaBase *methodInfo = data->GetDataRef(); + std::string signature; + methodInfo->GetSignature(signature); + return signature; +} + +const char *GetMuidDefFuncTableClassname(const LinkerInfTableItem &pInfTable) { + const DataRefOffset32 *data = reinterpret_cast(&pInfTable.funcNameOffset); + MethodMetaBase *methodInfo = data->GetDataRef(); + MClass *klass = methodInfo->GetDeclaringClass(); + if (klass == nullptr) { + LINKER_LOG(FATAL) << "klass is null" << maple::endl; + } + return klass->GetName(); +} + +// Get method symbol by the offsets of class name, method name and signature. +std::string GetMethodSymbolByOffset(const LinkerInfTableItem &infTable) { + std::string sym; + const char *name = GetMuidDefFuncTableClassname(infTable); + if (name != nullptr) { + sym += name; + sym += '|'; + } else { + LINKER_LOG(WARNING) << "class name is null!?" << maple::endl; + } + + name = GetMuidDefFuncTableFuncname(infTable); + if (name != nullptr) { + sym += name; + sym += '|'; + } else { + LINKER_LOG(WARNING) << "function name is null!?" << maple::endl; + } + + std::string signature = GetMuidDefFuncTableSigname(infTable); + if (!signature.empty()) { + sym += signature; + } else { + LINKER_LOG(WARNING) << "signature is null!?" 
<< maple::endl; + } + return sym; +} + +#ifdef LINKER_RT_CACHE +constexpr uint8_t kLinkerCacheTypeStrSize = 11; // 11 is LinkerCacheType::kLinkerCacheLazy + 1 +constexpr char kLinkerLazyCacheSuffix[] = ".lzy"; +constexpr char kLinkerUndefCacheSuffixFcn[] = ".fcn"; +constexpr char kLinkerDefCacheSuffixFcn[] = ".def.fcn"; +constexpr char kLinkerUndefCacheSuffixVar[] = ".var"; +constexpr char kLinkerDefCacheSuffixVar[] = ".def.var"; +constexpr char kLinkerCacheSuffixVTable[] = ".vtb"; +constexpr char kLinkerCacheSuffixCTable[] = ".ctb"; +constexpr char kLinkerCacheSuffixITable[] = ".itb"; +constexpr char kLinkerCacheSuffixFieldOffset[] = ".fos"; +constexpr char kLinkerCacheSuffixFieldTable[] = ".ftb"; +constexpr char kLinkerCacheSuffixStaAddrTable[] = ".sta"; +const std::string LinkerCacheTypeStr[kLinkerCacheTypeStrSize] = { + kLinkerUndefCacheSuffixVar, + kLinkerDefCacheSuffixVar, + kLinkerUndefCacheSuffixFcn, + kLinkerDefCacheSuffixFcn, + kLinkerCacheSuffixVTable, + kLinkerCacheSuffixCTable, + kLinkerCacheSuffixFieldOffset, + kLinkerCacheSuffixFieldTable, + kLinkerCacheSuffixITable, + kLinkerCacheSuffixStaAddrTable, + kLinkerLazyCacheSuffix +}; + +// get LinkerCacheTypeStr +const std::string &GetLinkerCacheTypeStr(LinkerCacheType index) { + return LinkerCacheTypeStr[static_cast(index)]; +} + +bool FileExists(const char *name) { + struct stat st; + if (stat(name, &st) == 0) { + return S_ISREG(st.st_mode); + } else { + return false; + } +} + +bool FolderExists(const char *name) { + struct stat st; + if (stat(name, &st) == 0) { + return S_ISDIR(st.st_mode); + } else { + return false; + } +} + +bool PrepareFolder(const std::string &dir) { + size_t first = 0; + size_t second = 0; + struct stat dirStat; + if (stat(dir.c_str(), &dirStat) == 0) { + if (!S_ISDIR(dirStat.st_mode)) { + LINKER_LOG(ERROR) << "exist, but not directory! " << dir << maple::endl; + return false; + } else { + return true; + } + } + + // It's not "No such file or directory"? + if (errno != ENOENT) { + LINKER_LOG(ERROR) << strerror(errno) << maple::endl; + return false; + } + + for(;;) { + // Get the parent directory. + first = second; + if (first == dir.length() - 1) { // Exit if reach the end, or root /. + return true; + } + second = dir.find('/', first + 1); + if (second == std::string::npos) { // If it's not ended by /. + second = dir.length() - 1; + } + + // Check if exist. + std::string substr = dir.substr(0, second + 1); + if (stat(substr.c_str(), &dirStat) == 0) { + if (!S_ISDIR(dirStat.st_mode)) { + LINKER_LOG(ERROR) << "parent exist, but not directory! " << substr << maple::endl; + return false; + } else { + LINKER_DLOG(mpllinker) << "parent exist! " << substr << maple::endl; + continue; + } + } + + // Create the folder. + mode_t mode = S_IRWXU | S_IRWXG | S_IXOTH; + if (mkdir(substr.c_str(), mode) != 0) { + LINKER_LOG(ERROR) << "failed to create dir " << substr << ":" << strerror(errno) << maple::endl; + return false; + } + if (chmod(substr.c_str(), mode) != 0) { + LINKER_LOG(ERROR) << "failed to chmod dir " << substr << ":" << strerror(errno) << maple::endl; + return false; + } + LINKER_DLOG(mpllinker) << "created dir " << substr << " successfully!" 
<< maple::endl; + } +} + +// note: the install cache is specified by installd(see mpl_cache_gen.cpp) +static constexpr char kLinkerDataAppPath[] = "/data/app/"; +static constexpr char kLinkerDalvikDcpCachePath[] = "/data/dalvik-cache/instdcp/"; +bool GetInstallCachePath(LinkerMFileInfo &mplInfo, std::string &path, LinkerCacheType cacheType) { + if (mplInfo.name.find(kLinkerDataAppPath) == 0) { + // for data/app + size_t pos = mplInfo.name.find('/', strlen(kLinkerDataAppPath)); + if (pos != std::string::npos) { + path = mplInfo.name.substr(0, pos + 1); + path += "instdcp/"; + std::string name = mplInfo.name.substr(pos + 1); + std::replace(name.begin(), name.end(), '/', '_'); + path += name; + } else { + LINKER_VLOG(mpllinker) << "failed to get app path for " << mplInfo.name << maple::endl; + return false; + } + } else { + // for system/app + path = kLinkerDalvikDcpCachePath; + std::string name = mplInfo.name; + std::replace(name.begin(), name.end(), '/', '_'); + path += name; + } + path += GetLinkerCacheTypeStr(cacheType); + return true; +} +#endif +} +} diff --git a/src/mrt/compiler-rt/src/loader/file_adapter.cpp b/src/mrt/compiler-rt/src/loader/file_adapter.cpp new file mode 100644 index 0000000000..c2d06e87cc --- /dev/null +++ b/src/mrt/compiler-rt/src/loader/file_adapter.cpp @@ -0,0 +1,202 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "file_adapter.h" +#include "loader_api.h" +#include "loader/loader_utils.h" +#include "utils/string_utils.h" +#include "base/file_utils.h" +#include "exception/stack_unwinder.h" +#include "interp_support.h" + +namespace maplert { +FileAdapter::FileAdapter(const std::string srcPath) + : originalPath(loaderutils::GetNicePath(srcPath)), + convertPath(loaderutils::Dex2MplPath(originalPath)), + isThirdApp(stringutils::WithPrefix(originalPath.c_str(), "/data/app/")), + isPartialAot(stringutils::WithSuffix(convertPath.c_str(), "/maple/arm64/maplepclasses.so")) {} + +FileAdapter::FileAdapter(const std::string srcPath, const IAdapterEx adapter) : FileAdapter(srcPath) { + pAdapterEx = adapter; +} + +ObjFile *FileAdapter::OpenObjectFile(jobject classLoader, bool isFallBack, const std::string designPath) { + // try to open MFile first + if (!isFallBack) { + return pAdapterEx.As()->OpenMplFile(classLoader, designPath.empty() ? convertPath : designPath); + } else { + if (pAdapterEx.As()->IsSystemServer()) { + return nullptr; // don't turn-on interpreter for system-server + } + // then try to open the DFile + return pAdapterEx.As()->OpenDexFile(classLoader, designPath.empty() ? originalPath : designPath); + } +} + +bool FileAdapter::CloseObjectFile(ObjFile &objFile) { + return objFile.Close(); +} +const ObjFile *FileAdapter::Get(const std::string &path) { + std::lock_guard lock(mMplLibLock); + return GetLocked(path); +} + +const ObjFile *FileAdapter::GetLocked(const std::string &path) { + auto it = mMplLibs.find(path); + return (it == mMplLibs.end()) ? 
nullptr : it->second; +} + +void FileAdapter::Put(const std::string &path, const ObjFile &objFile) { + std::lock_guard lock(mMplLibLock); + mMplLibs[path] = &objFile; + if ((objFile.GetFileType() == FileType::kDFile) && (!IsPartialAot())) { + (void)mMplSeqList.insert(mMplSeqList.begin(), &objFile); + } else { + mMplSeqList.push_back(&objFile); + } +} + +bool FileAdapter::Register(const IEnv env, jclass javaClass, const std::string &jniClassName, + const INativeMethod methods, int32_t methodCount, bool fake) { + std::lock_guard lock(mMplLibLock); + + for (const ObjFile *mf : mMplSeqList) { + ObjFile *objFile = const_cast(mf); + if (mf != nullptr && objFile->CanRegisterNativeMethods(jniClassName) && (objFile->RegisterNativeMethods(env, + javaClass, jniClassName, methods, methodCount, fake) == true)) { + return true; + } + } + return false; +} + +void FileAdapter::GetObjFiles(std::vector &bootClassPath) { + std::lock_guard lock(mMplLibLock); + (void)std::copy(mMplSeqList.begin(), mMplSeqList.end(), std::back_inserter(bootClassPath)); +} + +void FileAdapter::GetObjLoaders(std::set &classLoaders) { + std::lock_guard lock(mMplLibLock); + std::vector::iterator it = mMplSeqList.begin(); + for (; it != mMplSeqList.end(); ++it) { + jobject classLoader = (*it)->GetClassLoader(); + (void)classLoaders.insert(classLoader); + } +} + +void FileAdapter::GetObjFileList(std::vector &pathList, bool isFallBack) { + if (!isFallBack) { + if (isThirdApp) { + hasStartUp = GetObjFileListInternal(pathList); + } else { + if (stringutils::WithSuffix(convertPath.c_str(), ".so")) { + pathList.push_back(convertPath); + } + hasStartUp = false; + } + } else { + hasStartUp = false; + pAdapterEx.As()->GetListOfDexFileToLoad(originalPath, pathList); + } + hasSiblings = pathList.size() > 1; +} + +bool FileAdapter::GetObjFileListInternal(std::vector &pathList) { + if (!stringutils::WithSuffix(convertPath.c_str(), ".so")) { + CL_LOG(ERROR) << "base ObjFile name not end with .so: " << convertPath << maple::endl; + return false; + } + + // Base ObjFile located in zip, add to path directly, and return + // Tips: Not support multi-so in zip for performance concern + if (convertPath.rfind("!/") != std::string::npos) { + pathList.push_back(convertPath); + return false; + } + + size_t posPostfix = convertPath.rfind(".so"); + + // Base MplFiles, whose name is mapleclasses.so or maplepclasses.so + if (maple::FileUtils::FileExists(convertPath)) { + pathList.push_back(convertPath); + } else { + CL_LOG(ERROR) << "cannot find base ObjFile: " << convertPath << maple::endl; + return false; + } + + // Sibling MplFiles, whose name starts from 2, like mapleclasses2.so, mapleclass3.so, ... and mapleclass999.so + for (int i = 2;; ++i) { + std::string incFileName(convertPath); + (void)incFileName.insert(posPostfix, std::to_string(i)); + if (maple::FileUtils::FileExists(incFileName)) { + pathList.push_back(incFileName); + } else { + CL_VLOG(classloader) << "cannot find siblings ObjFile: " << incFileName << maple::endl; + break; + } + } + + // Stub ObjFile, whose name index is 999 + std::string stubFileName(convertPath); + std::string stubIndex("999"); + (void)stubFileName.insert(posPostfix, stubIndex); + if (maple::FileUtils::FileExists(stubFileName)) { + pathList.push_back(stubFileName); + } else { + CL_VLOG(classloader) << "cannot find stub-siblings ObjFile: " << stubFileName << maple::endl; + } + + // Starup.so ObjFile. 
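+  // If a maplestartup.so exists beside the base file, it is inserted at the head of the list and loaded first.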
+ std::string fileName = "mapleclasses.so"; + size_t posFileName = convertPath.rfind(fileName); + if (posFileName == std::string::npos) { + // Must be "maplepclasses.so" + return false; + } + std::string startupFileName("maplestartup.so"); + std::string startupPathName(convertPath); + (void)startupPathName.replace(posFileName, fileName.length(), startupFileName); + if (maple::FileUtils::FileExists(startupPathName)) { + (void)pathList.insert(pathList.begin(), startupPathName); + return true; + } else { + CL_VLOG(classloader) << "cannot find startup ObjFile: " << startupPathName << maple::endl; + return false; + } +} +void FileAdapter::DumpUnregisterNativeFunc(std::ostream &os) { + std::lock_guard lock(mMplLibLock); + + for (auto mf : mMplSeqList) { + ObjFile *objFile = const_cast(mf); + objFile->DumpUnregisterNativeFunc(os); + } +} +void FileAdapter::DumpMethodName() { + // We needn't care about the performance of unwinding here. + std::vector uwContextStack; + // Unwind as many as possible till reaching the end. + MapleStack::FastRecordCurrentJavaStack(uwContextStack, MAPLE_STACK_UNWIND_STEP_MAX); + + for (auto &uwContext : uwContextStack) { + std::string methodName; + if (!uwContext.IsInterpretedContext()) { + uwContext.frame.GetJavaMethodFullName(methodName); + } else { + UnwindContextInterpEx::GetJavaMethodFullNameFromUnwindContext(uwContext, methodName); + } + CL_LOG(WARNING) << "LoadClassFromMplFile(), " << methodName << maple::endl; + } +} +} diff --git a/src/mrt/compiler-rt/src/loader/hash_pool.cpp b/src/mrt/compiler-rt/src/loader/hash_pool.cpp new file mode 100644 index 0000000000..039b0411f7 --- /dev/null +++ b/src/mrt/compiler-rt/src/loader/hash_pool.cpp @@ -0,0 +1,297 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "loader/hash_pool.h" +#include "cinterface.h" +#include "utils/time_utils.h" + +using namespace std; +namespace maplert { +// special case 0, handle next_prime(i) for i in [1, 210) +const unsigned kSmallPrimes[] = { + 0, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, + 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211 +}; + +// potential primes = 210*k + kIndices[i], k >= 1 these numbers are not divisible +// by 2, 3, 5 or 7 (or any integer 2 <= j <= 10 for that matter). 
+const unsigned kIndices[] = { + 1, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, + 121, 127, 131, 137, 139, 143, 149, 151, 157, 163, 167, 169, 173, 179, 181, 187, 191, 193, 197, 199, 209 +}; + +const unsigned kFactor[] = { + 10, 2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, 8, 4, 2, 4, + 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, 6, 4, 2, 4, 2, 10 +}; +const size_t kMaxIndices = 210; +const size_t kSmallPrimesNum = sizeof(kSmallPrimes) / sizeof(kSmallPrimes[0]); +const size_t kFactNum = sizeof(kFactor) / sizeof(kFactor[0]); +const size_t kIndicesNum = sizeof(kIndices) / sizeof(kIndices[0]); +size_t MClassHashPool::FindNextPrime(size_t nr) const { + // Else nr > largest kSmallPrimes + bool flag = false; + // Start searching list of potential primes: L * k0 + kIndices[in] + // Select first potential prime >= nr + // Known a-priori nr >= L + size_t k0 = nr / kMaxIndices; + size_t in = static_cast(std::lower_bound(kIndices, kIndices + kIndicesNum, nr - k0 * kMaxIndices) - kIndices); + nr = kMaxIndices * k0 + kIndices[in]; + for (;;) { + // It is known a-priori that nr is not divisible by 2, 3, 5 or 7, so don't test those (j == 5 -> divide by 11 first) + // And the potential primes start with 211, so don't test against the last small prime. + // Divide nr by all primes or potential primes(i) until: + // 1.The division is even, so try next potential prime. 2.The i > sqrt(nr), in which case nr is prime. + size_t ix = 211; + for (size_t j = 5; j < kSmallPrimesNum - 1; ++j) { + const std::size_t qr = nr / kSmallPrimes[j]; + if (qr < kSmallPrimes[j]) { + return nr; + } + if (nr == qr * kSmallPrimes[j]) { + goto NEXT; + } + } + // nr cann't divisible by small primes, try potential primes + for (;;) { + std::size_t qr = nr / ix; + if (qr < ix) { + return nr; + } + if (nr == qr * ix) { + break; + } + for (size_t factorIndex = 0; factorIndex < kFactNum; ++factorIndex) { + ix += kFactor[factorIndex]; + qr = nr / ix; + if (qr < ix) { + return nr; + } + if (nr == qr * ix) { + flag = true; + break; + } + } + if (flag) { + flag = false; + break; + } + ix += 2; // This will loop i to the next "plane" of potential primes, 2:Even numbers can not be primes. + } +NEXT: + // nr is not prime. Increment nr to next potential prime. + if (++in == kIndicesNum) { + ++k0; + in = 0; + } + nr = kMaxIndices * k0 + kIndices[in]; + } +} + +inline uint32_t MClassHashPool::NextPrime(size_t n) const { + size_t nr = n * kMaxExtended; // to increase the number of nbuckets in advance, which can reduce the conflict rate. 
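+  // Rounding the extended bucket count up to a prime keeps the modulo hash evenly distributed.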
+ // If nr is small enough, search in kSmallPrimes + if (nr <= kSmallPrimes[kSmallPrimesNum - 1]) { + return *std::lower_bound(kSmallPrimes, kSmallPrimes + kSmallPrimesNum, nr); + } + return static_cast(FindNextPrime(nr)); +} + +MClassHashPool::MClassHashPool() : mBucketCount(0), mClassCount(0), mConflictBucketCount(0), mFillBucketCount(0) {} + +MClassHashPool::~MClassHashPool() { + bucketData.clear(); + bitBuckets.clear(); + bitCounts.clear(); +} + +void MClassHashPool::Create(uint32_t classCount) { + mClassCount = classCount; + if (classCount != 0) { + mBucketCount = NextPrime(classCount); + } + + CL_VLOG(classloader) << "Create(), extended bucket count: " << classCount << "-->" << mBucketCount << maple::endl; + bitBuckets.resize(mBucketCount / kBucketBitNum + 1, 0); + bitCounts.resize(mBucketCount / kBucketBitNum + 1, 0); +} + +void MClassHashPool::Destroy() { + bucketData.clear(); + bitBuckets.clear(); + bitCounts.clear(); +} + +inline uint32_t MClassHashPool::BitCount(uint32_t n) const { + return __builtin_popcountl(n); +} + +inline uint32_t MClassHashPool::BitCount(uint64_t n) const { + return __builtin_popcountll(n); +} + +void MClassHashPool::InitClass(uint32_t hashIndex) { + uint32_t bucketPos = hashIndex % mBucketCount; + uint32_t index = bucketPos / kBucketBitNum; + uint32_t offset = bucketPos % kBucketBitNum; + BucketType bucketVal = bitBuckets[index]; + BucketType targetBit = 1ul << (kBucketBitNum - offset - 1); + if ((bucketVal & targetBit) == 0) { // no conflict + bitBuckets[index] = bucketVal | targetBit; + ++mFillBucketCount; + } else { + ++mConflictBucketCount; + } +} +void MClassHashPool::Collect() { + bucketData.resize(mFillBucketCount, 0); + uint32_t total = 0; + for (uint32_t i = 0; i < bitCounts.size(); ++i) { + bitCounts[i] = total; + total += BitCount(bitBuckets[i]); + } +} + +void MClassHashPool::Set(uint32_t hashIndex, ClassPtr targetKlass) { + uint32_t bucketPos = hashIndex % mBucketCount; + uint32_t index = bucketPos / kBucketBitNum; + uint32_t offset = bucketPos % kBucketBitNum; + BucketType targetBit = 1ul << (kBucketBitNum - offset - 1); + if ((bitBuckets[index] & targetBit) == 0) { // Not in pool + CL_LOG(FATAL) << "Fatal Error, Set HashPool Empty Bucket, this=" << this << ", hashIndex=" << hashIndex << + ", mBucketCount=" << mBucketCount << ", bitBuckets[index]" << bitBuckets[index] << maple::endl; + return; + } + BucketType shift = (offset == 0) ? 
0 : bitBuckets[index] >> (kBucketBitNum - offset); + uint32_t dataPos = bitCounts[index] + BitCount(shift); + if (dataPos >= mFillBucketCount) { + CL_LOG(FATAL) << "Fatal Error, Set HashPool Index overflow, this=" << this << ", index=" << index << ", offset=" << + offset << ", dataPos=" << dataPos << ", mFillBucketCount=" << mFillBucketCount << ", bitCounts[index]=" << + bitCounts[index] << ", BitCount(shift)=" << BitCount(shift) << ", bitBuckets[index]=" << bitBuckets[index] << + ", mBucketCount=" << mBucketCount << ", bitBuckets.size()=" << bitBuckets.size() << ", hashIndex=" << + hashIndex << maple::endl; + return; + } + ClassPtr data = bucketData[dataPos]; + if (data == 0) { // no conflict + bucketData[dataPos] = targetKlass; + } else { // conflict + if ((data & kIsConflict) == 0) { // first conflict + ConflictClass *obj = reinterpret_cast(maplert::MRT_AllocFromMeta(sizeof(ConflictClass), + kClassMetaData)); + obj->conflictData.push_back(data); + obj->conflictData.push_back(targetKlass); + data = static_cast(reinterpret_cast(obj)); + data |= kIsConflict; + bucketData[dataPos] = data; + } else { // no first conflict + ConflictClass *obj = reinterpret_cast(data & ~kMClassAddressMask); + obj->conflictData.push_back(targetKlass); + } + } +} + +MClass *MClassHashPool::Get(const std::string &name) const { + auto compare = [&](const ClassPtr data, const std::string &name)->bool { + if ((data & kIsDexClassOffset) != 0) { + return false; + } + MClass *klass = reinterpret_cast(data & ~kMClassAddressMask); + if (name == klass->GetName()) { + return true; + } + return false; + }; + return reinterpret_cast(Get(name, compare)); +} + +ClassPtr MClassHashPool::Get(const std::string &name, const NameCompare compare) const { + if (mClassCount == 0) { + return 0; + } + uint32_t hashIndex = GetHashIndex32(name); + uint32_t bucketPos = hashIndex % mBucketCount; + uint32_t index = bucketPos / kBucketBitNum; + uint32_t offset = bucketPos % kBucketBitNum; + BucketType targetBit = 1ul << (kBucketBitNum - offset - 1); + if ((bitBuckets[index] & targetBit) == 0) { // Not in pool + return 0; + } + BucketType shift = (offset == 0) ? 
0 : bitBuckets[index] >> (kBucketBitNum - offset); + uint32_t dataPos = bitCounts[index] + BitCount(shift); + if (dataPos >= mFillBucketCount) { + CL_LOG(FATAL) << "Fatal Error, Get HashPool Index overflow, this=" << this << ", index=" << index << ", offset=" << + offset << ", dataPos=" << dataPos << ", mFillBucketCount=" << mFillBucketCount << ", bitCounts[index]=" << + bitCounts[index] << ", BitCount(shift)=" << BitCount(shift) << ", bitBuckets[index]=" << bitBuckets[index] << + ", mBucketCount=" << mBucketCount << ", bitBuckets.size()=" << bitBuckets.size() << ", hashIndex=" << + hashIndex << maple::endl; + return 0; + } + ClassPtr data = bucketData[dataPos]; + if (data == 0) { + return 0; + } + if ((data & kIsConflict) == 0) { // no conflict + if (compare(data, name)) { + return data; + } + } else { // conflict + ConflictClass *obj = reinterpret_cast(data & ~kMClassAddressMask); + for (ClassPtr conflict : obj->conflictData) { + if (compare(conflict, name)) { + return conflict; + } + } + } + return 0; +} + +double MClassHashPool::GetHashConflictRate() const { + return static_cast(mConflictBucketCount) / static_cast(mBucketCount); +} + +double MClassHashPool::GetClassConflictRate() const { + return static_cast(mClassCount - mFillBucketCount) / static_cast(mClassCount); +} + +size_t MClassHashPool::CalcMemoryCost() const { + size_t memory = 0; + memory += sizeof(mBucketCount) + sizeof(mClassCount) + sizeof(mFillBucketCount) + sizeof(mConflictBucketCount) + 1; + memory += bitBuckets.size() * sizeof(BucketType); + memory += bitCounts.size() * sizeof(uint16_t); + memory += bucketData.size() * sizeof(ClassPtr); + for (ClassPtr ptr : bucketData) { + if ((ptr & kIsConflict) != 0) { // conflict calc + ConflictClass *obj = reinterpret_cast(ptr & ~kIsConflict); + memory += sizeof(ConflictClass) + obj->conflictData.size() * sizeof(ClassPtr); + } + } + return memory; +} + +void MClassHashPool::VisitClasses(const maple::rootObjectFunc &func) { + for (ClassPtr ptr : bucketData) { + if ((ptr & kIsConflict) != 0) { // conflict calc + ConflictClass *obj = reinterpret_cast(ptr & ~kIsConflict); + for (ClassPtr conflict : obj->conflictData) { + MClass *classInfo = reinterpret_cast(conflict); + func(reinterpret_cast(classInfo)); + } + } else { // not conflict + MClass *classInfo = reinterpret_cast(ptr); + func(reinterpret_cast(classInfo)); + } + } +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/loader/loader_utils.cpp b/src/mrt/compiler-rt/src/loader/loader_utils.cpp new file mode 100644 index 0000000000..bae4768e1c --- /dev/null +++ b/src/mrt/compiler-rt/src/loader/loader_utils.cpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "loader/loader_utils.h" + +#include + +#include "utils/string_utils.h" +#include "base/file_utils.h" +#include "loader_api.h" +#include "file_system.h" +#include "linker_api.h" +#include "file_adapter.h" + +using namespace maple; +namespace maplert { +namespace loaderutils { +bool IsZipMagic(uint32_t magic) { + const uint8_t lowZipBit = 8; + const uint8_t lowZipMask = 0xff; + return (((magic & lowZipMask) == 'P') && (((magic >> lowZipBit) & lowZipMask) == 'K')); +} +std::string GetNicePath(const std::string &path) { + // Make canonical path. + std::string file(path); + if (file.find("/./") != std::string::npos || file.find("/../") != std::string::npos) { + if (file.length() > PATH_MAX) { + CL_LOG(ERROR) << "the path name exceeds the length limit! " << file.length() << ", " << file << maple::endl; + return ""; + } + char canonical[PATH_MAX + 1] = { 0 }; + if (realpath(file.c_str(), canonical)) { + file = canonical; + } else { + CL_LOG(ERROR) << "returned null! " << file << maple::endl; + return ""; + } + } + return file; +} +std::string GetJarPath(const std::string &curDir, + const std::string &fileWithPostfix, bool isAppDataPath, bool isSystemPath) { + if (stringutils::WithSuffix(fileWithPostfix.c_str(), ".jar")) { + size_t jarPos = fileWithPostfix.rfind(".jar"); + std::string newFileWithPostFix(fileWithPostfix); + newFileWithPostFix = "libmaple" + newFileWithPostFix.replace(jarPos, sizeof(".jar"), ".so"); + // PATH 2: system jar in /system/ or /apex/, and end with .jar + // return /system/lib64/libmaple.so + if (isSystemPath) { + std::string newFilePath = maple::fs::kSystemLibPath + newFileWithPostFix; + CL_VLOG(classloader) << "return " << newFilePath << maple::endl; + return newFilePath; + } + + // PATH 3: not starts with /data/data/ and /data/user/, and end with .jar + // return /maple/arm64/libmaple.so + if (!isAppDataPath) { + CL_VLOG(classloader) << "return " << curDir << "maple/arm64/" << newFileWithPostFix << maple::endl; + return curDir + "maple/arm64/" + newFileWithPostFix; + } + } + return ""; +} +std::string GetApkPath(const std::string &curDir, const std::string &fileWithPostfix, bool isAppDataPath) { + const std::string appSoPostfix = "maple/arm64/mapleclasses.so"; + const std::string appPartialSoPostfix = "maple/arm64/maplepclasses.so"; + if (stringutils::WithSuffix(fileWithPostfix.c_str(), ".apk")) { + // PATH 4: not starts with /data/data/ and /data/user/, and end with .apk + // return /maple/arm64/mapleclasses.so for full AOT + // return /maple/arm64/maplepclasses.so for partial AOT + if (!isAppDataPath) { + std::string result = curDir + appSoPostfix; + // check PATH 4 existence + if (FileUtils::FileExists(result)) { + CL_VLOG(classloader) << "return " << result << maple::endl; + return result; + } + // check partial classes + result = curDir + appPartialSoPostfix; + if (FileUtils::FileExists(result)) { + CL_VLOG(classloader) << "return " << result << maple::endl; + return result; + } + } + } + return ""; +} +std::string Dex2MplPath(const std::string &filePath) { + std::string file(filePath); + CL_VLOG(classloader) << "input " << file << maple::endl; + +#ifndef __OPENJDK__ + size_t curDirPos = file.rfind('/'); + // PATH 1: if there is no path, just file name + // return input directly, like libmaplecore-all.so + if (curDirPos == std::string::npos) { + CL_VLOG(classloader) << "return " << file << maple::endl; + return file; + } + + std::string curDir = file.substr(0, curDirPos + 1); + std::string fileWithPostfix = file.substr(curDirPos + 1); + + // PATH 2: 
system path starts with /system/ or /apex/ + bool isAppDataPath = stringutils::WithPrefix(file.c_str(), "/data/data/") || + stringutils::WithPrefix(file.c_str(), "/data/user/"); + bool isSystemPath = stringutils::WithPrefix(file.c_str(), "/system/") || + stringutils::WithPrefix(file.c_str(), "/apex/"); + + std::string resultPath = GetJarPath(curDir, fileWithPostfix, isAppDataPath, isSystemPath); + if (!resultPath.empty()) { + return resultPath; + } + + resultPath = GetApkPath(curDir, fileWithPostfix, isAppDataPath); + if (!resultPath.empty()) { + return resultPath; + } + + // return /maple/arm64/libmaple.so + if (isAppDataPath) { + std::string result = curDir + "maple/arm64/libmaple" + fileWithPostfix + ".so"; + // check PATH 5 existence + if (FileUtils::FileExists(result)) { + CL_VLOG(classloader) << "return " << result << maple::endl; + return result; + } + } + + // read and check file magic number + std::unique_ptr fileInput; + fileInput.reset(FileUtils::OpenFileReadOnly(file)); + if (fileInput.get() != nullptr) { + uint32_t magic = 0; + int ret = static_cast(fileInput->Read( + reinterpret_cast(&magic), static_cast(sizeof(magic)), 0)); + if ((ret == static_cast(sizeof(magic))) && IsZipMagic(magic)) { + // PATH 6: if the others all failed + // return !/maple/arm64/mapleclasses.so + CL_VLOG(classloader) << "return " << file << "!/maple/arm64/mapleclasses.so" << maple::endl; + return file + "!/maple/arm64/mapleclasses.so"; + } + } + + // default path + CL_VLOG(classloader) << "return " << file << maple::endl; + return file; +#else + CL_VLOG(classloader) << "return " << file << maple::endl; + return file; +#endif +} + +bool CheckVersion(const ObjFile &mplFile, const LinkerMFileInfo &mplInfo) { + maplert::MapleVersionT mplFileVersion; + maplert::LinkerAPI::Instance().GetMplVersion(mplInfo, mplFileVersion); + if (mplFileVersion.mplMajorVersion > Version::kMajorMplVersion) { + CL_LOG(ERROR) << "Compiler and Runtime major version are inconsistent in " << mplFile.GetName() << + " Compiler version : " << mplFileVersion.mplMajorVersion << "." << mplFileVersion.compilerMinorVersion << + ", Runtime version : " << Version::kMajorMplVersion << "." << Version::kMinorRuntimeVersion << maple::endl; + return false; + } + return true; +} + +bool CheckCompilerStatus(const ObjFile &mplFile, const LinkerMFileInfo &mplInfo) { + // check mpl file compiler status + constexpr uint32_t gcOnlyMask = 1; + uint32_t compilerStatus = 0; + maplert::LinkerAPI::Instance().GetMplCompilerStatus(mplInfo, compilerStatus); + AdapterExAPI *adapter = maplert::LoaderAPI::Instance().GetAdapterEx().As(); + bool gcOnlyInProcess = adapter->IsGcOnly(); + bool gcOnlyInMfile = static_cast(compilerStatus & gcOnlyMask); + + // if mfile is gc only, the process must be gconly mode; + if (gcOnlyInMfile && !gcOnlyInProcess) { + CL_LOG(FATAL) << "Compiler status: " << compilerStatus << " in " << mplFile.GetName() << + " dont match the process gc only mode" << maple::endl; + } + + // in zygote startup mode, all mfile should be rc and process in rc mode + // in application startup mode, if in process rc mode, all after-loaded mfle + // should be rc mfile, otherwise in process gc-only mode, all after-loaded mfile + // can be rc mfile or gc mfile. butr rc mfile don't friendly performance + if (gcOnlyInMfile != gcOnlyInProcess) { + CL_LOG(INFO) << "Compiler status: " << compilerStatus << " in " << mplFile.GetName() << " in not-friendly mode" << + " process is in" << (gcOnlyInProcess ? 
" gconly mode" : " rc mode") << maple::endl; + } + return true; +} +} +} // end namespace maplert diff --git a/src/mrt/compiler-rt/src/loader/object_loader.cpp b/src/mrt/compiler-rt/src/loader/object_loader.cpp new file mode 100644 index 0000000000..6767fc8ac2 --- /dev/null +++ b/src/mrt/compiler-rt/src/loader/object_loader.cpp @@ -0,0 +1,503 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "loader/object_loader.h" + +#include "chelper.h" +#include "cpphelper.h" +#include "base/systrace.h" +#include "yieldpoint.h" +#include "interp_support.h" + +using namespace std; +namespace maplert { +ObjectLoader::ObjectLoader() : mBootClassLoader(nullptr), mSystemClassLoader(nullptr) {} +ObjectLoader::~ObjectLoader() { + mBootClassLoader = nullptr; + mSystemClassLoader = nullptr; + pLinker = nullptr; + pAdapterEx = nullptr; +} + +// API Interfaces Begin +void ObjectLoader::PreInit(IAdapterEx adapterEx) { + pLinker = &LinkerAPI::Instance(); + pAdapterEx = adapterEx.As(); +} +void ObjectLoader::PostInit(jobject systemClassLoader) { + mSystemClassLoader = reinterpret_cast(systemClassLoader); + RC_RUNTIME_INC_REF(systemClassLoader); +} +void ObjectLoader::UnInit() { + if (mSystemClassLoader != nullptr) { + RC_RUNTIME_DEC_REF(reinterpret_cast(mSystemClassLoader)); + } +} +// If boot class loader, directly use nullptr as class loader. +// If so, we can reduce once iteration. 
+bool ObjectLoader::IsBootClassLoader(jobject classLoader) { + if (classLoader == nullptr) { + return true; + } + // save Inc/DecRef + // If mBootClassLoader is not initialized, it must false, compare return false + // If mBootClassLoader is initialized, return compare result + return mBootClassLoader == reinterpret_cast(classLoader); +} + +// WARNING: speical handle return value's RC for this method +// parent class loader return result can only be used in maple linker runtime code +// CANNOT return to java code +// save inc/dec cost +jobject ObjectLoader::GetCLParent(jobject classLoader) { + if (IsBootClassLoader(classLoader)) { + return nullptr; + } + if (WorldStopped()) { + jobject res = MRT_ReflectGetFieldjobject(GetCLField(kFieldParent), classLoader); + // warning, this is exceptional case, no incref for return value + RC_LOCAL_DEC_REF(res); + return res; + } + maplert::ScopedObjectAccess soa; + jobject res = MRT_ReflectGetFieldjobject(GetCLField(kFieldParent), classLoader); + // warning, this is exceptional case, no incref for return value + RC_LOCAL_DEC_REF(res); + return res; +} + +void ObjectLoader::SetCLParent(jobject classLoader, jobject parentClassLoader) { + if (IsBootClassLoader(classLoader)) { + return; + } + maplert::ScopedObjectAccess soa; + MRT_ReflectSetFieldjobject(GetCLField(kFieldParent), classLoader, parentClassLoader); +} + +bool ObjectLoader::LoadClasses(jobject classLoader, ObjFile &objFile) { + if (objFile.GetFileType() == FileType::kMFile) { + return LoadClasses(reinterpret_cast(classLoader), objFile); + } else if (objFile.GetFileType() == FileType::kDFile) { + return MClassLocatorManagerInterpEx::LoadClasses(*this, classLoader, objFile); + } + CL_LOG(ERROR) << "unknow class file type" << maple::endl; + return false; +} + +size_t ObjectLoader::GetLoadedClassCount() { + size_t totalCount = 0; + ClassLocator *classLocator = nullptr; + + // Boot Class loader + classLocator = reinterpret_cast(const_cast(mCLTable[0])); + totalCount += classLocator->GetLoadedClassCount(); + + for (int i = 1; i < kMaxClassloaderNum; ++i) { + const MObject *classLoader = mCLTable[i]; + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + if (classLoader == nullptr) { + break; + } + classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, cl=" << classLoader << ", classLocator is null!" << maple::endl; + continue; + } + totalCount += classLocator->GetLoadedClassCount(); + } + + return totalCount; +} + +size_t ObjectLoader::GetAllHashMapSize() { + size_t totalSize = 0; + // Boot Class loader + ClassLocator *classLocator = reinterpret_cast(const_cast(mCLTable[0])); + totalSize += classLocator->GetClassHashMapSize(); + + for (int i = 1; i < kMaxClassloaderNum; ++i) { + const MObject *classLoader = mCLTable[i]; + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + if (classLoader == nullptr) { + break; + } + classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, classloader=" << classLoader << ", to CHECK why classLocator is null!" << maple::endl; + continue; + } + totalSize += classLocator->GetClassHashMapSize(); + } + + return totalSize; +} + +bool ObjectLoader::IsLinked(jobject classLoader) { + ClassLocator *classLocator = GetCLClassTable(classLoader).As(); + if (classLocator == nullptr) { + return true; // Not to link if no classes loaded. 
+ } + return classLocator->IsLinked(); +} + +void ObjectLoader::SetLinked(jobject classLoader, bool isLinked) { + ClassLocator *classLocator = GetCLClassTable(classLoader).As(); + if (classLocator == nullptr) { + return; + } + classLocator->SetLinked(isLinked); +} + +void ObjectLoader::ReTryLoadClassesFromMplFile(jobject classLoader, ObjFile &mplFile) { + std::vector fakeInfoList; // just for not multi-so, it is a empty list and never used + (void)LoadClassesFromMplFile(reinterpret_cast(classLoader), mplFile, fakeInfoList); +} + +bool ObjectLoader::GetClassNameList(jobject classLoader, ObjFile &objFile, vector &classVec) { + ClassLocator *classLocator = GetCLClassTable(classLoader).As(); + if (classLocator == nullptr) { + return false; + } + return classLocator->GetClassNameList(objFile, classVec); +} +// MRT_EXPORT Split +// visit class laoder gc roots +void ObjectLoader::VisitGCRoots(const RefVisitor &function) { + // system class loader is also recorded is Runtime, not visit there + function(reinterpret_cast(mSystemClassLoader)); + for (int i = 1; i < kMaxClassloaderNum; ++i) { + if (mCLTable[i] == nullptr) { + break; + } + function(reinterpret_cast(mCLTable[i])); + } +} + +jobject ObjectLoader::GetSystemClassLoader() { + // exceptional case, no need incref, as this only used in runtime and not return to java code + // warning: use it with special care of RC + return reinterpret_cast(mSystemClassLoader); +} + +jobject ObjectLoader::GetBootClassLoaderInstance() { +#ifndef __OPENJDK__ + if (mBootClassLoader == nullptr) { + MClass *cls = reinterpret_cast(LocateClass("Ljava/lang/BootClassLoader;", SearchFilter())); + MethodMeta *getInstanceMth = cls->GetMethod("getInstance", "()Ljava/lang/BootClassLoader;"); + mBootClassLoader = reinterpret_cast(getInstanceMth->InvokeJavaMethodFast(nullptr)); + // warning, this is exceptional case, no incref for return value + RC_LOCAL_DEC_REF(reinterpret_cast(mBootClassLoader)); + return reinterpret_cast(mBootClassLoader); + } +#endif // !__OPENJDK__ + return reinterpret_cast(mBootClassLoader); +} + +void ObjectLoader::SetClassCL(jclass kls, jobject cl) { + MClass *klass = reinterpret_cast(kls); + MObject *classLoader = reinterpret_cast(cl); + if (klass == nullptr) { + CL_LOG(ERROR) << "failed, classloader=" << cl << ", error parameter: klass is null!" << maple::endl; + return; + } + if (IsBootClassLoader(cl)) { // for bootstrap class loader + klass->SetClIndex(static_cast(kClIndexFlag | 0)); + return; + } + int16_t pos = -1; + bool found = LocateIndex(classLoader, pos); + if (found) { + klass->SetClIndex(static_cast(kClIndexFlag | static_cast(pos))); + } else if (!found && pos != -1) { + mCLTable[pos] = classLoader; + RC_RUNTIME_INC_REF(classLoader); + klass->SetClIndex(static_cast(kClIndexFlag | static_cast(pos))); + } else { + CL_LOG(ERROR) << "failed, classloader=" << classLoader << ", table is null!" << maple::endl; + } +} + +// Get class locator from Class Loader's 'classTable' field. +// Notice that we get special locator at the beginning of mCLTable for boot class loader. +IObjectLocator ObjectLoader::GetCLClassTable(jobject classLoader) { + if (IsBootClassLoader(classLoader)) { + return reinterpret_cast(const_cast(mCLTable[0])); + } + return reinterpret_cast(MRT_ReflectGetFieldjlong(GetCLField(kFieldClassTable), classLoader)); +} + +void ObjectLoader::SetCLClassTable(jobject classLoader, IObjectLocator classLocator) { + if (IsBootClassLoader(classLoader)) { // Boot classlocator is fixed at mCLTable[0], use it directly. 
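+    // mCLTable[0] is reserved for the boot class loader's class locator.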
+ mCLTable[0] = classLocator.As(); + return; + } + // We set class locator for all classloaders, except bootclassloader. + MRT_ReflectSetFieldjlong(GetCLField(kFieldClassTable), classLoader, classLocator.As()); +} +// API Interfaces End +bool ObjectLoader::LoadClassesFromMplFile(const MObject *classLoader, ObjFile &objFile, + std::vector &mplInfoList, bool hasSiblings) { + bool isDecouple = true; + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + if (objFile.GetFileType() == FileType::kMFile) { + if (!pLinker->Add(objFile, jClassLoader)) { + CL_LOG(ERROR) << "invalid maple file, or multiple loading " << objFile.GetName() << maple::endl; + } + LinkerMFileInfo *mplInfo = objFile.GetMplInfo(); + objFile.Load(); + if (hasSiblings) { + mplInfoList.push_back(mplInfo); + } else { + static_cast(pLinker->Link(*mplInfo, isDecouple)); + } + + // check mpl file version + if (!loaderutils::CheckVersion(objFile, *mplInfo)) { + return false; + } + + // check mpl file compiler status, such as is gconly + if (!loaderutils::CheckCompilerStatus(objFile, *mplInfo)) { + return false; + } + + SetLinked(jClassLoader, false); + + if (!LoadClasses(jClassLoader, objFile)) { + CL_LOG(ERROR) << "failed to load classes from " << objFile.GetName() << ", classloader: " << classLoader << + maple::endl; + return false; + } + objFile.SetClassTableLoaded(); + } else if (objFile.GetFileType() == FileType::kDFile) { + if (!pAdapterEx->IsStarted()) { + // Now, ClassLocatorManager.LoadClasses is needed only for one case: + // loading Dex files in classpath into SystemClassLoader. + if (!LoadClasses(jClassLoader, objFile)) { + CL_LOG(ERROR) << "load class for dex file failed: " << objFile.GetName() << maple::endl; + return false; + } + } + } else { + return false; + } + return true; +} + +void ObjectLoader::UnloadClasses(const MObject *classLoader) { + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + ClassLocator *classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, classLocator is null." << maple::endl; + return; + } + classLocator->UnloadClasses(); + + // Destruct class loader's references properly. + if (IsBootClassLoader(jClassLoader)) { // for boot class loader + mCLTable[0] = nullptr; // Dereference the class locator of boot class loader. + } else if (!RecycleIndex(classLoader)) { // Dereference the class loader, and recycle mCLTable index. + CL_LOG(ERROR) << "RecycleIndex fail" << maple::endl; + } + delete classLocator; // Delete the class locator of class loader. +} + +void ObjectLoader::VisitClassesByLoader(const MObject *loader, const maple::rootObjectFunc &func) { + jobject jClassLoader = reinterpret_cast(const_cast(loader)); + ClassLocator *locator = GetCLClassTable(jClassLoader).As(); + if (locator == nullptr) { + CL_LOG(ERROR) << "VisitClasses locator null for loader:" << loader << maple::endl; + return; + } + locator->VisitClasses(func, IsBootClassLoader(jClassLoader)); +} + +jfieldID ObjectLoader::GetCLField(const FieldName &field) { + static jfieldID cachedFields[] = { nullptr, nullptr }; + static const char *cachedNames[] = { "parent", "classTable" }; + if (cachedFields[field] == nullptr) { + if (mCLTable[0] != nullptr) { + jclass klass = FindClass("Ljava/lang/ClassLoader;", SearchFilter()); + if (klass == nullptr) { + CL_LOG(ERROR) << "failed, can't find \"java/lang/ClassLoader\"!" 
<< maple::endl; + return nullptr; + } + cachedFields[field] = MRT_ReflectGetCharField(klass, cachedNames[field]); + if (cachedFields[field] == nullptr) { + CL_LOG(ERROR) << "failed, mParentField should not null!" << maple::endl; + } + } else { + CL_LOG(ERROR) << "failed for mCLTable[0] is null!" << maple::endl; + } + } + return cachedFields[field]; +} + +MObject *ObjectLoader::GetClassCL(MClass *klass) { + uint16_t index = klass->GetClIndex(); + // Remove after clIndex can be initialized by compiler. + if ((kClIndexFlagMask & index) == kClIndexFlag && index != kClIndexInintValue) { // Once initialized + index = kClIndexValueMask & index; + } else { // Not initialized + index = 0; + klass->SetClIndex(static_cast(kClIndexFlag | 0)); + CL_DLOG(classloader) << "not initialized for " << klass->GetName() << maple::endl; + } + + if (index == 0) { // for boot class loader + return nullptr; + } else if (index > 0 && index < kMaxClassloaderNum) { + return const_cast(mCLTable[index]); + } else { + CL_LOG(ERROR) << "failed for index=" << index << maple::endl; + return nullptr; + } +} + +// Assign index for classloader in advance, and return index with mask. +uint16_t ObjectLoader::PresetClassCL(const MObject *classLoader) { + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + if (IsBootClassLoader(jClassLoader)) { // for boot class loader + return static_cast(kClIndexFlag | 0); + } + int16_t pos = -1; + bool found = LocateIndex(classLoader, pos); + if (found) { + return static_cast(kClIndexFlag | static_cast(pos)); + } else if (!found && pos != -1) { + maplert::ScopedObjectAccess soa; + mCLTable[pos] = classLoader; + RC_RUNTIME_INC_REF(classLoader); + return static_cast(kClIndexFlag | static_cast(pos)); + } else { + CL_LOG(ERROR) << "failed, classloader=" << classLoader << ", table is null!" << maple::endl; + return 0; + } +} + +// Return the index if matched, or found a new valid index. +bool ObjectLoader::LocateIndex(const MObject *classLoader, int16_t &pos) const { + pos = -1; + for (int16_t i = 1; i < kMaxClassloaderNum; ++i) { + if (mCLTable[i] == classLoader) { // Matched class loader set before. + pos = i; + return true; + } else if (mCLTable[i] == nullptr) { + pos = i; + return false; + } + } + return false; +} + +// Recycle the index, we can rearrange the index to other class loader. +bool ObjectLoader::RecycleIndex(const MObject *classLoader) { + for (int32_t i = 1; i < kMaxClassloaderNum; ++i) { + if (mCLTable[i] == classLoader) { // Matched class loader set before. + mCLTable[i] = nullptr; + RC_RUNTIME_DEC_REF(classLoader); + return true; + } + } + + return false; +} + +void ObjectLoader::RegisterDynamicClass(const MObject *classLoader, const MClass *klass) { + jclass jKlass = reinterpret_cast(const_cast(klass)); + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + ClassLocator *classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, classloader=" << classLoader << ", to CHECK why classLocator is null!" 
<< maple::endl; + return; + } + char *className = klass->GetName(); + if (!classLocator->RegisterDynamicClass(className, *const_cast(klass))) { + CL_LOG(ERROR) << "failed to registerDynamicClass,name:className" << maple::endl; + } + SetClassCL(jKlass, jClassLoader); +} + +void ObjectLoader::UnregisterDynamicClass(const MObject *classLoader, const MClass *klass) { + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + ClassLocator *classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, classloader=" << classLoader << ", to CHECK why classLocator is null!" << maple::endl; + return; + } + char *className = klass->GetName(); + if (!classLocator->UnregisterDynamicClass(className)) { + CL_LOG(ERROR) << "failed to unregisterDynamicClass, name:className" << maple::endl; + } +} + +// load classes from one ObjFile. +bool ObjectLoader::LoadClasses(const MObject *classLoader, ObjFile &objFile) { + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + ClassLocator *classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + classLocator = new (std::nothrow) ClassLocator(); + if (classLocator == nullptr) { + LINKER_LOG(FATAL) << "new ClassLocator failed" << maple::endl; + } + SetCLClassTable(jClassLoader, classLocator); + } + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, classloader=" << classLoader << ", classLocator is null" << maple::endl; + return false; + } + + // We will check the 'mplInfo' lazy binding flag in the ClassLocator. + // If it's lazy, we will not set ClassLoader Index during initialization, + // even though set 'initCLIndex' parameter here. + if (!classLocator->LoadClasses(objFile, PresetClassCL(classLoader))) { + CL_LOG(ERROR) << "failed: classloader=" << classLoader << maple::endl; + return false; + } + + return true; +} + +bool ObjectLoader::LoadClasses(const MObject *classLoader, vector &objList) { + jobject jClassLoader = reinterpret_cast(const_cast(classLoader)); + ClassLocator *classLocator = GetCLClassTable(jClassLoader).As(); + if (classLocator == nullptr) { + classLocator = new (std::nothrow) ClassLocator(); + if (classLocator == nullptr) { + LINKER_LOG(FATAL) << "new ClassLocator failed!" << maple::endl; + } + SetCLClassTable(jClassLoader, classLocator); + } + if (classLocator == nullptr) { + CL_LOG(ERROR) << "failed, classloader = " << classLoader << ", classLocator is null" << maple::endl; + return false; + } + + // We will check the 'mplInfo' lazy binding flag in each ClassLocator. + // If it's lazy, we will not set ClassLoader Index during initialization, + // even though set 'initCLIndex' parameter here. + if (!classLocator->LoadClasses(objList, PresetClassCL(classLoader))) { + return false; + } + if (IsBootClassLoader(jClassLoader)) { + // BootClassLoader will only call this function to load classes. + // now register predefined array (object) classes. This is required for fast cycle detection + for (MClass* primitiveItem: primitiveClasses) { + RegisterDynamicClass(classLoader, primitiveItem); + } + } + return true; +} +} // end namespace maple diff --git a/src/mrt/compiler-rt/src/loader/object_locator.cpp b/src/mrt/compiler-rt/src/loader/object_locator.cpp new file mode 100644 index 0000000000..5802ee45c1 --- /dev/null +++ b/src/mrt/compiler-rt/src/loader/object_locator.cpp @@ -0,0 +1,420 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "loader/object_locator.h" + +#include +#include + +#include "sizes.h" +#include "allocator.h" +#include "itab_util.h" +#include "loader/hash_pool.h" +#include "loader/object_loader.h" +using namespace std; + +namespace maplert { +// Load classes for all .so files in list. +bool ClassLocator::LoadClasses(vector &objList, uint16_t clIndex) { + maple::SpinAutoLock lock(spLock); + // Firstly get total classes count in all .so + uint32_t total = 0; + for (auto objFile : objList) { + uint32_t count = GetClassCount(*objFile); + if (count == 0) { + return false; + } + total += count; + + CL_VLOG(classloader) << "Class Count of " << objFile->GetName() << ":" << count << maple::endl; + } + + mLoadedClassCount += total; + CL_VLOG(classloader) << "Class Total Count:" << total << maple::endl; + + // Initialize all class's class loader info. when .so is loaded. + MClassHashPool *pool; + pool = new (std::nothrow) MClassHashPool(); + if (pool == nullptr) { + CL_LOG(FATAL) << "new MClassHashPool failed" << maple::endl; + } + pool->Create(total); + // For performance we construct only one MClassHashPool for boot/system classloader. + // App can't reflect class from boot classloader's maple file. + // So we set handle to nullptr. + if (mHashPools.find(nullptr) != mHashPools.end()) { + CL_LOG(FATAL) << "Attempt to Add Multi Pool to Same Boot Handle!" << maple::endl; + } + mHashPools[nullptr] = pool; + if (InitClasses(objList, *pool)) { + pool->Collect(); + SetClasses(objList, *pool, clIndex); + } + CL_VLOG(classloader) << "LoadClasses(), Class Hash Conflict Rate:" << pool->GetHashConflictRate() << "/" << + pool->GetClassConflictRate() << ", Total Memory Cost=" << pool->CalcMemoryCost() << maple::endl; + return true; +} + +// Load classes for single .so file. +bool ClassLocator::LoadClasses(ObjFile &objFile, uint16_t clIndex) { + maple::SpinAutoLock lock(spLock); + // Firstly get class count in .so + uint32_t count = GetClassCount(objFile); + if (count == 0) { + return false; + } + mLoadedClassCount += count; + CL_VLOG(classloader) << "Class Counts of " << objFile.GetName() << ":" << count << maple::endl; + + // Initialize all class's class loader info. when .so is loaded. 
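+ //
+ // [Editor's aside - an illustrative sketch, not part of the original patch.]
+ // The pool built below is filled in two passes: InitClasses() feeds every class
+ // hash into the pool so bucket sizes are known, Collect() finalizes the layout,
+ // and SetClasses() then drops each class into its reserved slot. The real
+ // MClassHashPool lives elsewhere in the runtime; the TinyHashPool here is a
+ // hypothetical stand-in that only demonstrates the same count/collect/place idea.
+ #include <cstdint>
+ #include <string>
+ #include <vector>
+
+ class TinyHashPool {
+  public:
+   explicit TinyHashPool(uint32_t bucketCount) : counts(bucketCount, 0), offsets(bucketCount, 0) {}
+   void InitClass(uint32_t hash) { ++counts[hash % counts.size()]; } // pass 1: count per bucket
+   void Collect() { // turn per-bucket counts into slot offsets
+     uint32_t total = 0;
+     for (size_t i = 0; i < counts.size(); ++i) {
+       offsets[i] = total;
+       total += counts[i];
+     }
+     slots.resize(total, nullptr);
+     cursors = offsets;
+   }
+   void Set(uint32_t hash, const std::string *klass) { // pass 2: place into the reserved slot
+     slots[cursors[hash % counts.size()]++] = klass;
+   }
+   const std::string *Get(uint32_t hash, const std::string &name) const { // probe a single bucket
+     uint32_t bucket = hash % counts.size();
+     for (uint32_t i = offsets[bucket]; i < offsets[bucket] + counts[bucket]; ++i) {
+       if (slots[i] != nullptr && *slots[i] == name) {
+         return slots[i];
+       }
+     }
+     return nullptr;
+   }
+  private:
+   std::vector<uint32_t> counts;
+   std::vector<uint32_t> offsets;
+   std::vector<uint32_t> cursors;
+   std::vector<const std::string*> slots;
+ };
+ // Usage: call InitClass(hash) once per class, then Collect(), then Set(hash, &name) once per class.
+ // [End of editor's aside; the real pool construction continues below.]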
+ MClassHashPool *pool; + pool = new (std::nothrow) MClassHashPool(); + if (pool == nullptr) { + LINKER_LOG(FATAL) << "new MClassHashPool failed" << maple::endl; + } + pool->Create(count); + if (objFile.GetUniqueID() == 0) { + auto min = min_element(mHashPools.begin(), mHashPools.end(), [](const auto &left, const auto &right) { + if (left.first == nullptr) { + return false; + } else if (right.first == nullptr) { + return true; + } + return left.first->GetUniqueID() < right.first->GetUniqueID(); + }); + if (min != mHashPools.end() && min->first != nullptr) { + objFile.SetUniqueID(min->first->GetUniqueID() - 1); + } + } + if (mHashPools.find(&objFile) != mHashPools.end()) { + CL_LOG(FATAL) << "Attempt to Add Multi Pool to Same Handle, objFile=" << &objFile << maple::endl; + } + mHashPools[&objFile] = pool; + InitClasses(objFile, *pool); + pool->Collect(); + SetClasses(objFile, *pool, clIndex); + CL_VLOG(classloader) << " conflict:" << pool->GetHashConflictRate() << "/" << pool->GetClassConflictRate() << + ", for " << objFile.GetName() << ", Total Memory Cost=" << pool->CalcMemoryCost() << maple::endl; + + return true; +} + +void ClassLocator::SetClasses(vector &objList, MClassHashPool &pool, uint16_t clIndex) { + for (auto objFile : objList) { + SetClasses(*objFile, pool, clIndex); + } +} + +void ClassLocator::SetClasses(const ObjFile &objFile, MClassHashPool &pool, uint16_t clIndex) { + DataRefOffset *data = objFile.GetMplInfo()->GetTblBegin(kClassMetadataBucket); + uint32_t size = static_cast(objFile.GetMplInfo()->GetTblEnd(kClassMetadataBucket) - data); + bool isLazy = objFile.GetMplInfo()->IsFlag(kIsLazy); + for (uint32_t i = 0; i < size; ++i) { + DataRefOffset *klass = data + i; + MClass *classInfo = klass->GetDataRef(); + pool.Set(static_cast(classInfo->GetMonitor()), *classInfo); + classInfo->SetMonitor(0); + classInfo->ResolveVtabItab(); + if (!isLazy) { + classInfo->SetClIndex(clIndex); + // Set classInfo->shadow for not lazy binding. + MRTSetMetadataShadow(reinterpret_cast(classInfo), WellKnown::GetMClassClass()); + } + MRT_SetFastAlloc(reinterpret_cast(classInfo)); + JSAN_ADD_CLASS_METADATA(classInfo); + } +} + +// Initialize all classinfos for all .so in list. +bool ClassLocator::InitClasses(vector &objList, MClassHashPool &pool) { + uint32_t count = 0; + for (auto objFile : objList) { + InitClasses(*objFile, pool); + ++count; + } + return count > 0; +} + +// Initialize all classinfos in single .so. 
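+ //
+ // [Editor's aside - an illustrative sketch, not part of the original patch.]
+ // LoadClasses() above picks a fresh unique ID for an ObjFile that has none by
+ // scanning mHashPools for the smallest ID already in use (the nullptr key that
+ // marks the boot pool is skipped) and taking that value minus one. The helper
+ // below mirrors that scan with hypothetical stand-in types (FakeObjFile and a
+ // plain std::map); only the selection logic matches the code above.
+ #include <algorithm>
+ #include <cstdint>
+ #include <map>
+
+ struct FakeObjFile {
+   int64_t uniqueId;
+ };
+
+ int64_t NextUniqueId(const std::map<FakeObjFile*, int> &pools) {
+   auto min = std::min_element(pools.begin(), pools.end(), [](const auto &left, const auto &right) {
+     if (left.first == nullptr) {
+       return false; // the boot entry never counts as the minimum
+     } else if (right.first == nullptr) {
+       return true;
+     }
+     return left.first->uniqueId < right.first->uniqueId;
+   });
+   if (min != pools.end() && min->first != nullptr) {
+     return min->first->uniqueId - 1;
+   }
+   return 0; // no regular file registered yet
+ }
+ // [End of editor's aside. The InitClasses() overload announced by the comment above follows.]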
+void ClassLocator::InitClasses(const ObjFile &objFile, MClassHashPool &pool) { + DataRefOffset *data = objFile.GetMplInfo()->GetTblBegin(kClassMetadataBucket); + uint32_t size = static_cast(objFile.GetMplInfo()->GetTblEnd(kClassMetadataBucket) - data); + for (uint32_t i = 0; i < size; ++i) { + const DataRefOffset *klass = data + i; + MClass *classInfo = klass->GetDataRef(); + pool.InitClass(static_cast(classInfo->GetMonitor())); + } +} + +uint32_t ClassLocator::GetClassCount(const ObjFile &objFile) { + DataRefOffset *hashTabStart = objFile.GetMplInfo()->GetTblBegin(kClassMetadataBucket); + DataRefOffset *hashTabEnd = objFile.GetMplInfo()->GetTblEnd(kClassMetadataBucket); + if ((hashTabStart == nullptr) || (hashTabEnd == nullptr)) { + return 0; + } + return static_cast(hashTabEnd - hashTabStart); +} + +void ClassLocator::UnloadClasses() { + maple::SpinAutoLock lock(spLock); + + for (auto item : mHashPools) { + MClassHashPool *pool = item.second; + if (pool == nullptr) { + continue; + } + pool->Destroy(); + delete pool; + pool = nullptr; + } + UnregisterDynamicClasses(); +} + +void ClassLocator::VisitClasses(const maple::rootObjectFunc &func, bool isBoot) { + // Remove lock to avoid dead lock with FindClass call. + for (auto item : mHashPools) { + MClassHashPool *pool = item.second; + pool->VisitClasses(func); + } + + (void)(pthread_rwlock_rdlock(&dClassMapLock)); + for (ClassMapConstIterT it2 = dClassMap.begin(); it2 != dClassMap.end(); ++it2) { + func(reinterpret_cast(it2->second)); + } + (void)(pthread_rwlock_unlock(&dClassMapLock)); + + if (isBoot) { // boot class loader? + LoaderAPI::As().VisitPrimitiveClass(func); + } +} + +bool ClassLocator::GetClassNameList(const ObjFile &objFile, vector &classVec) { + maple::SpinAutoLock lock(spLock); + + LinkerMFileInfo *mplInfo = objFile.GetMplInfo(); + DataRefOffset *hashTabStart = mplInfo->GetTblBegin(kClassMetadataBucket); + DataRefOffset *hashTabEnd = mplInfo->GetTblEnd(kClassMetadataBucket); + if ((hashTabStart == nullptr) || (hashTabEnd == nullptr)) { + return false; + } + DataRefOffset *data = hashTabStart; + uint32_t size = static_cast(hashTabEnd - hashTabStart); + + for (uint32_t i = 0; i < size; ++i) { + DataRefOffset *klass = data + i; + if (klass != nullptr) { + MClass *classInfo = klass->GetDataRef(); + classVec.emplace_back(classInfo->GetName()); + } + } + return true; +} + +// Find a class by class name. +// For array class, if no loaded class found, create a array class and return. +MClass *ClassLocator::InquireClass(const std::string &internalName, SearchFilter &filter) { + // Return null if it's void array such as "[V". + if (internalName[0] == '[') { + uint32_t dim = 0; + while (internalName[dim] == '[') { + ++dim; + } + if (internalName[dim] == 'V') { + return nullptr; + } + } + MClass *classInfo = FindClassInternal(internalName, filter); + if (classInfo != nullptr) { + JSAN_ADD_CLASS_METADATA(classInfo); + return classInfo; + } + + // Try to create a array classe + if (internalName[0] == '[') { + classInfo = CreateArrayClassRecursively(internalName, filter); + } + JSAN_ADD_CLASS_METADATA(classInfo); + return classInfo; +} + +MClass *ClassLocator::FindLoadedClass(const MClassHashPool &pool, const std::string &className, SearchFilter &filter) { + MClass *classInfo = nullptr; + ClassMetadata *metaData = nullptr; + // Find it in hashpool first. + classInfo = pool.Get(className); + // If no class found, we find whether it is dex in the dexFiles. 
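+ //
+ // [Editor's aside - an illustrative sketch, not part of the original patch.]
+ // InquireClass() above rejects void arrays by scanning the leading '[' characters
+ // of the descriptor before doing any lookup. The standalone helper below shows
+ // that check in isolation; the function name is hypothetical, the real code
+ // inlines the logic.
+ #include <cstdint>
+ #include <string>
+
+ bool IsLoadableDescriptor(const std::string &internalName, uint32_t &dim) {
+   dim = 0;
+   while (dim < internalName.size() && internalName[dim] == '[') {
+     ++dim; // count array dimensions
+   }
+   // "[V", "[[V", ... describe arrays of void and can never be loaded.
+   if (dim > 0 && dim < internalName.size() && internalName[dim] == 'V') {
+     return false;
+   }
+   return true;
+ }
+ // Example: IsLoadableDescriptor("[[Ljava/lang/String;", d) -> true with d == 2;
+ //          IsLoadableDescriptor("[V", d)                   -> false with d == 1.
+ // [End of editor's aside; the dex fallback described by the comment above follows.]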
+ if (classInfo == nullptr && filter.currentFile != nullptr && filter.currentFile->GetFileType() == FileType::kDFile) { + metaData = MClassLocatorInterpEx::BuildClassFromDex(this, className, filter); + classInfo = reinterpret_cast(metaData); + } + + if (classInfo != nullptr) { + filter.outFile = filter.currentFile; + } + return classInfo; +} + +// Find class without creating array class. +// Here we can find the class in mpl, dex, or created array class. +MClass *ClassLocator::FindClassInternal(const std::string &className, SearchFilter &filter) { + maple::SpinAutoLock lock(spLock); + MClass *classInfo = nullptr; + if (className[0] != '[') { + // Traversal in all class hash map. + if (filter.currentFile != nullptr) { + auto item = mHashPools.find(filter.currentFile); + if (item == mHashPools.end()) { + return classInfo; + } + MClassHashPool *currentPool = item->second; + classInfo = FindLoadedClass(*currentPool, className, filter); + } else { + for (auto item : mHashPools) { + filter.currentFile = item.first; + classInfo = FindLoadedClass(*item.second, className, filter); + if (classInfo != nullptr) { + return classInfo; + } + } + } + } + // It may be created by an array class or by another runtime class, such as proxy/annotation classes. + if (classInfo == nullptr) { + classInfo = FindRuntimeClass(className); + } + return classInfo; +} + +MClass *ClassLocator::CreateArrayClassRecursively(const std::string &mplClassName, SearchFilter &filter) { + uint32_t dim = 0; + while (mplClassName[dim] == '[') { + ++dim; + } + + // find the first component class defined. don't recursive + MClass *componentClass = nullptr; + uint32_t currentDim; + for (currentDim = 1; currentDim < dim; ++currentDim) { + componentClass = FindClassInternal(&mplClassName[currentDim], filter); + if (componentClass != nullptr) { + break; + } + } + + // The non-array element class type + if (componentClass == nullptr) { + componentClass = FindClassInternal(&mplClassName[dim], filter); + } + + // Cannot find the element class + if (componentClass == nullptr) { + return static_cast(nullptr); + } + + if (componentClass->IsLazyBinding()) { + (void)LinkerAPI::Instance().LinkClassLazily(*componentClass); + } + + MClass *currentClass = nullptr; + for (int i = currentDim - 1; i >= 0; --i) { + maple::SpinAutoLock lock(spLock); + currentClass = FindRuntimeClass(&mplClassName[static_cast(i)]); + if (currentClass != nullptr) { // Current class already registered + componentClass = currentClass; + continue; + } else { // Not registered + currentClass = LoaderAPI::As().CreateArrayClass( + &mplClassName[static_cast(i)], *componentClass); + if (currentClass != nullptr) { + (void)!RegisterDynamicClass(&mplClassName[static_cast(i)], *currentClass); + // No memory leak happens, 'cause it's reserved by RegisterDynamicClass(). 
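+ //
+ // [Editor's aside - an illustrative sketch, not part of the original patch.]
+ // CreateArrayClassRecursively() above starts from the innermost component that is
+ // already known and wraps outward, registering every intermediate array class it
+ // creates. The sketch below models that loop with hypothetical types: "Klass" and
+ // the std::map registry stand in for the runtime's class objects and dynamic-class map.
+ #include <cstdint>
+ #include <map>
+ #include <string>
+
+ struct Klass {
+   std::string name;
+   Klass *component;
+ };
+
+ Klass *EnsureArrayClass(const std::string &desc, std::map<std::string, Klass*> &registry) {
+   uint32_t dim = 0;
+   while (desc[dim] == '[') {
+     ++dim;
+   }
+   // Walk inward until a descriptor is found that is already registered.
+   uint32_t start = dim;
+   Klass *component = nullptr;
+   for (uint32_t i = 1; i <= dim; ++i) {
+     auto it = registry.find(desc.substr(i));
+     if (it != registry.end()) {
+       component = it->second;
+       start = i;
+       break;
+     }
+   }
+   if (component == nullptr) {
+     return nullptr; // even the element type is unknown
+   }
+   // Wrap back outward, registering each enclosing array class as it is created.
+   for (int i = static_cast<int>(start) - 1; i >= 0; --i) {
+     std::string name = desc.substr(static_cast<uint32_t>(i));
+     Klass *&slot = registry[name];
+     if (slot == nullptr) {
+       slot = new Klass{name, component}; // the registry keeps ownership in this sketch
+     }
+     component = slot;
+   }
+   return component;
+ }
+ // [End of editor's aside; the original loop body resumes below.]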
+ componentClass = currentClass; + } else { + break; + } + } + } + return currentClass; +} + +bool ClassLocator::RegisterDynamicClass(const std::string &className, MClass &classObj) { + if (FindRuntimeClass(className) != nullptr) { + return true; + } + (void)(pthread_rwlock_wrlock(&dClassMapLock)); + if ((dClassMap.emplace(std::pair(className, &classObj))).second != true) { + CL_LOG(ERROR) << "dClassMap insert not return true" << maple::endl; + (void)(pthread_rwlock_unlock(&dClassMapLock)); + return false; + } + MRT_SetFastAlloc(reinterpret_cast(&classObj)); + (void)(pthread_rwlock_unlock(&dClassMapLock)); + JSAN_ADD_CLASS_METADATA(&classObj); + return true; +} + +void ClassLocator::UnregisterDynamicClassImpl(const ClassMapIterT &it) const { + MClass *klass = it->second; + if (klass != nullptr) { + char *className = klass->GetName(); + if (className != nullptr) { + klass->SetName(nullptr); + } + it->second = nullptr; + } +} + +bool ClassLocator::UnregisterDynamicClass(const std::string &className) { + (void)(pthread_rwlock_wrlock(&dClassMapLock)); + ClassMapIterT it = dClassMap.find(className); + if (it != dClassMap.end()) { + UnregisterDynamicClassImpl(it); + (void)dClassMap.erase(it); + (void)(pthread_rwlock_unlock(&dClassMapLock)); + return true; + } + (void)(pthread_rwlock_unlock(&dClassMapLock)); + return false; +} + +void ClassLocator::UnregisterDynamicClasses() { + (void)(pthread_rwlock_wrlock(&dClassMapLock)); + for (ClassMapIterT it = dClassMap.begin(); it != dClassMap.end(); ++it) { + UnregisterDynamicClassImpl(it); + } + dClassMap.clear(); + (void)(pthread_rwlock_unlock(&dClassMapLock)); +} + +// just try to get the array class +MClass *ClassLocator::FindRuntimeClass(const std::string &mplClassName) { + MClass *classInfo = LoaderAPI::As().GetPrimitiveClass(mplClassName); + if (classInfo != nullptr) { + return classInfo; + } + (void)(pthread_rwlock_rdlock(&dClassMapLock)); + ClassMapConstIterT it = dClassMap.find(mplClassName); + if (it != dClassMap.end()) { + (void)(pthread_rwlock_unlock(&dClassMapLock)); + return it->second; + } + (void)(pthread_rwlock_unlock(&dClassMapLock)); + return nullptr; +} + +size_t ClassLocator::GetClassHashMapSize() { + maple::SpinAutoLock lock(spLock); + return sizeof(MClassHashPool) * mHashPools.size(); +} +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/mangle_for_jni.cpp b/src/mrt/compiler-rt/src/mangle_for_jni.cpp new file mode 100644 index 0000000000..0a43fc7233 --- /dev/null +++ b/src/mrt/compiler-rt/src/mangle_for_jni.cpp @@ -0,0 +1,137 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_naming_api.h" +#include "utils/string_utils.h" + +namespace namemanglerapi { +static bool ConvertSpeicalChar(uint32_t code, std::string &str) { + switch (code) { + case '.': + case '/': + str = "_"; + return true; + case '_': + str = "_1"; + return true; + case ';': + str = "_2"; + return true; + case '[': + str = "_3"; + return true; + default: + return false; + } +} + +static inline void AppendUint(std::string &s, uint32_t code) { + const uint16_t low = static_cast(code & 0xFFFF); + const uint16_t high = static_cast(code >> 16); // get the high 16 bits + + stringutils::AppendFormat(s, "_0%04x", low); + if (high != 0) { + stringutils::AppendFormat(s, "_0%04x", high); + } +} + +static uint32_t DecodeUtf8(const char *&data) { + // one~three utf8 bytes translate to utf16 in 'Basic Multilingual Plane' + const uint8_t byte0 = *data++; + // utf8 one byte: 0xxxxxxx + if (byte0 < 0x80) { + return byte0; + } + + // utf8 two bytes: 110xxxxx 10xxxxxx + uint32_t rc = 0; + const uint8_t byte1 = *data++; + if (byte0 < 0xe0) { + rc = (static_cast(byte0 & 0x1f) << 6) | (static_cast(byte1 & 0x3f)); + return rc; + } + + // utf8 three bytes: 1110xxxx 10xxxxxx 10xxxxxx + const uint8_t byte2 = *data++; + if (byte0 < 0xf0) { + rc = (static_cast(byte0 & 0x0f) << 12) | + (static_cast(byte1 & 0x3f) << 06) | + (static_cast(byte2 & 0x3f)); + return rc; + } + + // Four utf8 bytes translate to utf16 in 'Supplementary Planes'. + // In Supplementary Planes Utf16 is encoded as a 'Surrogate Pair'. + // About utf16 you can refer to: https://zh.wikipedia.org/wiki/UTF-16 + // + // utf8 four bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + const uint8_t byte3 = *data++; + uint32_t code; + code = (static_cast(byte0 & 0x07) << 18) | + (static_cast(byte1 & 0x3f) << 12) | + (static_cast(byte2 & 0x3f) << 06) | + (static_cast(byte3 & 0x3f)); + + // Convert to Surrogate Pair, use three steps: + code -= 0x10000; + uint32_t lead = (code >> 10) | 0xD800; + uint32_t tail = (code & 0x3FF) | 0xDC00; + rc = lead + (tail << 16); + return rc; +} + +static inline bool IsAlnum(uint32_t code) { + if ((code >= 'a' && code <= 'z') || (code >= 'A' && code <= 'Z') || (code >= '0' && code <= '9')) { + return true; + } + return false; +} + +static std::string MangleInternal(const std::string &s, const bool isDex) { + std::string output = ""; + const char *cp = &s[0]; + const char *end = cp + s.length(); + while (cp < end) { + uint32_t code = DecodeUtf8(cp); + if (isDex) { + if (code == '(') { + output.append("__"); + continue; + } + if (code == ')') { + break; + } + } + + std::string str; + if (IsAlnum(code)) { + output.append(1, static_cast(code)); + } else if (ConvertSpeicalChar(code, str)) { + output.append(str); + } else { + AppendUint(output, code); + } + } + + return output; +} + +std::string MangleForJniDex(const std::string &s) { + return MangleInternal(s, true); +} + +std::string MangleForJni(const std::string &s) { + return MangleInternal(s, false); +} +} diff --git a/src/mrt/compiler-rt/src/mm_config.cpp b/src/mrt/compiler-rt/src/mm_config.cpp new file mode 100644 index 0000000000..23343e10ea --- /dev/null +++ b/src/mrt/compiler-rt/src/mm_config.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mm_config.h" + +namespace maplert { +long MrtEnvConf(const char *envName, long defaultValue) { + const char *ev = getenv(envName); + if (ev != nullptr) { + char *endptr = nullptr; + long rv = std::strtol(ev, &endptr, 0); // support dec, oct and hex + if (*endptr == '\0') { + return rv; + } else { + return defaultValue; + } + } else { + return defaultValue; + } +} +} diff --git a/src/mrt/compiler-rt/src/mm_utils.cpp b/src/mrt/compiler-rt/src/mm_utils.cpp new file mode 100644 index 0000000000..7eff08aa6d --- /dev/null +++ b/src/mrt/compiler-rt/src/mm_utils.cpp @@ -0,0 +1,257 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mm_utils.h" + +#include +#include +#include +#include +#include +#include + +#include "mm_config.h" +#include "chosen.h" + +namespace maplert { +namespace util { +const uint32_t kInstructionSize = 4; +const uint32_t kDefaultPrintStackDepth = 30; +const uint32_t kDefaultPrintStackSkip = 2; // skip first two frames in mmutils + +void GetSymbolName(const void *exactPc, std::string &funcPath, std::string &funcName, Dl_info &dlinfo) { + LinkerLocInfo mInfo; + dlinfo.dli_fname = nullptr; + dlinfo.dli_fbase = nullptr; + dlinfo.dli_sname = nullptr; + dlinfo.dli_saddr = nullptr; + + bool dladdrOk = dladdr(exactPc, &dlinfo) != 0; + if (dladdrOk && dlinfo.dli_sname != nullptr) { + funcName = dlinfo.dli_sname; + funcPath = dlinfo.dli_fname; + } else { + if (LinkerAPI::Instance().IsJavaText(exactPc)) { + (void)LinkerAPI::Instance().LocateAddress(exactPc, mInfo, true); + } + + funcName = "unknown function"; + funcPath = "unknown image"; + if (!mInfo.sym.empty()) { + funcName = mInfo.sym; + } + if (!mInfo.path.empty()) { + funcPath = mInfo.path; + } + } +} + +void PrintPCSymbol(const void *pc) { + static constexpr int byteShift = 4; + void *exactPc = reinterpret_cast(reinterpret_cast(pc) - byteShift); + // convert pc to symbol/offset in elf file + std::string funcName, funcPath; + Dl_info dlinfo; + + GetSymbolName(exactPc, funcPath, funcName, dlinfo); + +#ifdef __ANDROID__ + LOG(ERROR) << "PC: " << std::hex << exactPc << " " << funcPath << " (+" << + reinterpret_cast(dlinfo.dli_fbase == nullptr ? 0 : + (reinterpret_cast(exactPc) - (reinterpret_cast(dlinfo.dli_fbase)))) << + ") " << funcName << " (+" << + reinterpret_cast(dlinfo.dli_saddr == nullptr ? 0 : + (reinterpret_cast(exactPc) - (reinterpret_cast(dlinfo.dli_saddr)))) << + ")" << std::dec << maple::endl; +#else + printf("PC: %p %s (+%p) %s (+%p)\n", + exactPc, + funcPath.c_str(), + reinterpret_cast(dlinfo.dli_fbase == nullptr ? 
0 : + (reinterpret_cast(exactPc) - (reinterpret_cast(dlinfo.dli_fbase)))), + funcName.c_str(), + reinterpret_cast(dlinfo.dli_saddr == nullptr ? 0 : + (reinterpret_cast(exactPc) - (reinterpret_cast(dlinfo.dli_saddr))))); +#endif +} + +void PrintPCSymbolToLog(const void *pc, uint32_t logtype, bool printBt) { + void *exactPc = reinterpret_cast(reinterpret_cast(pc) - kInstructionSize); + // convert pc to symbol/offset in elf file + std::string funcName = ""; + std::string funcPath = ""; + Dl_info dlinfo; + + GetSymbolName(exactPc, funcPath, funcName, dlinfo); + + LOG2FILE(logtype) << "PC: " << std::hex << exactPc << " " << funcPath << " (+" << + reinterpret_cast(dlinfo.dli_fbase == nullptr ? 0 : + (reinterpret_cast(exactPc) - (reinterpret_cast(dlinfo.dli_fbase)))) << + ") " << funcName << " (+" << + reinterpret_cast(dlinfo.dli_saddr == nullptr ? 0 : + (reinterpret_cast(exactPc) - (reinterpret_cast(dlinfo.dli_fbase)))) << + ") " << printBt << std::dec << std::endl; +} + +void PrintPCSymbolToLog(const void *pc, std::ostream &ofs, bool printBt) { + void *exactPc = reinterpret_cast(reinterpret_cast(pc) - kInstructionSize); + // convert pc to symbol/offset in elf file + std::string funcName, funcPath; + Dl_info dlinfo; + + GetSymbolName(exactPc, funcPath, funcName, dlinfo); + + ofs << "PC: " << std::hex << exactPc << " " << funcPath << " (+" << + reinterpret_cast(dlinfo.dli_fbase == nullptr ? 0 : + (reinterpret_cast(exactPc) - reinterpret_cast(dlinfo.dli_fbase))) << + ") " << funcName << " (+" << + reinterpret_cast(dlinfo.dli_saddr == nullptr ? 0 : + (reinterpret_cast(exactPc) - reinterpret_cast(dlinfo.dli_saddr))) << + ") " << printBt << std::dec << std::endl; +} + +void PrintBacktrace() { + PrintBacktrace(kDefaultPrintStackDepth, -1); +} + +void PrintBacktrace(int32_t logFile) { + PrintBacktrace(kDefaultPrintStackDepth, logFile); +} + +std::mutex btmutex; + +void PrintBacktrace(size_t limit, int32_t logFile) { + std::lock_guard l(btmutex); + const size_t hardLimit = 30; + std::vector stackArray; + + if (limit > hardLimit) { + limit = hardLimit; + } + + if (logFile < 0) { + printf("%lu: PrintBacktrace()\n", pthread_self()); + } else { + LOG2FILE(logFile) << pthread_self() << ": PrintBacktrace()" << std::endl; + } + + MapleStack::FastRecordCurrentStackPCsByUnwind(stackArray, limit); + size_t size = stackArray.size(); + // skip first two frame, record stack and print stack + for (size_t i = kDefaultPrintStackSkip; i < size; ++i) { + if (logFile < 0) { + PrintPCSymbol(reinterpret_cast(stackArray[i])); + } else { + PrintPCSymbolToLog(reinterpret_cast(stackArray[i]), logFile, false); + } + } + if (logFile < 0) { + fflush(stdout); + } +} + +// [obj] addr header rc meta +// content from meta +const size_t kDumpWordsPerLine = 4; +void DumpObject(address_t obj, std::ostream &ofs) { + MObject *mObject = reinterpret_cast(obj); + MClass *classInfo = mObject->GetClass(); + ofs << "[obj] " << std::hex << obj << " " << GCHeader(obj) << + " " << RefCount(obj) << " " << WeakRefCount(obj) << " " << classInfo << + std::dec << " " << GetObjectDWordSize(*mObject) << std::endl; + if (VLOG_IS_ON(dumpheapsimple)) { + auto refFunc = [&ofs](reffield_t &field, uint64_t kind) { + address_t ref = RefFieldToAddress(field); + if (IS_HEAP_ADDR(ref)) { + ofs << std::hex << ref << (kind == kWeakRefBits ? " w" : (kind == kUnownedRefBits ? 
" u" : "")) << + std::dec << std::endl; + } + }; + ForEachRefField(obj, refFunc); + } else { + size_t word = GetObjectDWordSize(*mObject); + for (size_t i = 0; i < word; i += kDumpWordsPerLine) { + ofs << std::hex << (obj + (i * sizeof(address_t))) << ": "; + size_t bound = (i + kDumpWordsPerLine) < word ? (i + kDumpWordsPerLine) : word; + for (size_t j = i; j < bound; j++) { + ofs << std::hex << *reinterpret_cast(obj + (j * sizeof(address_t))) << " "; +#ifdef USE_32BIT_REF + ofs << std::hex << *reinterpret_cast(obj + (j * sizeof(address_t) + sizeof(reffield_t))) << " "; +#endif // USE_32BIT_REF + } + ofs << std::dec << std::endl; + } + } +} + +// We must not fork until we're single-threaded again. +// Wait until /proc shows we're down to just one thread. +void WaitUntilAllThreadsStopped() { + // If there are only 3 itmes in /proc/selftask, + // we can confirm that all threads stopped except + // main thread. for example: + // $ls -a /proc/self/task + // . ..
+ static constexpr int maxNumEntryOfSingleThreadProc = 3; + struct dirent *entryPtr = nullptr; + int numOfEntries; + + // All background threads are stopped already. We're just waiting + // for their OS counterparts to finish as well. This shouldn't take + // much time so spinning is ok here. + for (;;) { + // Find threads by listing files in "/proc/self/task". + DIR *dirp = ::opendir("/proc/self/task"); + __MRT_ASSERT(dirp != nullptr, "WaitUntilAllThreadsStopped dirp=nullptr"); + for (numOfEntries = 0; numOfEntries <= maxNumEntryOfSingleThreadProc; ++numOfEntries) { + entryPtr = ::readdir(dirp); + if (entryPtr == nullptr) { + break; + } + } + ::closedir(dirp); + + if (numOfEntries == maxNumEntryOfSingleThreadProc) { + return; + } + + // Yield before try again. + std::this_thread::yield(); + } +} + +std::string GetLogDir() { + string appDir = LinkerAPI::Instance().GetAppInfo(); + if (appDir.empty()) { + // must be system server + appDir = "/data/log/maple"; + } else { + appDir = appDir + "/maple"; + } + // Best effort. + mode_t mode = S_IRWXU; + if (mkdir(appDir.c_str(), mode) != 0) { + int myErrno = errno; + errno = 0; + if (myErrno != EEXIST) { + LOG(ERROR) << "Failed to create directory for GCLog." << maple::endl; + LOG(ERROR) << " Directory: " << appDir << maple::endl; + LOG(ERROR) << " Error code: " << strerror(myErrno) << maple::endl; + } + } + + return appDir; +} +} // namespace util +} // namespace maplert diff --git a/src/mrt/compiler-rt/src/muid.cpp b/src/mrt/compiler-rt/src/muid.cpp new file mode 100644 index 0000000000..93afdab671 --- /dev/null +++ b/src/mrt/compiler-rt/src/muid.cpp @@ -0,0 +1,305 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +/* + * We generate muid-hashcode according to the MD5 Message-Digest Algorithm here. + */ +#include "muid.h" +#include +#include "securec.h" + +/* + * Basic MUID functions. + */ +#define F(x, y, z) (((x) & (y)) | ((~(x)) & (z))) +#define G(x, y, z) (((x) & (z)) | ((y) & (~(z)))) +#define H(x, y, z) ((x) ^ (y) ^ (z)) +#define I(x, y, z) ((y) ^ ((x) | (~(z)))) + +/* + * Muid Transformation function. + */ +#define TRANS(f, a, b, c, d, x, t, s) \ + (a) += f((b), (c), (d)) + (x) + (t); \ + (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \ + (a) += (b); + +/* + * Divide the whole input data into sevral groups with kGroupSizebit each and decode them. + */ +#if defined(__i386__) || defined(__x86_64__) || defined(__vax__) +#define DECODE(n, input, output) \ + ((output)[(n)] = \ + (*reinterpret_cast(const_cast(&input[(n) * 4])))) +#else +#define DECODE(n, input, output) \ + ((output)[(n)] = \ + (unsigned int)input[(n) * 4] | \ + ((unsigned int)input[(n) * 4 + 1] << 8) | \ + ((unsigned int)input[(n) * 4 + 2] << 16) | \ + ((unsigned int)input[(n) * 4 + 3] << 24)) +#endif + +/* + * Encode function. 
+ */ +#define ENCODE(dst, src) \ + (dst)[0] = (unsigned char)(src); \ + (dst)[1] = (unsigned char)((src) >> 8); \ + (dst)[2] = (unsigned char)((src) >> 16); \ + (dst)[3] = (unsigned char)((src) >> 24); + +/* + * Body of transformation. + */ +static const unsigned char *MuidTransform(MuidContext &status, const unsigned char &data, uint64_t count) { + unsigned int a, b, c, d; + auto *result = &data; + + while (count--) { + for (unsigned int i = 0; i < kBlockLength; i++) { + DECODE(i, result, status.block); + } + + a = status.a; + b = status.b; + c = status.c; + d = status.d; + + /* Round 1 */ + TRANS(F, a, b, c, d, status.block[0], 0xd76aa478, 7) + TRANS(F, d, a, b, c, status.block[1], 0xe8c7b756, 12) + TRANS(F, c, d, a, b, status.block[2], 0x242070db, 17) + TRANS(F, b, c, d, a, status.block[3], 0xc1bdceee, 22) + TRANS(F, a, b, c, d, status.block[4], 0xf57c0faf, 7) + TRANS(F, d, a, b, c, status.block[5], 0x4787c62a, 12) + TRANS(F, c, d, a, b, status.block[6], 0xa8304613, 17) + TRANS(F, b, c, d, a, status.block[7], 0xfd469501, 22) + TRANS(F, a, b, c, d, status.block[8], 0x698098d8, 7) + TRANS(F, d, a, b, c, status.block[9], 0x8b44f7af, 12) + TRANS(F, c, d, a, b, status.block[10], 0xffff5bb1, 17) + TRANS(F, b, c, d, a, status.block[11], 0x895cd7be, 22) + TRANS(F, a, b, c, d, status.block[12], 0x6b901122, 7) + TRANS(F, d, a, b, c, status.block[13], 0xfd987193, 12) + TRANS(F, c, d, a, b, status.block[14], 0xa679438e, 17) + TRANS(F, b, c, d, a, status.block[15], 0x49b40821, 22) + + /* Round 2 */ + TRANS(G, a, b, c, d, status.block[1], 0xf61e2562, 5) + TRANS(G, d, a, b, c, status.block[6], 0xc040b340, 9) + TRANS(G, c, d, a, b, status.block[11], 0x265e5a51, 14) + TRANS(G, b, c, d, a, status.block[0], 0xe9b6c7aa, 20) + TRANS(G, a, b, c, d, status.block[5], 0xd62f105d, 5) + TRANS(G, d, a, b, c, status.block[10], 0x02441453, 9) + TRANS(G, c, d, a, b, status.block[15], 0xd8a1e681, 14) + TRANS(G, b, c, d, a, status.block[4], 0xe7d3fbc8, 20) + TRANS(G, a, b, c, d, status.block[9], 0x21e1cde6, 5) + TRANS(G, d, a, b, c, status.block[14], 0xc33707d6, 9) + TRANS(G, c, d, a, b, status.block[3], 0xf4d50d87, 14) + TRANS(G, b, c, d, a, status.block[8], 0x455a14ed, 20) + TRANS(G, a, b, c, d, status.block[13], 0xa9e3e905, 5) + TRANS(G, d, a, b, c, status.block[2], 0xfcefa3f8, 9) + TRANS(G, c, d, a, b, status.block[7], 0x676f02d9, 14) + TRANS(G, b, c, d, a, status.block[12], 0x8d2a4c8a, 20) + + /* Round 3 */ + TRANS(H, a, b, c, d, status.block[5], 0xfffa3942, 4) + TRANS(H, d, a, b, c, status.block[8], 0x8771f681, 11) + TRANS(H, c, d, a, b, status.block[11], 0x6d9d6122, 16) + TRANS(H, b, c, d, a, status.block[14], 0xfde5380c, 23) + TRANS(H, a, b, c, d, status.block[1], 0xa4beea44, 4) + TRANS(H, d, a, b, c, status.block[4], 0x4bdecfa9, 11) + TRANS(H, c, d, a, b, status.block[7], 0xf6bb4b60, 16) + TRANS(H, b, c, d, a, status.block[10], 0xbebfbc70, 23) + TRANS(H, a, b, c, d, status.block[13], 0x289b7ec6, 4) + TRANS(H, d, a, b, c, status.block[0], 0xeaa127fa, 11) + TRANS(H, c, d, a, b, status.block[3], 0xd4ef3085, 16) + TRANS(H, b, c, d, a, status.block[6], 0x04881d05, 23) + TRANS(H, a, b, c, d, status.block[9], 0xd9d4d039, 4) + TRANS(H, d, a, b, c, status.block[12], 0xe6db99e5, 11) + TRANS(H, c, d, a, b, status.block[15], 0x1fa27cf8, 16) + TRANS(H, b, c, d, a, status.block[2], 0xc4ac5665, 23) + + /* Round 4 */ + TRANS(I, a, b, c, d, status.block[0], 0xf4292244, 6) + TRANS(I, d, a, b, c, status.block[7], 0x432aff97, 10) + TRANS(I, c, d, a, b, status.block[14], 0xab9423a7, 15) + TRANS(I, b, c, d, a, status.block[5], 
0xfc93a039, 21) + TRANS(I, a, b, c, d, status.block[12], 0x655b59c3, 6) + TRANS(I, d, a, b, c, status.block[3], 0x8f0ccc92, 10) + TRANS(I, c, d, a, b, status.block[10], 0xffeff47d, 15) + TRANS(I, b, c, d, a, status.block[1], 0x85845dd1, 21) + TRANS(I, a, b, c, d, status.block[8], 0x6fa87e4f, 6) + TRANS(I, d, a, b, c, status.block[15], 0xfe2ce6e0, 10) + TRANS(I, c, d, a, b, status.block[6], 0xa3014314, 15) + TRANS(I, b, c, d, a, status.block[13], 0x4e0811a1, 21) + TRANS(I, a, b, c, d, status.block[4], 0xf7537e82, 6) + TRANS(I, d, a, b, c, status.block[11], 0xbd3af235, 10) + TRANS(I, c, d, a, b, status.block[2], 0x2ad7d2bb, 15) + TRANS(I, b, c, d, a, status.block[9], 0xeb86d391, 21) + + status.a += a; + status.b += b; + status.c += c; + status.d += d; + + result += kGroupSize; + } + + return result; +} + +/* + * Initialize constants here. + */ +void MuidInit(MuidContext &status) { + status.a = 0x67452301; + status.b = 0xefcdab89; + status.c = 0x98badcfe; + status.d = 0x10325476; + + status.count[0] = 0; + status.count[1] = 0; +} + +/* + * Decoding part(byte to unsigned int). + */ +void MuidDecode(MuidContext &status, const unsigned char &data, size_t size) { + unsigned int tmp = status.count[0]; + status.count[0] = (tmp + size) & 0x1fffffff; + if (status.count[0] < tmp) { + status.count[1]++; + } + uint32_t higherBits = static_cast(size) >> kShiftAmount; + status.count[1] += higherBits * kByteLength; + + size_t idx = tmp & kBitMask; + size_t remain = kGroupSize - idx; + auto *position = &data; + + if (idx != 0) { + if (size < remain) { + if (memcpy_s(&status.buffer[idx], kGroupSize, &data, size) != EOK) { + return; + } + return; + } + + if (memcpy_s(&status.buffer[idx], kGroupSize, &data, remain) != EOK) { + return; + } + (void)MuidTransform(status, *status.buffer, 1); + + size -= remain; + position += remain; + } + + if (size >= kGroupSize) { + position = MuidTransform(status, *position, size / kGroupSize); + size &= kBitMask; + } + + if (memcpy_s(status.buffer, kGroupSize, position, size) != EOK) { + return; + } +} + +/* + * Encoding part(unsigned int to byte). + */ +template +void FullEncode(T &result, MuidContext &status) { + size_t idx = status.count[0] & kBitMask; + status.buffer[idx++] = 0x80; + + size_t remain = kGroupSize - idx; + + if (remain < kByteLength) { + if (memset_s(&status.buffer[idx], kGroupSize, 0, remain) != EOK) { + return; + } + (void)MuidTransform(status, *status.buffer, 1); + idx = 0; + remain = kGroupSize; + } + + if (memset_s(&status.buffer[idx], kGroupSize, 0, remain - kByteLength) != EOK) { + return; + } + status.count[0] *= kByteLength; + const unsigned int indexOfLastEight = 56; + const unsigned int indexOfLastFour = 60; + ENCODE(&status.buffer[indexOfLastEight], status.count[0]) + ENCODE(&status.buffer[indexOfLastFour], status.count[1]) + + (void)MuidTransform(status, *status.buffer, 1); + ENCODE(&result[0], status.a) + ENCODE(&result[4], status.b) +} + +void MuidEncode(unsigned char (&result)[kDigestShortHashLength], MuidContext &status) { + FullEncode(result, status); + if (memset_s(&status, sizeof(status), 0, sizeof(status)) != EOK) { + return; + } +} + +void MuidEncode(unsigned char (&result)[kDigestHashLength], MuidContext &status, bool use64Bit) { + FullEncode(result, status); + if (!use64Bit) { + ENCODE(&result[8], status.c) + ENCODE(&result[12], status.d) + } + if (memset_s(&status, sizeof(status), 0, sizeof(status)) != EOK) { + return; + } +} + +/* + * The entrance functions. 
+ */ +void GetMUIDHash(const unsigned char &data, size_t size, MUID &muid) { + MuidContext status; + MuidInit(status); + MuidDecode(status, data, size); + MuidEncode(muid.data.bytes, status); +} + +DigestHash GetDigestHash(const unsigned char &bytes, uint32_t len) { + DigestHash digestHash; + MuidContext digestContext; + + digestHash.data.first = 0; + digestHash.data.second = 0; + + MuidInit(digestContext); + MuidDecode(digestContext, bytes, len); + MuidEncode(digestHash.bytes, digestContext); + + return digestHash; +} + +MUID GetMUID(const std::string &symbolName, bool forSystem) { + MUID muid; + auto *data = reinterpret_cast(symbolName.c_str()); + GetMUIDHash(*data, symbolName.length(), muid); + if (forSystem) { + muid.SetSystemNameSpace(); + } else { + muid.SetApkNameSpace(); + } + return muid; +} \ No newline at end of file diff --git a/src/mrt/compiler-rt/src/mutator_list.cpp b/src/mrt/compiler-rt/src/mutator_list.cpp new file mode 100644 index 0000000000..42d5d6a1b0 --- /dev/null +++ b/src/mrt/compiler-rt/src/mutator_list.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mutator_list.h" + +namespace maplert { +// The singleton instance of MutatorList. +ImmortalWrapper MutatorList::instance; + +void MutatorList::DebugShowCurrentMutators() { + (void)fprintf(stderr, "Showing current mutators. size=%zu", mutatorList.size()); + for (auto &mutator : mutatorList) { + mutator->DebugShow(); + } +} +} diff --git a/src/mrt/compiler-rt/src/namemangler.cpp b/src/mrt/compiler-rt/src/namemangler.cpp new file mode 100644 index 0000000000..425db54204 --- /dev/null +++ b/src/mrt/compiler-rt/src/namemangler.cpp @@ -0,0 +1,626 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "namemangler.h" +#include +#include +#include + +namespace namemangler { +#ifdef __MRT_DEBUG +#define ASSERT(f) assert(f) +#else +#define ASSERT(f) ((void)0) +#endif + +const int kLocalCodebufSize = 1024; +const int kMaxCodecbufSize = (1 << 16); // Java spec support a max name length of 64K. + +#define GETHEXCHAR(n) static_cast((n) < 10 ? (n) + '0' : (n) - 10 + 'a') +#define GETHEXCHARU(n) static_cast((n) < 10 ? (n) + '0' : (n) - 10 + 'A') + +bool doCompression = false; + +// Store a mapping between full string and its compressed version +// More frequent and specific strings go before general ones, +// e.g. 
Ljava_2Flang_2FObject_3B goes before Ljava_2Flang_2F +// +using StringMap = std::map; + +const StringMap kInternalMangleTable = { + { "Ljava_2Flang_2FObject_3B", "L0_3B" }, + { "Ljava_2Flang_2FClass_3B", "L1_3B" }, + { "Ljava_2Flang_2FString_3B", "L2_3B" } +}; + +// This mapping is mainly used for compressing annotation strings +const StringMap kOriginalMangleTable = { + { "Ljava/lang/Object", "L0" }, + { "Ljava/lang/Class", "L1" }, + { "Ljava/lang/String", "L2" } +}; + +// The returned buffer needs to be explicitly freed +static inline char *AllocCodecBuf(size_t maxLen) { + if (maxLen == 0) { + return nullptr; + } + // each char may have 2 more char, so give out the max space buffer + return reinterpret_cast(malloc((maxLen <= kLocalCodebufSize) ? 3 * maxLen : 3 * kMaxCodecbufSize)); +} + +static inline void FreeCodecBuf(char *buf) { + free(buf); +} + +static std::string CompressName(std::string &name, const StringMap &mapping = kInternalMangleTable) { + for (auto &entry : mapping) { + if (name.find(entry.first) != name.npos) { + name = std::regex_replace(name, std::regex(entry.first), entry.second); + } + } + return name; +} + +static std::string DecompressName(std::string &name, const StringMap &mapping = kInternalMangleTable) { + for (auto &entry : mapping) { + if (name.find(entry.second) != name.npos) { + name = std::regex_replace(name, std::regex(entry.second), entry.first); + } + } + return name; +} + +std::string GetInternalNameLiteral(std::string name) { + return (doCompression ? CompressName(name) : name); +} + +std::string GetOriginalNameLiteral(std::string name) { + return (doCompression ? CompressName(name, kOriginalMangleTable) : name); +} + +std::string EncodeName(const std::string &name) { + // name is guaranteed to be null-terminated + size_t nameLen = name.length(); + nameLen = nameLen > kMaxCodecbufSize ? 
kMaxCodecbufSize : nameLen; + char *buf = AllocCodecBuf(nameLen); + if (buf == nullptr) { + return std::string(name); + } + + size_t pos = 0; + size_t i = 0; + std::string str(name); + std::u16string str16; + while (i < nameLen) { + unsigned char c = name[i]; + if (c == '_') { + buf[pos++] = '_'; + buf[pos++] = '_'; + } else if (c == '[') { + buf[pos++] = 'A'; + } else if (isalnum(c)) { + buf[pos++] = c; + } else if (c <= 0x7F) { + // _XX: '_' followed by ascii code in hex + if (c == '.') { + c = '/'; // use / in package name + } + buf[pos++] = '_'; + unsigned char n = c >> 4; // get the high 4 bit and calculate + buf[pos++] = GETHEXCHARU(n); + n = c - static_cast(n << 4); // revert + buf[pos++] = GETHEXCHARU(n); + } else { + str16.clear(); + // process one 16-bit char at a time + unsigned int n = UTF8ToUTF16(str16, str.substr(i), 1, false); + buf[pos++] = '_'; + if ((n >> 16) == 1) { + unsigned short m = str16[0]; + buf[pos++] = 'u'; + buf[pos++] = GETHEXCHAR((m & 0xF000) >> 12); + buf[pos++] = GETHEXCHAR((m & 0x0F00) >> 8); + buf[pos++] = GETHEXCHAR((m & 0x00F0) >> 4); + buf[pos++] = GETHEXCHAR(m & 0x000F); + } else { + unsigned short m = str16[0]; + buf[pos++] = 'U'; + buf[pos++] = GETHEXCHAR((m & 0xF000) >> 12); + buf[pos++] = GETHEXCHAR((m & 0x0F00) >> 8); + buf[pos++] = GETHEXCHAR((m & 0x00F0) >> 4); + buf[pos++] = GETHEXCHAR(m & 0x000F); + m = str16[1]; + buf[pos++] = GETHEXCHAR((m & 0xF000) >> 12); + buf[pos++] = GETHEXCHAR((m & 0x0F00) >> 8); + buf[pos++] = GETHEXCHAR((m & 0x00F0) >> 4); + buf[pos++] = GETHEXCHAR(m & 0x000F); + } + i += int32_t(n & 0xFFFF) - 1; + } + i++; + } + + buf[pos] = '\0'; + std::string newName = std::string(buf, pos); + FreeCodecBuf(buf); + if (doCompression) { + newName = CompressName(newName); + } + return newName; +} + +static inline bool UpdatePrimType(bool primType, int splitNo, uint32_t ch) { + if (ch == 'L') { + return false; + } + + if (((ch == ';') || (ch == '(') || (ch == ')')) && (splitNo > 1)) { + return true; + } + + return primType; +} + +const int kNumLimit = 10; +const int kCodeOffset3 = 12; +const int kCodeOffset2 = 8; +const int kCodeOffset = 4; +const int kJavaStrLength = 5; + +std::string DecodeName(const std::string &name) { + if (name.find(';') != std::string::npos) { // no need Decoding a non-encoded string + return name; + } + std::string decompressedName; + const char *namePtr = nullptr; + size_t nameLen; + + if (doCompression) { + decompressedName = name; + decompressedName = DecompressName(decompressedName); + namePtr = decompressedName.c_str(); + nameLen = decompressedName.length(); + } + else { + namePtr = name.c_str(); + nameLen = name.length(); + } + + // Demangled name is supposed to be shorter. No buffer overflow issue here. + std::string newName(nameLen, '\0'); + + bool primType = true; + int splitNo = 0; // split: class 0 | method 1 | signature 2 + size_t pos = 0; + std::string str; + std::u16string str16; + for (size_t i = 0; i < nameLen;) { + unsigned char c = namePtr[i++]; + if (c == '_') { // _XX: '_' followed by ascii code in hex + if (i >= nameLen) { + break; + } + if (namePtr[i] == '_') { + newName[pos++] = namePtr[i++]; + } else if (namePtr[i] == 'u') { + str.clear(); + str16.clear(); + i++; + c = namePtr[i++]; + uint8_t b1 = (c <= '9') ? c - '0' : c - 'a' + kNumLimit; + c = namePtr[i++]; + uint8_t b2 = (c <= '9') ? c - '0' : c - 'a' + kNumLimit; + c = namePtr[i++]; + uint8_t b3 = (c <= '9') ? c - '0' : c - 'a' + kNumLimit; + c = namePtr[i++]; + uint8_t b4 = (c <= '9') ? 
c - '0' : c - 'a' + kNumLimit; + uint32_t codepoint = (b1 << kCodeOffset3) | (b2 << kCodeOffset2) | (b3 << kCodeOffset) | b4; + str16 += (char16_t)codepoint; + unsigned int n = UTF16ToUTF8(str, str16, 1, false); + if ((n >> 16) == 2) { // the count of str equal 2 to 4, use array to save the utf8 results + newName[pos++] = str[0]; + newName[pos++] = str[1]; + } else if ((n >> 16) == 3) { + newName[pos++] = str[0]; + newName[pos++] = str[1]; + newName[pos++] = str[2]; + } else if ((n >> 16) == 4) { + newName[pos++] = str[0]; + newName[pos++] = str[1]; + newName[pos++] = str[2]; + newName[pos++] = str[3]; + } + } else { + c = namePtr[i++]; + unsigned int v = (c <= '9') ? c - '0' : c - 'A' + kNumLimit; + unsigned int asc = v << kCodeOffset; + if (i >= nameLen) { + break; + } + c = namePtr[i++]; + v = (c <= '9') ? c - '0' : c - 'A' + kNumLimit; + asc += v; + + newName[pos++] = static_cast(asc); + + if (asc == '|') { + splitNo++; + } + + primType = UpdatePrimType(primType, splitNo, asc); + } + } else { + if (splitNo < 2) { // split: class 0 | method 1 | signature 2 + newName[pos++] = c; + continue; + } + + primType = UpdatePrimType(primType, splitNo, c); + if (primType) { + newName[pos++] = (c == 'A') ? '[' : c; + } else { + newName[pos++] = c; + } + } + } + + newName.resize(pos); + return newName; +} + +// input: maple name +// output: Ljava/lang/Object; [Ljava/lang/Object; +void DecodeMapleNameToJavaDescriptor(const std::string &nameIn, std::string &nameOut) { + nameOut = DecodeName(nameIn); + if (nameOut[0] == 'A') { + int i = 0; + while (nameOut[i] == 'A') { + nameOut[i++] = '['; + } + } +} + +// convert maple name to java name +// http://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/design.html#resolving_native_method_names +std::string NativeJavaName(const std::string &name, bool overLoaded) { + // Decompress name first because the generated native function name needs + // to follow certain spec, not something maple can control. + std::string decompressedName(name); + if (doCompression) { + decompressedName = DecompressName(decompressedName); + } + + unsigned int nameLen = static_cast(decompressedName.length()) + kJavaStrLength; + std::string newName = "Java_"; + unsigned int i = 0; + + // leading A's are array + while (i < nameLen && name[i] == 'A') { + newName += "_3"; + i++; + } + + bool isProto = false; // class names in prototype have 'L' and ';' + bool isFuncname = false; + bool isTypename = false; + while (i < nameLen) { + char c = decompressedName[i]; + if (c == '_') { + i++; + // UTF16 unicode + if (decompressedName[i] == 'u') { + newName += "_0"; + i++; + } else if (decompressedName[i] == '_') { + newName += "_1"; + i++; + } else { + // _XX: '_' followed by ascii code in hex + c = decompressedName[i++]; + unsigned char v = (c <= '9') ? c - '0' : c - 'A' + kNumLimit; + unsigned char asc = v << kCodeOffset; + c = decompressedName[i++]; + v = (c <= '9') ? 
c - '0' : c - 'A' + kNumLimit; + asc += v; + if (asc == '/') { + newName += "_"; + } else if (asc == '|' && !isFuncname) { + newName += "_"; + isFuncname = true; + } else if (asc == '|' && isFuncname) { + if (!overLoaded) { + break; + } + newName += "_"; + isFuncname = false; + } else if (asc == '(') { + newName += "_"; + isProto = true; + } else if (asc == ')') { + break; + } else if (asc == ';' && !isFuncname) { + if (isProto) { + newName += "_2"; + } + isTypename = false; + } else if (asc == '$') { + newName += "_00024"; + } else if (asc == '-') { + newName += "_0002d"; + } else { + printf("name = %s\n", decompressedName.c_str()); + printf("c = %c\n", asc); + ASSERT(false && "more cases in NativeJavaName"); + } + } + } else { + if (c == 'L' && !isFuncname && !isTypename) { + if (isProto) { + newName += c; + } + isTypename = true; + i++; + } else if (c == 'A' && !isTypename && !isFuncname) { + while (name[i] == 'A') { + newName += "_3"; + i++; + } + } else { + newName += c; + i++; + } + } + } + return newName; +} + +static uint16_t ChangeEndian16(uint16_t u16) { + return ((u16 & 0xFF00) >> kCodeOffset2) | ((u16 & 0xFF) << kCodeOffset2); +} + +/* UTF8 + * U+0000 - U+007F 0xxxxxxx + * U+0080 - U+07FF 110xxxxx 10xxxxxx + * U+0800 - U+FFFF 1110xxxx 10xxxxxx 10xxxxxx + * U+10000- U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + * + * UTF16 + * U+0000 - U+D7FF codePoint + * U+E000 - U+FFFF codePoint + * U+10000- U+10FFFF XXXX YYYY + * code = codePoint - 0x010000, ie, 20-bit number in the range 0x000000..0x0FFFFF + * XXXX: top 10 bits of code + 0xD800: 0xD800..0xDBFF + * YYYY: low 10 bits of code + 0xDC00: 0xDC00..0xDFFF + * + * convert upto num UTF8 elements + * return two 16-bit values: return_number_of_elements | consumed_input_number_of_elements + */ +const int kCodepointOffset1 = 6; // U+0080 - U+07FF 110xxxxx 10xxxxxx +const int kCodepointOffset2 = 12; // U+0800 - U+FFFF 1110xxxx 10xxxxxx 10xxxxxx +const int kCodepointOffset3 = 18; // U+10000- U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx +const int kCountOffset = 16; +const int kCodeAfterMinusOffset = 10; // codePoint equals itself minus 0x10000 + +unsigned UTF16ToUTF8(std::string &str, const std::u16string &str16, unsigned short num, bool isBigEndian) { + uint32_t codePoint = 0; + uint32_t i = 0; + unsigned short count = 0; + unsigned short retNum = 0; + while (i < str16.length()) { + if (isBigEndian || num == 1) { + codePoint = str16[i++]; + } else { + codePoint = ChangeEndian16(str16[i++]); + } + if (codePoint > 0xFFFF) { + codePoint &= 0x3FF; + codePoint <<= kNumLimit; + if (isBigEndian) { + codePoint += str16[i++] & 0x3FF; + } else { + codePoint += ChangeEndian16(str16[i++]) & 0x3FF; + } + } + if (codePoint <= 0x7F) { + str += static_cast(codePoint); + retNum += 1; // one UTF8 char + } else if (codePoint <= 0x7FF) { + str += static_cast(0xC0 + (codePoint >> kCodepointOffset1)); + str += static_cast(0x80 + (codePoint & 0x3F)); + retNum += 2; // two UTF8 chars + } else if (codePoint <= 0xFFFF) { + str += static_cast(0xE0 + ((codePoint >> kCodepointOffset2) & 0xF)); + str += static_cast(0x80 + ((codePoint >> kCodepointOffset1) & 0x3F)); + str += static_cast(0x80 + (codePoint & 0x3F)); + retNum += 3; // three UTF8 chars + } else { + str += static_cast(0xF0 + ((codePoint >> kCodepointOffset3) & 0x7)); + str += static_cast(0x80 + ((codePoint >> kCodepointOffset2) & 0x3F)); + str += static_cast(0x80 + ((codePoint >> kCodepointOffset1) & 0x3F)); + str += static_cast(0x80 + (codePoint & 0x3F)); + retNum += 4; // four UTF8 chars + } + count++; + 
if (num == count) { + return ((static_cast<unsigned>(retNum)) << kCountOffset) | static_cast<unsigned>(i); + } + } + return i; +} + +bool NeedConvertUTF16(const std::string &str8) { + uint32_t a = 0; + size_t i = 0; + size_t size = str8.length(); + while (i < size) { + a = static_cast<uint8_t>(str8[i++]); + constexpr uint8_t maxValidAscii = 0x7F; + if (a > maxValidAscii) { + return true; + } + } + return false; +} + +uint32_t GetCodePoint(const std::string &str8, uint32_t &i) { + uint32_t b; + uint32_t c; + uint32_t d; + uint32_t codePoint = 0; + uint32_t a = static_cast<uint8_t>(str8[i++]); + if (a <= 0x7F) { // 0... + codePoint = a; + } else if (a >= 0xF0) { // 11110... + b = str8[i++]; + c = str8[i++]; + d = str8[i++]; + codePoint = ((a & 0x7) << kCodepointOffset3) | ((b & 0x3F) << kCodepointOffset2) | + ((c & 0x3F) << kCodepointOffset1) | (d & 0x3F); + } else if (a >= 0xE0) { // 1110... + b = str8[i++]; + c = str8[i++]; + codePoint = ((a & 0xF) << kCodepointOffset2) | ((b & 0x3F) << kCodepointOffset1) | (c & 0x3F); + } else if (a >= 0xC0) { // 110... + b = str8[i++]; + codePoint = ((a & 0x1F) << kCodepointOffset1) | (b & 0x3F); + } else { + ASSERT(false && "invalid UTF-8"); + } + return codePoint; +} + +// convert up to num UTF16 elements +// two 16-bit values: return_number_of_elements | consumed_input_number_of_elements +unsigned UTF8ToUTF16(std::u16string &str16, const std::string &str8, unsigned short num, bool isBigEndian) { + uint32_t i = 0; + unsigned short count = 0; + unsigned short retNum = 0; + while (i < str8.length()) { + uint32_t codePoint = GetCodePoint(str8, i); + if (codePoint <= 0xFFFF) { + if (isBigEndian || num == 1) { + str16 += static_cast<char16_t>(codePoint); + } else { + str16 += static_cast<char16_t>(ChangeEndian16(static_cast<uint16_t>(codePoint))); + } + retNum += 1; // one utf16 + } else { + codePoint -= 0x10000; + if (isBigEndian || num == 1) { + str16 += static_cast<char16_t>((codePoint >> kCodeAfterMinusOffset) | 0xD800); + str16 += static_cast<char16_t>((codePoint & 0x3FF) | 0xDC00); + } else { + str16 += static_cast<char16_t>( + ChangeEndian16(static_cast<uint16_t>((codePoint >> kCodeAfterMinusOffset) | 0xD800))); + str16 += static_cast<char16_t>(ChangeEndian16((codePoint & 0x3FF) | 0xDC00)); + } + retNum += 2; // two utf16 + } + count++; + // only convert num elements + if (num == count) { + return (static_cast<unsigned>(retNum) << kCountOffset) | static_cast<unsigned>(i); + } + } + return i; +} + +const int kGreybackOffset = 7; +void GetUnsignedLeb128Encode(std::vector<uint8_t> &dest, uint32_t value) { + bool done = false; + do { + uint8_t byte = value & 0x7f; + value >>= kGreybackOffset; + done = (value == 0); + if (!done) { + byte |= 0x80; + } + dest.push_back(byte); + } while (!done); +} + +uint32_t GetUnsignedLeb128Decode(const uint8_t **data) { + ASSERT(data != nullptr && "data in GetUnsignedLeb128Decode() is nullptr"); + const uint8_t *ptr = *data; + uint32_t result = 0; + int shift = 0; + uint8_t byte = 0; + while (true) { + byte = *(ptr++); + result |= (byte & 0x7f) << shift; + if ((byte & 0x80) == 0) { + break; + } + shift += kGreybackOffset; + } + *data = ptr; + return result; +} + +uint64_t GetLEB128Encode(int64_t val, bool isUnsigned) { + uint64_t res = 0; + uint8_t byte = 0; + uint8_t count = 0; + bool done = false; + do { + byte = static_cast<uint8_t>(val) & 0x7f; + val >>= kGreybackOffset; // intended signed shift: block codedex here + done = (isUnsigned ?
val == 0 : (val == 0 || val == -1)); + if (!done) { + byte |= 0x80; + } + res |= (static_cast(byte) << (count++ << 3)); // each byte need 8 bit + } while (!done); + return res; +} + +uint64_t GetUleb128Encode(uint64_t val) { + return GetLEB128Encode(int64_t(val), true); +} + +uint64_t GetSleb128Encode(int64_t val) { + return GetLEB128Encode(val, false); +} + +uint64_t GetUleb128Decode(uint64_t val) { + return val; +} + +int64_t GetSleb128Decode(uint64_t val) { + return val; +} + +size_t GetUleb128Size(uint64_t v) { + ASSERT(v && "if v == 0, __builtin_clzll(v) is not defined"); + size_t clz = static_cast(__builtin_clzll(v)); + // num of 7-bit groups + return size_t((64 - clz + 6) / 7); +} + +size_t GetSleb128Size(int32_t v) { + size_t size = 0; + int rem = v >> kGreybackOffset; + bool hasMore = true; + int end = ((v >= 0) ? 0 : -1); + + while (hasMore) { + hasMore = (rem != end) || ((rem & 1) != ((v >> 6) & 1)); // judege whether has More valid rem + size++; + v = rem; + rem >>= kGreybackOffset; // intended signed shift: block codedex here + } + return size; +} +} // namespace namemangler diff --git a/src/mrt/compiler-rt/src/panic.cpp b/src/mrt/compiler-rt/src/panic.cpp new file mode 100644 index 0000000000..ac0d834820 --- /dev/null +++ b/src/mrt/compiler-rt/src/panic.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "panic.h" + +asm(" .text\n" + " .align 2\n" + " .globl abort_saferegister\n" + " .type abort_saferegister, %function\n" + "abort_saferegister:\n" +#if defined(__aarch64__) + " brk #1\n" + " ret\n" +#endif + " .size abort_saferegister, .-abort_saferegister"); + +namespace maplert { +void MRT_Panic() { + abort(); +} + +#if __MRT_DEBUG +void __MRT_AssertBreakPoint() { + // hook for debug +} +#endif +} diff --git a/src/mrt/compiler-rt/src/profile.cpp b/src/mrt/compiler-rt/src/profile.cpp new file mode 100644 index 0000000000..915f92d7d2 --- /dev/null +++ b/src/mrt/compiler-rt/src/profile.cpp @@ -0,0 +1,208 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "profile.h" + +#include +#include +#include +#include +#include +#include + +#include "mm_config.h" +#include "cpphelper.h" +#include "yieldpoint.h" +#include "chosen.h" +#include "collector/stats.h" +#include "collector/native_gc.h" +#include "mstring_inline.h" + +namespace maplert { +struct InstanceInfo { + size_t bytes; + size_t count; + size_t size; + std::string name; + + InstanceInfo() { + bytes = 0; + count = 0; + size = 0; + } +}; + +struct FuncInfo { + uint32_t enterCount; + std::string soname; + std::string funcName; + uint64_t funcaddr; + explicit FuncInfo(std::string sonamein, std::string funcname, uint64_t faddr) { + soname = std::move(sonamein); + funcName = std::move(funcname); + funcaddr = std::move(faddr); + enterCount = 1; + } +}; + +static std::unordered_map recordFunc; +static std::mutex recordMtx; + +static std::unordered_map recordStaticField; +static std::mutex recordStaticFieldMtx; + +void DumpRCAndGCPerformanceInfo(std::ostream &os) { + stats::GCStats &curStats = *(stats::gcStats); + + os << "Registered native bytes allocated: " << NativeGCStats::Instance().GetNativeBytes() << std::endl; + + os << "Maximum stop-the-world time:\t" << curStats.MaxSTWNanos() << std::endl; + os << "GC triggered:\t" << curStats.NumGCTriggered() << std::endl; + os << "Average memory leak:\t" << curStats.AverageMemoryLeak() << std::endl; + os << "Total memory leak:\t" << curStats.TotalMemoryLeak() << std::endl; + os << "Number of heap allocation anomalies:\t" << curStats.NumAllocAnomalies() << std::endl; + os << "Number of RC anomalies:\t" << curStats.NumRCAnomalies() << std::endl; + os << "Total string num in intern pool:\t" << ConstStringPoolNum(false) << std::endl; + os << "Total string size in intern pool:\t" << ConstStringPoolSize(false) << std::endl; + os << "Total string num in literal pool:\t" << ConstStringPoolNum(true) << std::endl; + os << "Total string size in literal pool:\t" << ConstStringPoolSize(true) << std::endl; + + size_t totalObjBytes = 0; + + // klass -> statistics map. + std::map instanceInfos; + + { + LOG(INFO) << "Stopping the world for usage-by-class statistics" << maple::endl; + + // Stop the world for statistics. + ScopedStopTheWorld stw; + + // Now all threads are in safe region. + // We enumerate all objects + (void)(*theAllocator).ForEachObj([&instanceInfos, &totalObjBytes](address_t obj) { + MObject *mObj = reinterpret_cast(obj); + MClass *klass = mObj->GetClass(); + size_t bytes = mObj->GetSize(); + + InstanceInfo &info = instanceInfos[klass]; // Creates element if not found. + info.bytes += bytes; + info.count += 1; + + // We leave the InstanceInfo::name field uninitialized here, for performance reason. + totalObjBytes += bytes; + }, true); + } // The world starts here. + + os << "Total object size:\t" << totalObjBytes << std::endl; + + // Copy into a vector for sorting. + std::vector vec; + for (auto &kv : instanceInfos) { + auto &klass = kv.first; + InstanceInfo &info = kv.second; + info.size = klass->GetObjectSize(); + vec.push_back(info); + klass->GetBinaryName(vec.back().name); + } + + // Sort by bytes, count and name. 
+ std::sort(vec.begin(), vec.end(), [](InstanceInfo &a, InstanceInfo &b) { + if (a.bytes != b.bytes) { + return a.bytes > b.bytes; + } else if (a.count != b.count) { + return a.count > b.count; + } else { + return a.name < b.name; + } + }); + + // Print, use std::setw() to ensure alignment + os << "Heap usage by class:" << std::endl; + os << std::setw(10) << "num" << std::setw(16) << "#instance" << std::setw(16) << "#bytes" << + std::setw(16) << "size" << " class name" << std::endl; + os << "-------------------------------------------------------------------" << std::endl; + size_t rank = 1; + + for (auto &info : vec) { + os << std::setw(10) << rank << std::setw(16) << info.count << std::setw(16) << info.bytes << + std::setw(16) << info.size << " " << info.name << std::endl; + rank++; + } + + os << std::endl; +} + +__attribute__((used)) void RecordMethod(uint64_t faddr, std::string &func, std::string &soname) { + if (func.empty()) { + LOG(INFO) << "func is empty " << faddr << std::endl; + } + std::lock_guard lock(recordMtx); + recordFunc.insert(std::make_pair(faddr, new FuncInfo(soname, func, faddr))); +} + +bool CheckMethodResolved(uint64_t faddr) { + std::lock_guard lock(recordMtx); + auto item = recordFunc.find(faddr); + if (item != recordFunc.end()) { + (item->second)->enterCount++; + return true; + } + return false; +} + +void ClearFuncProfile() { + std::lock_guard lock(recordMtx); + recordFunc.clear(); +} + +void DumpMethodUse(std::ostream &os) { + os << "start record func " << std::endl; + std::vector funsInfo; + // sort by calltimes + for (auto &p : recordFunc) { + if (p.second != nullptr) { + funsInfo.push_back(p.second); + } + } + std::sort(funsInfo.begin(), funsInfo.end(), [](const FuncInfo *a, const FuncInfo *b) { + return a->enterCount < b->enterCount; + }); + + for (auto &p : funsInfo) { +#if RECORD_FUNC_NAME + os << p->soname << "\t" << p->funcName << "\t" << p->enterCount << "\t" << std::hex << + p->funcaddr << std::dec << std::endl; +#else + os << std::hex << "0x" << p->funcaddr << "\t" << p->enterCount << std::dec << std::endl; +#endif + } + os << "end record func " << std::endl; +} + +void RecordStaticField(address_t *addr, const std::string name) { + std::lock_guard lock(recordStaticFieldMtx); + if (recordStaticField.find(addr) == recordStaticField.end()) { + recordStaticField.insert(std::make_pair(addr, name)); + } +} + +void DumpStaticField(std::ostream &os) { + os << "start record static fields " << std::endl; + for (auto &p : recordStaticField) { + os << p.first << " " << p.second << std::endl; + } + os << "end record static fields " << std::endl; +} +} diff --git a/src/mrt/compiler-rt/src/tracer.cpp b/src/mrt/compiler-rt/src/tracer.cpp new file mode 100644 index 0000000000..20e75c455c --- /dev/null +++ b/src/mrt/compiler-rt/src/tracer.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "tracer.h" + +namespace maplert { +static Tracer *tracer = nullptr; + +void SetTracer(Tracer *t) { + tracer = t; +} + +Tracer *GetTracer() { + return tracer; +} +}; // namespace maplert diff --git a/src/mrt/compiler-rt/src/yieldpoint.cpp b/src/mrt/compiler-rt/src/yieldpoint.cpp new file mode 100644 index 0000000000..1e7e58079d --- /dev/null +++ b/src/mrt/compiler-rt/src/yieldpoint.cpp @@ -0,0 +1,537 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "yieldpoint.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mm_config.h" +#include "mm_utils.h" +#include "mutator_list.h" +#include "chosen.h" +#ifdef __ANDROID__ +#include "collie.h" +#include "android/set_abort_message.h" +#endif + +#ifndef MRT_DEBUG_YIELDPOINT +#define MRT_DEBUG_YIELDPOINT __MRT_DEBUG_COND_TRUE +#endif + +namespace maplert { +// use system memory page size for polling page. +const int kPollingPageSize = 4096; + +// stop the world wait timeout, in milliseconds. +#if MRT_UNIT_TEST +// qemu is slow when doing unit test, so we use much longer timeout. +const int kStopTheWorldTimeoutMs = 20000; +#else +const int kStopTheWorldTimeoutMs = 2000; +#endif + +extern "C" { +// Implemented in assembly, see 'yieldpoint-asm.s'. +// DO NOT call this function directly. +void MRT_YieldpointStub(); + +// Yieldpoint handler will be called when yieldpoint is taken, +// the lastSp is the stack frame pointer of the caller, can +// be used as the begin stack pointer for stack scanning. +// DO NOT call this function directly, +// it will be called by MRT_YieldpointStub(). +void MRT_YieldpointHandler(void *lastSp); + +// Return the saved yieldpoint PC, +// only used by MRT_YieldpointStub(). +void *MRT_GetThreadYieldpointPC() { + return maple::tls::GetTLS(maple::tls::kSlotYieldPointPc); +} + +// Polling page for yeildpoint check. when polling page is set unreadable, +// and we try to load data from polling page, a SIGSEGV signal will raised, +// and function YieldpointSignalHandler() get called. +void *globalPollingPage = MAP_FAILED; + +// Most functions are simple wrappers of methods of global or thread-local +// objects defined in chosen.h/cpp. Read cinterface.cpp for more information. +// +// Gets the polling page address. +void *MRT_GetPollingPage() { + return globalPollingPage; +} + +// Call this if you want insert a yieldpoint in managed code. +void MRT_Yieldpoint() { +#if __aarch64__ + // when yieldpoint is taken, x30 will changed to the PC at the yieldpoint, + // so we tell compiler to save x30 for yieldpoints. + __asm__ volatile ("ldr wzr, [x19]" ::: "x30", "memory"); +#else + // this is the portable version of yieldpoint, but tiny slower. + __attribute__ ((unused)) + volatile register uintptr_t x = *reinterpret_cast(MRT_GetPollingPage()); +#endif +} +} + +// saferegion state instance, include pending mutator count and saferegion count. 
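+// Rough sketch of the handshake implemented below (added for clarity, not new logic):
+// StopTheWorld() sets pendingCount to the number of mutators and protects the polling page;
+// each mutator that hits a yieldpoint enters saferegion, which increments saferegionCount
+// and, in the slow path, futex-waits on pendingCount; once saferegionCount == pendingCount
+// the collector owns the stopped world, and StartTheWorld() clears pendingCount and
+// futex-wakes all blocked mutators.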
+SaferegionState SaferegionState::instance { 0 }; + +namespace { +// size of SaferegionState should be size_of(uint64_t) * 3 = 24 bytes. +constexpr size_t kSaferegionStateSize = 24; +static_assert(sizeof(SaferegionState) == kSaferegionStateSize, "SaferegionState incorrect size"); + +std::atomic worldStopped = { false }; + +// mutex to ensure only one thread can doing stop-the-world. +std::recursive_mutex stwMutex; + +// Initializer, create polling page before main() function. +struct YieldpointInitializer { + YieldpointInitializer() { + globalPollingPage = ::mmap(nullptr, kPollingPageSize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + __MRT_ASSERT(globalPollingPage != MAP_FAILED, "globalPollingPage != MAP_FAILED"); + LOG2FILE(kLogTypeMix) << "polling_page: " << globalPollingPage << std::endl; + } +} yieldpointInitializer; +} + +// Mark the polling page as unreadable +static inline void SetPollingPageUnreadable() { + if (::mprotect(globalPollingPage, kPollingPageSize, PROT_NONE) != 0) { + LOG(FATAL) << "Could not disable polling page. errno=" << errno; + } +} + +// Mark the polling page as readable +static inline void SetPollingPageReadable() { + if (::mprotect(globalPollingPage, kPollingPageSize, PROT_READ) != 0) { + LOG(FATAL) << "Could not enable polling page. errno=" << errno; + } +} + +void DumpMutatorsListInfo(bool isFatal) { + std::atomic_thread_fence(std::memory_order_acquire); + std::stringstream notInSaferegionTids; + size_t visitedCount = 0; + size_t visitedSaferegion = 0; + int firstNotStoppedTid = -1; + LOG(ERROR) << "MutatorList size : " << MutatorList::Instance().Size() << maple::endl; + MutatorList::Instance().VisitMutators( + [¬InSaferegionTids, &visitedCount, &visitedSaferegion, &firstNotStoppedTid](const Mutator *mut) { + mut->DumpRaw(); + if (!mut->InSaferegion()) { + if (firstNotStoppedTid == -1) { + firstNotStoppedTid = static_cast(mut->GetTid()); + } + notInSaferegionTids << mut->GetTid() << " "; + } else { + ++visitedSaferegion; + } + ++visitedCount; + }); + + SaferegionState state = SaferegionState::Load(); + std::stringstream dumpSaferegionInfo; + dumpSaferegionInfo << " total: " << state.asStruct.pendingCount << + ", current: " << state.asStruct.saferegionCount << + ", visited: " << visitedSaferegion << "/" << visitedCount << + ", not stopped: " << notInSaferegionTids.str(); +#if MRT_UNIT_TEST + // for unit-test only. + LOG(ERROR) << "STW Timeout!" << dumpSaferegionInfo.str() << maple::endl; +#endif + if (isFatal) { +#ifdef __ANDROID__ + if (firstNotStoppedTid != -1) { + std::stringstream errorMessage; + errorMessage << "SaferegionState info:" << dumpSaferegionInfo.str() << maple::endl; + android_set_abort_message(errorMessage.str().c_str()); + int ret = tgkill(getpid(), firstNotStoppedTid, SIGABRT); + if (ret == 0) { + // wait abort signal kill + // abort success, so no need fatal again + constexpr int waitAbortSeconds = 3; + sleep(waitAbortSeconds); + return; + } + } +#endif + LOG(FATAL) << "SaferegionState info:" << dumpSaferegionInfo.str() << maple::endl; + } else { + LOG(ERROR) << "SaferegionState info:" << dumpSaferegionInfo.str() << maple::endl; + } +} + +// init after fork in child. +void YieldpointInitAfterFork() { + // check state after fork in child process. 
+ SaferegionState state = SaferegionState::Load(); + Mutator &mutator = TLMutator(); + bool expected = (MutatorList::Instance().Size() == 1 && state.asStruct.pendingCount == 0 && + state.asStruct.saferegionCount == 1 && mutator.IsActive() && mutator.InSaferegion()); + if (UNLIKELY(!expected)) { + LOG(FATAL) << "Illegal state after fork!" << + " MutatorList:" << MutatorList::Instance().Size() << + " SaferegionState:" << state.asStruct.pendingCount << '/' << state.asStruct.saferegionCount << + " Mutator:" << &mutator << '/' << mutator.IsActive() << '/' << mutator.InSaferegion() << + maple::endl; + } + + // init current mutator after fork in child. + mutator.InitAfterFork(); +} + +// called from registered signal handler, see libjavaeh.cpp. +// uc context of the yieldpoint. +bool YieldpointSignalHandler(int sig, siginfo_t *info, ucontext_t *uc) { + // check if the signal is triggered by yieldpoint. + if (sig != SIGSEGV || info == nullptr || uc == nullptr || info->si_addr != globalPollingPage) { + // not a yieldpoint signal. + return false; + } + + // do not enter yieldpoint if already in saferegion. + if (UNLIKELY(TLMutator().InSaferegion())) { + return true; + } + +#if defined(__aarch64__) + // x29 as FP register. + constexpr int fpRegisterNum = 29; + + // logging for debug + LOG2FILE(kLogTypeMix) << "signal handler, pc: " << uc->uc_mcontext.pc << + ", fp: " << uc->uc_mcontext.regs[fpRegisterNum] << std::endl; + + // Save yieldpoint PC to thread local storage. + // MRT_YieldpointStub() use it as the return address. + StoreTLS(reinterpret_cast(static_cast(uc->uc_mcontext.pc)), maple::tls::kSlotYieldPointPc); + + // Redirect yieldpoint to the slow path: call MRT_YieldpointStub(). + uc->uc_mcontext.pc = reinterpret_cast<__u64>(MRT_YieldpointStub); +#elif defined(__arm__) + StoreTLS(reinterpret_cast(static_cast(uc->uc_mcontext.arm_pc)), maple::tls::kSlotYieldPointPc); + uc->uc_mcontext.arm_pc = reinterpret_cast(MRT_YieldpointStub); +#endif + + // tell caller that the yieldpoint signal is handled. + return true; +} + +// Initialize yieldpoint for mutator. +void InitYieldpoint(Mutator &mutator) { + __MRT_ASSERT(!mutator.IsActive(), "InitYieldpoint on active mutator."); + + // add mutator to mutator list. if the world is stopped, + // the AddMutator() will block until the world is started again. + MutatorList::Instance().AddMutator(mutator); + + // let the mutator enter saferegion after mutator started. + // mutator will leave saferegion when managed code is called. + if (UNLIKELY(!mutator.EnterSaferegion(false))) { + // ensure we are not in saferegion before InitYieldpoint(). + LOG(FATAL) << "InitYieldpoint() invalid saferegion state. mutator: " << &mutator << maple::endl; + } +} + +// Finalize yieldpoint for mutator. +void FiniYieldpoint(Mutator &mutator) { + __MRT_ASSERT(mutator.IsActive(), "FiniYieldpoint on inactive mutator."); + + // mutator is about to exit, all reference holding on stack will released. + // We clear managed stack pointers so that stack scanner skip this mutator. + mutator.ClearStackInfo(); + + // enter saferegion before remove mutator from list, + // because RemoveMutator() may block current thread. + if (mutator.EnterSaferegion(false)) { + LOG(FATAL) << "FiniYieldpoint() from nosafe-region. mutator: " << &mutator << maple::endl; + } + + // remove mutator from list. if the world is stopped, + // the RemoveMutator() will block util the world is started again. 
+ MutatorList::Instance().RemoveMutator(mutator, [](Mutator *mut) { + // Leave saferegion after mutator removed from list. + // this will not interrupted by GC because we hold mutator list lock. + if (UNLIKELY(!mut->LeaveSaferegion())) { + // ensure we are in saferegion before RemoveMutator(). + LOG(FATAL) << "FiniYieldpoint() invalid saferegion state. mutator: " << mut << maple::endl; + } + + // clean up mutator. + mut->Fini(); + }); +} + +void MRT_YieldpointHandler(void *lastSp) { + // logging for debug + LOG2FILE(kLogTypeMix) << "MRT_YieldpointHandler, last sp: " << lastSp << + ", prev fp: " << *(reinterpret_cast(lastSp)) << + ", prev lr: " << *(reinterpret_cast((reinterpret_cast(lastSp)) + sizeof(void*))) << std::endl; + + // current mutator. + Mutator &mutator = TLMutator(); + +#if __MRT_DEBUG + // do nothing if mutator already in saferegion. + if (UNLIKELY(mutator.InSaferegion())) { + LOG(FATAL) << "Incorrect saferegion state at yieldpoint!" << maple::endl; + } +#endif + + // save the last stack pointer. + mutator.SaveStackEnd(lastSp); + + // let mutator enter saferegion. + mutator.DoEnterSaferegion(); + + // block current thread before leave saferegion. + mutator.DoLeaveSaferegion(); + LOG2FILE(kLogTypeMix) << "MRT_YieldpointHandler, thread restarted." << std::endl; +} + +extern "C" bool MRT_EnterSaferegion(bool rememberLastJavaFrame) { + return TLMutator().EnterSaferegion(rememberLastJavaFrame); +} + +extern "C" bool MRT_LeaveSaferegion() { + return TLMutator().LeaveSaferegion(); +} + +// if this is the last mutator entering saferegion, wakeup StopTheWorld(). +void EnterSaferegionSlowPath() { + int *saferegionCountAddr = SaferegionState::SaferegionCountAddr(); + (void)maple::futex(saferegionCountAddr, FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0); +} + +void StackScanBarrierSlowPath() { + Collector::Instance().StackScanBarrierInMutator(); +} + +// LeaveSaferegionSlowPath is called if pendingCount is set when we leaving saferegion. +// this means when the world is stopping or stoppped, we should block until the world start. +void LeaveSaferegionSlowPath() { + for (;;) { + // we are not leave saferegion now, so the saferegion count should + // be increased to indicate that current mutator still stay in saferegion. + SaferegionState state = SaferegionState::IncSaferegionCount(); + if (UNLIKELY(state.asStruct.saferegionCount + 1 == state.asStruct.pendingCount)) { + // if this is the last mutator entering saferegion, wakeup StopTheWorld(). + int *saferegionCountAddr = SaferegionState::SaferegionCountAddr(); + (void)maple::futex(saferegionCountAddr, FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0); + } + // wait until pendingCount changed to 0. + int curNum = static_cast(state.asStruct.pendingCount); + if (curNum > 0) { // here curNum might already been set to 0, does so to avoid lost wake-ups + int *pendingCountAddr = SaferegionState::PendingCountAddr(); + if (UNLIKELY(maple::futex(pendingCountAddr, FUTEX_WAIT, curNum, nullptr, nullptr, 0) != 0)) { + LOG(ERROR) << "futex wait failed, " << errno << maple::endl; + } + } + // dec saferegion count, and load pendingCount. + state = SaferegionState::DecSaferegionCount(); + // usually the loaded pendingCount here should be 0, + // but we need to check it. + if (LIKELY(state.asStruct.pendingCount == 0)) { + // leave saferegion if pending count is 0, otherwise try again. 
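+      // Added note: the loop above covers the race with StopTheWorld(). A mutator that sees
+      // pendingCount > 0 re-enters saferegion (IncSaferegionCount), wakes the collector if it
+      // was the last one expected, futex-waits until StartTheWorld() resets pendingCount to 0,
+      // and only then decrements saferegionCount and retries; reaching this point means no
+      // stop-the-world is pending, so it is safe to leave.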
+ return; + } + } +} + +static void InitTimeSpec(long milliSecond, timespec &timeSpec) { + timeSpec.tv_sec = milliSecond / static_cast(maple::kTimeFactor); + timeSpec.tv_nsec = 0; +} + +static void WaitForAllMutatorsStopped() { + // wait timeout + struct timespec timeout; + InitTimeSpec(kStopTheWorldTimeoutMs, timeout); + int timeoutTimes = 0; + // wait until all mutators enter saferegion. + for (;;) { + // current saferegion state. + SaferegionState state = SaferegionState::Load(); + if (UNLIKELY(state.asStruct.pendingCount < state.asStruct.saferegionCount)) { + MutatorList::Instance().VisitMutators([](const Mutator *mut) { + mut->DumpRaw(); + }); + LOG(FATAL) << "Incorrect SaferegionState! pendingCount: " << state.asStruct.pendingCount << + ", saferegionCount: " << state.asStruct.saferegionCount << + ", nMutators: " << MutatorList::Instance().Size() << maple::endl; + } + +#if MRT_UNIT_TEST + // Use sched_yield() to let mutator have a chance to modify + // the SaferegionState before we check it. + (void)sched_yield(); +#endif + + // stop wait if all mutators stopped (aka. in saferegion). + if (UNLIKELY(state.AllMutatorStopped())) { + return; + } + + // wait for saferegionCount changed. + int curNum = static_cast(state.asStruct.saferegionCount); + int *saferegionCountAddr = SaferegionState::SaferegionCountAddr(); + if (UNLIKELY(maple::futex(saferegionCountAddr, FUTEX_WAIT, curNum, &timeout, nullptr, 0) != 0)) { + if (errno == ETIMEDOUT) { + timeoutTimes++; +#if MRT_DEBUG_YIELDPOINT + DumpMutatorsListInfo(timeoutTimes > 1); +#else + constexpr int maxTimeoutTimes = 30; + DumpMutatorsListInfo(timeoutTimes == maxTimeoutTimes); +#endif + } else if ((errno != EAGAIN) && (errno != EINTR)) { + LOG(ERROR) << "futex wait failed, " << errno << maple::endl; + } + } + } +} + +static bool saferegionStateChanged = false; + +void StopTheWorld() { + constexpr uint64_t waitLockInterval = 5000; // 5us + constexpr uint64_t nanoPerSecond = 1000000000; // 1sec equals 10^9ns + constexpr uint64_t waitLockTimeout = 30; // seconds + bool saferegionEntered = false; + + // ensure an active mutator entered saferegion before + // stop-the-world (aka. stop all other mutators). + Mutator *mutator = TLMutatorPtr(); + if (mutator != nullptr && mutator->IsActive()) { + saferegionEntered = mutator->EnterSaferegion(true); + } + + // block if another thread is holding the stwMutex. + // this prevent multi-thread doing stop-the-world concurrently. 
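+  // Usage sketch (illustrative only, based on how profile.cpp uses the scoped helper):
+  //   {
+  //     ScopedStopTheWorld stw;  // stops the world on entry, restarts it on scope exit
+  //     ... walk the heap or mutator stacks while all mutators sit in saferegion ...
+  //   }
+  // Direct StopTheWorld()/StartTheWorld() calls must likewise always be balanced.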
+ stwMutex.lock(); + + maple::Locks::threadListLock->Dump(LOG_STREAM(DEBUGY)); + uint64_t start = timeutils::NanoSeconds(); + bool threadListLockAcquired = maple::GCRootsVisitor::TryAcquireThreadListLock();; + while (!threadListLockAcquired) { + timeutils::SleepForNano(waitLockInterval); + threadListLockAcquired = maple::GCRootsVisitor::TryAcquireThreadListLock(); + uint64_t now = timeutils::NanoSeconds(); + if (threadListLockAcquired == false && ((now - start) / nanoPerSecond) > waitLockTimeout) { + maple::Locks::threadListLock->Dump(LOG_STREAM(ERROR)); + LOG(FATAL) << " wait thread list lock out of time " << maple::endl; + } + } + + start = timeutils::NanoSeconds(); + bool mutatorListLockAcquired = MutatorList::Instance().TryLock(); + while (!mutatorListLockAcquired) { + timeutils::SleepForNano(waitLockInterval); + mutatorListLockAcquired = MutatorList::Instance().TryLock(); + uint64_t now = timeutils::NanoSeconds(); + if (mutatorListLockAcquired == false && ((now - start) / nanoPerSecond) > waitLockTimeout) { + LOG(FATAL) << " wait mutator list lock out of time " << MutatorList::Instance().LockOwner() << maple::endl; + } + } + // if a mutator saferegion state changed (entered saferegion from outside), + // we should restore it after the mutator called StartTheWorld(). + saferegionStateChanged = saferegionEntered; + // number of mutators. it will not be changed since + // we hold the lock of mutator list when the world is stopped. + size_t nMutators = MutatorList::Instance().Size(); + // do not wait if no mutator in the list. + if (UNLIKELY(nMutators == 0)) { + worldStopped.store(true, std::memory_order_release); + return; + } + // set n_muators as pendingCount. + (void)SaferegionState::SetPendingCount(static_cast(nMutators)); + // trigger yieldpoints. + SetPollingPageUnreadable(); + WaitForAllMutatorsStopped(); + +#ifdef __ANDROID__ + mplCollie.StartSTW(); +#endif + // the world is stopped. + worldStopped.store(true, std::memory_order_release); +} + +void StartTheWorld() { +#ifdef __ANDROID__ + mplCollie.EndSTW(); +#endif + bool shouldLeaveSaferegion = saferegionStateChanged; + worldStopped.store(false, std::memory_order_release); + // restore polling page. + SetPollingPageReadable(); + + // clear pending count. + (void)SaferegionState::SetPendingCount(0); + + // wakeup all mutators which blocking on pendingCount futex. + int *uaddr = SaferegionState::PendingCountAddr(); + (void)maple::futex(uaddr, FUTEX_WAKE, INT_MAX, nullptr, nullptr, 0); + + // release mutator list lock. + MutatorList::Instance().Unlock(); + + LOG2FILE(kLogTypeMix) << "Releasing threadListLock..." << std::endl; + maple::GCRootsVisitor::ReleaseThreadListLock(); + LOG2FILE(kLogTypeMix) << "Released threadListLock." << std::endl; + + // unlock stwMutex to allow other thread stop-the-world. + stwMutex.unlock(); + + // restore saferegion state (leave saferegion) if + // the state is changed when mutator calls StopTheWorld(). + Mutator *mutator = TLMutatorPtr(); + if (mutator != nullptr && mutator->IsActive() && shouldLeaveSaferegion) { + (void)mutator->LeaveSaferegion(); + } +} + +bool WorldStopped() { + return worldStopped.load(std::memory_order_acquire); +} + +void LockStopTheWorld() { + stwMutex.lock(); +} + +void UnlockStopTheWorld() { + stwMutex.unlock(); +} + +void SaferegionState::CheckMagicFailed() { + LOG(FATAL) << "SaferegionState Magic modified!" 
<< std::hex << + " magic1:" << magic1 << + " magic2:" << magic2 << std::dec << + " &magic1:" << &magic1 << + " &state:" << &asUint64 << + " &magic2:" << &magic2 << maple::endl; +} +} // namespace maplert -- Gitee From 2b821cb62c7f7475d8d310fc1b5daa450748b8b5 Mon Sep 17 00:00:00 2001 From: binaryfz Date: Tue, 1 Dec 2020 10:34:52 +0800 Subject: [PATCH 4/9] add maplert --- src/mrt/maplert/BUILD.gn | 116 ++ src/mrt/maplert/include/argvalue.h | 519 ++++++ src/mrt/maplert/include/deferredaccess.h | 74 + src/mrt/maplert/include/fieldmeta.h | 142 ++ src/mrt/maplert/include/fieldmeta_inline.h | 347 ++++ src/mrt/maplert/include/itab_util.h | 33 + src/mrt/maplert/include/literalstrname.h | 52 + src/mrt/maplert/include/marray.h | 73 + src/mrt/maplert/include/marray_inline.h | 119 ++ src/mrt/maplert/include/mclass.h | 215 +++ src/mrt/maplert/include/mclass_inline.h | 821 ++++++++ src/mrt/maplert/include/methodmeta.h | 272 +++ src/mrt/maplert/include/methodmeta_inline.h | 447 +++++ src/mrt/maplert/include/mfield.h | 65 + src/mrt/maplert/include/mfield_inline.h | 73 + src/mrt/maplert/include/mmethod.h | 70 + src/mrt/maplert/include/mmethod_inline.h | 65 + src/mrt/maplert/include/mobject.h | 95 + src/mrt/maplert/include/mobject_inline.h | 174 ++ src/mrt/maplert/include/modifier.h | 287 +++ src/mrt/maplert/include/mrt_annotation.h | 86 + .../maplert/include/mrt_annotation_parser.h | 287 +++ .../include/mrt_annotation_parser_inline.h | 76 + src/mrt/maplert/include/mrt_array.h | 31 + src/mrt/maplert/include/mrt_class_init.h | 36 + src/mrt/maplert/include/mrt_classloader.h | 163 ++ src/mrt/maplert/include/mrt_cyclequeue.h | 116 ++ src/mrt/maplert/include/mrt_handlecommon.h | 324 ++++ src/mrt/maplert/include/mrt_handleutil.h | 62 + src/mrt/maplert/include/mrt_linker.h | 51 + src/mrt/maplert/include/mrt_methodhandle.h | 334 ++++ .../maplert/include/mrt_methodhandle_mpl.h | 88 + src/mrt/maplert/include/mrt_object.h | 32 + src/mrt/maplert/include/mrt_primitive_util.h | 193 ++ src/mrt/maplert/include/mrt_profile.h | 39 + src/mrt/maplert/include/mrt_reflection.h | 137 ++ .../maplert/include/mrt_reflection_class.h | 79 + .../include/mrt_reflection_constructor.h | 20 + .../include/mrt_reflection_executable.h | 23 + .../maplert/include/mrt_reflection_field.h | 50 + .../maplert/include/mrt_reflection_method.h | 26 + .../maplert/include/mrt_reflection_proxy.h | 21 + src/mrt/maplert/include/mrt_string.h | 170 ++ src/mrt/maplert/include/mrt_util.h | 35 + src/mrt/maplert/include/mrt_well_known.h | 865 +++++++++ src/mrt/maplert/include/mstring.h | 74 + src/mrt/maplert/include/mstring_inline.h | 215 +++ src/mrt/maplert/include/reflection_list.def | 30 + src/mrt/maplert/include/white_list.def | 496 +++++ src/mrt/maplert/linker/mapleArm32lld.so.lds | 438 +++++ src/mrt/maplert/linker/mapleld.so.lds | 409 ++++ src/mrt/maplert/linker/maplelld.so.lds | 450 +++++ .../maplert/linker/unified.macros.whitelist | 40 + src/mrt/maplert/public-headers/java2c_rule.h | 42 + src/mrt/maplert/public-headers/jsan.h | 50 + .../maplert/public-headers/mrt_api_common.h | 20 + .../maplert/public-headers/mrt_array_api.h | 70 + .../maplert/public-headers/mrt_class_api.h | 57 + .../public-headers/mrt_classloader_api.h | 40 + .../maplert/public-headers/mrt_compiler_api.h | 263 +++ .../public-headers/mrt_exception_api.h | 80 + .../maplert/public-headers/mrt_fields_api.h | 367 ++++ src/mrt/maplert/public-headers/mrt_libs_api.h | 31 + .../maplert/public-headers/mrt_linker_api.h | 31 + .../public-headers/mrt_mm_config_common.h | 40 + 
.../maplert/public-headers/mrt_monitor_api.h | 36 + .../maplert/public-headers/mrt_naming_api.h | 27 + .../maplert/public-headers/mrt_poisonstack.h | 31 + .../public-headers/mrt_primitive_api.h | 41 + .../maplert/public-headers/mrt_public_api.h | 32 + .../public-headers/mrt_reference_api.h | 224 +++ .../public-headers/mrt_reflection_api.h | 176 ++ .../maplert/public-headers/mrt_string_api.h | 46 + .../maplert/public-headers/mrt_thread_api.h | 29 + src/mrt/maplert/src/deferredaccess.cpp | 470 +++++ src/mrt/maplert/src/fieldmeta.cpp | 322 ++++ src/mrt/maplert/src/itab_util.cpp | 47 + src/mrt/maplert/src/java2c_rule.cpp | 22 + src/mrt/maplert/src/literalstrname.cpp | 65 + src/mrt/maplert/src/marray.cpp | 89 + src/mrt/maplert/src/mclass.cpp | 441 +++++ src/mrt/maplert/src/methodmeta.cpp | 872 +++++++++ src/mrt/maplert/src/mfield.cpp | 74 + src/mrt/maplert/src/mmethod.cpp | 108 ++ src/mrt/maplert/src/mobject.cpp | 94 + src/mrt/maplert/src/modifier.cpp | 56 + src/mrt/maplert/src/mrt_annotation.cpp | 563 ++++++ src/mrt/maplert/src/mrt_annotation_parser.cpp | 1445 +++++++++++++++ src/mrt/maplert/src/mrt_array.cpp | 330 ++++ src/mrt/maplert/src/mrt_class_init.cpp | 489 +++++ src/mrt/maplert/src/mrt_classloader.cpp | 916 +++++++++ src/mrt/maplert/src/mrt_handlecommon.cpp | 259 +++ src/mrt/maplert/src/mrt_handleutil.cpp | 490 +++++ src/mrt/maplert/src/mrt_linker.cpp | 72 + src/mrt/maplert/src/mrt_methodhandle.cpp | 950 ++++++++++ src/mrt/maplert/src/mrt_methodhandle_mpl.cpp | 766 ++++++++ src/mrt/maplert/src/mrt_module_init.c__ | 204 ++ src/mrt/maplert/src/mrt_object.cpp | 55 + src/mrt/maplert/src/mrt_poisonstack.cpp | 77 + src/mrt/maplert/src/mrt_preload_class.cpp | 51 + src/mrt/maplert/src/mrt_primitive_class.def | 232 +++ src/mrt/maplert/src/mrt_primitive_util.cpp | 317 ++++ src/mrt/maplert/src/mrt_profile.cpp | 596 ++++++ src/mrt/maplert/src/mrt_reflection_class.cpp | 1593 ++++++++++++++++ .../src/mrt_reflection_constructor.cpp | 95 + .../maplert/src/mrt_reflection_executable.cpp | 193 ++ src/mrt/maplert/src/mrt_reflection_field.cpp | 644 +++++++ src/mrt/maplert/src/mrt_reflection_method.cpp | 443 +++++ src/mrt/maplert/src/mrt_reflection_proxy.cpp | 592 ++++++ .../maplert/src/mrt_reflection_reference.cpp | 51 + .../src/mrt_reflection_stubfuncforproxy.def | 928 ++++++++++ src/mrt/maplert/src/mrt_string.cpp | 1646 +++++++++++++++++ src/mrt/maplert/src/mrt_util.cpp | 174 ++ src/mrt/maplert/src/mrt_well_known.cpp | 706 +++++++ src/mrt/maplert/src/mstring.cpp | 153 ++ 115 files changed, 28978 insertions(+) create mode 100644 src/mrt/maplert/BUILD.gn create mode 100644 src/mrt/maplert/include/argvalue.h create mode 100644 src/mrt/maplert/include/deferredaccess.h create mode 100644 src/mrt/maplert/include/fieldmeta.h create mode 100644 src/mrt/maplert/include/fieldmeta_inline.h create mode 100644 src/mrt/maplert/include/itab_util.h create mode 100644 src/mrt/maplert/include/literalstrname.h create mode 100644 src/mrt/maplert/include/marray.h create mode 100644 src/mrt/maplert/include/marray_inline.h create mode 100644 src/mrt/maplert/include/mclass.h create mode 100644 src/mrt/maplert/include/mclass_inline.h create mode 100644 src/mrt/maplert/include/methodmeta.h create mode 100644 src/mrt/maplert/include/methodmeta_inline.h create mode 100644 src/mrt/maplert/include/mfield.h create mode 100644 src/mrt/maplert/include/mfield_inline.h create mode 100644 src/mrt/maplert/include/mmethod.h create mode 100644 src/mrt/maplert/include/mmethod_inline.h create mode 100644 src/mrt/maplert/include/mobject.h create 
mode 100644 src/mrt/maplert/include/mobject_inline.h create mode 100644 src/mrt/maplert/include/modifier.h create mode 100644 src/mrt/maplert/include/mrt_annotation.h create mode 100644 src/mrt/maplert/include/mrt_annotation_parser.h create mode 100644 src/mrt/maplert/include/mrt_annotation_parser_inline.h create mode 100644 src/mrt/maplert/include/mrt_array.h create mode 100644 src/mrt/maplert/include/mrt_class_init.h create mode 100644 src/mrt/maplert/include/mrt_classloader.h create mode 100644 src/mrt/maplert/include/mrt_cyclequeue.h create mode 100644 src/mrt/maplert/include/mrt_handlecommon.h create mode 100644 src/mrt/maplert/include/mrt_handleutil.h create mode 100644 src/mrt/maplert/include/mrt_linker.h create mode 100644 src/mrt/maplert/include/mrt_methodhandle.h create mode 100644 src/mrt/maplert/include/mrt_methodhandle_mpl.h create mode 100644 src/mrt/maplert/include/mrt_object.h create mode 100644 src/mrt/maplert/include/mrt_primitive_util.h create mode 100644 src/mrt/maplert/include/mrt_profile.h create mode 100644 src/mrt/maplert/include/mrt_reflection.h create mode 100644 src/mrt/maplert/include/mrt_reflection_class.h create mode 100644 src/mrt/maplert/include/mrt_reflection_constructor.h create mode 100644 src/mrt/maplert/include/mrt_reflection_executable.h create mode 100644 src/mrt/maplert/include/mrt_reflection_field.h create mode 100644 src/mrt/maplert/include/mrt_reflection_method.h create mode 100644 src/mrt/maplert/include/mrt_reflection_proxy.h create mode 100644 src/mrt/maplert/include/mrt_string.h create mode 100644 src/mrt/maplert/include/mrt_util.h create mode 100644 src/mrt/maplert/include/mrt_well_known.h create mode 100644 src/mrt/maplert/include/mstring.h create mode 100644 src/mrt/maplert/include/mstring_inline.h create mode 100644 src/mrt/maplert/include/reflection_list.def create mode 100644 src/mrt/maplert/include/white_list.def create mode 100644 src/mrt/maplert/linker/mapleArm32lld.so.lds create mode 100644 src/mrt/maplert/linker/mapleld.so.lds create mode 100644 src/mrt/maplert/linker/maplelld.so.lds create mode 100644 src/mrt/maplert/linker/unified.macros.whitelist create mode 100644 src/mrt/maplert/public-headers/java2c_rule.h create mode 100644 src/mrt/maplert/public-headers/jsan.h create mode 100644 src/mrt/maplert/public-headers/mrt_api_common.h create mode 100644 src/mrt/maplert/public-headers/mrt_array_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_class_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_classloader_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_compiler_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_exception_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_fields_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_libs_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_linker_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_mm_config_common.h create mode 100644 src/mrt/maplert/public-headers/mrt_monitor_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_naming_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_poisonstack.h create mode 100644 src/mrt/maplert/public-headers/mrt_primitive_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_public_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_reference_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_reflection_api.h create mode 100644 src/mrt/maplert/public-headers/mrt_string_api.h create mode 100644 
src/mrt/maplert/public-headers/mrt_thread_api.h create mode 100644 src/mrt/maplert/src/deferredaccess.cpp create mode 100644 src/mrt/maplert/src/fieldmeta.cpp create mode 100644 src/mrt/maplert/src/itab_util.cpp create mode 100644 src/mrt/maplert/src/java2c_rule.cpp create mode 100644 src/mrt/maplert/src/literalstrname.cpp create mode 100644 src/mrt/maplert/src/marray.cpp create mode 100644 src/mrt/maplert/src/mclass.cpp create mode 100644 src/mrt/maplert/src/methodmeta.cpp create mode 100644 src/mrt/maplert/src/mfield.cpp create mode 100644 src/mrt/maplert/src/mmethod.cpp create mode 100644 src/mrt/maplert/src/mobject.cpp create mode 100644 src/mrt/maplert/src/modifier.cpp create mode 100644 src/mrt/maplert/src/mrt_annotation.cpp create mode 100644 src/mrt/maplert/src/mrt_annotation_parser.cpp create mode 100644 src/mrt/maplert/src/mrt_array.cpp create mode 100644 src/mrt/maplert/src/mrt_class_init.cpp create mode 100644 src/mrt/maplert/src/mrt_classloader.cpp create mode 100644 src/mrt/maplert/src/mrt_handlecommon.cpp create mode 100644 src/mrt/maplert/src/mrt_handleutil.cpp create mode 100644 src/mrt/maplert/src/mrt_linker.cpp create mode 100644 src/mrt/maplert/src/mrt_methodhandle.cpp create mode 100644 src/mrt/maplert/src/mrt_methodhandle_mpl.cpp create mode 100644 src/mrt/maplert/src/mrt_module_init.c__ create mode 100644 src/mrt/maplert/src/mrt_object.cpp create mode 100644 src/mrt/maplert/src/mrt_poisonstack.cpp create mode 100644 src/mrt/maplert/src/mrt_preload_class.cpp create mode 100644 src/mrt/maplert/src/mrt_primitive_class.def create mode 100644 src/mrt/maplert/src/mrt_primitive_util.cpp create mode 100644 src/mrt/maplert/src/mrt_profile.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_class.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_constructor.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_executable.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_field.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_method.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_proxy.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_reference.cpp create mode 100644 src/mrt/maplert/src/mrt_reflection_stubfuncforproxy.def create mode 100644 src/mrt/maplert/src/mrt_string.cpp create mode 100644 src/mrt/maplert/src/mrt_util.cpp create mode 100644 src/mrt/maplert/src/mrt_well_known.cpp create mode 100644 src/mrt/maplert/src/mstring.cpp diff --git a/src/mrt/maplert/BUILD.gn b/src/mrt/maplert/BUILD.gn new file mode 100644 index 0000000000..653336473f --- /dev/null +++ b/src/mrt/maplert/BUILD.gn @@ -0,0 +1,116 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +sources_common = [ + "src/mrt_array.cpp", + "src/mrt_class_init.cpp", + "src/mrt_object.cpp", + "src/mrt_reflection_class.cpp", + "src/mrt_reflection_method.cpp", + "src/mrt_reflection_field.cpp", + "src/mrt_reflection_constructor.cpp", + "src/modifier.cpp", + "src/mrt_reflection_proxy.cpp", + "src/mrt_reflection_executable.cpp", + "src/mrt_annotation.cpp", + "src/mrt_annotation_parser.cpp", + "src/mrt_string.cpp", + "src/mrt_util.cpp", + "src/mrt_preload_class.cpp", + "src/mrt_linker.cpp", + "src/mrt_classloader.cpp", + "src/mrt_reflection_reference.cpp", + "src/literalstrname.cpp", + "src/java2c_rule.cpp", + "src/mrt_poisonstack.cpp", + "src/mrt_methodhandle.cpp", + "src/mrt_methodhandle_mpl.cpp", + "src/mrt_handlecommon.cpp", + "src/mrt_handleutil.cpp", + "src/mrt_profile.cpp", + "src/mrt_well_known.cpp", + "src/mrt_primitive_util.cpp", + "src/itab_util.cpp", + "src/mobject.cpp", + "src/marray.cpp", + "src/mstring.cpp", + "src/mclass.cpp", + "src/mmethod.cpp", + "src/mfield.cpp", + "src/fieldmeta.cpp", + "src/methodmeta.cpp", + "src/deferredaccess.cpp", +] + +include_common = [ + "${MAPLE_MRT_ROOT}/maplert/include", + "${THIRD_PARTY_ROOT}/libnativehelper/include_jni", + "${MAPLEALL_ROOT}/huawei_secure_c/include/", + "${MAPLE_MRT_ROOT}/compiler-rt/include", + "${MAPLE_MRT_ROOT}/compiler-rt/public-headers", + "${MAPLE_MRT_ROOT}/maplert/public-headers", + "${MAPLE_MRT_ROOT}/libmrtbase/include", + "${MAPLE_MRT_ROOT}/libmrtbase/include/linux", + "${MAPLE_MRT_ROOT}/interpreter/zterp", + "${MAPLE_MRT_ROOT}/dexinterface", + "${MAPLE_MRT_ROOT}/verifier/include", + "${MAPLE_MRT_ROOT}", +] + +cflags = [] +cflags_cc = [] + +cflags += [ + "-DPLATFORM_SDK_VERSION=27", + "-DJNIDEBUG=0", + "-DTRACE_LEVEL=3", +] + +cflags_cc += [ + "-fvisibility=hidden", + "-std=c++14", + "-nostdlibinc", + "-march=armv8-a", +] +if (OPS_ANDROID == 1) { + cflags_cc -= [ "-nostdlibinc" ] + include_common += [ + "${ANDROID_ROOT}/system/core/libcutils/include/", + "${MAPLE_ROOT}/android/bionic/libc/private", + ] +} + +configs = [ "${MAPLE_MRT_ROOT}:mrt_cxx_flags" ] + +static_library("libmaplert") { + sources = sources_common + include_dirs = include_common +} + +copy("maplert_module_init_cpp") { + sources = [ + "src/mrt_module_init.c__", + ] + outputs = [ + "${root_out_dir}/mrt_module_init.cpp", + ] +} + +static_library("libmrt_module_init") { + sources = get_target_outputs(":maplert_module_init_cpp") + include_dirs = include_common + deps = [ + ":maplert_module_init_cpp", + ] +} diff --git a/src/mrt/maplert/include/argvalue.h b/src/mrt/maplert/include/argvalue.h new file mode 100644 index 0000000000..2473ff5e46 --- /dev/null +++ b/src/mrt/maplert/include/argvalue.h @@ -0,0 +1,519 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_MAPLERT_INCLUDE_ARGVALUE_H_ +#define MRT_MAPLERT_INCLUDE_ARGVALUE_H_ + +#include "panic.h" + +namespace maplert { +namespace calljavastubconst{ +enum ArgsType { + kVaArg, + kJvalue, + kJArray, + kNone // no parameter +}; + +#if defined(__aarch64__) +static constexpr uint32_t kAArch64XregSize = 8; +static constexpr uint32_t kAArch64DregSize = 8; +static constexpr uint32_t kAArch64XregAndDregSize = kAArch64XregSize + kAArch64DregSize; +static constexpr uint32_t kRegArgsSize = kAArch64XregAndDregSize; +#elif defined(__arm__) +static constexpr uint32_t kArmRregSize = 4; +#if defined(__ARM_PCS_VFP) +static constexpr uint32_t kArmDregSize = 8; +#else +static constexpr uint32_t kArmDregSize = 0; +#endif +static constexpr uint32_t kRegArgsSize = kArmRregSize + kArmDregSize; +#else +#error "Unimplemented Architecture" +#endif +} // namespace calljavastubconst + +class BaseArgValue { + public: + explicit BaseArgValue(const uintptr_t methodArgs) + : methodArgs(methodArgs), values(regArgValues), valuesSize(calljavastubconst::kRegArgsSize), + gregIdx(0), fregIdx(0), stackIdx(0) { + for (uint32_t i = 0; i < calljavastubconst::kRegArgsSize; i++) { + regArgValues[i].j = 0L; // clean up regArgValues + } + } + + virtual ~BaseArgValue() { + if (values != regArgValues) { + delete []values; + } + } + + virtual void AddReference(MObject *ref) = 0; + virtual void AddInt32(int32_t value) = 0; + virtual void AddInt64(int64_t value) = 0; + virtual void AddFloat(float value) = 0; + virtual void AddDouble(double value) = 0; + virtual uint32_t GetFRegSize() const = 0; + virtual uint32_t GetStackSize() const = 0; + virtual MObject *GetReceiver() const = 0; + + jvalue *GetData() { + return &values[0]; + } + + uintptr_t GetMethodArgs() const { + return methodArgs; + } + + uint32_t GetGIdx() const noexcept { + return gregIdx; + } + + protected: + void Resize(uint32_t newSize) { + if (newSize < valuesSize) { + return; + } + + jvalue *oldValues = values; + uint32_t oldValuesSize = valuesSize; + valuesSize = newSize + kIncSize; + values = new jvalue[valuesSize]; + std::copy(oldValues, oldValues + oldValuesSize, values); + if (oldValues != regArgValues) { + delete []oldValues; // free dynamic array. + } + } + + constexpr static uint32_t kIncSize = 8; + const uintptr_t methodArgs; + jvalue regArgValues[calljavastubconst::kRegArgsSize]; + jvalue *values; + uint32_t valuesSize; + uint32_t gregIdx; + uint32_t fregIdx; + uint32_t stackIdx; +}; + +class ArgValueInterp : public BaseArgValue { + public: + explicit ArgValueInterp(const uintptr_t methodArgs) : BaseArgValue(methodArgs) {} + virtual ~ArgValueInterp() = default; + + virtual void AddReference(MObject *ref) override { + AddInt64(reinterpret_cast(ref)); + } + + virtual void AddInt32(int32_t value) override { + AddInt64(value); + } + + virtual void AddInt64(int64_t value) override { + Resize(gregIdx + 1); + values[gregIdx++].j = value; + } + + virtual void AddFloat(float value) override { + Resize(gregIdx + 1); + values[gregIdx++].f = value; + } + + virtual void AddDouble(double value) override { + Resize(gregIdx + 1); + values[gregIdx++].d = value; + } + + virtual uint32_t GetFRegSize() const override { + LOG(FATAL) << "ArgValueInterp.GetFREgSize: Should not reach here!" << maple::endl; + return 0; + } + + virtual uint32_t GetStackSize() const override { + LOG(FATAL) << "ArgValueInterp.GetStackSize: Should not reach here!" 
<< maple::endl; + return 0; + } + + virtual MObject *GetReceiver() const override { + return reinterpret_cast(reinterpret_cast(values[0].l)); + } +}; + +#if defined(__aarch64__) +class ArgValue : public BaseArgValue { + public: + explicit ArgValue(const uintptr_t methodArgs) : BaseArgValue(methodArgs) { + fregIdx = calljavastubconst::kAArch64XregSize; + stackIdx = calljavastubconst::kAArch64XregAndDregSize; + } + virtual ~ArgValue() = default; + + virtual void AddReference(MObject *ref) override { + AddInt64(reinterpret_cast(ref)); + } + + virtual void AddInt32(int32_t value) override { + AddInt64(value); + } + + virtual void AddInt64(int64_t value) override { + if (gregIdx < calljavastubconst::kAArch64XregSize) { + Resize(gregIdx + 1); + values[gregIdx++].j = value; + } else { + Resize(stackIdx + 1); + values[stackIdx++].j = value; + } + } + + virtual void AddFloat(float value) override { + double arg = 0; + *reinterpret_cast(&arg) = value; + AddDouble(arg); + } + + virtual void AddDouble(double value) override { + if (fregIdx < calljavastubconst::kAArch64XregAndDregSize) { + Resize(fregIdx + 1); + values[fregIdx++].d = value; + } else { + Resize(stackIdx + 1); + values[stackIdx++].d = value; + } + } + + virtual uint32_t GetFRegSize() const override { + return fregIdx - calljavastubconst::kAArch64XregSize; + } + + virtual uint32_t GetStackSize() const override { + return static_cast((stackIdx - calljavastubconst::kAArch64XregAndDregSize) * sizeof(jvalue)); + } + + virtual MObject *GetReceiver() const override { + return reinterpret_cast(reinterpret_cast(values[0].l)); + } + + // used in annotation, confirmed size and not stored in stack + inline MObject *GetReferenceFromGidx(uint32_t idx) const noexcept { + return reinterpret_cast(values[idx].l); + } +}; + + +// we restructure parameter in stub, copy caller's caller parameter to caller stack +class DecodeStackArgs { + public: + explicit DecodeStackArgs(intptr_t *stack) : stackMemery(stack), values(kIncSize), gregIdx(0), valueIdx(0) { + fregIdx = calljavastubconst::kAArch64XregSize; + stackIdx = calljavastubconst::kAArch64XregAndDregSize; + } + ~DecodeStackArgs() { + stackMemery = nullptr; + } + + void DecodeReference() { + DecodeInt64(); + } + + void DecodeInt32() { + Resize(valueIdx); + if (gregIdx < calljavastubconst::kAArch64XregSize) { + values[valueIdx].i = static_cast(stackMemery[gregIdx]); + gregIdx++; + } else { + values[valueIdx].i = static_cast(stackMemery[stackIdx]); + stackIdx++; + } + valueIdx++; + } + + void DecodeInt64() { + Resize(valueIdx); + if (gregIdx < calljavastubconst::kAArch64XregSize) { + values[valueIdx].j = stackMemery[gregIdx]; + gregIdx++; + } else { + values[valueIdx].j = stackMemery[stackIdx]; + stackIdx++; + } + valueIdx++; + } + + void DecodeFloat() { + DecodeDouble(); + } + + void DecodeDouble() { + Resize(valueIdx); + if (fregIdx < calljavastubconst::kAArch64XregAndDregSize) { + values[valueIdx].d = *(reinterpret_cast(&stackMemery[fregIdx])); + fregIdx++; + } else { + values[valueIdx].d = *(reinterpret_cast(&stackMemery[stackIdx])); + stackIdx++; + } + valueIdx++; + } + + jvalue *GetData() { + return &values[0]; + } + + protected: + void Resize(uint32_t newSize) { + if (newSize < values.size()) { + return; + } + values.resize(newSize + kIncSize); + } + + constexpr static uint32_t kIncSize = 8; + intptr_t *stackMemery; + std::vector values; + uint32_t gregIdx; + uint32_t fregIdx; + uint32_t stackIdx; + uint32_t valueIdx; +}; +#elif defined(__arm__) +class ArgValue : public BaseArgValue { + public: + 
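+  // Added note (illustrative): on 32-bit ARM each jvalue spans two 32-bit slots, so the
+  // indices below count register/stack slots and values[valueIdx >> 1] selects the jvalue
+  // holding that slot. For example, AddInt32(a) followed by AddInt64(b) places a in the low
+  // half of values[0] (r0) and, after aligning to an even slot, b in values[1] (r2/r3).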
explicit ArgValue(const uintptr_t methodArgs) : BaseArgValue(methodArgs) { + fregIdx = calljavastubconst::kArmRregSize; + stackIdx = calljavastubconst::kArmRregSize + calljavastubconst::kArmDregSize * kWideSize; + } + virtual ~ArgValue() = default; + + virtual void AddReference(MObject *ref) override { + AddInt32(static_cast(reinterpret_cast(ref))); + } + + // used in annotation, confirmed size and not stored in stack + inline MObject *GetReferenceFromGidx(uint32_t idx) noexcept { + return reinterpret_cast(values[idx >> 1].i); + } + + void AddInt32(int32_t *value) { + uint32_t &valueIdx = (gregIdx >= kMaxGregSize) ? stackIdx : gregIdx; + Resize(valueIdx >> 1); + if ((valueIdx & 0x1) == 0) { + values[valueIdx >> 1].i = *value; + } else { + *(reinterpret_cast(&values[valueIdx >> 1]) + 1) = *value; + } + valueIdx++; + } + + virtual void AddInt32(int32_t value) override { + AddInt32(&value); + } + + void AddInt64(int64_t *value) { + uint32_t &tempIdx = (gregIdx >= kMaxGregSize) ? stackIdx : gregIdx; + if ((tempIdx & 0x1) == 1) { + tempIdx++; + } + + uint32_t &valueIdx = (gregIdx >= kMaxGregSize) ? stackIdx : gregIdx; + Resize(valueIdx >> 1); + values[valueIdx >> 1].j = *value; + valueIdx += kWideSize; + } + + virtual void AddInt64(int64_t value) override { + AddInt64(&value); + } + + virtual void AddFloat(float value) override { +#if defined(__ARM_PCS_VFP) + if (fregOddSpace != 0) { + __MRT_ASSERT((fregOddSpace & 1) == 1, "Invalid fregOddSpace"); + *(reinterpret_cast(&values[fregOddSpace >> 1]) + 1) = value; + fregOddSpace = 0; + return; + } + uint32_t &valueIdx = (fregIdx >= kMaxFregSize) ? stackIdx : fregIdx; + Resize(valueIdx >> 1); + if ((valueIdx & 0x1) == 0) { + values[valueIdx >> 1].f = value; + } else { + *(reinterpret_cast(&values[valueIdx >> 1]) + 1) = value; + } + valueIdx++; +#else + AddInt32(reinterpret_cast(&value)); +#endif + } + + virtual void AddDouble(double value) override { +#if defined(__ARM_PCS_VFP) + uint32_t &tempIdx = (fregIdx >= kMaxFregSize) ? stackIdx : fregIdx; + if ((tempIdx & 0x1) == 1) { + __MRT_ASSERT(fregOddSpace == 0, "Invalid float odd space!"); + if (fregIdx < kMaxFregSize) { + fregOddSpace = fregIdx; + } + tempIdx++; + } + uint32_t &valueIdx = (fregIdx >= kMaxFregSize) ? 
stackIdx : fregIdx; + Resize(valueIdx >> 1); + values[valueIdx >> 1].d = value; + valueIdx += kWideSize; +#else + AddInt64(reinterpret_cast(&value)); +#endif + } + + virtual uint32_t GetFRegSize() const override { + return ((fregIdx + 1) - kMaxGregSize) >> 1; + } + + virtual uint32_t GetStackSize() const override { + return (((stackIdx + 1) - kMaxFregSize) >> 1) * sizeof(jvalue); + } + + virtual MObject *GetReceiver() const override { + return reinterpret_cast(values[0].i); + } + + private: +#if defined(__ARM_PCS_VFP) + uint32_t fregOddSpace = 0; +#endif + constexpr static uint32_t kWideSize = 2; + constexpr static uint32_t kMaxGregSize = calljavastubconst::kArmRregSize; + constexpr static uint32_t kMaxFregSize = calljavastubconst::kArmRregSize + + calljavastubconst::kArmDregSize * kWideSize; +}; + +// we restructure parameter in stub, copy caller's caller parameter to caller stack +class DecodeStackArgs { + public: + explicit DecodeStackArgs(intptr_t *stack) + : stackMemery(stack), values(kIncSize), gregIdx(0), oddFregIdx(0), valueIdx(0) { + fregIdx = calljavastubconst::kArmRregSize; + stackIdx = calljavastubconst::kArmRregSize + calljavastubconst::kArmDregSize * kWideSize; + } + ~DecodeStackArgs() { + stackMemery = nullptr; + } + + void DecodeReference() { + DecodeInt32(); + } + + void DecodeInt32() { + Resize(valueIdx); + if (gregIdx < kMaxGregSize) { + values[valueIdx].i = stackMemery[gregIdx]; + gregIdx++; + } else { + values[valueIdx].i = stackMemery[stackIdx]; + stackIdx++; + } + valueIdx++; + } + + void DecodeInt64() { + Resize(valueIdx); + if ((gregIdx & 0x1) == 0x1) { + gregIdx++; + } + if (gregIdx < kMaxGregSize) { + values[valueIdx].j = *(reinterpret_cast(&stackMemery[gregIdx])); + gregIdx += kWideSize; + } else { + if ((stackIdx & 0x1) == 0x1) { + stackIdx++; + } + values[valueIdx].j = *(reinterpret_cast(&stackMemery[stackIdx])); + stackIdx += kWideSize; + } + valueIdx++; + } + + void DecodeFloat() { +#if defined(__ARM_PCS_VFP) + Resize(valueIdx); + if ((oddFregIdx & 0x1) == 0x01) { + values[valueIdx].f = *(reinterpret_cast(&stackMemery[oddFregIdx])); + oddFregIdx = 0; + valueIdx++; + return; + } + if (fregIdx < kMaxFregSize) { + values[valueIdx].f = *(reinterpret_cast(&stackMemery[fregIdx])); + fregIdx++; + } else { + values[valueIdx].f = *(reinterpret_cast(&stackMemery[stackIdx])); + stackIdx++; + } + valueIdx++; +#else + DecodeInt32(); +#endif + } + + void DecodeDouble() { +#if defined(__ARM_PCS_VFP) + Resize(valueIdx); + if ((fregIdx & 0x1) == 0x1) { + oddFregIdx = fregIdx; + fregIdx++; + } + if (fregIdx < kMaxFregSize) { + values[valueIdx].d = *(reinterpret_cast(&stackMemery[fregIdx])); + fregIdx += kWideSize; + } else { + if ((stackIdx & 0x1) == 0x1) { + stackIdx++; + } + values[valueIdx].d = *(reinterpret_cast(&stackMemery[stackIdx])); + stackIdx += kWideSize; + } + valueIdx++; +#else + DecodeInt64(); +#endif + } + + jvalue *GetData() { + return &values[0]; + } + + protected: + void Resize(uint32_t newSize) { + if (newSize < values.size()) { + return; + } + values.resize(newSize + kIncSize); + } + + constexpr static uint32_t kIncSize = 8; + intptr_t *stackMemery; + std::vector values; + uint32_t gregIdx; + uint32_t fregIdx; + uint32_t stackIdx; + uint32_t oddFregIdx; + uint32_t valueIdx; + constexpr static uint32_t kWideSize = 2; + constexpr static uint32_t kMaxGregSize = calljavastubconst::kArmRregSize; + constexpr static uint32_t kMaxFregSize = calljavastubconst::kArmRregSize + + calljavastubconst::kArmDregSize * kWideSize; +}; +#endif +} +#endif diff --git 
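[Commentary, not part of the patch] The ARM32 ArgValue above counts 32-bit slots, aligns 64-bit values to an even slot pair, and under the hard-float ABI back-fills a later single-precision float into the odd slot left free when a double forced alignment (the fregOddSpace bookkeeping). A minimal sketch of just that VFP back-filling follows; it assumes 16 single-precision slots (s0..s15) and is illustrative only.

  #include <cstdint>
  #include <cstdio>

  // Minimal sketch of AAPCS-VFP back-filling. Indices count 32-bit slots.
  struct VfpAllocator {
    uint32_t next = 0;        // next free single-precision slot
    uint32_t backfill = ~0u;  // odd slot freed by double alignment, ~0u = none

    uint32_t AddFloat() {
      if (backfill != ~0u) { uint32_t s = backfill; backfill = ~0u; return s; }
      return next++;
    }
    uint32_t AddDouble() {  // returns the first of two consecutive slots
      if (next & 1u) { backfill = next++; }  // remember the skipped odd slot
      uint32_t s = next;
      next += 2;
      return s;
    }
  };

  int main() {
    VfpAllocator a;
    std::printf("float  -> s%u\n", a.AddFloat());   // s0
    std::printf("double -> s%u\n", a.AddDouble());  // s2 (s1 kept for back-fill)
    std::printf("float  -> s%u\n", a.AddFloat());   // s1, back-filled
    return 0;
  }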
a/src/mrt/maplert/include/deferredaccess.h b/src/mrt/maplert/include/deferredaccess.h new file mode 100644 index 0000000000..f43fcbe04e --- /dev/null +++ b/src/mrt/maplert/include/deferredaccess.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_DEFERREDACCESS_H_ +#define MRT_MAPLERT_INCLUDE_DEFERREDACCESS_H_ +#include "marray.h" +#include "fieldmeta.h" +#include "methodmeta.h" + +namespace maplert { +namespace deferredaccess { +enum DeferredInvokeType { + kStaticCall, // invoke-static + kVirtualCall, // invoke-virtual + kSuperCall, // invoke-super + kDirectCall, // invoke-direct + kInterfaceCall // invoke-interface +}; // enum DeferredInvokeType + +// this offset keep order with MCC_DeferredInvoke stack frame +enum DeferredInvokeParameterOffset { + kDaiMethod = 0, // cache dai Method + kInvokeType = 1, // invoke type + kClassName = 2, // class name + kMethodName = 3, // method name +#if defined(__aarch64__) + kSignature = 4, // signature + kThisObj = 5, // this obj +#elif defined(__arm__) + kStackOffset = 20, // StackOffset see argvalue.h, DecodeStackArgs + kSignature = kStackOffset, // signature + kThisObj = kStackOffset + 1, // this obj +#endif +}; // enum DeferredInvokeParameterOrder + +// DAI struct +using DaiClass = struct DAIClass { + MClass *daiClass; +}; +using DaiField = struct DAIField { + FieldMeta *fieldMeta; +}; +using DaiMethod = struct DAIMethod { + MethodMeta *methodMeta; +}; + +MClass *GetConstClass(const MObject &caller, const char *descriptor); +void ClinitCheck(const MClass &classObject); +bool IsInstanceOf(const MClass &classObject, const MObject &obj); +MObject *NewInstance(const MClass &classObject); +MArray *NewArray(const MClass &classObject, uint32_t length); +MArray *NewArray(const MClass &classObject, uint32_t length, va_list initialElement); +jvalue LoadField(const FieldMeta &fieldMeta, const MObject *obj); +void StoreField(const FieldMeta &fieldMeta, MObject *obj, jvalue value); +FieldMeta *InitDaiField(DaiField *daiField, const MClass &classObject, + const MString &fieldName, const MString &fieldTypeName); +jvalue Invoke(DeferredInvokeType invokeType, const MethodMeta *methodMeta, MObject *obj, jvalue args[]); +MethodMeta *InitDaiMethod(DeferredInvokeType invokeType, DaiMethod *daiMethod, const MClass &classObject, + const char *methodName, const char *signatureName); +bool CheckFieldAccess(MObject &caller, const FieldMeta &fieldMeta, const MObject *obj); +}; // namespace deferredaccess +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_DEFERREDACCESS_H_ diff --git a/src/mrt/maplert/include/fieldmeta.h b/src/mrt/maplert/include/fieldmeta.h new file mode 100644 index 0000000000..1496da6fdb --- /dev/null +++ b/src/mrt/maplert/include/fieldmeta.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
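[Commentary, not part of the patch] deferredaccess.h distinguishes the five dex invoke kinds. The sketch below only illustrates the usual semantic split between them (static/direct/super bind to the resolved method, virtual/interface re-select on the receiver); it is a toy model with an invented map-based dispatch table, not the body of deferredaccess::Invoke.

  #include <cstdio>
  #include <map>
  #include <string>

  enum InvokeType { kStaticCall, kVirtualCall, kSuperCall, kDirectCall, kInterfaceCall };

  struct Klass {
    std::map<std::string, std::string> vtable;  // method name -> implementing class (toy)
  };

  std::string SelectTarget(InvokeType type, const std::string &resolved, const Klass &receiver) {
    switch (type) {
      case kVirtualCall:
      case kInterfaceCall: {  // late-bound: look the method up on the receiver's class
        auto it = receiver.vtable.find(resolved);
        return it != receiver.vtable.end() ? it->second + "::" + resolved : resolved;
      }
      default:                // static, direct and super calls are early-bound
        return "declared::" + resolved;
    }
  }

  int main() {
    Klass sub{{{"toString", "SubClass"}}};
    std::printf("%s\n", SelectTarget(kVirtualCall, "toString", sub).c_str());  // SubClass::toString
    std::printf("%s\n", SelectTarget(kSuperCall, "toString", sub).c_str());    // declared::toString
    return 0;
  }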
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_FIELDMETA_H_ +#define MRT_MAPLERT_INCLUDE_FIELDMETA_H_ +#include "primitive.h" +#include "mobject.h" +#include "mstring.h" + +namespace maplert { +static constexpr uint32_t kFieldHashMask = 0x3FF; +static constexpr uint32_t kFieldFlagBits = 6; +static constexpr uint32_t kFieldOffsetShift = 3; +class FieldOffset { + public: + void SetBitOffset(uint32_t bitOffset); + uint32_t GetBitOffset() const; + void SetAddress(uintptr_t addr); + uintptr_t GetAddress() const; + int32_t GetDefTabIndex() const; + + private: + union { + DataRefOffset offset; // offset for instance field in bits + DataRefOffset relOffset; // address offset for static field + DataRefOffset defTabIndex; // def tab index, before lazy binding resolve for static field. + }; +}; + +class FieldMeta { + public: + char *GetName() const; + uint16_t GetHashCode() const; + uint32_t GetMod() const; + char *GetTypeName() const; + MClass *GetType() const; + uint16_t GetFlag() const; + uint16_t GetIndex() const; + uint32_t GetOffset() const; + int32_t GetDefTabIndex() const; + // instance field + void SetBitOffset(uint32_t setOffset); + // instance field or static field + void SetOffsetOrAddress(size_t setOffset); + uint32_t GetBitOffset() const; + MObject *GetRealMObject(const MObject *o, bool clinitCheck = true) const; + MClass *GetDeclaringclass() const; + uintptr_t GetStaticAddr() const; + bool IsPublic() const; + bool IsVolatile() const; + bool IsStatic() const; + bool IsFinal() const; + bool IsPrivate() const; + bool IsProtected() const; + bool IsPrimitive() const; + bool IsReference() const; + bool IspOffset() const; + FieldOffset *GetFieldpOffset() const; + bool Cmp(const char *fieldName, const char *fieldType) const; + bool Cmp(const MString *fieldName, const MClass *type) const; + template + void SetPrimitiveValue(MObject *o, char dstType, valueType value, bool clinitCheck = true) const; + template + dstType GetPrimitiveValue(const MObject *o, char srcType) const; + std::string GetAnnotation() const; + std::string GetFullName(const MClass *declaringClass, bool needType) const; + MObject *GetObjectValue(const MObject *o) const; + MObject *GetSignatureAnnotation() const; + void SetObjectValue(MObject *o, const MObject *value, bool clinitCheck = true) const; + void SetStaticAddr(uintptr_t addr); + void SetHashCode(uint16_t hash); + void SetMod(const uint32_t modifier); + void SetTypeName(const char *typeName); + void SetFlag(const uint16_t fieldFlag); + void SetIndex(const uint16_t fieldIndex); + void SetFieldName(const char *fieldName); + void SetAnnotation(const char *strAnnotation); + void SetDeclaringClass(const MClass &dlClass); + void FillFieldMeta(uint32_t modifier, size_t offset, const char *typeName, uint16_t hash, + uint16_t index, const char *name, const char *strAnnotation, const MClass &cls); + size_t GetMemSize() const; // return the memory size this field consumes + + template + static inline FieldMeta *JniCast(T fieldMeta); + template + static inline FieldMeta *JniCastNonNull(T fieldMeta); + template + static inline FieldMeta *Cast(T fieldMeta); + template + static inline FieldMeta *CastNonNull(T fieldMeta); + inline jfieldID 
AsJfieldID(); + + private: + union { + DataRefOffset pOffset; // point to FieldOffset struct, and meta is RO, if the flag LSB mask 1. + DataRefOffset offset; // offset for instance field in bits, compatibility before or runtime generic meta. + DataRefOffset relOffset; // address offset for static field, compatibility before or runtime generic meta. + }; + uint32_t mod; + uint16_t flag; + uint16_t index; + DataRefOffset typeName; // point to the string address in reflection strtab + DataRefOffset32 fieldName; + DataRefOffset32 annotation; + DataRefOffset declaringClass; + DataRefOffset pClassType; // point to the mclass address in muid tab +}; + +class FieldMetaCompact { + public: + uint32_t GetBitOffset() const; + uintptr_t GetOffsetOrAddress(bool isStatic) const; + void SetBitOffset(uint32_t fieldOffset); + static FieldMeta *DecodeCompactFieldMetas(MClass &cls); + static FieldMetaCompact *DecodeCompactFieldMetasToVector( + const MClass &cls, uint32_t vecSize, char **typeNameVec = nullptr, char **fieldNameVec = nullptr, + size_t *offsetVec = nullptr, uint32_t *modifierVec = nullptr, FieldMetaCompact **fieldMetaCompacts = nullptr, + char **annotationVec = nullptr); + static void SetCompactFieldMetaOffset(const MClass &cls, uint32_t index, int32_t fieldOffset); + FieldOffset *GetFieldpOffset() const; + + private: + union { + DataRefOffset32 pOffset; // point to FieldOffset struct, and meta is always RO. + }; + uint8_t leb128Start; + static std::mutex resolveMutex; + uint8_t *GetpCompact(); +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_FIELDMETA_H_ diff --git a/src/mrt/maplert/include/fieldmeta_inline.h b/src/mrt/maplert/include/fieldmeta_inline.h new file mode 100644 index 0000000000..9478f0020d --- /dev/null +++ b/src/mrt/maplert/include/fieldmeta_inline.h @@ -0,0 +1,347 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
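[Commentary, not part of the patch] FieldMeta packs a 10-bit hash into the upper bits of its 16-bit flag word (six low bits remain flag bits, per kFieldHashMask = 0x3FF and kFieldFlagBits = 6), and instance field offsets are stored in bits, so the byte offset is the stored value shifted right by kFieldOffsetShift = 3. A small self-contained check of that arithmetic, using the same constants:

  #include <cstdint>
  #include <cstdio>

  constexpr uint32_t kHashMask = 0x3FF;   // mirrors kFieldHashMask
  constexpr uint32_t kFlagBits = 6;       // mirrors kFieldFlagBits
  constexpr uint32_t kOffsetShift = 3;    // mirrors kFieldOffsetShift

  uint16_t SetHash(uint16_t flag, uint16_t hash) {
    flag &= static_cast<uint16_t>(~(kHashMask << kFlagBits));           // clear old hash
    flag |= static_cast<uint16_t>((hash & kHashMask) << kFlagBits);     // store new hash
    return flag;
  }

  uint16_t GetHash(uint16_t flag) { return (flag >> kFlagBits) & kHashMask; }

  int main() {
    uint16_t flag = 0x003F;  // all six low flag bits set
    flag = SetHash(flag, 0x2A5);
    std::printf("hash=0x%X low-bits=0x%X\n", GetHash(flag), flag & 0x3F);   // 0x2A5 0x3F
    std::printf("byte offset for bit offset 96: %u\n", 96u >> kOffsetShift); // 12
    return 0;
  }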
+ */ +#ifndef MRT_MAPLERT_INCLUDE_FIELDMETA_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_FIELDMETA_INLINE_H_ +#include "fieldmeta.h" +#include "cpphelper.h" +#include "modifier.h" +#include "mobject_inline.h" +#include "mrt_class_init.h" +#include "mrt_annotation.h" +#include "mstring_inline.h" +namespace maplert { +inline uint32_t FieldOffset::GetBitOffset() const { + return static_cast(offset.GetRawValue()); +} + +inline uintptr_t FieldOffset::GetAddress() const { + return relOffset.GetDataRef(); +} + +inline void FieldOffset::SetBitOffset(uint32_t value) { + offset.SetRawValue(value); +} + +inline void FieldOffset::SetAddress(uintptr_t addr) { + relOffset.SetDataRef(addr); +} + +inline int32_t FieldOffset::GetDefTabIndex() const { + return static_cast(defTabIndex.GetRawValue()); +} + +inline char *FieldMeta::GetName() const { + char *name = fieldName.GetDataRef(); + __MRT_Profile_CString(name); + return name; +} + +inline int32_t FieldMeta::GetDefTabIndex() const { + if (!IspOffset()) { + // no def tab index + return -1; + } + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "FieldMeta::GetDefTabIndex(): fieldOffset is nullptr!" << maple::endl; + return fieldOffset->GetDefTabIndex(); +} + +inline uint16_t FieldMeta::GetHashCode() const { + return (flag >> kFieldFlagBits) & kFieldHashMask; +} + +inline void FieldMeta::SetHashCode(uint16_t hash) { + flag &= ~(kFieldHashMask << kFieldFlagBits); + flag |= (hash & kFieldHashMask) << kFieldFlagBits; +} + +inline void FieldMeta::SetFlag(uint16_t fieldFlag) { + flag = fieldFlag; +} + +inline uint16_t FieldMeta::GetFlag() const { + return flag; +} + +inline void FieldMeta::SetMod(uint32_t modifier) { + mod = modifier; +} + +inline uint32_t FieldMeta::GetMod() const { + return mod; +} + +inline char *FieldMeta::GetTypeName() const { + char *fieldTypeName = typeName.GetDataRef(); + __MRT_Profile_CString(fieldTypeName); + return fieldTypeName; +} + +inline MClass *FieldMeta::GetType() const { + MetaRef* pFieldType = pClassType.GetDataRef(); + if (pFieldType != nullptr && *pFieldType != 0 && !MRT_IsLazyBindingState(reinterpret_cast(*pFieldType))) { + return reinterpret_cast(*pFieldType); + } else { + char *fieldTypeName = GetTypeName(); + return MClass::GetClassFromDescriptor(GetDeclaringclass(), fieldTypeName); + } +} + +// all static field are algned to 8 bytes +inline size_t FieldMeta::GetMemSize() const { + char *fieldTypeName = GetTypeName(); + DCHECK(fieldTypeName != nullptr) << "FieldMeta::GetMemSize: fieldTypeName is nullptr!" << maple::endl; + const size_t algne = 8; // algned to 8 bytes + switch (*fieldTypeName) { + case 'Z': + case 'B': + case 'C': + case 'S': + case 'I': + case 'F': + case 'J': + case 'D': + case '[': + case 'L': + return algne; + default: + __MRT_ASSERT(false, "Unknown Field Type!"); + return 0; + } + return 0; +} + +inline void FieldMeta::SetIndex(uint16_t fieldIndex) { + index = fieldIndex; +} + +inline uint16_t FieldMeta::GetIndex() const { + return index; +} + +inline uint32_t FieldMeta::GetOffset() const { + return IsStatic() ? 0 : GetBitOffset() >> kFieldOffsetShift; +} + +inline uint32_t FieldMeta::GetBitOffset() const { + // instance field offset + DCHECK(!IsStatic()) << "should be a instance field"; + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "FieldMeta::GetBitOffset(): fieldOffset is nullptr!" 
<< maple::endl; + return fieldOffset->GetBitOffset(); +} + +inline MClass *FieldMeta::GetDeclaringclass() const { + MClass *dcl = declaringClass.GetDataRef(); + return dcl; +} + +inline uintptr_t FieldMeta::GetStaticAddr() const { + DCHECK(IsStatic()) << "should be a static field"; + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "FieldMeta::GetStaticAddr: fieldOffset is nullptr!" << maple::endl; + return fieldOffset->GetAddress(); +} + +inline void FieldMeta::SetTypeName(const char *name) { + typeName.SetDataRef(name); + __MRT_ASSERT(GetTypeName() == name, "Offset larger than 32bits!"); +} + +inline bool FieldMeta::IsPublic() const { + return static_cast(mod & modifier::kModifierPublic); +} + +inline bool FieldMeta::IsVolatile() const { + return static_cast(mod & modifier::kModifierVolatile); +} + +inline bool FieldMeta::IsStatic() const { + return static_cast(mod & modifier::kModifierStatic); +} + +inline bool FieldMeta::IsFinal() const { + return static_cast(mod & modifier::kModifierFinal); +} + +inline bool FieldMeta::IsPrivate() const { + return static_cast(mod & modifier::kModifierPrivate); +} + +inline bool FieldMeta::IsProtected() const { + return static_cast(mod & modifier::kModifierProtected); +} + +inline bool FieldMeta::IsReference() const { + const char *fieldTypeName = GetTypeName(); + DCHECK(fieldTypeName != nullptr) << "FieldMeta::IsReference: fieldTypeName is nullptr!" << maple::endl; + bool isRefType = (fieldTypeName[0] == 'L') || (fieldTypeName[0] == '['); + return isRefType; +} + +inline bool FieldMeta::IsPrimitive() const { + return !IsReference(); +} + +inline bool FieldMeta::IspOffset() const { + return static_cast(flag & modifier::kFieldOffsetIspOffset); +} + +inline FieldOffset *FieldMeta::GetFieldpOffset() const { + FieldOffset *fieldOffset = nullptr; + if (IspOffset()) { + fieldOffset = pOffset.GetDataRef(); + } else { + fieldOffset = const_cast(reinterpret_cast(&pOffset)); + } + return fieldOffset; +} + +inline std::string FieldMeta::GetAnnotation() const { + char *annotationStr = annotation.GetDataRef(); + __MRT_Profile_CString(annotationStr); + return AnnotationUtil::GetAnnotationUtil(annotationStr); +} + +inline MObject *FieldMeta::GetRealMObject(const MObject *o, bool clinitCheck) const { + if (!IsStatic()) { + return const_cast(o); + } + // access to tatic fields + if (clinitCheck && !GetDeclaringclass()->InitClassIfNeeded()) { + return nullptr; + } + return MObject::Cast(GetStaticAddr()); +} + +inline void FieldMeta::SetBitOffset(uint32_t setOffset) { + // instance field offset + DCHECK(!IsStatic()) << "should be instance field" << maple::endl; + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "FieldMeta::SetBitOffset: fieldOffset is nullptr!" 
<< maple::endl; + fieldOffset->SetBitOffset(setOffset); +} + +template +inline void FieldMeta::SetPrimitiveValue(MObject *o, char dstType, valueType value, bool clinitCheck) const { + o = GetRealMObject(o, clinitCheck); + if (UNLIKELY(o == nullptr)) { + return; + } + uint32_t offset = GetOffset(); + const bool isVol = IsVolatile(); + switch (dstType) { + case 'Z': + o->Store(offset, static_cast(value), isVol); + break; + case 'B': + o->Store(offset, static_cast(value), isVol); + break; + case 'C': + o->Store(offset, static_cast(value), isVol); + break; + case 'S': + o->Store(offset, static_cast(value), isVol); + break; + case 'I': + o->Store(offset, static_cast(value), isVol); + break; + case 'J': + o->Store(offset, static_cast(value), isVol); + break; + case 'F': + o->Store(offset, static_cast(value), isVol); + break; + case 'D': + o->Store(offset, static_cast(value), isVol); + break; + default: + LOG(FATAL) << "can not run here!!! dstType: " << dstType << maple::endl; + } +} + +template +inline dstType FieldMeta::GetPrimitiveValue(const MObject *o, char srcType) const { + o = GetRealMObject(o); + if (UNLIKELY(o == nullptr)) { + return 0; + } + uint32_t offset = GetOffset(); + const bool isVol = IsVolatile(); + switch (srcType) { + case 'Z': + return static_cast(o->Load(offset, isVol)); + case 'B': + return static_cast(o->Load(offset, isVol)); + case 'C': + return static_cast(o->Load(offset, isVol)); + case 'S': + return static_cast(o->Load(offset, isVol)); + case 'I': + return static_cast(o->Load(offset, isVol)); + case 'J': + return static_cast(o->Load(offset, isVol)); + case 'F': + return static_cast(o->Load(offset, isVol)); + case 'D': + return static_cast(o->Load(offset, isVol)); + default: + LOG(FATAL) << "can not run here!!! srcType: " << srcType << maple::endl; + } + return 0; +} + +inline bool FieldMeta::Cmp(const char *name, const char *type) const { + DCHECK(name != nullptr) << "fieldName is null." << maple::endl; + if (strcmp(name, GetName()) == 0) { + return (type != nullptr) ? (strcmp(type, GetTypeName()) == 0) : true; + } + return false; +} + +inline bool FieldMeta::Cmp(const MString *name, const MClass *type __attribute__((unused))) const { + DCHECK(name != nullptr) << "FieldMeta::Cmp: name is nullptr!" << maple::endl; + return name->Cmp(std::string(GetName())); +} + +template +inline FieldMeta *FieldMeta::JniCast(T fieldMeta) { + static_assert(std::is_same::value || std::is_same::value, "wrong type"); + return reinterpret_cast(fieldMeta); +} + +template +inline FieldMeta *FieldMeta::JniCastNonNull(T fieldMeta) { + DCHECK(fieldMeta != nullptr); + return JniCast(fieldMeta); +} + +template +inline FieldMeta *FieldMeta::Cast(T fieldMeta) { + static_assert(std::is_same::value, "wrong type"); + return reinterpret_cast(fieldMeta); +} + +template +inline FieldMeta *FieldMeta::CastNonNull(T fieldMeta) { + DCHECK(fieldMeta != nullptr); + return Cast(fieldMeta); +} + +inline jfieldID FieldMeta::AsJfieldID() { + return reinterpret_cast(this); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_FIELDMETA_INLINE_H_ diff --git a/src/mrt/maplert/include/itab_util.h b/src/mrt/maplert/include/itab_util.h new file mode 100644 index 0000000000..7f11d48cc1 --- /dev/null +++ b/src/mrt/maplert/include/itab_util.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
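[Commentary, not part of the patch] SetPrimitiveValue/GetPrimitiveValue above select the store/load width from the JVM type descriptor character. The sketch below mirrors only the width selection against a raw byte buffer that stands in for the object; unlike the real code it does not perform float/double value conversion, and it is illustrative only.

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  void StoreByDescriptor(uint8_t *obj, uint32_t offset, char type, int64_t value) {
    switch (type) {
      case 'Z': case 'B': { int8_t  v = static_cast<int8_t>(value);  std::memcpy(obj + offset, &v, sizeof(v)); break; }
      case 'C': case 'S': { int16_t v = static_cast<int16_t>(value); std::memcpy(obj + offset, &v, sizeof(v)); break; }
      case 'I': case 'F': { int32_t v = static_cast<int32_t>(value); std::memcpy(obj + offset, &v, sizeof(v)); break; }
      case 'J': case 'D': { std::memcpy(obj + offset, &value, sizeof(value)); break; }
      default: break;  // reference types take a different path in the runtime
    }
  }

  int main() {
    uint8_t obj[32] = {0};
    StoreByDescriptor(obj, 8, 'I', 0x12345678);
    int32_t readBack = 0;
    std::memcpy(&readBack, obj + 8, sizeof(readBack));
    std::printf("0x%X\n", readBack);  // 0x12345678
    return 0;
  }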
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_ITAB_HOT_METHOD_H +#define MRT_ITAB_HOT_METHOD_H + +namespace maple { +constexpr int kHashSize = 23; +constexpr int kHeadSizeOfSecondHash = 3; +constexpr int kFlagAgInHeadOfSecondHash = 1; +constexpr unsigned int kItabSecondHashSize = 0x1fff; +constexpr int kFlagFirstHashConflict = 0; +constexpr int kFlagSecondHashConflict = 1; + +constexpr int kItabFirstHashSize = kHashSize; + +unsigned int DJBHash(const char *str); +unsigned int GetHashIndex(const char *name); +unsigned int GetSecondHashIndex(const char *name); + +} // namespace maple +#endif diff --git a/src/mrt/maplert/include/literalstrname.h b/src/mrt/maplert/include/literalstrname.h new file mode 100644 index 0000000000..10f50d1f6d --- /dev/null +++ b/src/mrt/maplert/include/literalstrname.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_INCLUDE_LITERALSTRNAME_H +#define MRT_INCLUDE_LITERALSTRNAME_H +#include +#include +#include "muid.h" + +// literal string naming is shared between maple compiler and runtime, thus not in namespace maplert +const std::string kConstString = "_C_STR_"; +const std::string kConstStringPtr = "_PTR_C_STR_"; +const std::string kLocalStringPrefix = "L_STR_"; +constexpr int kConstStringLen = 7; + +class LiteralStrName { + public: + static int32_t CalculateHashSwapByte(const char16_t *data, uint32_t len); + static uint32_t CalculateHash(const char16_t *data, uint32_t len, bool dataIsCompress) { + uint32_t hash = 0; + if (dataIsCompress) { + const char *dataStart = reinterpret_cast(data); + const char *end = dataStart + len; + while (dataStart < end) { + hash = (hash << 5) - hash + *dataStart++; // calculate the hash code of data + } + } else { + const char16_t *end = data + len; + while (data < end) { + hash = (static_cast(hash) << 5) - hash + *data++; // calculate the hash code of data + } + } + return hash; + } + + static std::string GetHexStr(const uint8_t *bytes, uint32_t len); + static std::string GetLiteralStrName(const uint8_t *bytes, uint32_t len); + static std::string ComputeMuid(const uint8_t *bytes, uint32_t len); +}; + +#endif diff --git a/src/mrt/maplert/include/marray.h b/src/mrt/maplert/include/marray.h new file mode 100644 index 0000000000..85e339d00c --- /dev/null +++ b/src/mrt/maplert/include/marray.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
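[Commentary, not part of the patch] LiteralStrName::CalculateHash above uses the classic h = h*31 + c recurrence, written as (h << 5) - h + c, over either the compressed 8-bit data or the raw UTF-16 units. A quick standalone check that the two paths agree for ASCII content:

  #include <cstdint>
  #include <cstdio>

  uint32_t HashBytes(const char *s, uint32_t len) {
    uint32_t h = 0;
    for (uint32_t i = 0; i < len; ++i) h = (h << 5) - h + static_cast<uint8_t>(s[i]);
    return h;
  }

  uint32_t HashChar16(const char16_t *s, uint32_t len) {
    uint32_t h = 0;
    for (uint32_t i = 0; i < len; ++i) h = (h << 5) - h + s[i];
    return h;
  }

  int main() {
    const char narrow[] = "Maple";
    const char16_t wide[] = u"Maple";
    std::printf("%u %u\n", HashBytes(narrow, 5), HashChar16(wide, 5));  // identical values
    return 0;
  }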
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_MARRAY_H_ +#define MRT_MAPLERT_INCLUDE_MARRAY_H_ +#include "mobject.h" +#include "mrt_fields_api.h" +namespace maplert { + const uint32_t kMrtArrayLengthOffset = 12; // shadow:[4|8] + monitor:4, but we fix it here, + // since content offset is fixed + const uint32_t kMrtArrayContentOffset = 16; // fixed. aligned to 8B to along hosting of size-8B elements. + +enum ArrayElemSize { + kElemByte = 0, + kElemHWord = 1, + kElemWord = 2, + kElemDWord = 3 +}; + +class MArray : public MObject { + public: + uint32_t GetLength() const; + uint32_t GetArraySize() const; + uint32_t GetElementSize() const; + char *GetElementTypeName() const; + void InitialObjectArray(const MObject *initialElement) const; + uint8_t *ConvertToCArray() const; + template + T GetPrimitiveElement(uint32_t index); + template + void SetPrimitiveElement(uint32_t index, T value); + MObject *GetObjectElement(uint32_t index) const; + void SetObjectElement(uint32_t index, const MObject *mObj) const; + MObject *GetObjectElementNoRc(uint32_t index) const; + void SetObjectElementNoRc(uint32_t index, const MObject *mObj) const; + MObject *GetObjectElementOffHeap(uint32_t index) const; + void SetObjectElementOffHeap(uint32_t index, const MObject *mObj) const; + bool HasNullElement() const; + static uint32_t GetArrayContentOffset(); + template + static MArray *NewPrimitiveArray(uint32_t length, const MClass &arrayClass, bool isJNI = false); + static MArray *NewPrimitiveArray(uint32_t length, const MClass &arrayClass, bool isJNI = false); + template + static MArray *NewPrimitiveArrayComponentClass(uint32_t length, const MClass &componentClass); + static MArray *NewPrimitiveArrayComponentClass(uint32_t length, const MClass &componentClass); + static MArray *NewObjectArray(uint32_t length, const MClass &arrayClass); + static MArray *NewObjectArrayComponentClass(uint32_t length, const MClass &componentClass); + + template + static inline MArray *JniCast(T array); + template + static inline MArray *JniCastNonNull(T array); + inline jarray AsJarray() const; + inline jobjectArray AsJobjectArray() const; + + private: + static constexpr uint32_t lengthOffset = kMrtArrayLengthOffset; + static constexpr uint32_t contentOffset = kMrtArrayContentOffset; + static constexpr uint32_t objectElemSize = sizeof(reffield_t); +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MARRAY_H_ diff --git a/src/mrt/maplert/include/marray_inline.h b/src/mrt/maplert/include/marray_inline.h new file mode 100644 index 0000000000..f41b15923d --- /dev/null +++ b/src/mrt/maplert/include/marray_inline.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_MAPLERT_INCLUDE_MARRAY_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_MARRAY_INLINE_H_ +#include +#include "marray.h" +#include "mobject_inline.h" +namespace maplert { +inline uint32_t MArray::GetLength() const { + return Load(lengthOffset, false); +} + +inline uint8_t *MArray::ConvertToCArray() const { + return reinterpret_cast(AsUintptr() + contentOffset); +} + +inline uint32_t MArray::GetArrayContentOffset() { + return contentOffset; +} + +inline uint32_t MArray::GetArraySize() const { + uint32_t length = GetLength(); + uint32_t elementSize = GetElementSize(); + return (contentOffset + elementSize * length); +} + +inline uint32_t MArray::GetElementSize() const { + MClass *cls = GetClass(); + return cls->GetComponentSize(); +} + +inline char *MArray::GetElementTypeName() const { + MClass *cls = GetClass(); + MClass *componentClass = cls->GetComponentClass(); + return componentClass->GetName(); +} + +inline MObject *MArray::GetObjectElement(uint32_t index) const { + return LoadObject(contentOffset + objectElemSize * index, false); +} + +inline void MArray::SetObjectElement(uint32_t index, const MObject *mObj) const { + StoreObject(contentOffset + objectElemSize * index, mObj, false); +} + +inline MObject *MArray::GetObjectElementNoRc(uint32_t index) const { + return LoadObjectNoRc(contentOffset + objectElemSize * index); +} + +inline void MArray::SetObjectElementNoRc(uint32_t index, const MObject *mObj) const { + StoreObjectNoRc(contentOffset + objectElemSize * index, mObj); +} + +inline MObject *MArray::GetObjectElementOffHeap(uint32_t index) const { + return LoadObjectOffHeap(contentOffset + objectElemSize * index); +} + +inline void MArray::SetObjectElementOffHeap(uint32_t index, const MObject *mObj) const { + StoreObjectOffHeap(contentOffset + objectElemSize * index, mObj); +} + +template +inline T MArray::GetPrimitiveElement(uint32_t index) { + uint32_t offset = contentOffset + GetElementSize() * index; + T value = Load(offset, false); + return value; +} + +template +inline void MArray::SetPrimitiveElement(uint32_t index, T value) { + uint32_t offset = contentOffset + GetElementSize() * index; + Store(offset, value, false); +} + +inline bool MArray::HasNullElement() const { + uint32_t len = GetLength(); + for (uint32_t i = 0; i < len; ++i) { + MObject *element = GetObjectElementNoRc(i); + if (element == nullptr) { + return true; + } + } + return false; +} + +template +inline MArray *MArray::JniCast(T array) { + static_assert(std::is_same::value || std::is_same::value || + std::is_same::value, "wrong type"); + return reinterpret_cast(array); +} + +template +inline MArray *MArray::JniCastNonNull(T array) { + DCHECK(array != nullptr); + return JniCast(array); +} + +inline jarray MArray::AsJarray() const { + return reinterpret_cast(const_cast(this)); +} + +inline jobjectArray MArray::AsJobjectArray() const { + return reinterpret_cast(const_cast(this)); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MARRAY_INLINE_H_ diff --git a/src/mrt/maplert/include/mclass.h b/src/mrt/maplert/include/mclass.h new file mode 100644 index 0000000000..e6bb0ea648 --- /dev/null +++ b/src/mrt/maplert/include/mclass.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
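[Commentary, not part of the patch] MArray relies on a fixed layout: the 32-bit length at byte offset 12 (kMrtArrayLengthOffset) and element storage starting at byte offset 16 (kMrtArrayContentOffset, 8-byte aligned so 8-byte elements need no padding), with element addresses computed as base + contentOffset + index * elementSize. The sketch below imitates that layout in a plain byte buffer; it is not a real heap object.

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  constexpr uint32_t kLengthOffset = 12;   // mirrors kMrtArrayLengthOffset
  constexpr uint32_t kContentOffset = 16;  // mirrors kMrtArrayContentOffset

  int main() {
    alignas(8) uint8_t fakeArray[kContentOffset + 4 * sizeof(int32_t)] = {0};
    uint32_t length = 4;
    std::memcpy(fakeArray + kLengthOffset, &length, sizeof(length));
    for (uint32_t i = 0; i < length; ++i) {
      int32_t v = static_cast<int32_t>(i * 10);
      std::memcpy(fakeArray + kContentOffset + i * sizeof(int32_t), &v, sizeof(v));
    }
    uint32_t readLength = 0;
    int32_t element2 = 0;
    std::memcpy(&readLength, fakeArray + kLengthOffset, sizeof(readLength));
    std::memcpy(&element2, fakeArray + kContentOffset + 2 * sizeof(int32_t), sizeof(element2));
    std::printf("length=%u a[2]=%d\n", readLength, element2);  // length=4 a[2]=20
    return 0;
  }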
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_MCLASS_H_ +#define MRT_MAPLERT_INCLUDE_MCLASS_H_ +#include "mobject.h" +#include "metadata_inline.h" +#include "fieldmeta.h" +#include "methodmeta.h" + +namespace maplert { +// NOTE: class object layout is different from java.lang.Class +// its layout defind in struct ClassMetadata +class FieldMeta; +class MethodMeta; +class FieldMetaCompact; +class MethodMetaCompact; +class MClass : public MObject { + public: + char *GetName() const; + uint16_t GetClIndex() const; + uint32_t GetFlag() const; + int32_t GetMonitor() const; + uint32_t GetNumOfSuperClasses() const; + uint32_t GetNumOfInterface() const; + MClass *GetComponentClass() const; + uint32_t GetComponentSize() const; + uint8_t *GetItab() const; + uint8_t *GetVtab() const; + uint8_t *GetGctib() const; + uint32_t GetNumOfFields() const; + uint32_t GetNumOfMethods() const; + uint32_t GetModifier() const; + uint32_t GetObjectSize() const; + uintptr_t GetClinitFuncAddr() const; + std::string GetAnnotation() const; + char *GetRawAnnotation() const; + // super class in java + MClass *GetSuperClass() const; + // include interface + MClass *GetSuperClass(uint32_t index) const; + // resolve data + MClass **GetSuperClassArray() const; + // unresolve data + MClass **GetSuperClassArrayPtr() const; + MClass *GetCacheTrueClass() const; + MClass *GetCacheFalseClass() const; + bool HasFinalizer() const; + bool IsObjectArrayClass() const; + bool IsPrimitiveClass() const; + bool IsArrayClass() const; + bool IsInterface() const; + bool IsPublic() const; + bool IsProtected() const; + bool IsAbstract() const; + bool IsProxy() const; + bool IsStringClass() const; + bool IsAnonymousClass() const; + bool IsEnum() const; + bool IsDecouple() const; + bool IsFinalizable() const; + bool IsColdClass() const; + bool IsLazyBinding() const; + bool IsAssignableFrom(MClass &cls) const; + bool IsLazy() const; + bool IsFinal() const; + bool IsInnerClass() const; + bool IsVerified() const; + void SetVerified(); + + // java.lang.Class + void GetPrettyClass(std::string &dstName) const; + // java.lang.Object, [Ljava.lang.Object;, int, [I + void GetBinaryName(std::string &dstName) const; + // java.lang.Object, java.lang.Object[], int, int[] + void GetTypeName(std::string &dstName) const; + // Ljava/lang/Object; [Ljava/lang/Object; I, [I + void GetDescriptor(std::string &dstName) const; + static void ConvertDescriptorToTypeName(const std::string &descriptor, std::string &defineName); + static void ConvertDescriptorToBinaryName(const std::string &descriptor, std::string &binaryName); + uint32_t GetDimension() const; + static uint32_t GetDimensionFromDescriptor(const std::string &descriptor); + + FieldMetaCompact *GetCompactFields() const; + FieldMeta *GetFieldMetas() const; + FieldMeta *GetFieldMeta(uint32_t index) const; + MethodMetaCompact *GetCompactMethods() const; + MethodMeta *GetMethodMeta(uint32_t index) const; + MethodMeta *GetMethodMetas() const; + MethodMeta *GetRawMethodMetas() const; + MethodMeta *GetDeclaredMethod(const char *methodName, const char *signatureName) const; + MethodMeta *GetDeclaredMethod(const MString *methodName, const MArray *signatureName) const; + MethodMeta 
*GetClinitMethodMeta() const; + MethodMeta *GetVirtualMethod(const MethodMeta &srcMethod) const; + // recursive search method + MethodMeta *GetMethod(const char *methodName, const char *signatureName) const; + MethodMeta *GetInterfaceMethod(const char *methodName, const char *signatureName) const; + MethodMeta *GetMethodForVirtual(const MethodMeta &srcMethod) const; + MethodMeta *GetDeclaredConstructor(const char *signature) const; + MethodMeta *GetDeclaredConstructor(const MArray *signature) const; + MethodMeta *GetDefaultConstructor() const; + MethodMeta *GetDeclaredFinalizeMethod() const; + MethodMeta *GetFinalizeMethod() const; + FieldMeta *GetDeclaredField(const char *name, const char *type = nullptr) const; + FieldMeta *GetDeclaredField(const MString *name) const; + // recursive search field + FieldMeta *GetField(const char *fieldName, const char *fieldType = nullptr, bool isPublic = false) const; + FieldMeta *GetField(const MString *fieldName, bool isPublic) const; + FieldMeta *GetRawFieldMetas() const; + bool IsCompactMetaMethods() const; + bool IsCompactMetaFields() const; + uint32_t GetMethodMetaIndex(const MethodMeta &srcMethodMeta) const; + void GetDeclaredMethods(std::vector &methodsVector, bool publicOnly) const; + void GetSuperClassInterfaces(uint32_t numOfSuperClass, MClass **superArray, + std::vector &interfaceVector, uint32_t firstSuperClass) const; + void GetInterfaces(std::vector &interfaceVector) const; + void GetDirectInterfaces(MClass *interfaceVector[], uint32_t size) const; + bool InitClassIfNeeded() const; + ClassInitState GetInitState() const; + bool IsInitialized() const; + void SetInitState(ClassInitState state); + MObject *GetSignatureAnnotation() const; + + uint32_t GetArrayModifiers() const; + void SetName(const char *name); + void SetModifier(uint32_t newMod); + void SetGctib(uintptr_t newGctib); + void SetSuperClassArray(uintptr_t newValue); + void SetObjectSize(uint32_t newSize); + void SetItable(uintptr_t newItb); + void SetVtable(uintptr_t newVtab); + void SetNumOfFields(uint32_t newValue); + void SetNumOfMethods(uint32_t newValue); + void SetMethods(const MethodMeta &newMethods); + void SetFields(const FieldMeta &newFields); + void SetInitStateRawValue(uintptr_t newValue); + void SetClassMetaRoData(uintptr_t ro); + void SetNumOfSuperClasses(uint32_t numOfSuperclasses); + void SetHotClass(); + void SetFlag(uint16_t newFlag); + void SetClIndex(uint16_t clIndex); + void SetMonitor(int32_t monitor); + void ReSetFlag(uint16_t newFlag); + void SetNumSuperClasses(uint32_t numOfSuperclasses); + void SetComponentClass(const MClass &klass); + // for instanceOf cache + void SetCacheTrueClass(const MClass &cacheClass); + void SetCacheFalseClass(const MClass &cacheClass); + + void ResolveVtabItab(); + // super class may not visible, resolve by other thread + static MClass *ResolveSuperClass(MClass **super); + static MClass *GetArrayClass(const MClass &componentClass); + + // context can caller Class, or its instance + static MClass *GetClassFromDescriptor(const MObject *context, const char *descriptor, bool throwException = true); + static MClass *GetClassFromDescriptorUtil(const MObject *context, const char *descriptor, bool throwException); + static MClass *GetPrimitiveClass(const char *descriptor); + + // alloc class memory from perm space if newClsMem is 0, + // newClsMem size: sizeof(ClassMetadata) + sizeof(ClassMetadataRO) + static MClass *NewMClass(uintptr_t newClsMem = 0); + static uint16_t GetMethodFieldHash(const char *name, const char *signature, bool 
isMethod); + + operator jclass () { + return reinterpret_cast(this); + } + + operator jclass () const { + return reinterpret_cast(const_cast(this)); + } + + template + static inline MClass *JniCast(T c); + template + static inline MClass *JniCastNonNull(T c); + jclass inline AsJclass() const; + + private: + ClassMetadataRO *GetClassMetaRo() const; + ClassMetadata *GetClassMeta() const; + bool IsAssignableFromImpl(const MClass &cls) const; + // this class is interface + bool IsAssignableFromInterface(const MClass &cls) const; + static uint16_t GetMethodFieldHash(const MString *name, const MArray *signature, bool isMethod); + + template + T1 *CmpHashEqual(int32_t mid, uint32_t num, T1 *metas, T2 name, T3 signature, uint16_t srcHash) const; + template + T1 *GetDeclareMethodFieldUtil(T1 *metas, uint32_t num, T2 name, T3 signature, bool isMethod) const; + template + MethodMeta *GetDeclaredMethodUtil(T1 methodName, T2 methodSignature) const; + template + FieldMeta *GetDeclaredFieldUtil(T1 fieldName, T2 fieldType) const; + template + FieldMeta *GetFieldUtil(T1 fieldName, T2 fieldType, bool isPublic) const; + template + FieldMeta *CmpHashEqualField(int32_t mid, T1 fieldName, T2 fieldType, uint16_t srchash) const; + template + MethodMeta *GetDeclaredConstructorUtil(T signature) const; +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MCLASS_H_ + diff --git a/src/mrt/maplert/include/mclass_inline.h b/src/mrt/maplert/include/mclass_inline.h new file mode 100644 index 0000000000..9426db14b8 --- /dev/null +++ b/src/mrt/maplert/include/mclass_inline.h @@ -0,0 +1,821 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
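[Commentary, not part of the patch] mclass.h distinguishes three spellings of a class name: the descriptor ("Ljava/lang/Object;", "[I"), the binary name ("java.lang.Object", "[I" style for arrays), and the type name ("java.lang.Object", "int[]"). The sketch below performs a simplified descriptor-to-type-name conversion covering only the cases listed in the comments; it is not MClass::ConvertDescriptorToTypeName.

  #include <cstdio>
  #include <string>

  std::string DescriptorToTypeName(const std::string &desc) {
    size_t dims = 0;
    while (dims < desc.size() && desc[dims] == '[') ++dims;  // leading '[' = array dims
    std::string base;
    switch (desc[dims]) {
      case 'Z': base = "boolean"; break;
      case 'B': base = "byte"; break;
      case 'C': base = "char"; break;
      case 'S': base = "short"; break;
      case 'I': base = "int"; break;
      case 'J': base = "long"; break;
      case 'F': base = "float"; break;
      case 'D': base = "double"; break;
      case 'V': base = "void"; break;
      case 'L':
        base = desc.substr(dims + 1, desc.size() - dims - 2);  // strip 'L' and ';'
        for (char &c : base) { if (c == '/') c = '.'; }
        break;
      default: base = desc; break;
    }
    for (size_t i = 0; i < dims; ++i) base += "[]";
    return base;
  }

  int main() {
    std::printf("%s\n", DescriptorToTypeName("[Ljava/lang/Object;").c_str());  // java.lang.Object[]
    std::printf("%s\n", DescriptorToTypeName("[I").c_str());                   // int[]
    return 0;
  }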
+ */ +#ifndef MRT_MAPLERT_INCLUDE_MCLASS_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_MCLASS_INLINE_H_ +#include "mclass.h" +#include "mobject_inline.h" +#include "methodmeta_inline.h" +#include "fieldmeta_inline.h" +#include "mrt_class_init.h" +#include "mrt_profile.h" +#include "modifier.h" +#include "mrt_well_known.h" +#include "linker_api.h" +#include "mstring_inline.h" + +namespace maplert { +inline ClassMetadata *MClass::GetClassMeta() const { + return reinterpret_cast(const_cast(this)); +} + +inline ClassMetadataRO *MClass::GetClassMetaRo() const { + const ClassMetadata *cls = GetClassMeta(); + return cls->classInfoRo.GetDataRef(); +} + +inline char *MClass::GetName() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + char *name = classInfoRo->className.GetRef(); + __MRT_Profile_CString(name); + return name; +} + +inline uint32_t MClass::GetFlag() const { +#ifdef USE_32BIT_REF + const ClassMetadata *cls = GetClassMeta(); + return cls->flag; +#else + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->flag; +#endif // USE_32BIT_REF +} + +inline uint32_t MClass::GetModifier() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->mod; +} + +inline uint16_t MClass::GetClIndex() const { + const ClassMetadata *cls = GetClassMeta(); + return cls->clIndex; +} + +inline int32_t MClass::GetMonitor() const { + return this->monitor; +} + +inline uint32_t MClass::GetNumOfSuperClasses() const { +#if USE_32BIT_REF + const ClassMetadata *cls = GetClassMeta(); + return cls->numOfSuperclasses; +#else + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->numOfSuperclasses; +#endif // USE_32BIT_REF +} + +inline uint32_t MClass::GetNumOfInterface() const { + uint32_t numOfInterface = 0; + if (IsArrayClass()) { + // array class has 2 interfaces. + numOfInterface = 2; + } else { + uint32_t numOfSuper = GetNumOfSuperClasses(); + numOfInterface = (numOfSuper == 0) ? 0 : (IsInterface() ? numOfSuper : numOfSuper - 1); + } + return numOfInterface; +} + +inline MClass *MClass::GetComponentClass() const { + if (UNLIKELY(!IsArrayClass())) { + return nullptr; + } + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + MClass *componentClass = classInfoRo->componentClass.GetDataRef(); + return componentClass; +} + +inline uint32_t MClass::GetComponentSize() const { + DCHECK(IsArrayClass()) << "must array class." 
<< maple::endl; + const ClassMetadata *cls = GetClassMeta(); + return cls->sizeInfo.componentSize; +} + +inline uint8_t *MClass::GetItab() const { + const ClassMetadata *cls = GetClassMeta(); + return cls->iTable.GetDataRef(); +} + +inline uint8_t *MClass::GetVtab() const { + const ClassMetadata *cls = GetClassMeta(); + return cls->vTable.GetDataRef(); +} + +inline uint8_t *MClass::GetGctib() const { + ClassMetadata *cls = GetClassMeta(); + return cls->gctib.GetGctibRef(); +} + +inline uint32_t MClass::GetNumOfFields() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->numOfFields; +} + +inline uint32_t MClass::GetNumOfMethods() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->numOfMethods; +} + +inline uint32_t MClass::GetObjectSize() const { + const ClassMetadata *cls = GetClassMeta(); + return cls->sizeInfo.objSize; +} + +inline uintptr_t MClass::GetClinitFuncAddr() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->clinitAddr.GetDataRef(); +} + +inline char *MClass::GetRawAnnotation() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->annotation.GetDataRef(); +} + +inline MClass *MClass::GetSuperClass(uint32_t index) const { + MClass **superClassArray = GetSuperClassArray(); + DCHECK(index < GetNumOfSuperClasses()) << "index too big." << maple::endl; + return ResolveSuperClass(superClassArray + index); +} + +inline MClass *MClass::GetSuperClass() const { + MClass *super = nullptr; + if (!IsInterface() && (GetNumOfSuperClasses() > 0)) { + super = GetSuperClass(0); + } else if (IsArrayClass()) { + super = WellKnown::GetMClassObject(); + } + return super; +} + +ALWAYS_INLINE inline MClass **MClass::GetSuperClassArray() const { + MClass *cls = const_cast(this); + if (UNLIKELY(IsColdClass())) { + LinkerAPI::Instance().ResolveColdClassSymbol(*cls); + } + return GetSuperClassArrayPtr(); +} + +inline MClass *MClass::ResolveSuperClass(MClass **pSuperClass) { + MClass *super = *pSuperClass; + LinkerRef ref(super->AsUintptr()); + // Need more fast check interface. 
+ if (UNLIKELY(ref.IsIndex())) { + return reinterpret_cast(LinkerAPI::Instance().GetSuperClass( + reinterpret_cast(pSuperClass))); + } else { + return super; + } +} + +inline MClass **MClass::GetSuperClassArrayPtr() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->superclass.GetDataRef(); +} + +inline MClass *MClass::GetCacheTrueClass() const { +#ifdef USE_32BIT_REF + const ClassMetadata *cls = GetClassMeta(); + return cls->cacheTrueClass.GetRawValue(); +#else + return nullptr; +#endif +} + +inline MClass *MClass::GetCacheFalseClass() const { +#ifdef USE_32BIT_REF + const ClassMetadata *cls = GetClassMeta(); + return cls->cacheFalseClass.GetDataRef(); +#else + return nullptr; +#endif +} + +inline void MClass::SetCacheTrueClass(const MClass &cacheClass) { +#ifdef USE_32BIT_REF + ClassMetadata *cls = GetClassMeta(); + if (LIKELY(IsInitialized())) { + cls->cacheTrueClass.SetDataRef(&cacheClass); + } +#else + (void)cacheClass; +#endif +} + +inline void MClass::SetCacheFalseClass(const MClass &cacheClass) { +#ifdef USE_32BIT_REF + ClassMetadata *cls = GetClassMeta(); + if (LIKELY(IsInitialized())) { + cls->cacheFalseClass.SetDataRef(&cacheClass); + } +#else + (void)cacheClass; +#endif +} + +inline FieldMeta *MClass::GetFieldMetas() const { + __MRT_Profile_FieldMeta(*this); + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + FieldMeta *fields = nullptr; + if (classInfoRo->fields.IsCompact()) { + fields = FieldMetaCompact::DecodeCompactFieldMetas(*const_cast(this)); + } else { + fields = classInfoRo->fields.GetDataRef(); + } + return fields; +} + +inline MethodMeta *MClass::GetMethodMetas() const { + __MRT_Profile_MethodMeta(*this); + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + MethodMeta *methods = nullptr; + if (classInfoRo->methods.IsCompact()) { + methods = MethodMetaCompact::DecodeCompactMethodMetas(*const_cast(this)); + } else { + methods = classInfoRo->methods.GetDataRef(); + } + return methods; +} + +inline MethodMeta *MClass::GetMethodMeta(uint32_t index) const { + CHECK(index < GetNumOfMethods()) << "index too large." << maple::endl; + MethodMeta *methods = GetMethodMetas(); + return &methods[index]; +} + +inline uint32_t MClass::GetMethodMetaIndex(const MethodMeta &srcMethodMeta) const { + MethodMeta *methods = GetMethodMetas(); + DCHECK(methods != nullptr) << "MClass::GetMethodMetaIndex: methods is null." 
<< maple::endl; + uint32_t numOfMethods = GetNumOfMethods(); + for (uint32_t index = 0; index < numOfMethods; ++index) { + if (&methods[index] == &srcMethodMeta) { + return index; + } + } + BUILTIN_UNREACHABLE(); +} + +inline bool MClass::HasFinalizer() const { + uint32_t flags = GetFlag(); + return modifier::hasFinalizer(flags); +} + +inline bool MClass::IsObjectArrayClass() const { + MClass *componentClass = GetComponentClass(); + if (UNLIKELY(componentClass == nullptr)) { + return false; + } + return !componentClass->IsPrimitiveClass(); +} + +inline bool MClass::IsPrimitiveClass() const { + uint32_t flag = GetFlag(); + return modifier::IsPrimitiveClass(flag); +} + +inline bool MClass::IsArrayClass() const { + uint32_t flag = GetFlag(); + return modifier::IsArrayClass(flag); +} + +inline bool MClass::IsStringClass() const { + return this == WellKnown::GetMClassString(); +} + +inline bool MClass::IsAnonymousClass() const { + uint32_t flag = GetFlag(); + return modifier::IsAnonymousClass(flag); +} + +inline bool MClass::IsEnum() const { + return modifier::IsEnum(GetModifier()); +} + +inline bool MClass::IsFinalizable() const { + return modifier::IsFinalizable(GetModifier()); +} + +inline bool MClass::IsDecouple() const { + return modifier::IsDecoupleClass(GetFlag()); +} + +inline bool MClass::IsInterface() const { + uint32_t modifier = GetModifier(); + return modifier::IsInterface(modifier); +} + +inline bool MClass::IsPublic() const { + uint32_t modifier = GetModifier(); + return modifier::IsPublic(modifier); +} + +inline bool MClass::IsProtected() const { + uint32_t modifier = GetModifier(); + return modifier::IsProtected(modifier); +} + +inline bool MClass::IsAbstract() const { + uint32_t modifier = GetModifier(); + return modifier::IsAbstract(modifier); +} + +inline bool MClass::IsProxy() const { + uint32_t modifier = GetModifier(); + return modifier::IsProxy(modifier); +} + +inline bool MClass::IsFinal() const { + uint32_t modifier = GetModifier(); + return modifier::IsFinal(modifier); +} + +inline bool MClass::IsColdClass() const { +#ifdef USE_32BIT_REF + ClassMetadata *cls = GetClassMeta(); + uint16_t flag = __atomic_load_n(&cls->flag, __ATOMIC_ACQUIRE); + return modifier::IsColdClass(flag); +#else + ClassMetadataRO *clsRo = GetClassMetaRo(); + uint16_t flag = __atomic_load_n(&clsRo->flag, __ATOMIC_ACQUIRE); + return modifier::IsColdClass(flag); +#endif // USE_32BIT_REF +} + +inline bool MClass::IsLazyBinding() const { +#ifdef USE_32BIT_REF + uint16_t flag = __atomic_load_n(&GetClassMeta()->flag, __ATOMIC_ACQUIRE); + return modifier::IsLazyBindingClass(flag); +#else + uint16_t flag = __atomic_load_n(&GetClassMetaRo()->flag, __ATOMIC_ACQUIRE); + return modifier::IsLazyBindingClass(flag); +#endif // USE_32BIT_REF +} + +inline bool MClass::IsLazy() const { +#ifdef USE_32BIT_REF + uint16_t flag = __atomic_load_n(&GetClassMeta()->flag, __ATOMIC_ACQUIRE); + return modifier::IsLazyBindingClass(flag) || modifier::IsLazyBoundClass(flag); +#else + uint16_t flag = __atomic_load_n(&GetClassMetaRo()->flag, __ATOMIC_ACQUIRE); + return modifier::IsLazyBindingClass(flag) || modifier::IsLazyBoundClass(flag); +#endif // USE_32BIT_REF +} + +inline bool MClass::IsVerified() const { +#ifdef USE_32BIT_REF + uint16_t flag = __atomic_load_n(&GetClassMeta()->flag, __ATOMIC_ACQUIRE); + return !modifier::IsNotVerifiedClass(flag); +#else + uint16_t flag = __atomic_load_n(&GetClassMetaRo()->flag, __ATOMIC_ACQUIRE); + return !modifier::IsNotVerifiedClass(flag); +#endif // USE_32BIT_REF +} + +inline void 
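[Commentary, not part of the patch] The IsAssignableFrom fast path just below keeps a one-entry positive cache and a one-entry negative cache per source class (cacheTrueClass/cacheFalseClass under USE_32BIT_REF) so that repeated subtype queries against the same target skip the full walk. The sketch below reproduces only that caching shape around a toy slow check; it is not the runtime's subtype test.

  #include <cstdio>
  #include <string>
  #include <unordered_set>

  struct Klass {
    std::string name;
    std::unordered_set<std::string> supers;  // names of all supertypes (toy model)
    const Klass *cacheTrue = nullptr;        // last target that was assignable
    const Klass *cacheFalse = nullptr;       // last target that was not
  };

  bool IsAssignableFrom(const Klass &target, Klass &cls) {
    if (&cls == &target || cls.cacheTrue == &target) return true;
    if (cls.cacheFalse == &target) return false;
    bool result = cls.supers.count(target.name) != 0;    // stand-in for the slow path
    (result ? cls.cacheTrue : cls.cacheFalse) = &target; // remember the answer
    return result;
  }

  int main() {
    Klass object{"Object", {}, nullptr, nullptr};
    Klass string{"String", {"Object"}, nullptr, nullptr};
    std::printf("%d %d\n", IsAssignableFrom(object, string), IsAssignableFrom(object, string));
    return 0;  // the second query is answered from the cache
  }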
MClass::SetVerified() { + uint16_t verifiedFlag = static_cast(~modifier::kClassRuntimeVerify); + ReSetFlag(verifiedFlag); +} + +inline void MClass::SetClassMetaRoData(uintptr_t ro) { + ClassMetadata *cls = GetClassMeta(); + cls->classInfoRo.SetDataRef(ro); +} + +inline void MClass::SetFlag(uint16_t newFlag) { +#ifdef USE_32BIT_REF + ClassMetadata *cls = GetClassMeta(); + uint16_t flag = __atomic_load_n(&cls->flag, __ATOMIC_ACQUIRE) | newFlag; + __atomic_store_n(&cls->flag, flag, __ATOMIC_RELEASE); +#else + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + uint16_t flag = __atomic_load_n(&classInfoRo->flag, __ATOMIC_ACQUIRE) | newFlag; + __atomic_store_n(&classInfoRo->flag, flag, __ATOMIC_RELEASE); +#endif // USE_32BIT_REF +} + +inline void MClass::SetClIndex(uint16_t clIndex) { + ClassMetadata *cls = GetClassMeta(); + cls->clIndex = clIndex; +} + +inline void MClass::SetMonitor(int32_t monitor) { + this->monitor = monitor; +} + +inline void MClass::ReSetFlag(uint16_t newFlag) { +#ifdef USE_32BIT_REF + ClassMetadata *cls = GetClassMeta(); + uint16_t flag = __atomic_load_n(&cls->flag, __ATOMIC_ACQUIRE) & newFlag; + __atomic_store_n(&cls->flag, flag, __ATOMIC_RELEASE); +#else + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + uint16_t flag = __atomic_load_n(&classInfoRo->flag, __ATOMIC_ACQUIRE) & newFlag; + __atomic_store_n(&classInfoRo->flag, flag, __ATOMIC_RELEASE); +#endif +} + +inline void MClass::SetNumOfSuperClasses(uint32_t numOfSuperclasses) { +#if USE_32BIT_REF + ClassMetadata *cls = GetClassMeta(); + cls->numOfSuperclasses = static_cast(numOfSuperclasses); +#else + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->numOfSuperclasses = numOfSuperclasses; +#endif // USE_32BIT_REF +} + +inline void MClass::SetHotClass() { + ReSetFlag(~modifier::kClassIsColdClass); +} + +inline MethodMeta *MClass::GetRawMethodMetas() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + MethodMeta *methods = classInfoRo->methods.GetDataRef(); + return methods; +} + +inline FieldMeta *MClass::GetRawFieldMetas() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + FieldMeta *fields = classInfoRo->fields.GetDataRef(); + return fields; +} + +inline FieldMeta *MClass::GetFieldMeta(uint32_t index) const { + DCHECK(index < GetNumOfFields()) << "index too large." << maple::endl; + FieldMeta *fields = GetRawFieldMetas(); + DCHECK(fields != nullptr) << "fields is nullptr." << maple::endl; + return fields + index; +} + +inline bool MClass::IsAssignableFrom(MClass &cls) const { +#ifdef USE_32BIT_REF + if ((&cls == this) || (cls.GetCacheTrueClass() == this)) { + return true; + } else if (cls.GetCacheFalseClass() == this) { + return false; + } +#endif + bool result = IsAssignableFromImpl(cls); +#ifdef USE_32BIT_REF + if (result) { + cls.SetCacheTrueClass(*this); + } else { + cls.SetCacheFalseClass(*this); + } +#endif + return result; +} + +ALWAYS_INLINE inline uint16_t MClass::GetMethodFieldHash(const MString *name, const MArray *signature, bool isMethod) { + DCHECK(name != nullptr) << "name must non null." << maple::endl; + uint32_t hash = 5381; // initial value for DJB hash algorithm + constexpr uint32_t kShift = 5; + // cal method name + if (name->IsCompress()) { + uint32_t nameLen = name->GetLength(); + uint8_t *contents = name->GetContentsPtr(); + for (uint32_t i = 0; i < nameLen; ++i) { + hash += (hash << kShift) + (*contents++); + } + } else { + std::string charMethodName = name->GetChars(); + // Note: charMethodName is the name encoded in UTF-8 as a sequence of bytes. 
+ for (char byte : charMethodName) { + // Iterate through the byte sequence. Treat each byte as a uint8_t, and zero-extend it to 32 bits. This is + // consistent with the algorithm used by the AoT compiler. + uint32_t byteU32 = static_cast(static_cast(byte)); + hash += (hash << kShift) + byteU32; + } + } + if (isMethod) { + // cal signature + hash += (hash << kShift) + '('; + if (signature != nullptr) { + uint32_t arrayLen = signature->GetLength(); + for (uint32_t i = 0; i < arrayLen; ++i) { + MClass *clsObj = static_cast(signature->GetObjectElementNoRc(i)); + // we have check the args non null + char *className = clsObj->GetName(); + while (*className) { + hash += (hash << kShift) + (*className++); + } + } + } + hash += (hash << kShift) + ')'; + } + return (hash & 0x7FFFFFFF) % modifier::kMethodFieldHashSize; +} + +ALWAYS_INLINE inline uint16_t MClass::GetMethodFieldHash(const char *name, const char *signature, bool isMethod) { + DCHECK(name != nullptr) << "name must non null." << maple::endl; + DCHECK(!isMethod || (signature != nullptr)) << "signature must non null." << maple::endl; + unsigned int hash = 5381; // initial value for DJB hash algorithm + constexpr uint32_t kShift = 5; + while (*name) { + hash += (hash << kShift) + (*name++); + } + if (isMethod) { + while (*signature) { + hash += (hash << kShift) + (*signature++); + if (*(signature - 1) == ')') { + break; + } + } + } + return (hash & 0x7FFFFFFF) % modifier::kMethodFieldHashSize; +} + +template +ALWAYS_INLINE inline T1 *MClass::CmpHashEqual(int32_t mid, uint32_t num, T1 *metas, T2 name, + T3 signature, uint16_t srcHash) const { + int32_t numOfMetas = static_cast(num); + while (++mid < numOfMetas) { + T1 *meta = metas + mid; + if (srcHash != meta->GetHashCode()) { + break; + } + } + // find left + while (--mid >= 0) { + T1 *meta = metas + mid; + if (srcHash == meta->GetHashCode()) { + if (meta->Cmp(name, signature)) { + return meta; + } + } else { + break; + } + } + return nullptr; +} + +template +ALWAYS_INLINE inline T1 *MClass::GetDeclareMethodFieldUtil(T1 *metas, uint32_t num, T2 name, + T3 signature, bool isMethod) const { + uint16_t srcHash = GetMethodFieldHash(name, signature, isMethod); + int32_t low = 0; + int32_t high = static_cast(num) - 1; + int32_t mid = 0; + while (low <= high) { + mid = static_cast(static_cast(low + high) >> 1); + T1 *meta = metas + mid; + uint16_t dstHash = meta->GetHashCode(); + if (dstHash == srcHash) { + if (meta->Cmp(name, signature) && !isMethod) { + return meta; + } + T1 *result = CmpHashEqual(mid, num, metas, name, signature, srcHash); + if (result != nullptr) { + return result; + } + break; + } + if (dstHash < srcHash) { + low = mid + 1; + } else { + high = mid - 1; + } + } + + // the following code just for compatibility, this may impact performance when field not found + // now, kMajorMplVersion is same in maple 2.1 && 2.2. 
we will optimise these code when version changed + for (int i = static_cast(num) - 1; i >= 0 ; --i) { + T1 *meta = metas + i; + if (modifier::kHashConflict == meta->GetHashCode()) { + if (meta->Cmp(name, signature)) { + return meta; + } + } else { + break; + } + } + return nullptr; +} + +template +ALWAYS_INLINE inline MethodMeta *MClass::GetDeclaredMethodUtil(T1 methodName, T2 methodSignature) const { + MethodMeta *methods = GetMethodMetas(); + uint32_t num = GetNumOfMethods(); + return GetDeclareMethodFieldUtil(methods, num, methodName, methodSignature, true); +} + +ALWAYS_INLINE inline MethodMeta *MClass::GetDeclaredMethod(const char *methodName, const char *signatureName) const { + if ((methodName == nullptr) || (signatureName == nullptr)) { + return nullptr; + } + return GetDeclaredMethodUtil(methodName, signatureName); +} + +ALWAYS_INLINE inline MethodMeta *MClass::GetDeclaredMethod(const MString *methodName, + const MArray *signatureClass) const { + if ((methodName == nullptr) || ((signatureClass != nullptr) && signatureClass->HasNullElement())) { + return nullptr; + } + MethodMeta *methodMeta = GetDeclaredMethodUtil(methodName, signatureClass); + return (methodMeta != nullptr) ? (methodMeta->IsConstructor() ? nullptr : methodMeta) : nullptr; +} + +ALWAYS_INLINE inline MethodMeta *MClass::GetDefaultConstructor() const { + MethodMeta *methods = GetMethodMetas(); + uint32_t num = GetNumOfMethods(); + for (uint32_t i = 0; i < num; ++i) { + MethodMeta *methodMeta = &methods[i]; + // defaultConstructor has 1 arg + if (methodMeta->IsConstructor() && methodMeta->GetArgSize() == 1) { + return methodMeta; + } + } + return nullptr; +} + +ALWAYS_INLINE inline MethodMeta *MClass::GetVirtualMethod(const MethodMeta &srcMethod) const { + MClass *declarClass = srcMethod.GetDeclaringClass(); + if ((declarClass == this) || srcMethod.IsDirectMethod()) { + return const_cast(&srcMethod); + } + return GetMethodForVirtual(srcMethod); +} + +template +ALWAYS_INLINE inline FieldMeta *MClass::GetDeclaredFieldUtil(T1 fieldName, T2 fieldType) const { + uint32_t num = GetNumOfFields(); + FieldMeta *fields = GetFieldMetas(); + return GetDeclareMethodFieldUtil(fields, num, fieldName, fieldType, false); +} + +ALWAYS_INLINE inline FieldMeta *MClass::GetDeclaredField(const char *name, const char *type) const { + return GetDeclaredFieldUtil(name, type); +} + +ALWAYS_INLINE inline FieldMeta *MClass::GetDeclaredField(const MString *name) const { + return GetDeclaredFieldUtil(name, nullptr); +} + +template +ALWAYS_INLINE inline FieldMeta *MClass::GetFieldUtil(T1 fieldName, T2 fieldType, bool isPublic) const { + FieldMeta *result = nullptr; + const MClass *superClass = this; + while (superClass != nullptr) { + result = superClass->GetDeclaredFieldUtil(fieldName, fieldType); + if ((result != nullptr) && (!isPublic || result->IsPublic())) { + return result; + } + uint32_t numOfInterface = superClass->GetNumOfInterface(); + MClass *interfaceVector[numOfInterface]; + superClass->GetDirectInterfaces(interfaceVector, numOfInterface); + for (uint32_t i = 0; i < numOfInterface; ++i) { + auto interface = interfaceVector[i]; + result = interface->GetFieldUtil(fieldName, fieldType, isPublic); + if ((result != nullptr) && (!isPublic || result->IsPublic())) { + return result; + } + } + if (superClass->IsInterface()) { + break; + } + superClass = superClass->GetSuperClass(); + } + return nullptr; +} + +ALWAYS_INLINE inline FieldMeta *MClass::GetField(const char *fieldName, const char *fieldType, bool isPublic) const { + return 
GetFieldUtil(fieldName, fieldType, isPublic); +} + +ALWAYS_INLINE inline FieldMeta *MClass::GetField(const MString *fieldName, bool isPublic) const { + return GetFieldUtil(fieldName, nullptr, isPublic); +} + +template +ALWAYS_INLINE inline MethodMeta *MClass::GetDeclaredConstructorUtil(T signature) const { + MethodMeta *methods = GetMethodMetas(); + uint32_t num = GetNumOfMethods(); + for (uint32_t i = 0; i < num; ++i) { + MethodMeta *methodMeta = &methods[i]; + if (methodMeta->IsConstructor() && !methodMeta->IsStatic() && methodMeta->SignatureCmp(signature)) { + return methodMeta; + } + } + return nullptr; +} + +inline MethodMeta *MClass::GetDeclaredConstructor(const char *signature) const { + return GetDeclaredConstructorUtil(signature); +} + +inline MethodMeta *MClass::GetDeclaredConstructor(const MArray *signature) const { + return GetDeclaredConstructorUtil(signature); +} + +inline ClassInitState MClass::GetInitState() const { + ClassMetadata *cls = GetClassMeta(); + uintptr_t val = cls->GetInitStateRawValue(); + uintptr_t state = 0; + // previously ClassInitState::kClassInitStateUninitialized is defined as 0. + if (val == reinterpret_cast(&classInitProtectRegion[kClassUninitialized - 1])) { + return kClassUninitialized; + } else if (kSEGVAddrForClassInitStateMin < val && val < kSEGVAddrFoClassInitStateMax) { + state = val - static_cast(kSEGVAddrForClassInitStateMin); + } else { + return kClassInitialized; + } + + if (kClassInitStateMin < state && state < kClassInitStateMax) { + return static_cast(state); + } else if (kClassInitStateMax <= state) { + return kClassInitialized; + } + BUILTIN_UNREACHABLE(); +} + +inline void MClass::SetInitState(ClassInitState state) { + if (kClassInitStateMin < state && state < kClassInitStateMax) { + uintptr_t stateVal = kSEGVAddrForClassInitStateMin + state; + ClassMetadata *cls = GetClassMeta(); + cls->SetInitStateRawValue(stateVal); + return; + } + BUILTIN_UNREACHABLE(); +} + +inline bool MClass::IsInitialized() const { + return GetInitState() == kClassInitialized; +} + +inline bool MClass::InitClassIfNeeded() const { + return IsInitialized() ? true : MRT_InitClassIfNeeded(*this); +} + +inline uint32_t MClass::GetDimension() const { + return GetDimensionFromDescriptor(GetName()); +} + +inline uint32_t MClass::GetDimensionFromDescriptor(const std::string &descriptor) { + uint32_t dim = 0; + while (descriptor[dim] == '[') { + dim++; + } + return dim; +} + +inline MClass *MClass::GetPrimitiveClass(const char *descriptor) { + DCHECK(descriptor != nullptr); + switch (*descriptor) { + case 'Z': + return WellKnown::GetMClassZ(); + case 'B': + return WellKnown::GetMClassB(); + case 'C': + return WellKnown::GetMClassC(); + case 'S': + return WellKnown::GetMClassS(); + case 'I': + return WellKnown::GetMClassI(); + case 'J': + return WellKnown::GetMClassJ(); + case 'F': + return WellKnown::GetMClassF(); + case 'D': + return WellKnown::GetMClassD(); + case 'V': + return WellKnown::GetMClassV(); + default: + return nullptr; + } +} + +inline MClass *MClass::GetClassFromDescriptor(const MObject *context, const char *descriptor, bool throwException) { + // first check primitive class + MClass *cls = GetPrimitiveClass(descriptor); + return (cls != nullptr) ? 
cls : GetClassFromDescriptorUtil(context, descriptor, throwException); +} + +template +inline MClass *MClass::JniCast(T c) { + static_assert(std::is_same::value || std::is_same::value, "wrong type"); + return reinterpret_cast(c); +} + +template +inline MClass *MClass::JniCastNonNull(T c) { + DCHECK(c != nullptr); + return JniCast(c); +} + +inline jclass MClass::AsJclass() const { + return reinterpret_cast(const_cast(this)); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MCLASS_INLINE_H_ + diff --git a/src/mrt/maplert/include/methodmeta.h b/src/mrt/maplert/include/methodmeta.h new file mode 100644 index 0000000000..0928f346cf --- /dev/null +++ b/src/mrt/maplert/include/methodmeta.h @@ -0,0 +1,272 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_METHODMETA_H_ +#define MRT_MAPLERT_INCLUDE_METHODMETA_H_ +#include "mobject.h" +#include "base/macros.h" +#include "marray.h" +#include "mstring.h" +#include "argvalue.h" + +namespace maplert { +static constexpr uint32_t kMethodHashMask = 0x3FF; +static constexpr uint32_t kMethodFlagBits = 6; +static constexpr uint32_t kMethodFlagVtabIndexBits = 16; + +static constexpr uint32_t kDexMethodTag = 0x1; +static constexpr uint32_t kMethodMetaHashMask = 0x3FF; +static constexpr uint32_t kMethodMetaHashBitIdx = 6; +static constexpr uint32_t kMethodMetaAddrIspAddress = 0x02; + +// MethodMetaBase: It contain compactMetaFlag to indicate +// the struct is compacted or not +// MethodMeta: normal method Meta struct +// MethodMetaCompact: method Meta is compacted by leb128 +class MethodAddress { + public: + uintptr_t GetAddr() const; + void SetAddr(uintptr_t address); + int32_t GetDefTabIndex() const; + + private: + union { + DataRefOffset addr; // func address. + DataRefOffset defTabIndex; // def tab index, before lazy binding resolve for method. + }; +}; + +class MethodSignature { + public: + char *GetSignature() const; + void SetSignature(const char *signature); + MetaRef *GetTypes() const; + + private: + DataRefOffset32 signatureName; + // ponit to a types array, only enable Cache ParametarType(methodMeta flag kMethodParametarType is set). + // otherwise can't use this field. 
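+  // Sketch of the caching contract (inferred from MethodMeta::GetParameterTypes() in
+  // methodmeta_inline.h): when the cache is enabled, GetTypes() returns a MetaRef array
+  // with one slot per parameter; a slot equal to 0 means "not resolved yet", and slots
+  // are filled lazily the first time the parameter types are looked up.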
+ DataRefOffset32 pTypes; +}; + +class MethodMetaBase { + public: + char *GetName() const; + void GetSignature(std::string &signature) const; + bool IsMethodMetaCompact() const; + MClass *GetDeclaringClass() const; + uint16_t GetFlag() const; + uint32_t GetMod() const; + int16_t GetVtabIndex() const; + uintptr_t GetFuncAddress() const; + + private: + struct { + int16_t vtabIndex; + uint16_t compactMetaFlag; + }; +}; + +class MethodMeta { + public: + uint32_t GetMod() const; + char *GetName() const; + char *GetSignature() const; + void GetShortySignature(char shorty[], const uint32_t size) const; + static void GetShortySignature(const char srcSignature[], char shorty[], const uint32_t size); + static void GetShortySignatureUtil(const char srcSignature[], char shorty[], const uint32_t size); + uintptr_t GetFuncAddress() const; + std::string GetAnnotation() const; + char *GetAnnotationRaw() const; + MClass *GetDeclaringClass() const; + MRT_EXPORT void GetJavaMethodFullName(std::string &name) const; + MRT_EXPORT void GetJavaClassName(std::string &name) const; + MRT_EXPORT void GetJavaMethodName(std::string &name) const; + uint16_t GetArgSize() const; + int16_t GetVtabIndex() const; + uint16_t GetHashCode() const; + void SetHashCode(uint16_t hash); + uint16_t GetFlag() const; + bool IsStatic() const; + bool IsAbstract() const; + bool IsPublic() const; + bool IsPrivate() const; + bool IsDirectMethod() const; + bool IsFinalizeMethod() const; + bool IsFinalMethod() const; + bool IsConstructor() const; + bool IsDefault() const; + bool IsProtected() const; + bool IsNative() const; + bool IsSynchronized() const; + bool IsCriticalNative() const; + bool IsMethodAccessible(const MClass &curClass) const; + bool IsOverrideMethod(const MethodMeta &srcMethod) const; + bool IspAddress() const; + bool IsEnableParametarType() const; + int32_t GetDefTabIndex() const; + MethodAddress *GetpMethodAddress() const; + MClass *GetReturnType() const; + char *GetReturnTypeName() const; + char GetReturnPrimitiveType() const; + void GetParameterTypes(std::vector ¶meterTypes) const; + bool GetParameterTypes(MClass *parameterTypes[], uint32_t size) const; + bool GetParameterTypesUtil(MClass *parameterTypes[], uint32_t size) const; + MArray *GetParameterTypes() const; + MethodSignature *GetMethodSignature() const; + void GetParameterTypesDescriptor(std::vector &descriptors) const; + void GetExceptionTypes(std::vector &types) const; + MArray *GetExceptionTypes() const; + void GetPrettyName(bool needSignature, std::string &dstName) const; + uint32_t GetParameterCount() const; + MObject *GetSignatureAnnotation() const; + bool Cmp(const char *mthName, const char *srcSigName) const; + bool Cmp(const MString *mthName, const MArray *srcSigName) const; + bool NameCmp(const MString *mthName) const; + bool NameCmp(const char *name) const; + bool SignatureCmp(const MArray *sigArray) const; + bool SignatureCmp(const char *srcSigName) const; + void FillMethodMeta(bool copySignature, const char *name, const MethodSignature *mthSignature, const char *annotation, + int32_t inVtabIndex, const MClass &methodDclClass, const uintptr_t funcAddr, uint32_t modifier, + uint16_t methodFlag, uint16_t argSize); + template + static inline void BuildArgstoJvalues(char type, BaseArgValue &values, uintptr_t args); + template + inline void BuildJavaMethodArgJvalues(MObject *obj, BaseArgValue &values) const; + + void BuildJValuesArgsFromVaList(jvalue argsJvalue[], va_list valistArgs) const; + void BuildMArrayArgsFromJValues(MArray &targetValue, jvalue 
argsJvalue[]) const; + static void BuildJValuesArgsFromStackMemery(DecodeStackArgs &args, std::string &shorty); + void BuildJValuesArgsFromStackMemeryPrefixSigNature(DecodeStackArgs &args, std::string prefix); + // invoke compiled code interface + template + T InvokeJavaMethod(MObject *obj, const uintptr_t methodAargs, uintptr_t calleeFuncAddr = 0) const; + template T InvokeJavaMethodFast(MObject *obj) const; // method without parameter + + template + RetType Invoke(MObject *obj, T methodAargs = 0, uintptr_t calleeFuncAddr = 0) const; + + bool NeedsInterp() const; + + // set interfaces + void SetMod(const uint32_t mod); + void SetName(const char *name); + void SetSignature(const char *signature); + void SetMethodSignature(const MethodSignature *methodSignature); + void SetAddress(const uintptr_t addr); + void SetAnnotation(const char *annotation); + void SetDeclaringClass(const MClass &declaringClass); + void SetFlag(const uint16_t flag); + void AddFlag(const uint16_t flag); + void SetArgsSize(const uint16_t argSize); + void SetVtabIndex(const int32_t methodVtabIndex); + + MethodSignature *CreateMethodSignatureByName(const char *signature); +#ifndef USE_32BIT_REF + uint32_t GetPadding() { + return padding; + } +#endif + static constexpr size_t GetModOffset() { + return offsetof(MethodMeta, mod); + } + static constexpr size_t GetDeclaringClassOffset() { + return offsetof(MethodMeta, declaringClass); + } + static constexpr size_t GetAddrOffset() { + return offsetof(MethodMeta, addr); + } + static constexpr size_t GetArgSizeOffset() { + return offsetof(MethodMeta, argumentSize); + } + + template + static inline MethodMeta *JniCast(T methodMeta); + template + static inline MethodMeta *JniCastNonNull(T methodMeta); + template + static inline MethodMeta *Cast(T methodMeta); + template + static inline MethodMeta *CastNonNull(T methodMeta); + inline jmethodID AsJmethodID(); + + private: + struct { + int16_t vtabIndex; + uint16_t compactMetaFlag; + }; + DataRefOffset declaringClass; + union { + DataRefOffset pAddr; + DataRefOffset addr; + }; + uint32_t mod; + DataRefOffset32 methodName; + // only method flag kMethodParametarType is set, we will use pMethodSignature, + // and compiler will generate MethodSignature struct with pTypes which save types cache, + // otherwise just use signatureOffset. 
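+  // Illustrative note (see GetMethodSignature() in methodmeta_inline.h): when the
+  // kMethodParametarType flag is set, pMethodSignature points to a separate
+  // MethodSignature with a pTypes cache; otherwise the address of signatureOffset itself
+  // is reinterpreted as an embedded MethodSignature, so only the signature name is valid.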
+ union { + DataRefOffset32 signatureOffset; + DataRefOffset32 pMethodSignature; + }; + DataRefOffset32 annotationValue; + uint16_t flag; + uint16_t argumentSize; +#ifndef USE_32BIT_REF + uint32_t padding; +#endif +}; + +class MethodMetaCompact { + public: + int16_t GetVtabIndex() const; + uint16_t GetFlag() const; + uint32_t GetMod() const; + uintptr_t GetFuncAddress() const; + const uint8_t *GetpCompact() const; + bool IsAbstract() const; + char *GetName() const; + void GetSignature(std::string &signature) const; + MClass *GetDeclaringClass() const; + int32_t GetDefTabIndex() const; + MethodAddress *GetpMethodAddress() const; + void SetFuncAddress(uintptr_t address); + uint8_t *DecodeCompactMethodMeta(const MClass &cls, uintptr_t &funcAddress, uint32_t &modifier, + std::string &methodName, std::string &signatureName, + std::string &annotationValue, int32_t &methodInVtabIndex, + uint16_t &flags, uint16_t &argsSize) const; + static MethodMeta *DecodeCompactMethodMetas(MClass &cls); + static uintptr_t GetCompactFuncAddr(const MClass &cls, uint32_t index); + static MethodMetaCompact *GetMethodMetaCompact(const MClass &cls, uint32_t index); + + private: + static std::mutex resolveMutex; + struct { + int16_t vtabIndex; + uint16_t compactMetaFlag; + }; + uint8_t leb128Start; + // data layout in compact methodmeta + // =========================================== + // | union {int32_t pAddr; int32_t addr;} | + // | int32_t declaringClass | + // | int32_t modifier | + // | int32_t methodname | + // | int32_t arg size | + // | int32_t methodsignature | + // | int32_t annotation | + // =========================================== +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_METHODMETA_H_ diff --git a/src/mrt/maplert/include/methodmeta_inline.h b/src/mrt/maplert/include/methodmeta_inline.h new file mode 100644 index 0000000000..b62e58dcee --- /dev/null +++ b/src/mrt/maplert/include/methodmeta_inline.h @@ -0,0 +1,447 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_MAPLERT_INCLUDE_METHODMETA_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_METHODMETA_INLINE_H_ +#include "methodmeta.h" +#include "marray_inline.h" +#include "mrt_primitive_util.h" +#include "mrt_annotation.h" +#include "mrt_util.h" +#include "interp_support.h" +#include "modifier.h" +#include "mstring_inline.h" +namespace maplert { +inline int32_t MethodAddress::GetDefTabIndex() const { + return static_cast(defTabIndex.GetRawValue()); +} + +inline uintptr_t MethodAddress::GetAddr() const { + return addr.GetDataRef(); +} + +inline void MethodAddress::SetAddr(uintptr_t address) { + addr.SetDataRef(address); +} + +inline char *MethodSignature::GetSignature() const { + char *signature = signatureName.GetDataRef(); + __MRT_Profile_CString(signature); + return signature; +} + +inline void MethodSignature::SetSignature(const char *signature) { + signatureName.SetDataRef(signature); +} + +inline MetaRef *MethodSignature::GetTypes() const { + return pTypes.GetDataRef(); +} + +inline char *MethodMeta::GetName() const { + char *name = methodName.GetDataRef(); + __MRT_Profile_CString(name); + return name; +} + +inline char *MethodMeta::GetSignature() const { + return GetMethodSignature()->GetSignature(); +} + +inline uint32_t MethodMeta::GetMod() const { + return mod; +} + +inline std::string MethodMeta::GetAnnotation() const { + return AnnotationUtil::GetAnnotationUtil(GetAnnotationRaw()); +} + +inline char *MethodMeta::GetAnnotationRaw() const { + char *annotationRaw = annotationValue.GetDataRef(); + __MRT_Profile_CString(annotationRaw); + return annotationRaw; +} + +// This function is inlined in Fterp. If modified, change Fterp accordingly. +inline uintptr_t MethodMeta::GetFuncAddress() const { + MethodAddress *pMethodAddress = GetpMethodAddress(); + return pMethodAddress->GetAddr(); +} + +inline uint16_t MethodMeta::GetFlag() const { + return flag; +} + +inline uint16_t MethodMeta::GetArgSize() const { + return argumentSize; +} + +inline int16_t MethodMeta::GetVtabIndex() const { + return vtabIndex; +} + +inline bool MethodMeta::IsStatic() const { + return modifier::IsStatic(GetMod()); +} + +inline bool MethodMeta::IsAbstract() const { + return modifier::IsAbstract(GetMod()); +} + +inline bool MethodMeta::IsPublic() const { + return modifier::IsPublic(GetMod()); +} + +inline bool MethodMeta::IsPrivate() const { + return modifier::IsPrivate(GetMod()); +} +inline bool MethodMeta::IsDirectMethod() const { + return modifier::IsDirectMethod(GetMod()); +} + +inline bool MethodMeta::IsConstructor() const { + return modifier::IsConstructor(GetMod()); +} + +inline bool MethodMeta::IsDefault() const { + return modifier::IsDefault(GetMod()); +} + +inline bool MethodMeta::IsProtected() const { + return modifier::IsProtected(GetMod()); +} + +inline bool MethodMeta::IsFinalizeMethod() const { + return modifier::IsFinalizeMethod(GetFlag()); +} + +inline bool MethodMeta::IsFinalMethod() const { + return modifier::IsFinal(GetMod()); +} + +inline bool MethodMeta::IsNative() const { + return modifier::IsNative(GetMod()); +} + +inline bool MethodMeta::IsCriticalNative() const { + return false; +} + +inline bool MethodMeta::IspAddress() const { + return static_cast(pAddr.GetRawValue() & kMethodMetaAddrIspAddress); +} + +inline bool MethodMeta::IsEnableParametarType() const { + return (GetFlag() & modifier::kMethodParametarType) == modifier::kMethodParametarType; +} + +inline bool MethodMeta::IsSynchronized() const { + return modifier::IsSynchronized(GetMod()); +} + +inline int32_t 
MethodMeta::GetDefTabIndex() const { + if (!IspAddress()) { + return -1; + } + MethodAddress *pMethodAddress = GetpMethodAddress(); + return pMethodAddress->GetDefTabIndex(); +} + +inline MClass *MethodMeta::GetDeclaringClass() const { + return declaringClass.GetDataRef(); +} + +inline uint32_t MethodMeta::GetParameterCount() const { + uint32_t argSize = GetArgSize(); + return IsStatic() ? argSize : argSize - 1; +} + +inline char *MethodMeta::GetReturnTypeName() const { + char *methodSig = GetSignature(); + char *rtTypeName = methodSig; + while (*(rtTypeName++) != ')') { } + return rtTypeName; +} + +inline char MethodMeta::GetReturnPrimitiveType() const { + char *typeName = GetReturnTypeName(); + DCHECK(typeName != nullptr) << "MethodMeta::GetReturnPrimitiveType: typeName is nullptr!" << maple::endl; + return typeName[0]; +} + +inline void MethodMeta::AddFlag(const uint16_t methodFlag) { + flag |= methodFlag; +} + +// This function is inlined in Fterp. If modified, change Fterp accordingly. +inline MethodAddress *MethodMeta::GetpMethodAddress() const { + if (IspAddress()) { + uintptr_t value = pAddr.GetDataRef(); + // compile will +2 as flag, here need -2 to remove it + return reinterpret_cast(value - 2); + } else { + uintptr_t value = reinterpret_cast(&pAddr); + return reinterpret_cast(value); + } +} + +inline MethodSignature *MethodMeta::GetMethodSignature() const { + if (IsEnableParametarType()) { + return pMethodSignature.GetDataRef(); + } else { + uintptr_t value = reinterpret_cast(&signatureOffset); + return reinterpret_cast(value); + } +} + +inline uint16_t MethodMeta::GetHashCode() const { + return (flag >> kMethodFlagBits) & kMethodHashMask; +} + +inline void MethodMeta::SetHashCode(uint16_t hash) { + flag &= ~(kMethodHashMask << kMethodFlagBits); + flag |= (hash & kMethodHashMask) << kMethodFlagBits; +} + +ALWAYS_INLINE inline bool MethodMeta::NameCmp(const MString *name) const { + if (UNLIKELY(name == nullptr)) { + return false; + } + return name->Cmp(std::string(GetName())); +} + +ALWAYS_INLINE inline bool MethodMeta::NameCmp(const char *name) const { + if (UNLIKELY(name == nullptr)) { + return false; + } + return !strcmp(name, GetName()); +} + +ALWAYS_INLINE inline bool MethodMeta::SignatureCmp(const MArray *signature) const { + // signature always like (Ljava/lang/String;I), we can skip first char '('. + char *desSigName = GetSignature() + 1; + uint32_t arrayLen = signature == nullptr ? 0 : signature->GetLength(); + uint32_t index = 0; + for (uint32_t i = 0; i < arrayLen; ++i) { + MClass *clsObj = static_cast(signature->GetObjectElementNoRc(i)); + if (UNLIKELY(clsObj == nullptr)) { + return false; + } + char *className = clsObj->GetName(); + for (uint32_t clsNameIndex = 0; className[clsNameIndex] != '\0' && desSigName[clsNameIndex] != '\0'; + ++clsNameIndex, ++index) { + if (className[clsNameIndex] != desSigName[index]) { + return false; + } + } + } + return desSigName[index] == ')' ? 
true : false; +} + +ALWAYS_INLINE inline bool MethodMeta::SignatureCmp(const char *signature) const { + if (UNLIKELY(signature == nullptr)) { + return false; + } + return !strcmp(signature, GetSignature()); +} + +ALWAYS_INLINE inline bool MethodMeta::Cmp(const MString *mthName, const MArray *sigArray) const { + return NameCmp(mthName) && SignatureCmp(sigArray); +} + +ALWAYS_INLINE inline bool MethodMeta::Cmp(const char *name, const char *signature) const { + return NameCmp(name) && SignatureCmp(signature); +} + +template +ALWAYS_INLINE inline void MethodMeta::BuildArgstoJvalues(char type, BaseArgValue &values, uintptr_t args) { + bool isVaArg = (argsType == calljavastubconst::kVaArg); + switch (type) { + case 'B': + values.AddInt32(isVaArg ? va_arg(*reinterpret_cast(args), int32_t) + : (*reinterpret_cast(args)).b); + break; + case 'C': + values.AddInt32(isVaArg ? va_arg(*reinterpret_cast(args), int32_t) + : static_cast(static_cast((*reinterpret_cast(args)).c))); + break; + case 'I': + values.AddInt32(isVaArg ? va_arg(*reinterpret_cast(args), int32_t) + : (*reinterpret_cast(args)).i); + break; + case 'J': + values.AddInt64(isVaArg ? va_arg(*reinterpret_cast(args), int64_t) + : (*reinterpret_cast(args)).j); + break; + case 'S': + values.AddInt32(isVaArg ? va_arg(*reinterpret_cast(args), int32_t) + : (*reinterpret_cast(args)).s); + break; + case 'Z': + values.AddInt32(isVaArg ? va_arg(*reinterpret_cast(args), int32_t) + : static_cast(static_cast((*reinterpret_cast(args)).z))); + break; + case 'D': + values.AddDouble(isVaArg ? va_arg(*reinterpret_cast(args), double) + : (*reinterpret_cast(args)).d); + break; + case 'F': + values.AddFloat(isVaArg ? va_arg(*reinterpret_cast(args), double) + : (*reinterpret_cast(args)).f); + break; + case 'L': + case '[': + values.AddReference(reinterpret_cast(isVaArg ? va_arg(*reinterpret_cast(args), jobject) + : (*reinterpret_cast(args)).l)); + break; + default:; + } +} + +template +ALWAYS_INLINE inline void MethodMeta::BuildJavaMethodArgJvalues(MObject *obj, BaseArgValue &values) const { + uint32_t parameterCount = GetParameterCount(); + char retTypeNames[parameterCount]; + GetShortySignature(retTypeNames, parameterCount); + if (!IsStatic()) { + values.AddReference(obj); + } + if (values.GetMethodArgs() == 0) { + return; + } + uintptr_t arg = values.GetMethodArgs(); + for (uint32_t i = 0; i < parameterCount; ++i) { + char c = retTypeNames[i]; + if (argsType == calljavastubconst::kJvalue) { + arg = reinterpret_cast((reinterpret_cast(values.GetMethodArgs())) + i); + } + + BuildArgstoJvalues(c, values, arg); + } +} + +template +ALWAYS_INLINE inline T MethodMeta::InvokeJavaMethod(MObject *obj, const uintptr_t methodAargs, + uintptr_t calleeFuncAddr) const { + ArgValue argValues(methodAargs); + BuildJavaMethodArgJvalues(obj, argValues); + + uintptr_t funcPtr = (calleeFuncAddr == 0) ? 
GetFuncAddress() : calleeFuncAddr; + T result = RuntimeStub::SlowCallCompiledMethod(funcPtr, argValues.GetData(), + argValues.GetStackSize(), argValues.GetFRegSize()); + return result; +} + +template +inline T MethodMeta::InvokeJavaMethodFast(MObject *obj) const { + if (NeedsInterp()) { + return interpreter::InterpJavaMethod(const_cast(this), obj, 0); + } else { + jvalue argJvalues[1]; + argJvalues[0].l = reinterpret_cast(obj); + uintptr_t funcPtr = GetFuncAddress(); + return RuntimeStub::SlowCallCompiledMethod(funcPtr, argJvalues, 0, 0); + } +} + +template +inline RetType MethodMeta::Invoke(MObject *obj, const T methodAargs, uintptr_t calleeFuncAddr) const { + static_assert(std::is_same::value || std::is_same::value || + std::is_same::value, "wrong type"); + RetType result = NeedsInterp() ? interpreter::InterpJavaMethod(const_cast(this), + obj, reinterpret_cast(methodAargs)) : + InvokeJavaMethod(obj, reinterpret_cast(methodAargs), calleeFuncAddr); + return result; +} + +inline void MethodMeta::GetShortySignature(char shorty[], const uint32_t size) const { + GetShortySignature(GetSignature(), shorty, size); +} + +inline void MethodMeta::GetShortySignature(const char srcSignature[], char shorty[], const uint32_t size) { + if (size == 0) { // ()V + } else if (size == 1) { + // if size == 1, just need first signature char. + shorty[0] = srcSignature[size]; // (I)V -> I + } else { + GetShortySignatureUtil(srcSignature, shorty, size); + } +} + +inline bool MethodMeta::GetParameterTypes(MClass *parameterTypes[], uint32_t size) const { + if (size == 0) { + return true; + } else if (size == 1) { + // first check Primitive Class + MClass *c = MClass::GetPrimitiveClass(GetSignature() + 1); // (I)V + if (c != nullptr) { + parameterTypes[0] = c; + return true; + } + } + bool isEnableParametarType = IsEnableParametarType(); + if (isEnableParametarType) { + MetaRef *pTypes = GetMethodSignature()->GetTypes(); + uint32_t index = 0; + for (; index < size; ++index) { + if (pTypes[index] != 0) { + parameterTypes[index] = reinterpret_cast(pTypes[index]); + } else { + break; + } + } + if (index == size) { + return true; + } + } + bool isSuccess = GetParameterTypesUtil(parameterTypes, size); + if (isSuccess && isEnableParametarType) { + MetaRef *pTypes = GetMethodSignature()->GetTypes(); + for (uint32_t i = 0; i < size; ++i) { + pTypes[i] = static_cast(parameterTypes[i]->AsUintptr()); + } + } + return isSuccess; +} + +template +inline MethodMeta *MethodMeta::JniCast(T methodMeta) { + static_assert(std::is_same::value || std::is_same::value, "wrong type"); + return reinterpret_cast(methodMeta); +} + +template +inline MethodMeta *MethodMeta::JniCastNonNull(T methodMeta) { + DCHECK(methodMeta != 0); + return JniCast(methodMeta); +} + +template +inline MethodMeta *MethodMeta::Cast(T methodMeta) { + static_assert(std::is_same::value || std::is_same::value || + std::is_same::value, "wrong type"); + return reinterpret_cast(methodMeta); +} + +template +inline MethodMeta *MethodMeta::CastNonNull(T methodMeta) { + DCHECK(methodMeta != 0); + return Cast(methodMeta); +} + +inline jmethodID MethodMeta::AsJmethodID() { + return reinterpret_cast(this); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_METHODMETA_INLINE_H_ diff --git a/src/mrt/maplert/include/mfield.h b/src/mrt/maplert/include/mfield.h new file mode 100644 index 0000000000..feeab8be7d --- /dev/null +++ b/src/mrt/maplert/include/mfield.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_MFIELD_H_ +#define MRT_MAPLERT_INCLUDE_MFIELD_H_ +#include "mobject.h" +#include "fieldmeta.h" +namespace maplert { +// Field Object Layout: +// TYPE: field offset:USE_32BIT_REF(!USE_32BIT_REF) +// metaref_t shadow + 0(0) +// int monitor + 4(8) +// int8 override + 8(12) +// int accessFlags +12(16) +// metaref_t declaringClass +16(24) +// int dexFieldIndex +20(20) +// int offset +24(32) +// metaref_t type +28(40) +class MField : public MObject { + public: + FieldMeta *GetFieldMeta() const; + MClass *GetDeclaringClass() const; + MClass *GetType() const; + bool IsAccessible() const; + void SetAccessible(uint8_t flag); +#ifndef __OPENJDK__ + int GetOffset() const; +#endif + int GetAccessFlags() const; + int GetFieldMetaIndex() const; + static MField *NewMFieldObject(const FieldMeta &fieldMeta); + + template + static inline MField *JniCast(T f); + template + static inline MField *JniCastNonNull(T f); + + private: +#ifndef __OPENJDK__ + static uint32_t accessFlagsOffset; + static uint32_t declaringClassOffset; + static uint32_t fieldMetaIndexOffset; + static uint32_t offsetOffset; +#else + static uint32_t declaringClassOffset; + static uint32_t accessFlagsOffset; + static uint32_t fieldMetaIndexOffset; + static uint32_t nameOffset; +#endif + static uint32_t typeOffset; + static uint32_t overrideOffset; +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MFIELD_H_ diff --git a/src/mrt/maplert/include/mfield_inline.h b/src/mrt/maplert/include/mfield_inline.h new file mode 100644 index 0000000000..c22f354412 --- /dev/null +++ b/src/mrt/maplert/include/mfield_inline.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_MAPLERT_INCLUDE_MFIELD_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_MFIELD_INLINE_H_ +#include "mfield.h" +#include "mobject_inline.h" +#include "fieldmeta_inline.h" +namespace maplert { +inline FieldMeta *MField::GetFieldMeta() const { + int index = GetFieldMetaIndex(); + MClass *declClass = GetDeclaringClass(); + FieldMeta *fieldMeta = declClass->GetFieldMeta(static_cast(index)); + return fieldMeta; +} + +inline int MField::GetFieldMetaIndex() const { + return Load(fieldMetaIndexOffset, false); +} + +inline MClass *MField::GetDeclaringClass() const { + MetaRef dcl = Load(declaringClassOffset, false); + return MObject::Cast(dcl); +} + +inline MClass *MField::GetType() const { + MetaRef type = Load(typeOffset, false); + return MObject::Cast(type); +} + +inline bool MField::IsAccessible() const { + uint8_t override = Load(overrideOffset, false); + return static_cast(override & 1u); +} + +inline void MField::SetAccessible(uint8_t flag) { + Store(overrideOffset, flag, false); +} + +#ifndef __OPENJDK__ +inline int MField::GetOffset() const { + return Load(offsetOffset, false); +} +#endif + +inline int MField::GetAccessFlags() const { + return Load(accessFlagsOffset, false); +} + +template +inline MField *MField::JniCast(T f) { + static_assert(std::is_same::value, "wrong type"); + return reinterpret_cast(f); +} + +template +inline MField *MField::JniCastNonNull(T f) { + DCHECK(f != nullptr); + return JniCast(f); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MFIELD_INLINE_H_ diff --git a/src/mrt/maplert/include/mmethod.h b/src/mrt/maplert/include/mmethod.h new file mode 100644 index 0000000000..113164cfed --- /dev/null +++ b/src/mrt/maplert/include/mmethod.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_MAPLERT_INCLUDE_MMETHOD_H_ +#define MRT_MAPLERT_INCLUDE_MMETHOD_H_ +#include "mobject.h" +#include "methodmeta.h" + +namespace maplert { +// Field Object Layout: +// TYPE: field offset:USE_32BIT_REF(!USE_32BIT_REF) +// metaref_t shadow + 0(0) +// int monitor + 4(8) +// int8 override + 8(12) +// int accessFlags +12(16) +// long artMethod +(24) +// metaref_t declaringClass +16(32) +// metaref_t declaringClassOfOverriddenMethod +(40) +// int dexMethodIndex +20(20) +// int8 hasRealParameterData +(13) +// int parameters +24(48) +class MMethod : public MObject { + public: + MethodMeta *GetMethodMeta() const; + MClass *GetDeclaringClass() const; + bool IsAccessible() const; + void SetAccessible(bool override); + static MMethod *NewMMethodObject(const MethodMeta &methodMeta); + + template + static inline MMethod *JniCast(T m); + template + static inline MMethod *JniCastNonNull(T m); + + private: +#ifndef __OPENJDK__ + static uint32_t methodMetaOffset; + static uint32_t declaringClassOffset; +#else + static uint32_t methodDeclaringClassOffset; + static uint32_t methodSlotOffset; + static uint32_t methodNameOffset; + static uint32_t methodReturnTypeOffset; + static uint32_t methodParameterTypesOffset; + static uint32_t methodExceptionTypesOffset; + static uint32_t methodModifiersOffset; + + // constructor + static uint32_t constructorDeclaringClassOffset; + static uint32_t constructorSlotOffset; + static uint32_t constructorParameterTypesOffset; + static uint32_t constructorExceptionTypesOffset; + static uint32_t constructorModifiersOffset; +#endif + static uint32_t accessFlagsOffset; + static uint32_t overrideOffset; +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MMETHOD_H_ diff --git a/src/mrt/maplert/include/mmethod_inline.h b/src/mrt/maplert/include/mmethod_inline.h new file mode 100644 index 0000000000..70c469a09c --- /dev/null +++ b/src/mrt/maplert/include/mmethod_inline.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_MMETHOD_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_MMETHOD_INLINE_H_ +#include "mmethod.h" +#include "mobject_inline.h" +#include "methodmeta_inline.h" +namespace maplert { +inline MethodMeta *MMethod::GetMethodMeta() const { +#ifndef __OPENJDK__ + return MethodMeta::Cast(Load(methodMetaOffset, false)); +#else + MClass *clazz = GetClass(); + uint32_t slotOffset = (clazz == WellKnown::GetMClassConstructor()) ? constructorSlotOffset : methodSlotOffset; + uint32_t slot = Load(slotOffset, false); + MethodMeta *methodMeta = GetDeclaringClass()->GetMethodMeta(slot); + return methodMeta; +#endif +} + +inline MClass *MMethod::GetDeclaringClass() const { +#ifndef __OPENJDK__ + return MClass::Cast(Load(declaringClassOffset, false)); +#else + MClass *clazz = GetClass(); + uint32_t dclClzzOffset = (clazz == WellKnown::GetMClassConstructor()) ? 
+ constructorDeclaringClassOffset : methodDeclaringClassOffset; + return MClass::Cast(Load(dclClzzOffset, false)); +#endif +} + +inline bool MMethod::IsAccessible() const { + uint8_t override = Load(overrideOffset, false); + return (override & 0x01u) == 0x01u; +} + +inline void MMethod::SetAccessible(bool override) { + Store(overrideOffset, static_cast(override), false); +} + +template +inline MMethod *MMethod::JniCast(T m) { + static_assert(std::is_same::value, "wrong type"); + return reinterpret_cast(m); +} + +template +inline MMethod *MMethod::JniCastNonNull(T m) { + DCHECK(m != nullptr); + return JniCast(m); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MMETHOD_INLINE_H_ diff --git a/src/mrt/maplert/include/mobject.h b/src/mrt/maplert/include/mobject.h new file mode 100644 index 0000000000..1e980a6cd8 --- /dev/null +++ b/src/mrt/maplert/include/mobject.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_MOBJECT_H_ +#define MRT_MAPLERT_INCLUDE_MOBJECT_H_ +#include +#include +#include +#include + +#include "jni.h" +#include "metadata_layout.h" +#include "base/logging.h" +namespace maplert { +class MClass; +class MArray; +class MString; +class MMethod; +class MField; +class MethodMeta; +#define PACKED(n) __attribute__ ((__aligned__(n), __packed__)) + +class PACKED(4) MObject { + public: + inline MClass *GetClass() const; + inline uint32_t GetSize() const; + inline bool IsInstanceOf(const MClass &mClass) const; + inline bool IsArray() const; + inline bool IsString() const; + inline bool IsClass() const; + inline bool IsObjectArray() const; + inline bool IsPrimitiveArray() const; + inline bool IsOffHeap() const; + inline void ResetMonitor(); + template + inline T Load(size_t offset, bool isVolatile = false) const; + template + inline void Store(size_t offset, T value, bool isVolatile = false); + inline MObject *LoadObject(size_t offset, bool isVolatile = false) const; + inline void StoreObject(size_t offset, const MObject *value, bool isVolatile = false) const; + inline MObject *LoadObjectNoRc(size_t offset) const; + inline void StoreObjectNoRc(size_t offset, const MObject *value) const; + inline MObject *LoadObjectOffHeap(size_t offset) const; + inline void StoreObjectOffHeap(size_t offset, const MObject *value) const; + static MObject *NewObject(const MClass &klass, size_t objectSize, bool isJNI = false); + static MObject *NewObject(const MClass &klass, bool isJNI = false); + static MObject *NewObject(const MClass &klass, const MethodMeta *constructor, ...); + static MObject *NewObject(const MClass &klass, const MethodMeta &constructor, const jvalue &args, bool isJNI = false); + static uint32_t GetReffieldSize(); + operator jobject () { + return reinterpret_cast(this); + } + + operator jobject () const { + return reinterpret_cast(const_cast(this)); + } + + inline uintptr_t AsUintptr() const; + static inline MObject *JniCast(jobject o); + static inline MObject *JniCastNonNull(jobject o); + template + static 
inline T0 *Cast(T1 o); + template + static inline T0 *CastNonNull(T1 o); + inline MClass *AsMClass() const; + inline MArray *AsMArray() const; + inline MString *AsMString() const; + inline MMethod *AsMMethod() const; + inline MField *AsMField() const; + inline jobject AsJobject() const; + + protected: + MetaRef shadow; + int32_t monitor; + MObject() = delete; + ~MObject() = delete; + MObject(MObject&&) = delete; + MObject &operator=(MObject&&) = delete; + MObject *SetClass(const MClass &mClass); + + static inline MObject *NewObjectInternal(const MClass &klass, size_t objectSize, bool isJNI); +}; +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MOBJECT_H_ diff --git a/src/mrt/maplert/include/mobject_inline.h b/src/mrt/maplert/include/mobject_inline.h new file mode 100644 index 0000000000..752b077e40 --- /dev/null +++ b/src/mrt/maplert/include/mobject_inline.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_MAPLERT_INCLUDE_MOBJECT_INLINE_H_ +#define MRT_MAPLERT_INCLUDE_MOBJECT_INLINE_H_ +#include "mobject.h" +#include "cinterface.h" +#include "mrt_fields_api.h" +#include "mrt_well_known.h" +#include "mfield.h" +#include "mmethod.h" +namespace maplert { +inline MClass *MObject::GetClass() const{ + return MObject::Cast(shadow); +} + +inline bool MObject::IsArray() const { + return GetClass()->IsArrayClass(); +} + +inline bool MObject::IsString() const { + return GetClass() == WellKnown::GetMClassString(); +} + +inline bool MObject::IsClass() const { + return GetClass() == WellKnown::GetMClassClass(); +} + +inline bool MObject::IsObjectArray() const { + MClass *componentClass = GetClass()->GetComponentClass(); + return componentClass == nullptr ? false : !componentClass->IsPrimitiveClass(); +} + +inline bool MObject::IsPrimitiveArray() const { + MClass *componentClass = GetClass()->GetComponentClass(); + return componentClass == nullptr ? false : componentClass->IsPrimitiveClass(); +} + +inline uint32_t MObject::GetSize() const { + if (IsArray()) { + return AsMArray()->GetArraySize(); + } else if (IsString()) { + return AsMString()->GetStringSize(); + } else { + return GetClass()->GetObjectSize(); + } +} + +inline bool MObject::IsOffHeap() const { + return !IS_HEAP_ADDR(AsUintptr()); +} + +template +inline T MObject::Load(size_t offset, bool isVolatile) const { + return isVolatile ? reinterpret_cast*>(AsUintptr() + offset)->load(std::memory_order_seq_cst) + : *(reinterpret_cast(AsUintptr() + offset)); +} + +template +inline void MObject::Store(size_t offset, T value, bool isVolatile) { + if (isVolatile) { + reinterpret_cast*>(AsUintptr() + offset)->store(value, std::memory_order_seq_cst); + } else { + *(reinterpret_cast(AsUintptr() + offset)) = value; + } +} + +inline MObject *MObject::LoadObject(size_t offset, bool isVolatile) const { + return isVolatile ? 
Cast(MRT_LOAD_JOBJECT_INC_VOLATILE(this, offset)) : + Cast(MRT_LOAD_JOBJECT_INC(this, offset)); +} + +inline void MObject::StoreObject(size_t offset, const MObject *value, bool isVolatile) const { + if (isVolatile) { + MRT_STORE_JOBJECT_VOLATILE(this, offset, value); + } else { + MRT_STORE_JOBJECT(this, offset, value); + } +} + +inline MObject *MObject::LoadObjectNoRc(size_t offset) const { + return Cast(__UNSAFE_MRT_LOAD_JOBJECT_NOINC(this, offset)); +} + +inline void MObject::StoreObjectNoRc(size_t offset, const MObject *value) const { + __UNSAFE_MRT_STORE_JOBJECT_NORC(this, offset, value); +} + +inline MObject *MObject::LoadObjectOffHeap(size_t offset) const { + MObject *obj = LoadObjectNoRc(offset); + DCHECK(obj->IsOffHeap()) << "obj is in heap, but use LoadObjectOffHeap." << maple::endl; + return obj; +} + +inline void MObject::StoreObjectOffHeap(size_t offset, const MObject *value) const { + DCHECK(LoadObjectNoRc(offset)->IsOffHeap()) << "org obj is in heap, but use StoreObjectOffHeap." << maple::endl; + DCHECK(value != nullptr && value->IsOffHeap()) << "obj is in heap, but use StoreObjectOffHeap." << maple::endl; + StoreObjectNoRc(offset, value); +} + +inline bool MObject::IsInstanceOf(const MClass &mClass) const { + return mClass.IsAssignableFrom(*GetClass()); +} + +inline uint32_t MObject::GetReffieldSize() { + return sizeof(reffield_t); +} + +inline void MObject::ResetMonitor() { + monitor = 0; +} + +inline uintptr_t MObject::AsUintptr() const { + return reinterpret_cast(const_cast(this)); +} + +inline MObject *MObject::JniCast(const jobject o) { + return reinterpret_cast(const_cast(o)); +} + +inline MObject *MObject::JniCastNonNull(const jobject o) { + DCHECK(o != nullptr); + return JniCast(o); +} + +template +inline T0 *MObject::Cast(T1 o) { + static_assert(std::is_same::value || std::is_same::value || + (std::is_same::value && std::is_same::value), "wrong type"); + return reinterpret_cast(o); +} + +template +inline T0 *MObject::CastNonNull(T1 o) { + DCHECK(o != 0); + return Cast(o); +} + +inline MClass *MObject::AsMClass() const { + return static_cast(const_cast(this)); +} + +inline MArray *MObject::AsMArray() const { + return static_cast(const_cast(this)); +} + +inline MString *MObject::AsMString() const { + return static_cast(const_cast(this)); +} + +inline MMethod *MObject::AsMMethod() const { + return static_cast(const_cast(this)); +} + +inline MField *MObject::AsMField() const { + return static_cast(const_cast(this)); +} + +inline jobject MObject::AsJobject() const { + return reinterpret_cast(const_cast(this)); +} +} // namespace maplert +#endif // MRT_MAPLERT_INCLUDE_MOBJECT_INLINE_H_ diff --git a/src/mrt/maplert/include/modifier.h b/src/mrt/maplert/include/modifier.h new file mode 100644 index 0000000000..4de8f5a9a3 --- /dev/null +++ b/src/mrt/maplert/include/modifier.h @@ -0,0 +1,287 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_MAPLERT_MODIFIER_H_ +#define MRT_MAPLERT_MODIFIER_H_ +#include +#include + +namespace maplert { +namespace modifier { +static constexpr uint32_t kModifierPublic = 0x00000001; +static constexpr uint32_t kModifierPrivate = 0x00000002; +static constexpr uint32_t kModifierProtected = 0x00000004; +static constexpr uint32_t kModifierStatic = 0x00000008; +static constexpr uint32_t kModifierFinal = 0x00000010; +static constexpr uint32_t kModifierSynchronized = 0x00000020; +static constexpr uint32_t kModifierVolatile = 0x00000040; +static constexpr uint32_t kModifierTransient = 0x00000080; +static constexpr uint32_t kModifierNative = 0x00000100; +static constexpr uint32_t kModifierInterface = 0x00000200; +static constexpr uint32_t kModifierAbstract = 0x00000400; +static constexpr uint32_t kModifierStrict = 0x00000800; +static constexpr uint32_t kModifierSynthetic = 0x00001000; +static constexpr uint32_t kModifierAnnotation = 0x00002000; +static constexpr uint32_t kModifierEnum = 0x00004000; +static constexpr uint32_t kModifierMandated = 0x00008000; +static constexpr uint32_t kModifierConstructor = 0x00010000; +static constexpr uint32_t kModifierCriticalNative = 0x00200000; +static constexpr uint32_t kModifierDefault = 0x00400000; +static constexpr uint32_t kAFOriginPublic = 0x08000000; +static constexpr uint32_t kLocalClass = 0x10000000; +static constexpr uint32_t kLocalClassVaild = 0x20000000; + +// use for annotation +static constexpr size_t kModifierRCUnowned = 0x00800000; +static constexpr size_t kModifierRCWeak = 0x01000000; + +static constexpr uint32_t kModifierBridge = 0x00000040; +static constexpr uint32_t kModifierVarargs = 0x00000080; +static constexpr uint32_t kModifierFinalizable = 0x80000000; +static constexpr uint32_t kModifierProxy = 0x00040000; + +// class flag +static constexpr uint16_t kClassPrim = 0x0001; +static constexpr uint16_t kClassArray = 0x0002; +static constexpr uint16_t kClassHasFinalizer = 0x0004; +static constexpr uint16_t kClassSoftReference = 0x0008; +static constexpr uint16_t kClassWeakReference = 0x0010; +static constexpr uint16_t kClassPhantomReference = 0x0020; +static constexpr uint16_t kClassFinalizerReference = 0x0040; +static constexpr uint16_t kClassCleaner = 0x0080; +static constexpr uint16_t kClassFinalizerReferenceSentinel = 0x0100; +static constexpr uint32_t kClassFastAlloc = 0x0200; +static constexpr uint16_t kClassIsAnonymousClass = 0x0400; +static constexpr uint16_t kClassIsColdClass = 0x0800; +static constexpr uint16_t kClassIsDecoupleClass = 0x1000; +static constexpr uint16_t kClassLazyBindingClass = 0x2000; +static constexpr uint16_t kClassLazyBoundClass = 0x4000; +static constexpr uint16_t kClassRuntimeVerify = 0x8000; // True if need verifier in runtime (error or deferred check). 
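+// Usage sketch (based on mclass_inline.h earlier in this patch): these class-flag bits are
+// set and cleared through MClass::SetFlag()/ReSetFlag(), which OR in or AND out the mask
+// under acquire/release atomics. For example, SetVerified() clears the verify bit via
+//   ReSetFlag(static_cast<uint16_t>(~kClassRuntimeVerify));
+// and SetHotClass() clears kClassIsColdClass the same way.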
+ +static constexpr uint16_t kClassReference = (kClassSoftReference | kClassWeakReference | + kClassCleaner | kClassFinalizerReference | kClassPhantomReference); + +// method & field Hash Size & Conflict +static constexpr uint32_t kHashConflict = 1023; +static constexpr uint32_t kMethodFieldHashSize = 1022; + +// field flag, type: uint16_t +// reserved high 10bits for field's hashcode: 0xFFC0 +static constexpr uint32_t kFieldOffsetIspOffset = 0x0001; + +// method flag, type: uint16_t +// reserved high 10bits for method's hashcode: 0xFFC0 +static constexpr uint32_t kMethodNotVirtual = 0x00000001; +static constexpr uint32_t kMethodFinalize = 0x00000002; +static constexpr uint32_t kMethodMetaCompact = 0x00000004; +static constexpr uint32_t kMethodParametarType = 0x00000008; +static constexpr uint32_t kMethodAbstract = 0x00000010; +static constexpr uint32_t kMethodNativeSignature = 0x00000020; + +static inline bool IsAFOriginPublic(uint32_t modifier) { + return (modifier & kAFOriginPublic) != 0; +} + +static inline bool IsLocalClass(uint32_t modifier) { + return (modifier & kLocalClass) != 0; +} + +static inline bool IsLocalClassVaild(uint32_t modifier) { + return (modifier & kLocalClassVaild) != 0; +} + +static inline bool IsPublic(uint32_t modifier) { + return (modifier & kModifierPublic) != 0; +} + +static inline bool IsPrivate(uint32_t modifier) { + return (modifier & kModifierPrivate) != 0; +} + +static inline bool IsProtected(uint32_t modifier) { + return (modifier & kModifierProtected) != 0; +} + +static inline bool IsStatic(uint32_t modifier) { + return (modifier & kModifierStatic) != 0; +} + +static inline bool IsFinal(uint32_t modifier) { + return (modifier & kModifierFinal) != 0; +} + +static inline bool IsSynchronized(uint32_t modifier) { + return (modifier & kModifierSynchronized) != 0; +} + +static inline bool IsVolatile(uint32_t modifier) { + return (modifier & kModifierVolatile) != 0; +} + +static inline bool IsTransient(uint32_t modifier) { + return (modifier & kModifierTransient) != 0; +} + +static inline bool IsNative(uint32_t modifier) { + return (modifier & kModifierNative) != 0; +} + +static inline bool IsCriticalNative(uint32_t modifier) { + return (modifier & kModifierCriticalNative) != 0; +} + +static inline bool IsInterface(uint32_t modifier) { + return (modifier & kModifierInterface) != 0; +} + +static inline bool IsAbstract(uint32_t modifier) { + return (modifier & kModifierAbstract) != 0; +} + +static inline bool IsStrict(uint32_t modifier) { + return (modifier & kModifierStrict) != 0; +} + +static inline bool IsSynthetic(uint32_t modifier) { + return (modifier & kModifierSynthetic) != 0; +} + +static inline bool IsConstructor(uint32_t modifier) { + return (modifier & kModifierConstructor) != 0; +} + +static inline bool IsDefault(uint32_t modifier) { + return (modifier & kModifierDefault) != 0; +} + +static inline bool IsAnnotation(uint32_t modifier) { + return (modifier & kModifierAnnotation) != 0; +} + +static inline bool IsEnum(uint32_t modifier) { + return (modifier & kModifierEnum) != 0; +} + +static inline bool IsMandated(uint32_t modifier) { + return (modifier & kModifierMandated) != 0; +} + +static inline bool IsBridge(uint32_t modifier) { + return (modifier & kModifierBridge) != 0; +} + +static inline bool IsVarargs(uint32_t modifier) { + return (modifier & kModifierVarargs) != 0; +} + +static inline bool IsFinalizable(uint32_t modifier) { + return (modifier & kModifierFinalizable) != 0; +} + +static inline bool IsProxy(uint32_t modifier) { + return 
(modifier & kModifierProxy) != 0; +} + +static inline bool IsUnowned(size_t modifier) { + return (modifier & kModifierRCUnowned) != 0; +} + +static inline bool IsWeakRef(size_t modifier) { + return (modifier & kModifierRCWeak) != 0; +} + +static inline bool IsDirectMethod(uint32_t modifier) { + constexpr uint32_t direct = kModifierStatic | kModifierPrivate | kModifierConstructor; + return (modifier & direct) != 0; +} + +static inline uint32_t GetInterfaceModifier() { + return kModifierInterface; +} + +static inline uint32_t GetStaticModifier() { + return kModifierStatic; +} + +static inline uint32_t GetAbstractModifier() { + return kModifierAbstract; +} + +static inline uint32_t GetFinalModifier() { + return kModifierFinal; +} + +static inline uint32_t GetNativeModifier() { + return kModifierNative; +} + +static inline uint32_t GetSynchronizedModifier() { + return kModifierSynchronized; +} + +static inline bool IsPrimitiveClass(uint32_t flag) { + return (flag & kClassPrim) != 0; +} + +static inline bool IsArrayClass(uint32_t flag) { + return (flag & kClassArray) != 0; +} + +static inline bool hasFinalizer(uint32_t flag) { + return (flag & kClassHasFinalizer) != 0; +} + +static inline bool IsReferenceClass(uint32_t flag) { + return (flag & kClassReference) != 0; +} + +static inline bool IsFastAllocClass(uint32_t flag) { + return (flag & kClassFastAlloc) != 0; +} + +static inline bool IsAnonymousClass(uint32_t flag) { + return (flag & kClassIsAnonymousClass) != 0; +} + +static inline bool IsColdClass(uint32_t flag) { + return (flag & kClassIsColdClass) != 0; +} + +static inline bool IsDecoupleClass(uint32_t flag) { + return (flag & kClassIsDecoupleClass) != 0; +} + +static inline bool IsLazyBindingClass(uint32_t flag) { + return (flag & kClassLazyBindingClass) != 0; +} + +static inline bool IsLazyBoundClass(uint32_t flag) { + return (flag & kClassLazyBoundClass) != 0; +} + +static inline bool IsNotVerifiedClass(uint32_t flag) { + return (flag & kClassRuntimeVerify) != 0; +} + +static inline bool IsNotvirtualMethod(uint32_t flag) { + return (flag & kMethodNotVirtual) != 0; +} + +static inline bool IsFinalizeMethod(uint32_t flag) { + return (flag & kMethodFinalize) != 0; +} +void JavaAccessFlagsToString(uint32_t accessFlags, std::string &result); +} // namespace modifier +} // namespace maplert +#endif diff --git a/src/mrt/maplert/include/mrt_annotation.h b/src/mrt/maplert/include/mrt_annotation.h new file mode 100644 index 0000000000..0abf4dd7c4 --- /dev/null +++ b/src/mrt/maplert/include/mrt_annotation.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_INCLUDE_MRT_REFLECTION_ANNOTATION_H_ +#define MRT_INCLUDE_MRT_REFLECTION_ANNOTATION_H_ + +#include "spinlock.h" +#include "mrt_annotation_parser_inline.h" +namespace maplert { +using CacheValueType = void*; + +class DcAnnoPresentKeyType { + public: + DcAnnoPresentKeyType(CacheValueType mt, CacheValueType anno) : meta(mt), annoObj(anno) {} + DcAnnoPresentKeyType(const DcAnnoPresentKeyType&) = default; + DcAnnoPresentKeyType &operator=(const DcAnnoPresentKeyType&) = default; + ~DcAnnoPresentKeyType() = default; + CacheValueType meta; + CacheValueType annoObj; + bool operator<(const DcAnnoPresentKeyType &lhs) const { + return lhs.meta < meta || (lhs.meta == meta && lhs.annoObj < annoObj); + } +}; + +class CacheItem { + public: + CacheItem() : key(nullptr), value(nullptr) {} + ~CacheItem() = default; + CacheValueType key; + CacheValueType value; + maple::SpinLock lock; +}; + +class AnnotationUtil { + public: + static CacheValueType Get(annoconstant::CacheLabel label, MClass *classObj) noexcept; + static bool GetCache(annoconstant::CacheLabel label, CacheValueType classObj, CacheValueType &result) noexcept; + static jboolean GetIsAnnoPresent(const std::string&, const std::string&, CacheValueType, + MClass*, annoconstant::CacheLabel) noexcept; + static void GetDeclaredClasses(MClass *classObj, std::set &metalist) noexcept; + static MObject *GetDeclaredAnnotations(const std::string annoStr, MClass *classObj); + static uint32_t GetRealParaCntForConstructor(const MethodMeta &mthd, const char *kMthdAnnoStr); + static bool HasDeclaredAnnotation(MClass *klass, annoconstant::AnnotationClass annoType); + static jboolean IsDeclaredAnnotationPresent(const std::string &annoStr, const std::string&, MClass *currentCls); + static std::string GetAnnotationUtil(const char *annotation); + + private: +#ifndef __OPENJDK__ + static void DeCompressHighFrequencyStr(std::string &str); +#endif + static CacheItem cache[annoconstant::kRTCacheSize]; + static MethodMeta *GetEnclosingMethodValue(MClass *classObj, const std::string &annSet); + static MethodMeta *GetEnclosingMethod(MClass *argObj) noexcept; + static void UpdateCache(annoconstant::CacheLabel label, CacheValueType classObj, CacheValueType result) noexcept; + static MObject *GetEnclosingClass(MClass *classObj) noexcept; + static MObject *GetDeclaringClassFromAnnotation(MClass *classObj) noexcept; +}; + +class MethodDefaultUtil { + public: + MethodDefaultUtil(MethodMeta &mthMeta, MClass *declCls) + : methodMeta(mthMeta), declClass(declCls) {} + static bool HasDefaultValue(const char *methodName, AnnoParser &parser); + ~MethodDefaultUtil() = default; + MObject *GetDefaultValue(const std::unique_ptr &uniqueParser); + private: + MObject *GetDefaultAnnotationValue(const std::unique_ptr &uniqueParser); + MObject *GetDefaultEnumValue(const std::unique_ptr &uniqueParser); + MObject *GetDefaultPrimValue(const std::unique_ptr &uniqueParser, uint32_t type); + + MethodMeta &methodMeta; + MClass *declClass; +}; +} // namespace maplert +#endif diff --git a/src/mrt/maplert/include/mrt_annotation_parser.h b/src/mrt/maplert/include/mrt_annotation_parser.h new file mode 100644 index 0000000000..308fedf87d --- /dev/null +++ b/src/mrt/maplert/include/mrt_annotation_parser.h @@ -0,0 +1,287 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_INCLUDE_MRT_ANNOTATION_PARSER_H_ +#define MRT_INCLUDE_MRT_ANNOTATION_PARSER_H_ + +#include +#include +#include "jni.h" +#include "fieldmeta.h" +#include "mmethod.h" +#include "linker_api.h" + +namespace maplert{ +namespace annoconstant { +constexpr bool kDefParseStrType = false; +constexpr int kDefSkipItemNum = 1; +constexpr char kOldMetaLabel = '0'; +constexpr char kNewMetaLabel = '1'; +constexpr int8_t kMemberPosValidOffset = 2; +constexpr int8_t kIsMemberClassOffset = 1; +constexpr uint8_t kLabelSize = 2; +constexpr char kAnnoDelimiterPrefix = '`'; +constexpr char kAnnoDelimiter = '!'; +constexpr char kAnnoArrayStartDelimiter = '['; +constexpr char kAnnoArrayEndDelimiter = ']'; +constexpr int8_t kNPos = -1; +constexpr char kInheritClass[] = "Ljava/lang/annotation/Inherited;"; +constexpr char kRepeatableClasss[] = "Ljava/lang/annotation/Repeatable;"; +constexpr char kAnnoAccessFlags[] = "accessFlags"; +constexpr char kThrowVerifyError[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoThrowVerifyError;"; +constexpr char kDeferredOverrideFinalCheck[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredOverrideFinalCheck;"; +constexpr char kDeferredExtendFinalCheck[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredExtendFinalCheck;"; +constexpr char kAssignableChecksContainer[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredAssignableChecks;"; +constexpr char kDeferredAssignableCheck[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredAssignableCheck;"; + + +const std::string kEncosilngClass("Lark/annotation/EnclosingClass;"); +const std::string kInnerClass("Lark/annotation/InnerClass;"); +const std::string kMemberClasses("Lark/annotation/MemberClasses;"); +const std::string kSignatureClass("Lark/annotation/Signature;"); +const std::string kEncsloingMethodClass("Lark/annotation/EnclosingMethod;"); +const std::string kAnnoDefault("Lark/annotation/AnnotationDefault;"); +const std::string kMethodParameters("Lark/annotation/MethodParameters;"); +const std::string kThrowsClass("Lark/annotation/Throws;"); +const std::string kMrtTypeClass("Ljava/lang/reflect/Type;"); + +const std::string kRepeatableClasssIndex("Ljava/lang/annotation/Repeatable;"); +const std::string kEncosilngClassIndex("Lark/annotation/EnclosingClass;"); +const std::string kInnerClassIndex("Lark/annotation/InnerClass;"); +const std::string kMemberClassesIndex("Lark/annotation/MemberClasses;"); +const std::string kSignatureClassIndex("Lark/annotation/Signature;"); +const std::string kEncsloingMethodClassIndex("Lark/annotation/EnclosingMethod;"); +const std::string kThrowsClassIndex("Lark/annotation/Throws;"); +const std::string kAnnoDefaultIndex("Lark/annotation/AnnotationDefault;"); +const std::string kMethodParametersIndex("Lark/annotation/MethodParameters;"); +const std::string kInheritClassIndex("Ljava/lang/annotation/Inherited;"); +constexpr char kAnnoArrayStartDelimiterIndex = '{'; +constexpr char kAnnoArrayEndDelimiterIndex = '}'; +constexpr char kThrowVerifyErrorIndex[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoThrowVerifyError;"; +constexpr char kDeferredOverrideFinalCheckIndex[] = + 
"Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredOverrideFinalCheck;"; +constexpr char kDeferredExtendFinalCheckIndex[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredExtendFinalCheck;"; +constexpr char kAssignableChecksContainerIndex[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredAssignableChecks;"; +constexpr char kDeferredAssignableCheckIndex[] = + "Lcom/huawei/ark/annotation/verify/VerfAnnoDeferredAssignableCheck;"; + +enum ParseTypeValue : uint8_t { + kValueByte = 0x00, + kValueShort = 0x02, + kValueChar, + kValueInt, + kValueLong = 0x06, + kValueFloat = 0x10, + kValueDouble, + kValueMethodType = 0x15, + kValueMethodHandle, + kValueString, + kValueType, + kValueField, + kValueMethod, + kValueEnum, + kValueArray, + kValueAnnotation, + kValueNull, + kValueBoolean +}; +using AnnotationClass = enum { + kAnnotationInherited = 0, + kAnnotationRepeatable = 1 +}; +enum CacheLabel : uint8_t { + kEnclosingClass, + kDeclaredClasses, + kDeclaringClass, + kEnclosingMethod, + kHasNoDeclaredAnno, + kClassAnnoPresent, + kMethodAnnoPresent, + kFieldAnnoPresent, + kRTCacheSize +}; +} // namespace annoconstant + +class AnnoParser { + public: + AnnoParser(const char *str, MClass *dClass = nullptr, size_t idx = 0) + : annoStr(str), annoStrIndex(idx), declaringClass(dClass), annoSize(strlen(str)) { + if (declaringClass != nullptr) { + strTab = new StrTab; + LinkerAPI::Instance().GetStrTab(reinterpret_cast(declaringClass), *strTab); + } + } + AnnoParser(const AnnoParser&) = default; + AnnoParser &operator=(const AnnoParser&) = default; + static inline AnnoParser &ConstructParser(const char *str, MClass *dClass = nullptr, size_t idx = 0); + MObject *AllocAnnoObject(MClass *classObj, MClass *annoClass); + MObject *GetAnnotationNative(int32_t index, const MClass *annoClass); + MObject *GetParameterAnnotationsNative(const MethodMeta *methodMeta); + MObject *GetParameters0(MMethod *method); + static MObject *GetSignatureValue(const std::string &annSet, MClass *cls); + virtual bool ExceptAnnotationJudge(const std::string &annoName) const = 0; + virtual std::string ParseStr(bool isSN) = 0; + virtual std::string ParseStrImpl() = 0; + virtual void NextItem(int iter); + virtual void IncIdx(size_t step) = 0; + virtual int32_t Find(const std::string &target) = 0; + int32_t Find(char target); + static bool IsIndexParser(const char *str); + static bool IsMemberClass(const char *str, bool &isValid); + static std::string GetParameterAnnotationInfo(const std::string &entireStr); + MObject *GenerateAnnotationTypeValue(MClass *classInfo, const MClass*, uint32_t); + MObject *GenerateAnnotationProxyInstance(MClass *classInfo, MClass*, uint32_t memberNum); + static std::string RemoveParameterAnnoInfo(std::string &annoStr); + static bool HasAnnoMember(const std::string &annoStr); + std::string ParseStrNotMove(); + std::string ParseStrForLastStringArray(); + double ParseDoubleNum(int type); + int64_t ParseNum(int type); + void SkipNameAndType(int iter = 1); + void SkipAnnoMember(uint32_t iter); +#ifdef __OPENJDK__ + MObject *GenerateMemberValueHashMap(MClass *classInfo, MClass *annotationInfo, uint32_t memberNum); + MObject *CaseArray(MClass *classInfo, MClass *annotationInfo, MethodMeta &mthdObj); + static MObject *InvokeAnnotationParser(MObject *hashMapInst, MObject *annotationInfo); +#else + MObject *GenerateAnnotationMemberArray(MClass *classInfo, MClass *annotationInfo, uint32_t memberNum); + MObject *CaseArray(MClass *classInfo, MClass *annotationInfo, ArgValue &argArr, MethodMeta &mthdObj); +#endif // 
__OPENJDK__ + void InitAnnoMemberCntArray(uint32_t *annoMemberCntArray, uint32_t annoNum); + bool IsVerificationAnno(const std::string &annotName) const; + void SetIdx(uint32_t idx) noexcept { + annoStrIndex = idx; + } + size_t GetIdx() const noexcept { + return annoStrIndex; + } + const char *GetStr() const noexcept { + return annoStr; + } + char GetCurrentChar() const noexcept { + return annoStr[annoStrIndex]; + } + virtual ~AnnoParser(); + virtual const std::string &GetEnclosingClassStr() const noexcept { + return annoconstant::kEncosilngClass; + } + virtual const std::string &GetInnerClassStr() const noexcept { + return annoconstant::kInnerClass; + } + virtual const std::string &GetMemberClassesStr() const noexcept { + return annoconstant::kMemberClasses; + } + virtual const std::string &GetSignatureClassStr() const noexcept { + return annoconstant::kSignatureClass; + } + virtual const std::string &GetEncsloingMethodClassStr() const noexcept { + return annoconstant::kEncsloingMethodClass; + } + virtual const std::string &GetThrowsClassStr() const noexcept { + return annoconstant::kThrowsClass; + } + virtual const std::string &GetAnnoDefaultStr() const noexcept { + return annoconstant::kAnnoDefault; + } + virtual const std::string &GetMethodParametersStr() const noexcept { + return annoconstant::kMethodParameters; + } + virtual char GetAnnoArrayStartDelimiter() const noexcept { + return annoconstant::kAnnoArrayStartDelimiter; + } + virtual char GetAnnoArrayEndDelimiter() const noexcept { + return annoconstant::kAnnoArrayEndDelimiter; + } + protected: + template + T ParseNumImpl(int type); + char *GetCStringFromStrTab(uint32_t srcIndex) const; + const char *annoStr; + size_t annoStrIndex; + MClass *declaringClass; + size_t annoSize; + StrTab *strTab = nullptr; +}; + +class AnnoIndexParser : public AnnoParser { + public: + AnnoIndexParser(const char *str, MClass *dClass = nullptr, size_t idx = 0) : AnnoParser(str, dClass, idx) {} + std::string ParseStr(bool isSN); + static std::string GetParameterAnnotationInfoIndex(const std::string &entireStr); + bool ExceptAnnotationJudge(const std::string &annoName) const; + std::string ParseStrImpl(); + uint32_t ParseIndex(); + int32_t Find(const std::string &target); + void IncIdx(size_t step __attribute__((unused))) { + return; + } + virtual ~AnnoIndexParser() = default; + virtual void NextItem(int iter); + virtual const std::string &GetEnclosingClassStr() const noexcept { + return annoconstant::kEncosilngClassIndex; + } + virtual const std::string &GetInnerClassStr() const noexcept { + return annoconstant::kInnerClassIndex; + } + virtual const std::string &GetMemberClassesStr() const noexcept { + return annoconstant::kMemberClassesIndex; + } + virtual const std::string &GetSignatureClassStr() const noexcept { + return annoconstant::kSignatureClassIndex; + } + virtual const std::string &GetEncsloingMethodClassStr() const noexcept { + return annoconstant::kEncsloingMethodClassIndex; + } + virtual const std::string &GetThrowsClassStr() const noexcept { + return annoconstant::kThrowsClassIndex; + } + virtual const std::string &GetAnnoDefaultStr() const noexcept { + return annoconstant::kAnnoDefaultIndex; + } + virtual const std::string &GetMethodParametersStr() const noexcept { + return annoconstant::kMethodParametersIndex; + } + virtual char GetAnnoArrayStartDelimiter() const noexcept { + return annoconstant::kAnnoArrayStartDelimiterIndex; + } + virtual char GetAnnoArrayEndDelimiter() const noexcept { + return 
annoconstant::kAnnoArrayEndDelimiterIndex; + } + static std::set exceptIndexSet; +}; + +class AnnoAsciiParser : public AnnoParser { + public: + AnnoAsciiParser(const char *str, MClass *dClass = nullptr, size_t idx = 0) : AnnoParser(str, dClass, idx) {} + std::string ParseStr(bool isSN); + static std::string GetParameterAnnotationInfoAscii(const std::string &entireStr); + bool ExceptAnnotationJudge(const std::string &kAnnoName) const; + std::string ParseStrImpl(); + virtual ~AnnoAsciiParser() = default; + int32_t Find(const std::string &target); + static std::set exceptAsciiSet; + void IncIdx(size_t step) noexcept { + annoStrIndex += step; + } +}; +} // namespace maplert +#endif diff --git a/src/mrt/maplert/include/mrt_annotation_parser_inline.h b/src/mrt/maplert/include/mrt_annotation_parser_inline.h new file mode 100644 index 0000000000..3d7c08d947 --- /dev/null +++ b/src/mrt/maplert/include/mrt_annotation_parser_inline.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_INCLUDE_MRT_REFLECTION_ANNOTATION_INLINE_H_ +#define MRT_INCLUDE_MRT_REFLECTION_ANNOTATION_INLINE_H_ +#include "mrt_annotation_parser.h" +#include "mrt_profile.h" +namespace maplert { +inline AnnoParser &AnnoParser::ConstructParser(const char *str, MClass *dClass, size_t idx) { + AnnoParser *parser = nullptr; + if (IsIndexParser(str)) { + parser = new AnnoIndexParser(str + annoconstant::kLabelSize, dClass, idx); + } else { + parser = new AnnoAsciiParser(str + annoconstant::kLabelSize, dClass, idx); + } + return *parser; +} + +inline bool AnnoParser::IsIndexParser(const char *str) { + int32_t flag = str[0] - annoconstant::kOldMetaLabel; + return static_cast(flag & 1); +} + +inline bool AnnoParser::HasAnnoMember(const std::string &annoStr) { + if (annoStr.size() >= annoconstant::kLabelSize && annoStr[annoconstant::kLabelSize] > '0') { + return true; + } + return false; +} + +inline bool AnnoParser::IsMemberClass(const char *str, bool &isValid) { + int32_t flag = str[0] - annoconstant::kOldMetaLabel; + isValid = static_cast((flag >> annoconstant::kMemberPosValidOffset) & 1); + if (!isValid) { + return false; + } + return static_cast(flag & (1 << annoconstant::kIsMemberClassOffset)); +} + +inline std::string AnnoParser::ParseStrNotMove() { + size_t oldIndex = annoStrIndex; + std::string retArr = ParseStrImpl(); + annoStrIndex = oldIndex; + return retArr; +} + +inline std::string AnnoParser::ParseStrForLastStringArray() { + if (annoStr[annoStrIndex] == annoconstant::kAnnoArrayEndDelimiter) { + ++annoStrIndex; + } + return ParseStrImpl(); +} + +inline double AnnoParser::ParseDoubleNum(int type) { + return ParseNumImpl(type); +} + +inline int64_t AnnoParser::ParseNum(int type) { + if (annoSize == 0) { + return 0; + } + return ParseNumImpl(type); +} +} // namespace maplert +#endif diff --git a/src/mrt/maplert/include/mrt_array.h b/src/mrt/maplert/include/mrt_array.h new file mode 100644 index 0000000000..c323497812 --- /dev/null +++ b/src/mrt/maplert/include/mrt_array.h @@ -0,0 
+1,31 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef JAVA_MRT_ARRAY_H_ +#define JAVA_MRT_ARRAY_H_ + +#include "jni.h" +#include "mrt_array_api.h" +#include "mclass.h" + +namespace maplert { +void *MRT_JavaArrayToCArray(jarray javaArray); +void MRT_ObjectArrayCopy(address_t javaSrc, address_t javaDst, jint srcPos, jint dstPos, jint length, + bool check = false); +void ThrowArrayStoreException(const MObject &srcComponent, int index, const MClass &dstComponentType); +bool AssignableCheckingObjectCopy(const MClass &dstComponentType, MClass *&lastAssignableComponentType, + const MObject *srcComponent); +} // namespace maplert + +#endif // JAVA_MRT_ARRAY_H_ diff --git a/src/mrt/maplert/include/mrt_class_init.h b/src/mrt/maplert/include/mrt_class_init.h new file mode 100644 index 0000000000..56d877c2c1 --- /dev/null +++ b/src/mrt/maplert/include/mrt_class_init.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef JAVA_MRT_CLASS_INIT_H_ +#define JAVA_MRT_CLASS_INIT_H_ + +#include +#include "mrt_class_api.h" // exported-api declaration. +#include "mrt_object.h" +#include "cinterface.h" + +namespace maplert { +#ifdef __cplusplus +extern "C" { +#endif + +ClassInitState MRT_TryInitClass(const MClass &classInfo, bool recursive = true); +ClassInitState MRT_TryInitClassOnDemand(const MClass &classInfo); +bool MRT_InitClassIfNeeded(const MClass &classInfo); +#ifdef __cplusplus +} +#endif +} +#endif // JAVA_MRT_CLASS_INIT_H_ + diff --git a/src/mrt/maplert/include/mrt_classloader.h b/src/mrt/maplert/include/mrt_classloader.h new file mode 100644 index 0000000000..784559b13f --- /dev/null +++ b/src/mrt/maplert/include/mrt_classloader.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MRT_CLASS_CLASSLOADER_IMP_H_
+#define MRT_CLASS_CLASSLOADER_IMP_H_
+
+#include
+
+#include "gc_roots.h"
+#include "object_base.h"
+#include "jni.h"
+#include "mrt_classloader_api.h"
+#include "loader/object_locator.h"
+#include "loader/object_loader.h"
+#include "mrt_api_common.h"
+#include "linker_api.h"
+namespace maplert {
+class CLCache {
+ private:
+#ifdef USE_32BIT_REF
+  static constexpr int ptrSize = 4;
+  using CacheType = uint32_t;
+#else
+  static constexpr int ptrSize = 8;
+  using CacheType = uint64_t;
+#endif
+  static constexpr int kCacheSize = 2 * 1024; // 2k
+  static constexpr int kMutexNum = 64;
+  static constexpr int kPerMutexRange = kCacheSize / kMutexNum;
+  static constexpr int arraySize = kCacheSize * ptrSize / sizeof(CacheType);
+  CacheType classAddrArray[arraySize] = { 0 };
+  CacheType resultArray[arraySize] = { 0 };
+  std::mutex cacheMtx[kMutexNum];
+  std::atomic<bool> valid;
+ public:
+  static CLCache instance;
+  CLCache() {
+    valid = true;
+  }
+  ~CLCache() {}
+
+  jclass GetCache(const jclass contextClass, const std::string &className, uint32_t &index, bool &lockFail) noexcept {
+    size_t clNameSize = className.size();
+    if (valid.load(std::memory_order_acquire)) {
+      constexpr int kSeed = 31;
+      constexpr int kDistance = 2;
+      uint64_t hash = 0;
+      for (size_t i = 0; i < clNameSize; i += kDistance) {
+        hash = kSeed * hash + className[i];
+      }
+      hash += reinterpret_cast<uintptr_t>(contextClass);
+      index = hash % kCacheSize;
+      size_t mutexIdx = index / kPerMutexRange;
+      if (!cacheMtx[mutexIdx].try_lock()) {
+        lockFail = true;
+        return nullptr;
+      }
+      jclass retVal = reinterpret_cast<jclass>(static_cast<uintptr_t>(resultArray[index]));
+      if (classAddrArray[index] == static_cast<CacheType>(reinterpret_cast<uintptr_t>(contextClass)) &&
+          retVal != nullptr && className == (reinterpret_cast<MClass*>(retVal))->GetName()) {
+        cacheMtx[mutexIdx].unlock();
+        return retVal;
+      }
+      cacheMtx[mutexIdx].unlock();
+    }
+    return nullptr;
+  }
+
+  void WriteCache(const jclass klass, const jclass contextClass, uint32_t index) noexcept {
+    if (valid.load(std::memory_order_acquire)) {
+      size_t mutexIdx = index / kPerMutexRange;
+      if (!cacheMtx[mutexIdx].try_lock()) {
+        return;
+      }
+      classAddrArray[index] = static_cast<CacheType>(reinterpret_cast<uintptr_t>(contextClass));
+      resultArray[index] = static_cast<CacheType>(reinterpret_cast<uintptr_t>(klass));
+      cacheMtx[mutexIdx].unlock();
+    }
+  }
+  void ResetCache() {
+    bool expect = true;
+    if (!valid.compare_exchange_strong(expect, false, std::memory_order_acq_rel) && !expect) {
+      return; // another thread is resetting; just return when the CAS fails
+    }
+    if (memset_s(classAddrArray, kCacheSize * ptrSize, 0, kCacheSize * ptrSize) != EOK) {
+      LOG(FATAL) << "memset_s fail."
<< maple::endl; + } + expect = false; + if (!valid.compare_exchange_strong(expect, true, std::memory_order_acq_rel) && expect) { + LOG(FATAL) << "CLCache is working when reseting cache" << maple::endl; + } + } +}; +class ClassLoaderImpl : public ObjectLoader { + public: + ClassLoaderImpl(); + ~ClassLoaderImpl(); + void UnInit() override; + // API Interfaces Begin + bool LoadMplFileInBootClassPath(const std::string &pathString) override; +#ifndef __ANDROID__ + bool LoadMplFileInUserClassPath(const std::string &pathString) override; +#endif + bool LoadMplFileInAppClassPath(jobject classLoader, FileAdapter &adapter) override; + void ResetCLCache() override; + jclass GetCache(const jclass contextClass, const std::string &className, uint32_t &index, bool &lockFail) override; + void WriteCache(const jclass klass, const jclass contextClass, uint32_t index) override; + void RegisterMplFile(const ObjFile &mplFile) override; + bool UnRegisterMplFile(const ObjFile &mplFile) override; + bool RegisterJniClass(IEnv env, jclass javaClass, const std::string &filterName, const std::string &jniClassName, + INativeMethod methods, int32_t methodCount, bool fake) override; + jclass FindClass(const std::string &className, const SearchFilter &filter) override; + // get registered mpl file by exact path-name: the path should be canonicalized + const ObjFile *GetMplFileRegistered(const std::string &name) override; + const ObjFile *GetAppMplFileRegistered(const std::string &package) override; + size_t GetListSize(AdapterFileList type) override; + void DumpUnregisterNativeFunc(std::ostream &os) override; + bool GetMappedClassLoaders( + jobject classLoader, std::vector> &mappedPairs) override; + bool GetMappedClassLoader(const std::string &fileName, jobject classLoader, jobject &realClassLoader) override; + void VisitClasses(maple::rootObjectFunc &func) override; + bool RegisterNativeMethods(ObjFile &objFile, jclass klass, INativeMethod methods, int32_t methodCount) override; + // MRT_EXPORT Split + jclass LocateClass(const std::string &className, const SearchFilter &filter) override; + // API Interfaces End + MClass *GetPrimitiveClass(const std::string &mplClassName) override; + void VisitPrimitiveClass(const maple::rootObjectFunc &func) override; + MClass *CreateArrayClass(const std::string &mplClassName, MClass &componentClass) override; + + protected: + bool SetMappedClassLoader(const std::string &fileName, MObject *classLoader, MObject *realClassLoader); + void RemoveMappedClassLoader(const std::string &fileName); + MClass *FindClassInSingleClassLoader(const std::string &className, SearchFilter &filter); + MClass *LocateInCurrentClassLoader(const std::string &className, SearchFilter &filter); + MClass *LocateInParentClassLoader(const std::string &className, SearchFilter &filter); + private: + MClass *DoCreateArrayClass(MClass &klass, MClass &componentClass, const std::string &name); + void LinkStartUpAndMultiSo(std::vector &mplInfoList, bool hasStartup); + const std::string kAppSoPostfix = "/maple/arm64/mapleclasses.so"; + const std::string kAppPartialSoPostfix = "/maple/arm64/maplepclasses.so"; + const std::string kSystemLibPath = "/system/lib64"; + const std::string kIgnoreJarList = "core-oj.jar:core-libart.jar"; + const static uint16_t kClIndexUnInit = static_cast(-1); + // boot mplfile list + FileAdapter mMplFilesBoot; + // DEX classloader list + FileAdapter mMplFilesOther; + // Mapping classloader for multiple loading .so + std::unordered_multimap> mMappedClassLoader; +}; +} // namespace maplert +#endif // 
MRT_CLASS_LOCATOR_MANAGER_H_
diff --git a/src/mrt/maplert/include/mrt_cyclequeue.h b/src/mrt/maplert/include/mrt_cyclequeue.h
new file mode 100644
index 0000000000..20b8e9f01b
--- /dev/null
+++ b/src/mrt/maplert/include/mrt_cyclequeue.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MRT_CYCLEQUEUE_H_
+#define MRT_CYCLEQUEUE_H_
+#include <queue>
+
+namespace maplert {
+template<typename T>
+class CycleQueue {
+ public:
+  CycleQueue() : frontIndex(0), rearIndex(0) {}
+
+  ~CycleQueue() {
+    if (UNLIKELY(queueBuffer != nullptr)) {
+      delete queueBuffer;
+    }
+  }
+
+  inline bool Empty() const {
+    return (queueBuffer == nullptr) ? EmptyArray() : EmptyQueue();
+  }
+
+  inline void Push(T ele) {
+    if (LIKELY(queueBuffer == nullptr)) {
+      if (PushArray(ele)) {
+        return;
+      } else {
+        // the cyclic array is full, migrate the elements to std::queue
+        CopyElementToQueue();
+      }
+    }
+    PushQueue(ele);
+  }
+
+  inline void Pop() {
+    if (LIKELY(queueBuffer == nullptr)) {
+      PopArray();
+    } else {
+      PopQueue();
+    }
+  }
+
+  inline T Front() {
+    return (queueBuffer == nullptr) ? FrontArray() : FrontQueue();
+  }
+
+  static constexpr uint16_t kMaxSize = 30;
+  uint16_t frontIndex;
+  uint16_t rearIndex;
+  T arrayBuffer[kMaxSize];
+  std::queue<T> *queueBuffer = nullptr;
+
+ private:
+  inline bool EmptyArray() const {
+    return frontIndex == rearIndex;
+  }
+
+  inline bool PushArray(T ele) {
+    uint16_t tmpRear = (rearIndex + 1) % kMaxSize;
+    if (UNLIKELY(frontIndex == tmpRear)) {
+      return false;
+    }
+    arrayBuffer[rearIndex] = ele;
+    rearIndex = tmpRear;
+    return true;
+  }
+
+  // only adjusts the front index; the stored value is not returned
+  inline void PopArray() {
+    frontIndex = (frontIndex + 1) % kMaxSize;
+  }
+
+  inline T FrontArray() {
+    return arrayBuffer[frontIndex];
+  }
+
+  inline bool EmptyQueue() const {
+    return queueBuffer->empty();
+  }
+
+  inline void PushQueue(T ele) {
+    queueBuffer->push(ele);
+  }
+
+  inline void PopQueue() {
+    queueBuffer->pop();
+  }
+
+  inline T FrontQueue() {
+    return queueBuffer->front();
+  }
+
+  inline void CopyElementToQueue() {
+    queueBuffer = new (std::nothrow) std::queue<T>;
+    CHECK(queueBuffer != nullptr) << "queueBuffer is nullptr" << maple::endl;
+    while (!EmptyArray()) {
+      PushQueue(FrontArray());
+      PopArray();
+    }
+  }
+};
+}
+
+#endif // MRT_CYCLEQUEUE_H_
diff --git a/src/mrt/maplert/include/mrt_handlecommon.h b/src/mrt/maplert/include/mrt_handlecommon.h
new file mode 100644
index 0000000000..0846bdafc2
--- /dev/null
+++ b/src/mrt/maplert/include/mrt_handlecommon.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_HANDLECOMMON +#define MAPLE_RUNTIME_HANDLECOMMON + +#include +#include +#include +#include +#include "cpphelper.h" +#include "chelper.h" +#include "mrt_libs_api.h" +#include "exception/mrt_exception.h" +#include "mrt_well_known.h" +#include "mrt_primitive_util.h" +namespace maplert{ +MClass *GetDcClasingFromFrame(); +void ParseSignatrueType(char *descriptor, const char *&methodSig); + +string GeneIllegalArgumentExceptionString(const MClass *from, const MClass *to); + +string GeneClassCastExceptionString(const MClass *from, const MClass *to) ; + +bool GetPrimShortType(const MClass *klass, char &type); + +bool CheckPrimitiveCanBoxed(char shortType); + +bool GetPrimShortTypeAndValue(const MObject *o, char &type, jvalue &value, const MClass *fromType); + +using DoubleLongConvert = union { + jlong j; + jdouble d; +}; +using FloatIntConvert = union { + jint i; + jfloat f; +}; +using FloatDoubleConvert = union { + jdouble d; + jfloat f; +}; + + +class Arg { + public: + virtual jint GetJint() = 0; + virtual jfloat GetJfloat() = 0; + virtual jdouble GetJdouble() = 0; + virtual jlong GetJlong() = 0; + virtual MObject *GetObject() = 0; + virtual ~Arg() = default; +}; + +class VArg : public Arg { + public: + VArg(va_list ag) : args(ag) {} + jint GetJint() noexcept { + return va_arg(args, int); + } + jfloat GetJfloat() noexcept { + FloatDoubleConvert u; + u.d = va_arg(args, double); + return u.f; + } + jdouble GetJdouble() noexcept { + return va_arg(args, double); + } + jlong GetJlong() noexcept { + return va_arg(args, jlong); + } + MObject *GetObject() noexcept { + return reinterpret_cast(va_arg(args, MObject*)); + } + virtual ~VArg() = default; + private: + va_list args; +}; + +class JValueArg : public Arg { + public: + JValueArg(jvalue *ag) : args(ag) {} + jint GetJint() noexcept { + return args[idx++].i; + } + jfloat GetJfloat() noexcept { + return args[idx++].f; + } + jdouble GetJdouble() noexcept { + return args[idx++].d; + } + jlong GetJlong() noexcept { + return args[idx++].j; + } + MObject *GetObject() noexcept { + return reinterpret_cast(args[idx++].l); + } + virtual ~JValueArg() { + args = nullptr; + } + private: + uint32_t idx = 0; + jvalue *args; +}; + +class Arm32Arg : public Arg { + public: + Arm32Arg(int32_t *params) : args(params) {} + jint GetJint() noexcept { + if (gRegIdx <= gRegMaxIdx) { + return *static_cast(args + gRegIdx++); + } + return *static_cast(args + stackIdx++); + } + jfloat GetJfloat() noexcept { + if (dRegIdx <= dRegMaxIdx) { + return *reinterpret_cast(args + dRegIdx++); + } + return *reinterpret_cast(args + stackIdx++); + } + jdouble GetJdouble() noexcept { + if ((dRegIdx <= dRegMaxIdx) && (dRegIdx & 1)) { + ++dRegIdx; + } + if (dRegIdx <= dRegMaxIdx) { + jdouble val = *reinterpret_cast(args + dRegIdx); + dRegIdx = dRegIdx + kEightBytes; + return val; + } + + if (stackIdx & 1) { + ++stackIdx; + } + jdouble val = *reinterpret_cast(args + stackIdx); + stackIdx += kEightBytes; + return val; + } + + jlong GetJlong() noexcept { + if (stackIdx & 1) { + ++stackIdx; + } + jlong val = *reinterpret_cast(args + stackIdx); + stackIdx += kEightBytes; + return val; + } + + MObject *GetObject() noexcept { + return reinterpret_cast(GetJint()); + } + + virtual ~Arm32Arg() { + args = nullptr; + } + + private: + constexpr static uint8_t kEightBytes = 2; + uint16_t gRegIdx = 0; + constexpr static uint8_t gRegMaxIdx = 3; + constexpr static uint8_t dRegIdxStart = 4; + constexpr static uint8_t dRegMaxIdx = 18; + uint16_t 
dRegIdx = dRegIdxStart; + constexpr static uint8_t stackStartIdx = 20; + uint16_t stackIdx = stackStartIdx; + int32_t *args; +}; + +class Arm32SoftFPArg : public Arg { + public: + Arm32SoftFPArg(int32_t *params) : args(params) {} + jint GetJint() noexcept { + if (gRegIdx <= gRegMaxIdx) { + return *static_cast(args + gRegIdx++); + } + return *static_cast(args + stackIdx++); + } + jfloat GetJfloat() noexcept { + if (gRegIdx <= gRegMaxIdx) { + return *reinterpret_cast(args + gRegIdx++); + } + return *reinterpret_cast(args + stackIdx++); + } + jdouble GetJdouble() noexcept { + if ((gRegIdx <= gRegMaxIdx) && (gRegIdx & 1)) { + ++gRegIdx; + } + if (gRegIdx == gRegMaxIdx) { + ++gRegIdx; + } + if (gRegIdx <= gRegMaxIdx) { + jdouble val = *reinterpret_cast(args + gRegIdx); + gRegIdx = gRegIdx + kEightBytes; + return val; + } + + if (stackIdx & 1) { + ++stackIdx; + } + jdouble val = *reinterpret_cast(args + stackIdx); + stackIdx += kEightBytes; + return val; + } + + jlong GetJlong() noexcept { + if (stackIdx & 1) { + ++stackIdx; + } + jlong val = *reinterpret_cast(args + stackIdx); + stackIdx += kEightBytes; + return val; + } + + MObject *GetObject() noexcept { + return reinterpret_cast(GetJint()); + } + + virtual ~Arm32SoftFPArg() { + args = nullptr; + } + + private: + constexpr static uint8_t kEightBytes = 2; + uint16_t gRegIdx = 0; + constexpr static uint8_t gRegMaxIdx = 3; + constexpr static uint8_t stackStartIdx = 4; + uint16_t stackIdx = stackStartIdx; + int32_t *args; +}; + +class ArgsWrapper { + public: + ArgsWrapper(Arg &ag) : arg(ag) {} + ~ArgsWrapper() = default; + jint GetJint() const noexcept { + return arg.GetJint(); + } + jfloat GetJfloat() const noexcept { + return arg.GetJfloat(); + } + jdouble GetJdouble() const noexcept { + return arg.GetJdouble(); + } + jlong GetJlong() const noexcept { + return arg.GetJlong(); + } + MObject *GetObject() const noexcept { + return arg.GetObject(); + } + private: + Arg &arg; +}; + +class MethodType { + public: + MethodType(const MObject *methodTypeObj) { + MArray *ptypesVal = + reinterpret_cast(methodTypeObj->LoadObject(WellKnown::GetMFieldMethodHandlePTypesOffset())); + returnType = + reinterpret_cast(methodTypeObj->LoadObjectNoRc(WellKnown::GetMFieldMethodHandlepRTypeOffset())); + uint32_t num = ptypesVal->GetLength(); + for (uint32_t i = 0; i < num; ++i) { + paramsType.push_back(static_cast(ptypesVal->GetObjectElementNoRc(i))); + } + RC_LOCAL_DEC_REF(ptypesVal); + typesArrSize = static_cast(paramsType.size()); + typesArr = nullptr; + } + MethodType(const MethodType&) = default; + MethodType& operator=(const MethodType&) = default; + std::string ToString() const noexcept; + ALWAYS_INLINE const MClass *GetReTType() const { + return returnType; + } + ALWAYS_INLINE const vector &GetParamsType() const noexcept { + return paramsType; + } + ALWAYS_INLINE uint32_t GetTypesArraySize() const noexcept { + return typesArrSize; + } + ALWAYS_INLINE MClass **GetTypesArray() { + if (typesArr == nullptr) { + typesArr = static_cast(malloc((paramsType.size() + 1) * sizeof(MClass*))); + if (typesArr == nullptr) { + LOG(ERROR) << "GetTypesArray malloc error" << maple::endl; + } + } + int idx = 0; + for (auto it = paramsType.begin(); it != paramsType.end(); ++it) { + typesArr[idx++] = *it; + } + typesArr[idx] = returnType; + return typesArr; + } + ~MethodType() { + if (typesArr != nullptr) { + free(typesArr); + } + returnType = nullptr; + typesArr = nullptr; + } + + private: + MClass *returnType; + vector paramsType; + MClass **typesArr; + uint32_t typesArrSize; +}; 
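+
+// Illustrative usage sketch (not from the original sources): ArgsWrapper is
+// normally driven by a per-parameter type-mark string such as the typesMark
+// consumed by FillArgsInfoNoCheck below; the marks follow the convention used
+// in mrt_primitive_util.h ('Z','B','S','C','I','J','F','D','N'). The helper
+// name ReadAllArgs is hypothetical and shown only to clarify the pattern:
+//
+//   void ReadAllArgs(const ArgsWrapper &args, const char *typesMark, uint32_t n) {
+//     for (uint32_t i = 0; i < n; ++i) {
+//       switch (typesMark[i]) {
+//         case 'J': (void)args.GetJlong(); break;    // 64-bit integral
+//         case 'D': (void)args.GetJdouble(); break;  // 64-bit floating point
+//         case 'F': (void)args.GetJfloat(); break;   // 32-bit floating point
+//         case 'N': (void)args.GetObject(); break;   // reference
+//         default:  (void)args.GetJint(); break;     // Z/B/S/C/I widened to int
+//       }
+//     }
+//   }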
+ +void FillArgsInfoNoCheck(const ArgsWrapper&, const char *typesMark, uint32_t arrSize, + BaseArgValue ¶mArray, uint32_t begin = 0); +void DoInvoke(const MethodMeta &method, jvalue &result, ArgValue ¶mArray); +} +#endif diff --git a/src/mrt/maplert/include/mrt_handleutil.h b/src/mrt/maplert/include/mrt_handleutil.h new file mode 100644 index 0000000000..314afde3d2 --- /dev/null +++ b/src/mrt/maplert/include/mrt_handleutil.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_HANDLEUTIL +#define MAPLE_RUNTIME_HANDLEUTIL + +#include "mrt_methodhandle.h" +#include "mrt_handlecommon.h" +namespace maplert { +struct ConvertJStruct { + MClass **parameterTypes; + uint32_t arraySize; + jvalue *value; +}; +struct TypeInfo { + MClass **types; + char *typesMark; +}; + +bool ConvertJvalue(ConvertJStruct&, const MethodHandle&, const MClass *from, const MClass *to, bool needDec = false); + +bool ConvertParams(CallParam&, const MethodHandle&, const ArgsWrapper&, BaseArgValue&, ScopedHandles&); + +string GeneExceptionString(const MethodHandle &mh, MClass **parameterTypes, uint32_t arraySize); + +bool ConvertReturnValue(const MethodHandle &mh, MClass **paramTypes, uint32_t arraySize, jvalue &value); + +void IsConvertibleOrThrow(uint32_t arraySize, const MethodHandle &mh, MClass **paramTypes, uint32_t paramNum); + +bool ClinitCheck(const MClass *mplFieldOrMethod); + +static inline bool IsConvertible(const MethodHandle &mh, uint32_t arraySize) { + const MethodType *methodTypeMplObj = mh.GetMethodTypeMplObj(); + vector ptypesVal = methodTypeMplObj->GetParamsType(); + if (arraySize - 1 != ptypesVal.size()) { + return false; + } + return true; +} + +void GetParameterAndRtType(MClass **types, char *typesMark, MString *protoStr, SizeInfo &sz, + const MClass *declareClass); +jvalue PolymorphicCallEnter(const MString *calleeStr, const MString*, + uint32_t, MObject *mhObj, Arg&, const MClass *declareClass = nullptr); +MClass *GetContextCls(Arg &arg, MString *&calleeName); + +#if defined(__arm__) +extern "C" int64_t PolymorphicCallEnter32(int32_t *args); +#endif +} +#endif diff --git a/src/mrt/maplert/include/mrt_linker.h b/src/mrt/maplert/include/mrt_linker.h new file mode 100644 index 0000000000..d8f65bf837 --- /dev/null +++ b/src/mrt/maplert/include/mrt_linker.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_CLASS_MPLLINKER_IMPL_H_ +#define MRT_CLASS_MPLLINKER_IMPL_H_ + +#include "jni.h" +#include "linker_api.h" +#include "linker/linker_model.h" +#include "linker/linker.h" +#include "linker/linker_inline.h" +#include "linker/linker_hotfix.h" +#include "linker/linker_debug.h" +#include "linker/linker_cache.h" +#include "linker/linker_method_builder.h" +#ifdef LINKER_DECOUPLE +#include "linker/decouple/linker_decouple.h" +#endif +#include "linker/linker_lazy_binding.h" + +namespace maplert { +class MplLinkerImpl : public LinkerInvoker { + public: + MplLinkerImpl(); + ~MplLinkerImpl() = default; + private: + Linker linkerFeature; + Hotfix hotfixFeature; + MethodBuilder methodBuilderFeature; +#ifdef LINKER_DECOUPLE + Decouple decoupleFeature; +#endif + LazyBinding lazyBindingFeature; +#ifdef LINKER_RT_CACHE + LinkerCache linkerCacheFeature; +#endif // LINKER_RT_CACHE + Debug debugFeature; +}; +} // namespace maplert +#endif // MRT_CLASS_MPLLINKER_IMPL_H_ diff --git a/src/mrt/maplert/include/mrt_methodhandle.h b/src/mrt/maplert/include/mrt_methodhandle.h new file mode 100644 index 0000000000..fe75bdd2b4 --- /dev/null +++ b/src/mrt/maplert/include/mrt_methodhandle.h @@ -0,0 +1,334 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_METHODHANDLE +#define MAPLE_RUNTIME_METHODHANDLE +#include +#include +#include +#include +#include "cpphelper.h" +#include "chelper.h" +#include "mrt_libs_api.h" +#include "exception/mrt_exception.h" +#include "mrt_well_known.h" +#include "mrt_primitive_util.h" +#include "mrt_handlecommon.h" +#include "mstring_inline.h" +namespace maplert { +enum Kind : uint32_t { + kVirtualCall = 0, + kSuperCall, + kDirectCall, + kStaticCall, + kInterfaceCall, + kTransformCall, + kCallSiteTransformCall, + kInstanceGet = 9, + kInstancePut, + kStaticGet, + kStaticPut, +}; +struct SizeInfo { + SizeInfo(uint32_t paramNum) : refNum(0), byteArrSize(0) { + arraySize = paramNum + 1; + } + uint32_t arraySize; + uint32_t refNum; + uint32_t byteArrSize; +}; +struct CallParam { + uint32_t paramNum; + bool isExactMatched; + MClass **paramTypes; + char *typesMark; + bool isStaticMethod; + uint8_t beginIndex; + uint32_t arraySize; + uint32_t dregSize; +}; + +class EmStackFrame { + public: + EmStackFrame(MObject *emStackFrameObj) : emStackFrameJavaObj(emStackFrameObj) {} + ~EmStackFrame() {} + EmStackFrame(const EmStackFrame&) = default; + ALWAYS_INLINE uint32_t GetStackFrameNativeBytesCount() const noexcept { + MArray *stackFrameByteArray = reinterpret_cast(LoadRefField( + reinterpret_cast(emStackFrameJavaObj), WellKnown::GetMFieldEmStackFrameStackFrameOffset())); + return stackFrameByteArray->GetLength(); + } + ALWAYS_INLINE int8_t *GetStackFrameNativeBytes() const noexcept { + MArray *stackFrameByteArray = reinterpret_cast(LoadRefField( + reinterpret_cast(emStackFrameJavaObj), WellKnown::GetMFieldEmStackFrameStackFrameOffset())); + int8_t *cArray = reinterpret_cast(stackFrameByteArray->ConvertToCArray()); + return cArray; + } + ALWAYS_INLINE MObject *GetReferencesReturnObj() const noexcept { + MArray *refArray = reinterpret_cast(LoadRefField( + reinterpret_cast(emStackFrameJavaObj), WellKnown::GetMFieldEmStackFrameReferencesOffset())); + uint32_t idx = refArray->GetLength() - 1; + MObject *elem = refArray->GetObjectElementNoRc(idx); + return elem; + } + ALWAYS_INLINE MObject *GetReferencesObj(uint32_t idx) const noexcept { + MArray *refArray = reinterpret_cast(LoadRefField( + reinterpret_cast(emStackFrameJavaObj), WellKnown::GetMFieldEmStackFrameReferencesOffset())); + if (idx >= refArray->GetLength()) { + return nullptr; + } + MObject *elem = refArray->GetObjectElementNoRc(idx); + return elem; + } + ALWAYS_INLINE void PutReferencesObj(const MObject *obj, uint32_t idx) const noexcept { + MArray *refArray = reinterpret_cast(LoadRefField( + reinterpret_cast(emStackFrameJavaObj), WellKnown::GetMFieldEmStackFrameReferencesOffset())); + refArray->SetObjectElement(idx, obj); + } + + private: + MObject *emStackFrameJavaObj; +}; + +class MethodHandle { + public: + Arg &args; + MethodHandle(MObject *methodHandleObj, Arg &argsP) : args(argsP), methodHandleJavaObj(methodHandleObj) { + validMethodTypeMplObjCache = nullptr; + handleKind = static_cast(MRT_LOAD_JINT(methodHandleObj, WellKnown::GetMFieldMethodHandleHandleKindOffset())); + MObject *methodTypeObj = GetMethodTypeJavaObj(); + mplFieldOrMethod = reinterpret_cast( + MRT_LOAD_JLONG(methodHandleObj, WellKnown::GetMFieldMethodHandleArtFieldOrMethodOffset())); + MObject *nominalType = GetNominalTypeJavaObj(); + if (nominalType != nullptr) { + validMethodTypeMplObjCache = new MethodType(nominalType); + methodTypeMplObjCache = new MethodType(methodTypeObj); + } else { + validMethodTypeMplObjCache = new MethodType(methodTypeObj); + 
methodTypeMplObjCache = validMethodTypeMplObjCache; + } + handleClassType = methodHandleObj->GetClass(); + } + ~MethodHandle() { + if (GetNominalTypeJavaObj() != nullptr) { + delete validMethodTypeMplObjCache; + delete methodTypeMplObjCache; + } else { + delete validMethodTypeMplObjCache; + } + } + MethodHandle(const MethodHandle&) = default; + bool FieldSGet(MClass **parameterTypes, bool doConvert, uint32_t arraySize, jvalue &result); + bool FieldAccess(MClass **parameterTypes, bool doConvert, uint32_t arraySize, jvalue &result); + void DirectCall(CallParam paramStruct, jvalue &result); + bool FillInvokeArgs( + const ArgsWrapper &argsWrapper, CallParam ¶mStruct, BaseArgValue ¶mArray, ScopedHandles&) const; + bool NoParamFastCall(CallParam ¶mStruct, jvalue &result); + bool InvokeWithEmStackFrame(MObject *emStFrameObj, jvalue &result); + jvalue InvokeMethodNoParameter(MObject *obj, MethodMeta &mthd); + ALWAYS_INLINE MObject *GetMethodTypeJavaObj() const noexcept { + return reinterpret_cast( + LoadRefField(reinterpret_cast(methodHandleJavaObj), WellKnown::GetMFieldMethodHandleTypeOffset())); + } + ALWAYS_INLINE const MethodType *GetMethodTypeMplObj() const noexcept { + return methodTypeMplObjCache; + } + ALWAYS_INLINE const MethodType *GetValidMethodTypeMplObj() const noexcept { + return validMethodTypeMplObjCache; + } + ALWAYS_INLINE Kind GetHandleKind() const noexcept { + return handleKind; + } + ALWAYS_INLINE MethodMeta *GetMethodMeta() const noexcept { + return reinterpret_cast(mplFieldOrMethod); + } + ALWAYS_INLINE FieldMeta *GetFieldMeta() const noexcept { + return reinterpret_cast(mplFieldOrMethod); + } + ALWAYS_INLINE MObject *GetNominalTypeJavaObj() const noexcept { + return reinterpret_cast(LoadRefField( + reinterpret_cast(methodHandleJavaObj), WellKnown::GetMFieldMethodHandleNominalTypeOffset())); + } + ALWAYS_INLINE const MClass *GetHandleJClassType() const noexcept { + return handleClassType; + } + ALWAYS_INLINE const MObject *GetHandleJavaObj() const noexcept { + return methodHandleJavaObj; + } + ALWAYS_INLINE MethodMeta *GetRealMethod(MObject *obj) const noexcept; + ALWAYS_INLINE bool ExactInvokeCheck(const MString *calleeName, MClass **parameterTypes, uint32_t) const; + ALWAYS_INLINE bool InvokeTransform(const SizeInfo&, const char *mark, MClass **paramTypes, jvalue &retVal); + + string ValidTypeToString() const noexcept; + static MObject *GetMemberInternal(const MObject *methodHandle); + ALWAYS_INLINE bool ParamsConvert(const MClass *from, const MClass *to, MClass**, + uint32_t arraySize, jvalue &internVal) const; + + private: + bool FieldGet(MClass **parameterTypes, bool doConvert, uint32_t arraySize, jvalue &result); + bool FieldPut(MClass **parameterTypes, bool doConvert, uint32_t arraySize); + bool FieldSPut(MClass **parameterTypes, bool doConvert, uint32_t arraySize); + ALWAYS_INLINE void GetValFromVargs(jvalue &value, char shortFieldTypee); + ALWAYS_INLINE char GetReturnTypeMark(char markInProto) const noexcept; + ALWAYS_INLINE bool IsExactMatch() const noexcept; + ALWAYS_INLINE bool IsExactMatch(MClass **types, uint32_t arraySize, bool isNominal = false) const noexcept; + void InvokeTransformVirtualMethod(MObject *emStFrameObj, jvalue &retVal) const; + MObject *CreateEmStackFrame(const MArray *bArray, const MObject *refArray, MClass **types, uint32_t arraySize) const; + bool FillEmStackFrameArray(const char *mark, MClass **paramTypes, int8_t *cArray, const MArray*); + + static constexpr char kInvokeMtd[] = + 
"Ljava_2Flang_2Finvoke_2FMethodHandle_3B_7Cinvoke_7C_28ALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"; + static constexpr char kInvokeExactMtd[] = + "Ljava_2Flang_2Finvoke_2FMethodHandle_3B_7CinvokeExact_7C_28ALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"; + + MObject *methodHandleJavaObj; + Kind handleKind; + MObject *mplFieldOrMethod; + MethodType *validMethodTypeMplObjCache; // return nominal if nominal exist, or methodType + MethodType *methodTypeMplObjCache; + MClass *handleClassType; + uint32_t emByteIdx = 0; + uint32_t realParamNum = 0; +}; + +class EmStackFrameInvoker { + public: + EmStackFrameInvoker(MObject *frameObj, MethodHandle &handle) : emStFrameObj(frameObj), methodHandleMplObj(handle) { + emStackFrameMplObj = new EmStackFrame(emStFrameObj); + MObject *calleeMethodType = reinterpret_cast( + emStFrameObj->LoadObjectNoRc(WellKnown::GetMFieldEmStackFrameCallsiteOffset())); + calleeMethodTypeMplObj = new MethodType(calleeMethodType); + typesArr = calleeMethodTypeMplObj->GetTypesArray(); + paramLength = calleeMethodTypeMplObj->GetTypesArraySize(); + + mplFieldOrMethodMeta = methodHandleMplObj.GetMethodMeta(); + callerType = methodHandleMplObj.GetMethodTypeJavaObj(); + callerTypeMplObj = new MethodType(callerType); + ptypesOfHandle = &callerTypeMplObj->GetParamsType(); + } + ~EmStackFrameInvoker() { + delete emStackFrameMplObj; + delete calleeMethodTypeMplObj; + delete callerTypeMplObj; + } + bool Invoke(jvalue &result); + + private: + bool InvokeInterpMethod(jvalue &result, MethodMeta &realMethod); + bool InvokeStaticCmpileMethod(jvalue &result, const MethodMeta &realMethod); + ALWAYS_INLINE bool FillParamsForEmStackFrame(BaseArgValue ¶mArray); + + MObject *emStFrameObj; + MethodHandle &methodHandleMplObj; + EmStackFrame *emStackFrameMplObj; + + // typesinfo from callee + MethodType *calleeMethodTypeMplObj; + MClass **typesArr; + uint32_t paramLength; + + // info of mpl field or method + const MethodMeta *mplFieldOrMethodMeta; + + // info of caller + MObject *callerType; + MethodType *callerTypeMplObj; + const vector *ptypesOfHandle; + + uint32_t byteIdx = 0; + uint32_t refIdx = 0; +}; + +static std::unordered_map StringToStringFactoryMap = {}; +static std::set stringInitMethodSet = { "newEmptyString", + "newStringFromBytes", + "newStringFromChars", + "newStringFromString", + "newStringFromStringBuffer", + "newStringFromCodePoints", + "newStringFromStringBuilder" }; +static std::mutex mtx; +MethodMeta *RefineTargetMethod(Kind handleKind, MethodMeta *method, const MClass *referClass); + +class EMSFWriter { + public: + static void WriteByMark(char mark, int8_t *cArray, uint32_t &byteIdx, const jvalue &val, bool isDec = false); + private: + static inline void WriteToEmStackFrame(int8_t *array, uint32_t &idx, int32_t value) { + *reinterpret_cast(array + idx) = value; + idx += sizeof(int); + } + static inline void WriteToEmStackFrame(int8_t *array, uint32_t &idx, int64_t value) { + *reinterpret_cast(array + idx) = value; + idx += sizeof(jlong); + } + static inline void WriteToEmStackFrame(int8_t *array, uint32_t &idx, float value) { + *reinterpret_cast(array + idx) = value; + idx += sizeof(float); + } + static inline void WriteToEmStackFrame(int8_t *array, uint32_t &idx, double value) { + *reinterpret_cast(array + idx) = value; + idx += sizeof(double); + } +}; + +class EMSFReader { + public: + static jvalue GetRetValFromEmStackFrame(const MArray *refArray, uint32_t refNum, uint32_t, const int8_t*, char); + static inline int32_t GetIntFromEmStackFrame(const int8_t 
*array, uint32_t &idx) { + const uint32_t val = *reinterpret_cast(array + idx); + idx += sizeof(int); + return val; + } + static inline int64_t GetLongFromEmStackFrame(const int8_t *array, uint32_t &idx) { + const int64_t val = *reinterpret_cast(array + idx); + idx += sizeof(jlong); + return val; + } +}; + +static inline bool IsCallerTransformerCStr(uint32_t paramNum, const std::string protoString) { + if (paramNum == 1 && strstr(protoString.c_str(), "(Ldalvik/system/EmulatedStackFrame;)") != nullptr) { + return true; + } + return false; +} + +static inline bool IsCallerTransformerJStr(uint32_t paramNum, const MString *protoString) { + std::string protoStr(reinterpret_cast(protoString->GetContentsPtr())); + return IsCallerTransformerCStr(paramNum, protoStr); +} + +inline bool IsFieldAccess(Kind handleKind) { + return (handleKind >= Kind::kInstanceGet && handleKind <= Kind::kStaticPut); +} + +void DoCalculate(char name, uint32_t &byteArrSize, uint32_t &refNum); + +static inline void CalcFrameSize(const MClass *rType, vector &ptypes, uint32_t &byteSz, uint32_t &refNum) { + size_t size = ptypes.size(); + for (size_t i = 0; i < size; ++i) { + MClass *type = ptypes[i]; + const char *name = type->GetName(); + DoCalculate(*name, byteSz, refNum); + } + DoCalculate(*rType->GetName(), byteSz, refNum); +} + +#define STATEMENTCHECK(statement) \ + if (statement) { \ + return false; \ + } +} +#endif diff --git a/src/mrt/maplert/include/mrt_methodhandle_mpl.h b/src/mrt/maplert/include/mrt_methodhandle_mpl.h new file mode 100644 index 0000000000..ad08f18945 --- /dev/null +++ b/src/mrt/maplert/include/mrt_methodhandle_mpl.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_RUNTIME_METHODHANDLEMPL +#define MAPLE_RUNTIME_METHODHANDLEMPL + +#include +#include +#include +#include +#include "cpphelper.h" +#include "chelper.h" +#include "mrt_libs_api.h" +#include "exception/mrt_exception.h" +#include "mrt_well_known.h" +#include "mrt_primitive_util.h" +#include "mrt_handlecommon.h" +namespace maplert { +enum class OptionFlag : int32_t { + kFinal, + kDropArguments, + kFilterReturnValue, + kBindto, + kPermuteArguments +}; + +jvalue MethodHandleCallEnter(MString *calleeName, MString*, uint32_t paramNum, + MObject *methodHandle, VArg &args, const MClass *declareClass); + +class MethodHandleMpl { + public: + MethodHandleMpl(MObject *obj, bool isExact) :methodHandle(obj), isInvokeExact(isExact) { + // maybe need check the array length + dataArray = + reinterpret_cast(methodHandle->LoadObjectNoRc(WellKnown::GetMFieldMethodHandleDataArrayOffset())); + metaArray = + reinterpret_cast(methodHandle->LoadObjectNoRc(WellKnown::GetMFieldMethodHandleMetaArrayOffset())); + opArray = + reinterpret_cast(methodHandle->LoadObjectNoRc(WellKnown::GetMFieldMethodHandleOpArrayOffset())); + typeArray = + reinterpret_cast(methodHandle->LoadObjectNoRc(WellKnown::GetMFieldMethodHandleTypeArrayOffset())); + transformNum = + static_cast(maplert::MRT_LOAD_JINT(methodHandle, WellKnown::GetMFieldMethodHandleIndexOffset())); + } + ~MethodHandleMpl() = default; + static bool IsExactInvoke(MString *calleeName) { + constexpr char kInvokeExact[] = + "Ljava_2Flang_2Finvoke_2FMethodHandle_3B_7CinvokeExact_7C_28ALjava_2Flang_2FObject_3B_29" \ + "Ljava_2Flang_2FObject_3B"; + if (!strcmp(MRT_GetStringContentsPtrRaw(reinterpret_cast(calleeName)), kInvokeExact)) { + return true; + } + return false; + } + + void CheckReturnType(); + string GeneNoSuchMethodExceptionString(const MethodType &methodType, const MethodMeta &method); + jvalue invoke(vector¶mPtr, uint32_t paramNum, vector&typesMarkPtr, vector¶mTypes, + bool convertRetVal = true); + private: + MethodMeta *GetMeta(uint32_t index) const { + MethodMeta *type = reinterpret_cast(metaArray->GetObjectElementNoRc(index)); + return type; + } + bool CheckParamsType(vector &cSTypes, uint32_t csTypesNum, vector&, uint32_t, bool checkRet = true); + void PrepareVarg(vector ¶mPtr, vector &typesMark, vector &cSTypes, uint32_t &csTypesNum); + jvalue FilterReturnValue(vector&, const uint32_t, const MObject*, vector&, vector&); + MObject *methodHandle; + MArray *dataArray; + MArray *metaArray; + MArray *opArray; + MArray *typeArray; + uint32_t transformNum; + bool isInvokeExact; +}; +} +#endif diff --git a/src/mrt/maplert/include/mrt_object.h b/src/mrt/maplert/include/mrt_object.h new file mode 100644 index 0000000000..7705cd4360 --- /dev/null +++ b/src/mrt/maplert/include/mrt_object.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef JAVA_MRT_OBJECT_H_ +#define JAVA_MRT_OBJECT_H_ +#include "mobject.h" +#include "base/logging.h" + +namespace maplert { +uint32_t GetObjectDWordSize(const MObject &obj); + +inline void *MRT_GetAddress32ByAddress(uint64_t *addr) { + DCHECK(addr != nullptr) << "MRT_GetAddress32ByAddress: addr is nullptr!" << maple::endl; + return reinterpret_cast(uint64_t{ *reinterpret_cast(addr) }); +} + +inline void *MRT_GetAddressByAddress(uint64_t *addr) { + return reinterpret_cast(*addr); +} +} // namespace maplert +#endif // JAVA_MRT_OBJECT_H_ diff --git a/src/mrt/maplert/include/mrt_primitive_util.h b/src/mrt/maplert/include/mrt_primitive_util.h new file mode 100644 index 0000000000..5fa329a223 --- /dev/null +++ b/src/mrt/maplert/include/mrt_primitive_util.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_PRIMITIVE_UTIL_H_ +#define MRT_PRIMITIVE_UTIL_H_ +#include "cpphelper.h" +#include "mrt_well_known.h" +#include "mrt_classloader_api.h" +#include "mclass_inline.h" + +// 'V' : void +// 'Z' : jboolean, uint8_t +// 'B' : jbyte, int8_t +// 'S' : jshort, int16_t +// 'C' : jchar, uint16_t +// 'I' : jin, int32_t +// 'J' : jlong, int64_t +// 'F' : jfloat, float +// 'D' : jdouble, double +// 'N' : jobject, MObject*, reflence type + +namespace maplert { +using JValue = union JValueType { + uint8_t z; + int8_t b; + uint16_t c; + int16_t s; + int32_t i; + int64_t j; + float f; + double d; + MObject *l; +}; + +namespace primitiveutil { + char GetPrimitiveTypeFromBoxType(const MClass &type); + bool IsBoxObject(const MObject &o, char srcType); + bool ConvertNarrowToWide(char srcType, char dstType, const jvalue &src, jvalue &dst); + bool CanConvertNarrowToWide(char srcType, char dstType); + bool ConvertToInt(char srcType, const jvalue &src, jvalue &dst); + bool ConvertToLong(char srcType, const jvalue &src, jvalue &dst); + bool ConvertToFloat(char srcType, const jvalue &src, jvalue &dst); + bool ConvertToDouble(char srcType, const jvalue &src, jvalue &dst); + bool UnBoxPrimitive(const MObject &elementObj, jvalue &boxedValue); + MObject *BoxPrimitive(char srcType, const jvalue &value); + MObject *BoxPrimitiveJint(int32_t value); + + static constexpr int16_t kByteCacheOffset = 128; + static constexpr int16_t kShortCacheHigh = 127; + static constexpr int16_t kShortCacheLow = -128; + static constexpr uint16_t kCharCacheHigh = 127; + static constexpr int16_t kLongCacheHigh = 127; + static constexpr int16_t kLongCacheLow = -128; + + static inline MObject *GetCacheMember(MClass &cls, const FieldMeta &cacheMeta, uint32_t value, bool tryInit = true) { + if (tryInit) { + if (!cls.InitClassIfNeeded()) { + LOG(ERROR) << "fail do clinit, " << "class: " << cls.GetName() << maple::endl; + } + } + MObject *addr = MObject::Cast(cacheMeta.GetStaticAddr()); + MArray *cachehArr = reinterpret_cast(addr->LoadObjectNoRc(0)); + return cachehArr->GetObjectElement(value); + } + + static inline MObject *BoxPrimitiveJboolean(uint8_t value) { + FieldMeta *cacheMeta = nullptr; + 
if (!WellKnown::GetMClassBoolean()->InitClassIfNeeded()) { + LOG(ERROR) << "fail do clinit, " << "class: " << WellKnown::GetMClassBoolean()->GetName() << maple::endl; + } + (value == 0) ? (cacheMeta = WellKnown::GetFieldMetaBooleanFalse()) + : (cacheMeta = WellKnown::GetFieldMetaBooleanTrue()); + MObject *addr = MObject::Cast(cacheMeta->GetStaticAddr()); + return reinterpret_cast(addr->LoadObject(0)); + } + + static inline MObject *BoxPrimitiveJbyte(int8_t value) { + return GetCacheMember(*WellKnown::GetMClassByteCache(), *WellKnown::GetFieldMetaByteCache(), + static_cast(value + kByteCacheOffset)); + } + + static inline MObject *BoxPrimitiveJshort(int16_t value) { + if (value >= kShortCacheLow && value <= kShortCacheHigh) { + return GetCacheMember(*WellKnown::GetMClassShortCache(), *WellKnown::GetFieldMetaShortCache(), + static_cast(value + (-kShortCacheLow))); + } + MObject *ret = MObject::NewObject(*WellKnown::GetMClassShort()); + ret->Store(WellKnown::GetMFieldShortValueOffset(), value, false); + return ret; + } + + static inline MObject *BoxPrimitiveJchar(uint16_t value) { + if (value <= kCharCacheHigh) { + return GetCacheMember(*WellKnown::GetMClassCharacterCache(), *WellKnown::GetFieldMetaCharacterCache(), value); + } + MObject *ret = MObject::NewObject(*WellKnown::GetMClassCharacter()); + ret->Store(WellKnown::GetMFieldCharacterValueOffset(), value, false); + return ret; + } + + static inline MObject *BoxPrimitiveJlong(int64_t value) { + if (value >= kLongCacheLow && value <= kLongCacheHigh) { + return GetCacheMember(*WellKnown::GetMClassLongCache(), *WellKnown::GetFieldMetaLongCache(), + static_cast(value + (-kLongCacheLow))); + } + MObject *ret = MObject::NewObject(*WellKnown::GetMClassLong()); + ret->Store(WellKnown::GetMFieldLongValueOffset(), value, false); + return ret; + } + + static inline MObject *BoxPrimitiveJfloat(float value) { + MObject *ret = MObject::NewObject(*WellKnown::GetMClassFloat()); + ret->Store(WellKnown::GetMFieldFloatValueOffset(), value, false); + return ret; + } + + static inline MObject *BoxPrimitiveJdouble(double value) { + MObject *ret = MObject::NewObject(*WellKnown::GetMClassDouble()); + ret->Store(WellKnown::GetMFieldDoubleValueOffset(), value, false); + return ret; + } + + static inline char GetPrimitiveType(const MClass &type) { + if (&type == WellKnown::GetMClassZ()) { + return 'Z'; + } else if (&type == WellKnown::GetMClassB()) { + return 'B'; + } else if (&type == WellKnown::GetMClassS()) { + return 'S'; + } else if (&type == WellKnown::GetMClassC()) { + return 'C'; + } else if (&type == WellKnown::GetMClassI()) { + return 'I'; + } else if (&type == WellKnown::GetMClassJ()) { + return 'J'; + } else if (&type == WellKnown::GetMClassF()) { + return 'F'; + } else if (&type == WellKnown::GetMClassD()) { + return 'D'; + } else if (&type == WellKnown::GetMClassV()) { + return 'V'; + } + return 'N'; + } + + template + static T UnBoxPrimitive(const MObject &elementObj) { + MClass *elementObjClass = elementObj.GetClass(); + char type = GetPrimitiveTypeFromBoxType(*elementObjClass); + size_t offset = 0; + switch (type) { + case 'Z': + offset = WellKnown::GetMFieldBooleanValueOffset(); + return static_cast(elementObj.Load(offset)); + case 'B': + offset = WellKnown::GetMFieldByteValueOffset(); + return static_cast(elementObj.Load(offset)); + case 'C': + offset = WellKnown::GetMFieldCharacterValueOffset(); + return static_cast(elementObj.Load(offset)); + case 'D': + offset = WellKnown::GetMFieldDoubleValueOffset(); + return 
static_cast(elementObj.Load(offset)); + case 'F': + offset = WellKnown::GetMFieldFloatValueOffset(); + return static_cast(elementObj.Load(offset)); + case 'I': + offset = WellKnown::GetMFieldIntegerValueOffset(); + return static_cast(elementObj.Load(offset)); + case 'J': + offset = WellKnown::GetMFieldLongValueOffset(); + return static_cast(elementObj.Load(offset)); + case 'S': + offset = WellKnown::GetMFieldShortValueOffset(); + return static_cast(elementObj.Load(offset)); + default: ; + } + return 0; + } +}; +} // namespace maplert +#endif // MRT_PRIMITIVE_UTIL_H_ diff --git a/src/mrt/maplert/include/mrt_profile.h b/src/mrt/maplert/include/mrt_profile.h new file mode 100644 index 0000000000..9529e05623 --- /dev/null +++ b/src/mrt/maplert/include/mrt_profile.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_PROFILE_H_ +#define MRT_PROFILE_H_ +#include "jni.h" +#include "mclass.h" +#include "methodmeta.h" +#include "fieldmeta.h" +namespace maplert { +class FieldMeta; +class MethodMetaBase; +void InsertReflectionString(const char *kHotString); +void InsertClassMetadata(const MClass &klass); +void InsertMethodMetadata(const MethodMetaBase *kMethod); +void InsertMethodMetadata(const MClass &cls); +void InsertFieldMetadata(FieldMeta *fieldMeta); +void InsertFieldMetadata(const MClass &cls); +void InsertMethodSignature(const MethodMeta &method); + +#define __MRT_Profile_CString(hotString) if (VLOG_IS_ON(profiler)) InsertReflectionString(hotString); +#define __MRT_Profile_ClassMeta(klass) if (VLOG_IS_ON(profiler)) InsertClassMetadata(klass); +#define __MRT_Profile_MethodMeta(methodOrCls) if (VLOG_IS_ON(profiler)) InsertMethodMetadata(methodOrCls); +#define __MRT_Profile_FieldMeta(fieldOrCls) if (VLOG_IS_ON(profiler)) InsertFieldMetadata(fieldOrCls); +#define __MRT_Profile_MethodParameterTypes(method) if (VLOG_IS_ON(profiler)) InsertMethodSignature(method); +} // namespace maplert + +#endif // MRT_PROFILE_H_ diff --git a/src/mrt/maplert/include/mrt_reflection.h b/src/mrt/maplert/include/mrt_reflection.h new file mode 100644 index 0000000000..4f743cb620 --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_REFLECTION_H_ +#define MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_REFLECTION_H_ + +#include "mclass_inline.h" +#include "modifier.h" +#include "mrt_annotation.h" +#include "interp_support.h" +#include "mrt_classloader_api.h" + +using namespace std; +namespace maplert { +namespace reflection { +static inline bool IsInSamePackage(const MClass &declaringClass, const MClass &callingClass, bool clFlag = true) { + MClass *class1 = const_cast(&declaringClass); + MClass *class2 = const_cast(&callingClass); + if (class1 == class2) { + return true; + } + // clFlag is used for classLoader. + if (clFlag) { + jobject classLoader1 = MRT_GetClassLoader(*class1); + jobject classLoader2 = MRT_GetClassLoader(*class2); + // The premise of comparison is the same class loader. + if (classLoader1 != classLoader2) { + return false; + } + } + // Find and compare elements of arrays. + while (class1->IsArrayClass()) { + class1 = class1->GetComponentClass(); + } + while (class2->IsArrayClass()) { + class2 = class2->GetComponentClass(); + } + if (class1 == class2) { + return true; + } + // Compare whether in the same package. + const char *declaringClassName = class1->GetName(); + const char *callingClassName = class2->GetName(); + const char *declaringPackageChar = strrchr(declaringClassName, '/'); + const char *callingPackageChar = strrchr(callingClassName, '/'); + if ((declaringPackageChar == nullptr) && (callingPackageChar == nullptr)) { + return true; + } + if ((declaringPackageChar == nullptr) || (callingPackageChar == nullptr)) { + return false; + } + size_t declaringPackageLen = static_cast(declaringPackageChar - declaringClassName); + size_t callingPackageLen = static_cast(callingPackageChar - callingClassName); + return (declaringPackageLen == callingPackageLen) ? + (strncmp(declaringClassName, callingClassName, declaringPackageLen) == 0) : false; +} + +static inline MClass *GetCallerClass(uint32_t level) { + std::vector uwContextStack; + constexpr uint32_t kReserveSize = 10; + uwContextStack.reserve(kReserveSize); + MapleStack::FastRecordCurrentJavaStack(uwContextStack, level + 1); + MClass *klass = nullptr; + uint32_t size = static_cast(uwContextStack.size()); + if (size > level && !uwContextStack.empty()) { + if (!uwContextStack[level].IsInterpretedContext()) { + klass = MClass::JniCast(uwContextStack[level].frame.GetDeclaringClass()); + } else { + klass = UnwindContextInterpEx::GetDeclaringClassFromUnwindContext(uwContextStack[level]); + } + } + return klass; +} + +static ALWAYS_INLINE inline bool CanAccess(const MClass &thatKlass, const MClass &callerClass) { + return thatKlass.IsPublic() || IsInSamePackage(thatKlass, callerClass) || + (modifier::IsAFOriginPublic(thatKlass.GetModifier()) && thatKlass.IsInnerClass()); +} + +static ALWAYS_INLINE inline bool VerifyAccess(const MObject *obj, const MClass *declaringClass, + uint32_t accessFlags, MClass *&callingClass, uint32_t level) { + DCHECK(declaringClass != nullptr) << "VerifyAccess: declaringClass is nullptr!" 
<< maple::endl; + if (modifier::IsPublic(accessFlags)) { + return true; + } + if (callingClass == nullptr) { + callingClass = GetCallerClass(level); + } + if (declaringClass == callingClass || callingClass == nullptr) { + return true; + } + if (modifier::IsPrivate(accessFlags)) { + return false; + } + if (modifier::IsProtected(accessFlags)) { + if (obj != nullptr && !obj->IsInstanceOf(*callingClass) && !IsInSamePackage(*declaringClass, *callingClass)) { + return false; + } else if (declaringClass->IsAssignableFrom(*callingClass)) { + return true; + } + } + return IsInSamePackage(*declaringClass, *callingClass); +} + +extern "C" void MRT_ThrowNewException(const char *className, const char *msg); +static ALWAYS_INLINE inline bool CheckIsInstaceOf(const MClass &declaringClass, const MObject *mo) { + if (UNLIKELY(mo == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "null receiver"); + return false; + } + if (mo->IsInstanceOf(declaringClass)) { + return true; + } + // Throw Exception + std::string declaringClassName, objectClassName; + declaringClass.GetTypeName(declaringClassName); + MClass *objectClass = mo->GetClass(); + objectClass->GetTypeName(objectClassName); + std::ostringstream msg; + msg << "Expected receiver of type " << declaringClassName << ", but got " << objectClassName; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.str().c_str()); + return false; +} +} +} +#endif // MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_REFLECTION_H_ diff --git a/src/mrt/maplert/include/mrt_reflection_class.h b/src/mrt/maplert/include/mrt_reflection_class.h new file mode 100644 index 0000000000..4d48e21fe5 --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection_class.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_REFLECTION_ClASS_H +#define MRT_REFLECTION_ClASS_H +#include +#include + +#include "mclass.h" +namespace maplert { +#ifndef USE_ARM32_MACRO +#ifdef USE_32BIT_REF +constexpr uint32_t leftShift32Bit = 32; +#endif // ~USE_32BIT_REF +#endif // ~USE_ARM32_MACRO +#ifdef USE_32BIT_REF +constexpr uint32_t kLowBitOfItabLength = 0xffff; +constexpr uint32_t kHighBitOfItabLength = 0x7fffffff; +#else +constexpr uint64_t kLowBitOfItabLength = 0xffffffff; +constexpr uint64_t kHighBitOfItabLength = 0x7fffffffffffffff; +#endif + +#if PLATFORM_SDK_VERSION >= 27 +MObject *ReflectClassGetPrimitiveClass(const MClass &classObj, const MString *name); +#endif +// class.field for reflection_class_jni.cpp +MObject *ReflectClassGetField(const MClass &classObj, const MString *s); +MObject *ReflectClassGetFields(const MClass &classObj); +MObject *ReflectClassGetDeclaredField(const MClass &classObj, const MString *s); +MObject *ReflectClassGetDeclaredFields(const MClass &classObj); +MObject *ReflectClassGetDeclaredFieldsUnchecked(const MClass &classObj, bool publicOnly); + +// class.method & constructor for reflection_class_jni.cpp +MObject *ReflectClassGetMethod(const MClass &classObj, const MString *name, const MArray *arrayClass); +MObject *ReflectClassGetMethods(const MClass &classObj); +MObject *ReflectClassGetDeclaredMethod(const MClass &classObj, const MString *name, const MArray *arrayClass); +MObject *ReflectClassGetDeclaredMethods(const MClass &classObj); +MObject *ReflectClassGetDeclaredMethodsUnchecked(const MClass &classObj, bool publicOnly); +void ReflectClassGetPublicMethodsInternal(const MClass &classObj, MObject *listObject); +MObject *ReflectClassGetDeclaredConstructorsInternal(const MClass &classObj, bool publicOnly); +MObject *ReflectClassGetInstanceMethod(const MClass &classObj, const MString *name, const MArray *arrayClass); +MObject *ReflectClassFindInterfaceMethod(const MClass &classObj, const MString *name, const MArray *arrayClass); +MObject *ReflectClassGetDeclaredMethodInternal(const MClass &classObj, const MString *name, const MArray *arraycls); +MObject *ReflectClassGetDeclaredConstructor(const MClass &classObj, const MArray *arrayClass); +MObject *ReflectClassGetDeclaredConstructors(const MClass &classObj); +MObject *ReflectClassGetDeclaredConstructorInternal(const MClass &classObj, const MArray *arrayClass); +MObject *ReflectClassGetConstructor(const MClass &classObj, const MArray *arrayClass); +MObject *ReflectClassGetConstructors(const MClass &classObj); +void ReflectClassGetPublicFieldsRecursive(const MClass &classObj, MObject *listObject); +MObject *ReflectClassGetPublicFieldRecursive(const MClass &classObj, const MString *s); +MObject *ReflectClassGetPublicDeclaredFields(const MClass &classObj); +MObject *ReflectClassNewInstance(const MClass &classObj); +MObject *ReflectClassGetInterfacesInternal(const MClass &classObj); +MObject *ReflectClassGetSignatureAnnotation(const MClass &classObj); +MObject *ReflectClassGetEnclosingMethodNative(const MClass &classObj); +MObject *ReflectClassGetEnclosingConstructorNative(const MClass &classObj); +MObject *ReflectClassGetClassLoader(const MClass &classObj); +MObject *ReflectClassGetDeclaredAnnotation(const MClass &classObj, const MClass *annoClass); +MObject *ReflectClassGetClasses(const MClass &classObj); +MObject *ReflectClassGetEnclosingClass(const MClass &classObj); +MObject *ReflectClassGetInnerClassName(const MClass &classObj); +bool ReflectClassIsDeclaredAnnotationPresent(const MClass &classObj, const MObject 
*annoObj); +bool ReflectClassIsMemberClass(const MClass &classObj); +bool ReflectClassIsLocalClass(const MClass &classObj); +MObject *ReflectClassGetAnnotation(const MClass &klass, const MClass *annotationType); +} // namespace maplert +#endif diff --git a/src/mrt/maplert/include/mrt_reflection_constructor.h b/src/mrt/maplert/include/mrt_reflection_constructor.h new file mode 100644 index 0000000000..b94696d15f --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection_constructor.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_REFLECTION_CONSTRUCTOR_H +#define MRT_REFLECTION_CONSTRUCTOR_H +namespace maplert { +} // namespace maplert +#endif + diff --git a/src/mrt/maplert/include/mrt_reflection_executable.h b/src/mrt/maplert/include/mrt_reflection_executable.h new file mode 100644 index 0000000000..697e9abad6 --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection_executable.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_REFLECTION_EXECUTABLE_H_ +#define MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_REFLECTION_EXECUTABLE_H_ +#include "mrt_reflection_api.h" // some of the exported API defined here +#ifdef __cplusplus +namespace maplert { +} // namespace maplert +#endif + +#endif diff --git a/src/mrt/maplert/include/mrt_reflection_field.h b/src/mrt/maplert/include/mrt_reflection_field.h new file mode 100644 index 0000000000..3fa5087b98 --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection_field.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_REFLECTION_FIELD_H +#define MRT_REFLECTION_FIELD_H +#include "fieldmeta.h" +#include "mfield.h" +namespace maplert { +uint32_t ReflectFieldGetSize(const FieldMeta &fieldMeta); +uint32_t ReflectCompactFieldGetSize(const std::string &name); +// java/lang/reflect/Field native API implement +// NOTE: fieldObj is field object(MField), not FieldMeta +MObject *ReflectFieldGetDeclaredAnnotations(const MField &fieldObj); +MObject *ReflectFieldGetAnnotation(const MField &fieldObj, const MClass *classArg); +MObject *ReflectFieldGetNameInternal(const MField &fieldObj); +MObject *ReflectFieldGetSignatureAnnotation(const MField &fieldObj); +bool ReflectFieldIsAnnotationPresentNative(const MField &fieldObj, const MClass *classArg); +FieldMeta *ReflectFieldGetArtField(const MField &fieldObj); +uint8_t ReflectGetFieldNativeUint8(const MField &fieldObj, const MObject *obj); +int8_t ReflectGetFieldNativeInt8(const MField &fieldObj, const MObject *obj); +uint16_t ReflectGetFieldNativeUint16(const MField &fieldObj, const MObject *obj); +int16_t ReflectGetFieldNativeInt16(const MField &fieldObj, const MObject *obj); +int32_t ReflectGetFieldNativeInt32(const MField &fieldObj, const MObject *obj); +int64_t ReflectGetFieldNativeInt64(const MField &fieldObj, const MObject *obj); +float ReflectGetFieldNativeFloat(const MField &fieldObj, const MObject *obj); +double ReflectGetFieldNativeDouble(const MField &fieldObj, const MObject *obj); +MObject *ReflectGetFieldNativeObject(const MField &fieldObj, const MObject *obj); + +void ReflectSetFieldNativeUint8(const MField &fieldObj, MObject *obj, uint8_t value); +void ReflectSetFieldNativeInt8(const MField &fieldObj, MObject *obj, int8_t value); +void ReflectSetFieldNativeUint16(const MField &fieldObj, MObject *obj, uint16_t value); +void ReflectSetFieldNativeInt16(const MField &fieldObj, MObject *obj, int16_t value); +void ReflectSetFieldNativeInt32(const MField &fieldObj, MObject *obj, int32_t value); +void ReflectSetFieldNativeInt64(const MField &fieldObj, MObject *obj, int64_t value); +void ReflectSetFieldNativeFloat(const MField &fieldObj, MObject *obj, float value); +void ReflectSetFieldNativeDouble(const MField &fieldObj, MObject *obj, double value); +void ReflectSetFieldNativeObject(const MField &fieldObj, MObject *obj, const MObject *value); +} // namespace maplert +#endif \ No newline at end of file diff --git a/src/mrt/maplert/include/mrt_reflection_method.h b/src/mrt/maplert/include/mrt_reflection_method.h new file mode 100644 index 0000000000..5d57aea694 --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection_method.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_REFLECTION_METHOD_H +#define MRT_REFLECTION_METHOD_H +#include "mrt_annotation.h" +namespace maplert { +// declaring method for use in method reflection +MString *ReflectMethodGetName(const MMethod &methodObj); +MObject *ReflectMethodGetReturnType(const MMethod &methodObj); +MObject *ReflectMethodInvoke(const MMethod &methodObj, MObject *obj, const MArray *arg, uint8_t frames); +MObject *ReflectInvokeJavaMethodFromArrayArgsJobject(MObject*, const MMethod&, const MArray*, uint8_t); +void ReflectInvokeJavaMethodFromArrayArgsVoid(MObject *obj, const MMethod &method, const MArray *arg, uint8_t frames); +} // namespace maplert +#endif // MRT_REFLECTION_METHOD_H diff --git a/src/mrt/maplert/include/mrt_reflection_proxy.h b/src/mrt/maplert/include/mrt_reflection_proxy.h new file mode 100644 index 0000000000..fb183b70e3 --- /dev/null +++ b/src/mrt/maplert/include/mrt_reflection_proxy.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_REFLECTION_PROXY_H +#define MRT_REFLECTION_PROXY_H +#include "mclass.h" +#include "mrt_reflection_api.h" +namespace maplert { +} // namespace maplert +#endif diff --git a/src/mrt/maplert/include/mrt_string.h b/src/mrt/maplert/include/mrt_string.h new file mode 100644 index 0000000000..bfee14169f --- /dev/null +++ b/src/mrt/maplert/include/mrt_string.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef JAVA_MRT_STRING_H_ +#define JAVA_MRT_STRING_H_ + +#include +#include +#include +#include +#include +#include +#include "cinterface.h" +#include "address.h" +#include "mrt_string_api.h" +#include "collector/mpl_thread_pool.h" +#include "mstring.h" +#include "gc_callback.h" +#include "panic.h" + +namespace maplert { +// for GC root scanning. This is a C++ function, not extern "C" +// roots in the constant-string pool. +void VisitStringPool(const RefVisitor &visitor); +void CreateAppStringPool(); + +// Return the size (in bytes) of a given String object. +// It is a specialized function for GetObjectSize. 
+size_t ConstStringPoolSize(bool literal); +size_t ConstStringPoolNum(bool literal); +size_t ConstStringAppPoolNum(bool literal); +void DumpConstStringPool(std::ostream &os, bool literal); + +MString *NewStringUtfFromPoolForClassName(const MClass &classObj); +MArray *MStringToMutf8(const MString &stringObj, int32_t offset, int32_t length, int compress); +MArray *MStringToBytes(const MString &stringObj, int32_t offset, int32_t length, uint16_t maxValidChar, int compress); + +MString *StringNewStringFromString(const MString &stringObj); +MString *StringNewStringFromCharArray(int32_t offset, uint32_t charCount, const MArray &arrayObj); +MString *StringNewStringFromByteArray(const MArray &arrayObj, int32_t highByteT, int32_t offset, uint32_t byteLength); +MString *StringNewStringFromSubstring(const MString &stringObj, int32_t start, uint32_t length); +MString *JStringDoReplace(const MString &stringObj, uint16_t oldChar, uint16_t newChar); +MString *StringConcat(MString &stringObj1, MString &stringObj2); +int32_t StringFastIndexOf(const MString &stringObj, int32_t ch, int32_t start); +MString *NewStringFromUTF16(const std::string &str); + +#ifdef __OPENJDK__ +// new native functions for openjdk +int32_t StringNativeIndexOfP3(MString &subStrObj, MString &srcStrObj, int32_t fromIndex); +int32_t StringNativeIndexOfP5(MString &subStrObj, MArray &srcArray, + int32_t srcOffset, int32_t srcCount, int32_t fromIndex); +int32_t StringNativeLastIndexOfP3(MString &subStrObj, MString &srcStrObj, int32_t fromIndex); +int32_t StringNativeLastIndexOfP5(MString &subStrObj, MArray &srcArray, + int32_t srcOffset, int32_t srcCount, int32_t fromIndex); +MString *StringNewStringFromCodePoints(MArray &mArray, int32_t offset, int32_t count); +int32_t StringNativeCodePointAt(MString &strObj, int32_t index); +int32_t StringNativeCodePointBefore(MString &strObj, int32_t index); +int32_t StringNativeCodePointCount(MString &strObj, int32_t beginIndex, int32_t endIndex); +int32_t StringNativeOffsetByCodePoint(MString &strObj, int32_t index, int32_t codePointOffset); +#endif // __OPENJDK__ + +// if c-string is in 16bit coding, then use CStrToJStr, otherwise use NewStringUTF +MString *NewStringUTF(const char *kCStr, size_t cStrLen); + +// for normal MString internization, use GetOrInsertStringPool +MString *GetOrInsertStringPool(MString &stringObj); +// get/insert a MFile literal into const pool. 
+// used by mpl-linker to insert literal (also stored in static-fields) +MString *GetOrInsertLiteral(MString &literalObj); + +// only for RC +void RemoveStringFromPool(MString &stringObj); +size_t RemoveDeadStringFromPool(); + +// for gc +void StringPrepareConcurrentSweeping(); +size_t ConcurrentSweepDeadStrings(MplThreadPool *threadPool); + +#ifdef __cplusplus +extern "C" { +#endif + +class MStringRef { + public: +#ifdef USE_32BIT_REF + using RawType = uint32_t; +#else + using RawType = address_t; +#endif + + MStringRef() : ref(0) {} // default initialized to 0 as empty + RawType GetRef() const { + return ref; + } + RawType &GetRawRef() { + return ref; + } + void SetRef(const address_t addr) { + __MRT_ASSERT(addr <= std::numeric_limits::max(), "String address overflow for MStringRef!"); + ref = static_cast(addr); + } + void SetRef(const MString *str) { + SetRef(reinterpret_cast(str)); + } + void Clear() { + ref = 0; + } + bool IsEmpty() const { + return ref == 0; + } + private: + RawType ref; +}; + +jstring CStrToJStr(const char *ca, jint len); +// empty, hash and equal +struct MStringHashEqual { + // compare mstring with MFile literal + size_t operator()(const MString *a) const { + return a->GetHash(); + } + + size_t operator()(const MStringRef a) const { + return reinterpret_cast(a.GetRef())->GetHash(); + } + + bool operator()(const MString *a, const MStringRef b) const { + return a->Equals(*(reinterpret_cast(b.GetRef()))); + } + + bool operator()(const MStringRef a, const MStringRef b) const { + return reinterpret_cast(a.GetRef())->Equals(*(reinterpret_cast(b.GetRef()))); + } + + void Clear(MStringRef &ref) const { + ref.Clear(); + } + + bool IsEmpty(const MStringRef ref) const { + return ref.IsEmpty(); + } +}; + +struct MStringMapNotEqual { + // compare mstring with MFile literal + size_t operator()(const MString *a) const { + return a->GetHash(); + } + + bool operator()(const MString *a, const MString *b) const { + return !a->Equals(*b); + } +}; + +#ifdef __cplusplus +} +#endif // __cplusplus +} // namespace maplert +#endif // JAVA_MRT_STRING_H_ diff --git a/src/mrt/maplert/include/mrt_util.h b/src/mrt/maplert/include/mrt_util.h new file mode 100644 index 0000000000..21a22f7207 --- /dev/null +++ b/src/mrt/maplert/include/mrt_util.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef JAVA_MRT_UTIL_H_ +#define JAVA_MRT_UTIL_H_ +#include + +// provide "secure" version of common utility functions +#include "securec.h" + +#include "jni.h" + +#ifdef __cplusplus +namespace maplert { +std::string GetClassNametoDescriptor(const std::string &className); +extern "C" { +#endif +#define OPENSSL_VERSION_TEXT "OpenSSL 1.0.2 (compatible; BoringSSL)" +char *MRT_FuncnameToPrototypeNames(char *funcName, int argNum, char **typeNames); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif // __cplusplus +#endif // JAVA_MRT_UTIL_H_ diff --git a/src/mrt/maplert/include/mrt_well_known.h b/src/mrt/maplert/include/mrt_well_known.h new file mode 100644 index 0000000000..01dfa260ca --- /dev/null +++ b/src/mrt/maplert/include/mrt_well_known.h @@ -0,0 +1,865 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_WELL_KNOWN_H_ +#define MRT_WELL_KNOWN_H_ +#include + +#include "mclass.h" +namespace maplert { +class WellKnown { + public: + // public fields + // array class interface always Ljava/lang/Cloneable; Ljava/io/Serializable; + static constexpr uint8_t kArrayInterfaceNum = 2; + static std::vector arrayInterface; + static size_t kReferenceReferentOffset; // java/lang/Reference::Referent + static size_t kReferenceQueueOffset; // java/lang/Reference::Queue + // java/lang/Reference::Pendingnext for libcore or java/lang/Reference::discovered for OpenJDK + static size_t kReferencePendingnextOffset; + // java/lang/ref/FinalizerReference::Zombie for libcore only + static size_t kFinalizereferenceZombieOffset; + + // public methods + static inline MClass *GetMClassZ() { + return primitiveClassZ; + } + + static inline MClass *GetMClassB() { + return primitiveClassB; + } + + static inline MClass *GetMClassS() { + return primitiveClassS; + } + + static inline MClass *GetMClassC() { + return primitiveClassC; + } + + static inline MClass *GetMClassI() { + return primitiveClassI; + } + + static inline MClass *GetMClassJ() { + return primitiveClassJ; + } + + static inline MClass *GetMClassF() { + return primitiveClassF; + } + + static inline MClass *GetMClassD() { + return primitiveClassD; + } + + static inline MClass *GetMClassV() { + return primitiveClassV; + } + + static inline MClass *GetMClassAZ() { + return primitiveClassAZ; + } + + static inline MClass *GetMClassAB() { + return primitiveClassAB; + } + + static inline MClass *GetMClassAS() { + return primitiveClassAS; + } + + static inline MClass *GetMClassAC() { + return primitiveClassAC; + } + + static inline MClass *GetMClassAI() { + return primitiveClassAI; + } + + static inline MClass *GetMClassAJ() { + return primitiveClassAJ; + } + + static inline MClass *GetMClassAF() { + return primitiveClassAF; + } + + static inline MClass *GetMClassAD() { + return primitiveClassAD; + } + + static inline MClass *GetMClassAAZ() { + return primitiveClassAAZ; + } + + static inline MClass *GetMClassAAB() { + return primitiveClassAAB; + } + + static inline MClass *GetMClassAAS() { + return 
primitiveClassAAS; + } + + static inline MClass *GetMClassAAC() { + return primitiveClassAAC; + } + + static inline MClass *GetMClassAAI() { + return primitiveClassAAI; + } + + static inline MClass *GetMClassAAJ() { + return primitiveClassAAJ; + } + + static inline MClass *GetMClassAAF() { + return primitiveClassAAF; + } + + static inline MClass *GetMClassAAD() { + return primitiveClassAAD; + } + + static inline MClass *GetMClassVoid() { + return Ljava_2Flang_2FVoid_3B; + } + + static inline MClass *GetMClassBoolean() { + return Ljava_2Flang_2FBoolean_3B; + } + + static inline MClass *GetMClassByte() { + return Ljava_2Flang_2FByte_3B; + } + + static inline MClass *GetMClassCharacter() { + return Ljava_2Flang_2FCharacter_3B; + } + + static inline MClass *GetMClassShort() { + return Ljava_2Flang_2FShort_3B; + } + + static inline MClass *GetMClassInteger() { + return Ljava_2Flang_2FInteger_3B; + } + + static inline MClass *GetMClassLong() { + return Ljava_2Flang_2FLong_3B; + } + + static inline MClass *GetMClassFloat() { + return Ljava_2Flang_2FFloat_3B; + } + + static inline MClass *GetMClassDouble() { + return Ljava_2Flang_2FDouble_3B; + } + + static inline MClass *GetMClassNumber() { + return Ljava_2Flang_2FNumber_3B; + } + + static inline MClass *GetMClassObject() { + return Ljava_2Flang_2FObject_3B; + } + + static inline MClass *GetMClassClass() { + return Ljava_2Flang_2FClass_3B; + } + + static inline MClass *GetMClassField() { + return Ljava_2Flang_2Freflect_2FField_3B; + } + + static inline MClass *GetMClassConstructor() { + return Ljava_2Flang_2Freflect_2FConstructor_3B; + } + + static inline MClass *GetMClassMethod() { + return Ljava_2Flang_2Freflect_2FMethod_3B; + } + + static inline MClass *GetMClassProxy() { + return Ljava_2Flang_2Freflect_2FProxy_3B; + } + + static inline MClass *GetMClassParameter() { + return Ljava_2Flang_2Freflect_2FParameter_3B; + } + + static inline MClass *GetMClassGenericSignatureParser() { + return Llibcore_2Freflect_2FGenericSignatureParser_3B; + } + + static inline MClass *GetMClassCloneable() { + return Ljava_2Flang_2FCloneable_3B; + } + + static inline MClass *GetMClassSerializable() { + return Ljava_2Fio_2FSerializable_3B; + } + + static inline MClass *GetMClassString() { + return Ljava_2Flang_2FString_3B; + } + + static inline MClass *GetMClassStringFactory() { + return Ljava_2Flang_2FStringFactory_3B; + } + + static inline MClass *GetMClassError() { + return Ljava_2Flang_2FError_3B; + } + + static inline MClass *GetMClassThrowable() { + return Ljava_2Flang_2FThrowable_3B; + } + + static inline MClass *GetMClassArithmeticException() { + return Ljava_2Flang_2FArithmeticException_3B; + } + + static inline MClass *GetMClassInterruptedException() { + return Ljava_2Flang_2FInterruptedException_3B; + } + + static inline MClass *GetMClassClassCastException() { + return Ljava_2Flang_2FClassCastException_3B; + } + + static inline MClass *GetMClassUnsatisfiedLinkError() { + return Ljava_2Flang_2FUnsatisfiedLinkError_3B; + } + + static inline MClass *GetMClassStringIndexOutOfBoundsException() { + return Ljava_2Flang_2FStringIndexOutOfBoundsException_3B; + } + + static inline MClass *GetMClassNoClassDefFoundError() { + return Ljava_2Flang_2FNoClassDefFoundError_3B; + } + + static inline MClass *GetMClassNoSuchMethodError() { + return Ljava_2Flang_2FNoSuchMethodError_3B; + } + + static inline MClass *GetMClassNoSuchFieldError() { + return Ljava_2Flang_2FNoSuchFieldError_3B; + } + + static inline MClass *GetMClassVerifyError() { + return 
Ljava_2Flang_2FVerifyError_3B; + } + + static inline MClass *GetMClassExceptionInInitializerError() { + return Ljava_2Flang_2FExceptionInInitializerError_3B; + } + + static inline MClass *GetMClassRuntimeException() { + return Ljava_2Flang_2FRuntimeException_3B; + } + + static inline MClass *GetMClassSecurityException() { + return Ljava_2Flang_2FSecurityException_3B; + } + + static inline MClass *GetMClassUndeclaredThrowableException() { + return Ljava_2Flang_2Freflect_2FUndeclaredThrowableException_3B; + } + + static inline MClass *GetMClassArrayStoreException() { + return Ljava_2Flang_2FArrayStoreException_3B; + } + + static inline MClass *GetMClassArrayIndexOutOfBoundsException() { + return Ljava_2Flang_2FArrayIndexOutOfBoundsException_3B; + } + + static inline MClass *GetMClassNullPointerException() { + return Ljava_2Flang_2FNullPointerException_3B; + } + + static inline MClass *GetMClassEnum() { + return Ljava_2Flang_2FEnum_3B; + } + + static inline MClass *GetMClassAnnotation() { + return Ljava_2Flang_2Fannotation_2FAnnotation_3B; + } + + static inline MClass *GetMClassAnnotationMember() { + return Llibcore_2Freflect_2FAnnotationMember_3B; + } + + static inline MClass *GetMClassAnnotationFactory() { + return Llibcore_2Freflect_2FAnnotationFactory_3B; + } + + static inline MClass *GetMClassDelegateLastClassLoader() { + return Ldalvik_2Fsystem_2FDelegateLastClassLoader_3B; + } + + static inline MClass *GetMClassPathClassLoader() { + return Ldalvik_2Fsystem_2FPathClassLoader_3B; + } + + static inline MClass *GetMClassDexClassLoader() { + return Ldalvik_2Fsystem_2FDexClassLoader_3B; + } + + static inline MClass *GetMClassInMemoryDexClassLoader() { + return Ldalvik_2Fsystem_2FInMemoryDexClassLoader_3B; + } + + static inline MClass *GetMClassMethodType() { + return Ljava_2Flang_2Finvoke_2FMethodType_3B; + } + + static inline MClass *GetMClassMethodHandle() { + return Ljava_2Flang_2Finvoke_2FMethodHandle_3B; + } + + static inline size_t GetMFieldMethodHandleDataArrayOffset() { + return Ljava_2Flang_2FMethodHandle_3B_dataArray_offset; + } + + static inline size_t GetMFieldMethodHandleMetaArrayOffset() { + return Ljava_2Flang_2FMethodHandle_3B_metaArray_offset; + } + + static inline size_t GetMFieldMethodHandleTypeArrayOffset() { + return Ljava_2Flang_2FMethodHandle_3B_typeArray_offset; + } + + static inline size_t GetMFieldMethodHandleOpArrayOffset() { + return Ljava_2Flang_2FMethodHandle_3B_opArray_offset; + } + + static inline size_t GetMFieldMethodHandleIndexOffset() { + return Ljava_2Flang_2FMethodHandle_3B_index_offset; + } + + static inline size_t GetMFieldBindToDataReceiverOffset() { + return Ljava_2Flang_2FBindToData_3B_receiver_offset; + } + + static inline size_t GetMFieldDropArgumentsDataNumDroppedOffset() { + return Ljava_2Flang_2FDropArgumentsData_3B_numDropped_offset; + } + + static inline size_t GetMFieldDropArgumentsDataStartPosOffset() { + return Ljava_2Flang_2FDropArgumentsData_3B_startPos_offset; + } + + static inline size_t GetMFieldFilterReturnValueDataTargetOffset() { + return Ljava_2Flang_2FFilterReturnValueData_3B_target_offset; + } + + static inline size_t GetMFieldFilterReturnValueDataFilterOffset() { + return Ljava_2Flang_2FFilterReturnValueData_3B_filter_offset; + } + + static inline size_t GetMFieldPermuteArgumentsDataTargetOffset() { + return Ljava_2Flang_2FPermuteArgumentsData_3B_target_offset; + } + + static inline size_t GetMFieldPermuteArgumentsDataReorderOffset() { + return Ljava_2Flang_2FPermuteArgumentsData_3B_reorder_offset; + } + + static inline MClass 
*GetMClassEmulatedStackFrame() { + return Ldalvik_2Fsystem_2FEmulatedStackFrame_3B; + } + + static inline size_t GetMFieldMethodHandleArtFieldOrMethodOffset() { + return Ljava_2Flang_2FMethodHandle_3B_artFieldOrMethod_offset; + } + + static inline size_t GetMFieldMethodHandleHandleKindOffset() { + return Ljava_2Flang_2FMethodHandle_3B_handleKind_offset; + } + + static inline size_t GetMFieldMethodHandleNominalTypeOffset() { + return Ljava_2Flang_2FMethodHandle_3B_nominalType_offset; + } + + static inline size_t GetMFieldMethodHandleTypeOffset() { + return Ljava_2Flang_2FMethodHandle_3B_type_offset; + } + + static inline size_t GetMFieldEmStackFrameCallsiteOffset() { + return Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_callsiteType_offset; + } + + static inline size_t GetMFieldEmStackFrameReferencesOffset() { + return Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_references_offset; + } + + static inline size_t GetMFieldEmStackFrameStackFrameOffset() { + return Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_stackFrame_offset; + } + + static inline size_t GetMFieldEmStackFrameTypeOffset() { + return Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_type_offset; + } + +#ifdef __OPENJDK__ + static inline MClass *GetMClassHashMap() { + return Ljava_2Futil_2FHashMap_3B; + } + + static inline MClass *GetMClassAnnotationParser() { + return Lsun_2Freflect_2Fannotation_2FAnnotationParser_3B; + } +#endif // __OPENJDK__ + + static inline MClass *GetMClassIntegerCache() { + return Ljava_2Flang_2FInteger_24IntegerCache_3B; + } + + static inline MClass *GetMClassByteCache() { + return Ljava_2Flang_2FByte_24ByteCache_3B; + } + + static inline MClass *GetMClassShortCache() { + return Ljava_2Flang_2FShort_24ShortCache_3B; + } + + static inline MClass *GetMClassCharacterCache() { + return Ljava_2Flang_2FCharacter_24CharacterCache_3B; + } + + static inline MClass *GetMClassLongCache() { + return Ljava_2Flang_2FLong_24LongCache_3B; + } + + static inline MClass *GetMClassReference() { + return Ljava_2Flang_2Fref_2FReference_3B; + } + + static inline MClass *GetMClassFinalizerReference() { + return Ljava_2Flang_2Fref_2FFinalizerReference_3B; + } + + static inline MClass *GetMClassAObject() { + return ALjava_2Flang_2FObject_3B; + } + + static inline MClass *GetMClassAClass() { + return ALjava_2Flang_2FClass_3B; + } + + static inline MClass *GetMClassAField() { + return ALjava_2Flang_2Freflect_2FField_3B; + } + + static inline MClass *GetMClassAString() { + return ALjava_2Flang_2FString_3B; + } + + static inline MClass *GetMClassABoolean() { + return ALjava_2Flang_2FBoolean_3B; + } + + static inline MClass *GetMClassAByte() { + return ALjava_2Flang_2FByte_3B; + } + + static inline MClass *GetMClassACharacter() { + return ALjava_2Flang_2FCharacter_3B; + } + + static inline MClass *GetMClassAShort() { + return ALjava_2Flang_2FShort_3B; + } + + static inline MClass *GetMClassAInteger() { + return ALjava_2Flang_2FInteger_3B; + } + + static inline MClass *GetMClassALong() { + return ALjava_2Flang_2FLong_3B; + } + + static inline MClass *GetMClassAFloat() { + return ALjava_2Flang_2FFloat_3B; + } + + static inline MClass *GetMClassADouble() { + return ALjava_2Flang_2FDouble_3B; + } + + static inline MClass *GetMClassAMethod() { + return ALjava_2Flang_2Freflect_2FMethod_3B; + } + + static inline MClass *GetMClassAAnnotation() { + return ALjava_2Flang_2Fannotation_2FAnnotation_3B; + } + + static inline MClass *GetMClassAConstructor() { + return ALjava_2Flang_2Freflect_2FConstructor_3B; + } + + static inline MClass *GetMClassAParameter() { + 
return ALjava_2Flang_2Freflect_2FParameter_3B; + } + + static inline MClass *GetMClassAAAnnotation() { + return AALjava_2Flang_2Fannotation_2FAnnotation_3B; + } + + static inline MClass *GetPrimitiveArrayClass(maple::Primitive::Type pType) { + return primitiveArrayClass[pType]; + } + + static inline FieldMeta *GetFieldMetaIntegerCache() { + return Ljava_2Flang_2FInteger_24IntegerCache_3B_cache; + } + + static inline FieldMeta *GetFieldMetaIntegerCacheLow() { + return Ljava_2Flang_2FInteger_24IntegerCache_3B_low; + } + + static inline FieldMeta *GetFieldMetaIntegerCacheHigh() { + return Ljava_2Flang_2FInteger_24IntegerCache_3B_high; + } + + static inline FieldMeta *GetFieldMetaBooleanTrue() { + return Ljava_2Flang_2FBoolean_3B_TRUE; + } + + static inline FieldMeta *GetFieldMetaBooleanFalse() { + return Ljava_2Flang_2FBoolean_3B_FALSE; + } + + static inline FieldMeta *GetFieldMetaByteCache() { + return Ljava_2Flang_2FByte_24ByteCache_3B_cache; + } + + static inline FieldMeta *GetFieldMetaShortCache() { + return Ljava_2Flang_2FShort_24ShortCache_3B_cache; + } + + static inline FieldMeta *GetFieldMetaCharacterCache() { + return Ljava_2Flang_2FCharacter_24CharacterCache_3B_cache; + } + + static inline FieldMeta *GetFieldMetaLongCache() { + return Ljava_2Flang_2FLong_24LongCache_3B_cache; + } + + static inline uintptr_t GetMMethodClassLoaderLoadClassAddr() { + return Ljava_2Flang_2FClassLoader_3B_LoadClass_Addr; + } + + static inline uintptr_t GetMMethodAnnotationMemberInitAddr() { + return Llibcore_2Freflect_2FAnnotationMember_3B_7C_3Cinit_Addr; + } + + static inline uintptr_t GetMMethodAnnotationFactoryCreateAnnotationAddr() { + return Llibcore_2Freflect_2FAnnotationFactory_3B_7CcreateAnnotation_Addr; + } + + static inline uintptr_t GetMMethodBooleanValueOfAddr() { + return Ljava_2Flang_2FBoolean_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodByteValueOfAddr() { + return Ljava_2Flang_2FByte_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodCharacterValueOfAddr() { + return Ljava_2Flang_2FCharacter_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodShortValueOfAddr() { + return Ljava_2Flang_2FShort_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodIntegerValueOfAddr() { + return Ljava_2Flang_2FInteger_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodLongValueOfAddr() { + return Ljava_2Flang_2FLong_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodFloatValueOfAddr() { + return Ljava_2Flang_2FFloat_3B_ValueOf_Addr; + } + + static inline uintptr_t GetMMethodDoubleValueOfAddr() { + return Ljava_2Flang_2FDouble_3B_ValueOf_Addr; + } + + static inline size_t GetMFieldBooleanValueOffset() { + return Ljava_2Flang_2FBoolean_3B_value_offset; + } + + static inline size_t GetMFieldByteValueOffset() { + return Ljava_2Flang_2FByte_3B_value_offset; + } + + static inline size_t GetMFieldCharacterValueOffset() { + return Ljava_2Flang_2FCharacter_3B_value_offset; + } + + static inline size_t GetMFieldShortValueOffset() { + return Ljava_2Flang_2FShort_3B_value_offset; + } + + static inline size_t GetMFieldIntegerValueOffset() { + return Ljava_2Flang_2FInteger_3B_value_offset; + } + + static inline size_t GetMFieldLongValueOffset() { + return Ljava_2Flang_2FLong_3B_value_offset; + } + + static inline size_t GetMFieldFloatValueOffset() { + return Ljava_2Flang_2FFloat_3B_value_offset; + } + + static inline size_t GetMFieldDoubleValueOffset() { + return Ljava_2Flang_2FDouble_3B_value_offset; + } + + static inline size_t GetMFieldMethodHandlePTypesOffset() { + 
return Ljava_2Flang_2FMethodType_3B_ptypes_offset; + } + + static inline size_t GetMFieldMethodHandlepRTypeOffset() { + return Ljava_2Flang_2FMethodType_3B_rtype_offset; + } + + static void InitCache(); + static void InitArrayInterfaceVector(); + static void InitCacheClasses(); + static void InitCacheMethodHandleClasses(); + static void InitCachePrimitiveBoxClass(); + static void InitCacheMethodAddrs(); + static void InitCacheArrayClass(); + static void InitCacheFieldOffsets(); + static void InitCacheFieldMethodHandleOffsets(); + static void InitCacheExceptionClass(); + static void InitCacheFieldMetas(); + static void InitCacheClass(MClass *&cls, const char *className); + static void InitCacheMethodAddr(uintptr_t &methodAddr, const MClass &cls, + const char *methodName, const char *signatureName); + static void InitCacheFieldOffset(size_t &fieldOffset, const MClass &cls, const char *fieldName); + static void InitCacheFieldMeta(FieldMeta *&fieldMeta, const MClass &cls, const char *fieldName); + static MClass *GetCacheArrayClass(const MClass &componentClass); + static MClass *GetWellKnowClassWithFlag(uint8_t classFlag, const MClass &caller, const char *className); + static MethodMeta *GetStringFactoryConstructor(const MethodMeta &stringConstructor); + + private: + static constexpr uint8_t kMaxPrimitiveSize = 9; + static constexpr uint8_t kMaxFrameworksSize = 7; + static constexpr uint32_t kCacheArrayClassSize = 16; + static uint32_t currentCacheArrayClassIndex; + + static MClass *primitiveClassZ; + static MClass *primitiveClassB; + static MClass *primitiveClassS; + static MClass *primitiveClassC; + static MClass *primitiveClassI; + static MClass *primitiveClassJ; + static MClass *primitiveClassF; + static MClass *primitiveClassD; + static MClass *primitiveClassV; + + static MClass *primitiveClassAZ; + static MClass *primitiveClassAB; + static MClass *primitiveClassAS; + static MClass *primitiveClassAC; + static MClass *primitiveClassAI; + static MClass *primitiveClassAJ; + static MClass *primitiveClassAF; + static MClass *primitiveClassAD; + + static MClass *primitiveClassAAZ; + static MClass *primitiveClassAAB; + static MClass *primitiveClassAAS; + static MClass *primitiveClassAAC; + static MClass *primitiveClassAAI; + static MClass *primitiveClassAAJ; + static MClass *primitiveClassAAF; + static MClass *primitiveClassAAD; + + static MClass *Ljava_2Flang_2FVoid_3B; + static MClass *Ljava_2Flang_2FBoolean_3B; + static MClass *Ljava_2Flang_2FByte_3B; + static MClass *Ljava_2Flang_2FCharacter_3B; + static MClass *Ljava_2Flang_2FShort_3B; + static MClass *Ljava_2Flang_2FInteger_3B; + static MClass *Ljava_2Flang_2FLong_3B; + static MClass *Ljava_2Flang_2FFloat_3B; + static MClass *Ljava_2Flang_2FDouble_3B; + static MClass *Ljava_2Flang_2FNumber_3B; + + static MClass *Ljava_2Flang_2FObject_3B; + static MClass *Ljava_2Flang_2FClass_3B; + static MClass *Ljava_2Flang_2FClassLoader_3B; + static MClass *Ljava_2Flang_2Freflect_2FField_3B; + static MClass *Ljava_2Flang_2Freflect_2FConstructor_3B; + static MClass *Ljava_2Flang_2Freflect_2FMethod_3B; + static MClass *Ljava_2Flang_2Freflect_2FProxy_3B; + static MClass *Ljava_2Flang_2Freflect_2FParameter_3B; + static MClass *Llibcore_2Freflect_2FGenericSignatureParser_3B; + static MClass *Ljava_2Flang_2Fref_2FReference_3B; + static MClass *Ljava_2Flang_2Fref_2FFinalizerReference_3B; + + static MClass *Ljava_2Flang_2FCloneable_3B; + static MClass *Ljava_2Fio_2FSerializable_3B; + + static MClass *Ljava_2Flang_2FString_3B; + static MClass 
*Ljava_2Flang_2FStringFactory_3B; + + static MClass *Ljava_2Flang_2FError_3B; + static MClass *Ljava_2Flang_2FThrowable_3B; + static MClass *Ljava_2Flang_2FArithmeticException_3B; + static MClass *Ljava_2Flang_2FInterruptedException_3B; + static MClass *Ljava_2Flang_2FClassCastException_3B; + static MClass *Ljava_2Flang_2FUnsatisfiedLinkError_3B; + static MClass *Ljava_2Flang_2FStringIndexOutOfBoundsException_3B; + static MClass *Ljava_2Flang_2FNoClassDefFoundError_3B; + static MClass *Ljava_2Flang_2FNoSuchMethodError_3B; + static MClass *Ljava_2Flang_2FNoSuchFieldError_3B; + static MClass *Ljava_2Flang_2FVerifyError_3B; + static MClass *Ljava_2Flang_2FExceptionInInitializerError_3B; + static MClass *Ljava_2Flang_2FRuntimeException_3B; + static MClass *Ljava_2Flang_2FSecurityException_3B; + static MClass *Ljava_2Flang_2Freflect_2FUndeclaredThrowableException_3B; + static MClass *Ljava_2Flang_2FArrayStoreException_3B; + static MClass *Ljava_2Flang_2FArrayIndexOutOfBoundsException_3B; + static MClass *Ljava_2Flang_2FNullPointerException_3B; + + static MClass *Ljava_2Flang_2FEnum_3B; + static MClass *Ljava_2Flang_2Fannotation_2FAnnotation_3B; + static MClass *Llibcore_2Freflect_2FAnnotationMember_3B; + static MClass *Llibcore_2Freflect_2FAnnotationFactory_3B; + static MClass *Ldalvik_2Fsystem_2FDelegateLastClassLoader_3B; + static MClass *Ldalvik_2Fsystem_2FPathClassLoader_3B; + static MClass *Ldalvik_2Fsystem_2FDexClassLoader_3B; + static MClass *Ldalvik_2Fsystem_2FInMemoryDexClassLoader_3B; + + static MClass *Ljava_2Flang_2Finvoke_2FMethodType_3B; + static MClass *Ljava_2Flang_2Finvoke_2FMethodHandle_3B; + +#ifdef __OPENJDK__ + static MClass *Ljava_2Futil_2FHashMap_3B; + static MClass *Lsun_2Freflect_2Fannotation_2FAnnotationParser_3B; + +#endif // __OPENJDK__ + + static MClass *Ljava_2Flang_2FInteger_24IntegerCache_3B; + static MClass *Ljava_2Flang_2FByte_24ByteCache_3B; + static MClass *Ljava_2Flang_2FShort_24ShortCache_3B; + static MClass *Ljava_2Flang_2FCharacter_24CharacterCache_3B; + static MClass *Ljava_2Flang_2FLong_24LongCache_3B; + + static MClass *Ljava_2Flang_2Finvoke_2FInvokeData_24BindToData_3B; + static MClass *Ljava_2Flang_2Finvoke_2FInvokeData_24DropArgumentsData_3B; + static MClass *Ljava_2Flang_2Finvoke_2FInvokeData_24FilterReturnValueData_3B; + static MClass *Ljava_2Flang_2Finvoke_2FInvokeData_24PermuteArgumentsData_3B; + static size_t Ljava_2Flang_2FMethodHandle_3B_dataArray_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_metaArray_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_typeArray_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_opArray_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_index_offset; + static size_t Ljava_2Flang_2FBindToData_3B_receiver_offset; + static size_t Ljava_2Flang_2FDropArgumentsData_3B_numDropped_offset; + static size_t Ljava_2Flang_2FDropArgumentsData_3B_startPos_offset; + static size_t Ljava_2Flang_2FFilterReturnValueData_3B_target_offset; + static size_t Ljava_2Flang_2FFilterReturnValueData_3B_filter_offset; + static size_t Ljava_2Flang_2FPermuteArgumentsData_3B_target_offset; + static size_t Ljava_2Flang_2FPermuteArgumentsData_3B_reorder_offset; + + static MClass *Ldalvik_2Fsystem_2FEmulatedStackFrame_3B; + static size_t Ljava_2Flang_2FMethodHandle_3B_artFieldOrMethod_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_handleKind_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_nominalType_offset; + static size_t Ljava_2Flang_2FMethodHandle_3B_type_offset; + static size_t 
Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_callsiteType_offset; + static size_t Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_references_offset; + static size_t Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_stackFrame_offset; + static size_t Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_type_offset; + + static MClass *ALjava_2Flang_2FObject_3B; + static MClass *ALjava_2Flang_2FClass_3B; + static MClass *ALjava_2Flang_2Freflect_2FField_3B; + static MClass *ALjava_2Flang_2Freflect_2FMethod_3B; + static MClass *ALjava_2Flang_2Fannotation_2FAnnotation_3B; + static MClass *ALjava_2Flang_2Freflect_2FConstructor_3B; + static MClass *ALjava_2Flang_2Freflect_2FParameter_3B; + static MClass *ALjava_2Flang_2FString_3B; + static MClass *ALjava_2Flang_2FBoolean_3B; + static MClass *ALjava_2Flang_2FByte_3B; + static MClass *ALjava_2Flang_2FCharacter_3B; + static MClass *ALjava_2Flang_2FShort_3B; + static MClass *ALjava_2Flang_2FInteger_3B; + static MClass *ALjava_2Flang_2FLong_3B; + static MClass *ALjava_2Flang_2FFloat_3B; + static MClass *ALjava_2Flang_2FDouble_3B; + + static MClass *AALjava_2Flang_2Fannotation_2FAnnotation_3B; + + static MClass *primitiveArrayClass[]; + static MClass *cacheArrayClasses[]; + static MClass *arrayFrameWorksClasses[]; + + static FieldMeta *Ljava_2Flang_2FBoolean_3B_TRUE; + static FieldMeta *Ljava_2Flang_2FBoolean_3B_FALSE; + static FieldMeta *Ljava_2Flang_2FByte_24ByteCache_3B_cache; + static FieldMeta *Ljava_2Flang_2FShort_24ShortCache_3B_cache; + static FieldMeta *Ljava_2Flang_2FCharacter_24CharacterCache_3B_cache; + static FieldMeta *Ljava_2Flang_2FLong_24LongCache_3B_cache; + static FieldMeta *Ljava_2Flang_2FInteger_24IntegerCache_3B_cache; + static FieldMeta *Ljava_2Flang_2FInteger_24IntegerCache_3B_low; + static FieldMeta *Ljava_2Flang_2FInteger_24IntegerCache_3B_high; + + // method loadClass address + static uintptr_t Ljava_2Flang_2FClassLoader_3B_LoadClass_Addr; + + // Annotation method address + static uintptr_t Llibcore_2Freflect_2FAnnotationMember_3B_7C_3Cinit_Addr; + static uintptr_t Llibcore_2Freflect_2FAnnotationFactory_3B_7CcreateAnnotation_Addr; + + // method ValueOf address + static uintptr_t Ljava_2Flang_2FBoolean_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FByte_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FCharacter_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FShort_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FInteger_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FLong_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FFloat_3B_ValueOf_Addr; + static uintptr_t Ljava_2Flang_2FDouble_3B_ValueOf_Addr; + + // field value offset + static size_t Ljava_2Flang_2FBoolean_3B_value_offset; + static size_t Ljava_2Flang_2FByte_3B_value_offset; + static size_t Ljava_2Flang_2FCharacter_3B_value_offset; + static size_t Ljava_2Flang_2FShort_3B_value_offset; + static size_t Ljava_2Flang_2FInteger_3B_value_offset; + static size_t Ljava_2Flang_2FLong_3B_value_offset; + static size_t Ljava_2Flang_2FFloat_3B_value_offset; + static size_t Ljava_2Flang_2FDouble_3B_value_offset; + static size_t Ljava_2Flang_2FMethodType_3B_ptypes_offset; + static size_t Ljava_2Flang_2FMethodType_3B_rtype_offset; +}; +} // namespace maplert +#endif // MRT_WELL_KNOWN_H_ diff --git a/src/mrt/maplert/include/mstring.h b/src/mrt/maplert/include/mstring.h new file mode 100644 index 0000000000..81c19b8fef --- /dev/null +++ b/src/mrt/maplert/include/mstring.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MRT_MAPLERT_INCLUDE_MSTRING_H_
+#define MRT_MAPLERT_INCLUDE_MSTRING_H_
+#include "mobject.h"
+#include <string>
+namespace maplert {
+class PACKED(4) MString : public MObject {
+ public:
+  uint32_t GetCountValue() const;
+  inline void SetCountValue(uint32_t value);
+  inline uint32_t GetLength() const;
+  uint32_t GetHash() const;
+  inline void SetHash(uint32_t hash);
+  uint8_t *GetContentsPtr() const;
+  bool IsCompress() const;
+  bool IsLiteral() const;
+  inline uint16_t CharAt(uint32_t index) const;
+  uint32_t GetStringSize() const;
+  inline bool Cmp(const std::string &src) const;
+  std::string GetChars() const;
+  MString *Intern();
+  bool Equals(const MString &src) const;
+  inline void SetStringClass();
+  static MString *InternUtf8(const std::string &str);
+  static MString *InternUtf16(const std::string &str);
+  static MString *InternUtf(const std::string &str);
+  static uint32_t GetStringBaseSize();
+  static uint32_t GetCountOffset();
+  static bool GetCompressFlag();
+  template<typename Type>
+  static inline bool IsCompressChars(const Type *chars, uint32_t length);
+  static bool IsCompressChar(const uint16_t c);
+  static bool IsCompressedFromCount(const uint32_t countValue);
+  static inline bool IsCompressCharsExcept(const uint16_t *chars, uint32_t length, uint16_t except);
+  template<typename srcType, typename Func>
+  static inline MString *NewStringObject(uint32_t stringLen, const Func &fillContents, bool isJNI = false);
+  template<typename srcType>
+  static inline MString *NewStringObject(const srcType *src, uint32_t stringLen, bool isJNI = false);
+  template<typename dstType, typename srcType>
+  static inline MString *NewStringObject(const srcType *src, uint32_t stringLen, bool isJNI = false);
+  static inline MString *NewStringObject(const uint16_t *src, uint32_t stringLen, bool isJNI = false);
+  static MString *NewEmptyStringObject();
+  static MString *NewConstEmptyStringObject();
+  static MString *NewStringObjectFromUtf8(const std::string &str);
+  static MString *NewStringObjectFromUtf16(const std::string &str);
+  static MString *NewStringObjectFromUtf(const std::string &str);
+
+  template<typename T>
+  static inline MString *JniCast(T s);
+  template<typename T>
+  static inline MString *JniCastNonNull(T s);
+  jstring inline AsJstring() const;
+
+ private:
+  uint32_t count;
+  uint32_t hash;
+  uint8_t content[];
+  static constexpr bool enableStringCompression = true;
+};
+} // namespace maplert
+#endif // MRT_MAPLERT_INCLUDE_MSTRING_H_
diff --git a/src/mrt/maplert/include/mstring_inline.h b/src/mrt/maplert/include/mstring_inline.h
new file mode 100644
index 0000000000..7f8f0293d3
--- /dev/null
+++ b/src/mrt/maplert/include/mstring_inline.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MRT_MAPLERT_INCLUDE_MSTRING_INLINE_H_
+#define MRT_MAPLERT_INCLUDE_MSTRING_INLINE_H_
+
+#include "mstring.h"
+#include "heap_stats.h"
+#include "mobject_inline.h"
+#include "mrt_well_known.h"
+namespace maplert {
+inline uint32_t MString::GetCountValue() const {
+  return count;
+}
+
+inline void MString::SetCountValue(uint32_t countValue) {
+  count = countValue;
+}
+
+inline uint32_t MString::GetLength() const {
+  return GetCountValue() >> 1;
+}
+
+inline uint32_t MString::GetHash() const {
+  return hash;
+}
+
+inline void MString::SetHash(uint32_t hashCode) {
+  hash = hashCode;
+}
+
+inline uint8_t *MString::GetContentsPtr() const {
+  return reinterpret_cast<uint8_t*>(AsUintptr() + GetStringBaseSize());
+}
+
+inline uint32_t MString::GetCountOffset() {
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&static_cast<MString*>(0)->count));
+}
+
+inline bool MString::GetCompressFlag() {
+  return enableStringCompression;
+}
+
+inline bool MString::IsCompress() const {
+  uint32_t countValue = GetCountValue();
+  return static_cast<uint8_t>(countValue) & 0x01;
+}
+
+inline bool MString::IsCompressedFromCount(const uint32_t countValue) {
+  return countValue & 0x01;
+}
+
+inline bool MString::IsLiteral() const {
+  // literal is off-heap java-string or string in PERM-space
+  return IsOffHeap();
+}
+
+inline uint16_t MString::CharAt(uint32_t index) const {
+  bool isCompress = IsCompress();
+  uint8_t *strContent = GetContentsPtr();
+  return isCompress ? *(strContent + index) : *(reinterpret_cast<uint16_t*>(strContent) + index);
+}
+
+inline bool MString::IsCompressChar(const uint16_t c) {
+  return (c - 1u) < 0x7fu;
+}
+
+template<typename Type>
+inline bool MString::IsCompressChars(const Type *chars, uint32_t length) {
+  if (!enableStringCompression) {
+    return false;
+  }
+  DCHECK(chars != nullptr) << "MString::IsCompressChars: chars is nullptr." << maple::endl;
+  for (uint32_t i = 0; i < length; ++i) {
+    if (!IsCompressChar(chars[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+inline bool MString::IsCompressCharsExcept(const uint16_t *chars, uint32_t length, uint16_t except) {
+  DCHECK(chars != nullptr) << "MString::IsCompressChars: chars is nullptr." << maple::endl;
+  for (uint32_t i = 0; i < length; ++i) {
+    uint16_t c = chars[i];
+    if (!IsCompressChar(c) && c != except) {
+      return false;
+    }
+  }
+  return true;
+}
+
+inline uint32_t MString::GetStringSize() const {
+  return IsCompress() ? GetStringBaseSize() + GetLength() : GetStringBaseSize() + GetCountValue();
+}
+
+inline uint32_t MString::GetStringBaseSize() {
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&(static_cast<MString*>(0))->content));
+}
+
+inline bool MString::Cmp(const std::string &src) const {
+  bool isCompress = IsCompress();
+  bool res = true;
+  if (isCompress) {
+    size_t dstLen = GetLength(); // extended to size_t
+    size_t srcLen = src.length(); // may be > INT32_MAX
+    uint8_t *dstName = GetContentsPtr();
+    if (srcLen == dstLen) { // if srcLen > INT32_MAX, this must not be equal.
+      for (size_t i = 0; i < dstLen; ++i) {
+        if (src[i] != dstName[i]) {
+          res = false;
+          break;
+        }
+      }
+    } else {
+      res = false;
+    }
+  } else {
+    std::string charStr = GetChars();
+    res = (src == charStr);
+  }
+  return res;
+}
+
+template<typename srcType, typename Func>
+inline MString *MString::NewStringObject(uint32_t stringLen, const Func &fillContents, bool isJNI) {
+  constexpr bool compressible = (sizeof(uint8_t) == sizeof(srcType));
+  DCHECK(stringLen <= ((UINT32_MAX - GetStringBaseSize()) / sizeof(srcType))) << "stringLen must valid" << maple::endl;
+  size_t memLen = GetStringBaseSize() + stringLen * sizeof(srcType);
+  uint32_t stringCount = (stringLen != 0) ? (((static_cast<uint32_t>(stringLen) << 1)) | compressible) : 0;
+  MString *stringObj = static_cast<MString*>(MObject::NewObject(*WellKnown::GetMClassString(), memLen, isJNI));
+  if (UNLIKELY(stringObj == nullptr)) {
+    return nullptr;
+  }
+  stringObj->SetCountValue(stringCount);
+  fillContents(*stringObj);
+  // add memory order release
+  std::atomic_thread_fence(std::memory_order_release);
+  return stringObj;
+}
+
+template<typename srcType>
+inline MString *MString::NewStringObject(const srcType *src, uint32_t stringLen, bool isJNI) {
+  DCHECK(src != nullptr) << "MString::NewStringObject: src is nullptr." << maple::endl;
+  DCHECK(stringLen <= UINT32_MAX / sizeof(srcType)) << "stringLen must valid" << maple::endl;
+  size_t memLen = stringLen * sizeof(srcType);
+  MString *res = NewStringObject<srcType>(stringLen, [&](MString &stringObj) {
+    srcType *resDst = reinterpret_cast<srcType*>(stringObj.GetContentsPtr());
+    if (memLen != 0) {
+      if (memcpy_s(resDst, memLen, src, memLen) != EOK) {
+        LOG(ERROR) << "newStringObject memcpy_s() not return EOK" << maple::endl;
+      }
+    }
+  }, isJNI);
+  return res;
+}
+
+template<typename dstType, typename srcType>
+inline MString *MString::NewStringObject(const srcType *src, uint32_t stringLen, bool isJNI) {
+  DCHECK(src != nullptr) << "MString::NewStringObject: src is nullptr." << maple::endl;
+  MString *res = NewStringObject<dstType>(stringLen, [&](MString &stringObj) {
+    dstType *resDst = reinterpret_cast<dstType*>(stringObj.GetContentsPtr());
+    for (uint32_t i = 0; i < stringLen; ++i) {
+      resDst[i] = static_cast<dstType>(src[i]);
+    }
+  }, isJNI);
+  return res;
+}
+
+inline MString *MString::NewStringObject(const uint16_t *src, uint32_t stringLen, bool isJNI) {
+  bool isCompress = IsCompressChars(src, stringLen);
+  return isCompress ? NewStringObject<uint8_t, uint16_t>(src, stringLen, isJNI) :
+      NewStringObject<uint16_t>(src, stringLen, isJNI);
+}
+
+inline MString *MString::NewEmptyStringObject() {
+  return NewStringObject<uint8_t>(0, [](MString&) { });
+}
+
+inline void MString::SetStringClass() {
+  // only used to set literal string
+  if (GetClass() == nullptr) {
+    StoreObjectOffHeap(0, WellKnown::GetMClassString());
+    std::atomic_thread_fence(std::memory_order_release);
+  }
+}
+
+template<typename T>
+inline MString *MString::JniCast(T s) {
+  static_assert(std::is_same<T, jstring>::value || std::is_same<T, jobject>::value, "wrong type");
+  return reinterpret_cast<MString*>(s);
+}
+
+template<typename T>
+inline MString *MString::JniCastNonNull(T s) {
+  DCHECK(s != nullptr);
+  return JniCast(s);
+}
+
+inline jstring MString::AsJstring() const {
+  return reinterpret_cast<jstring>(const_cast<MString*>(this));
+}
+} // namespace maplert
+#endif // MRT_MAPLERT_INCLUDE_MSTRING_INLINE_H_
diff --git a/src/mrt/maplert/include/reflection_list.def b/src/mrt/maplert/include/reflection_list.def
new file mode 100644
index 0000000000..3f339ba88b
--- /dev/null
+++ b/src/mrt/maplert/include/reflection_list.def
@@ -0,0 +1,30 @@
+"Ljava_2Flang_2FClass_3B_7CfindInterfaceMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B",
+"Ljava_2Flang_2FClass_3B_7CgetAccessFlags_7C_28_29I",
+"Ljava_2Flang_2FClass_3B_7CgetClasses_7C_28_29ALjava_2Flang_2FClass_3B",
+"Ljava_2Flang_2FClass_3B_7CgetClassLoader_7C_28_29Ljava_2Flang_2FClassLoader_3B",
+"Ljava_2Flang_2FClass_3B_7CgetComponentType_7C_28_29Ljava_2Flang_2FClass_3B",
+"Ljava_2Flang_2FClass_3B_7CgetConstructor_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B",
+"Ljava_2Flang_2FClass_3B_7CgetConstructors_7C_28_29ALjava_2Flang_2Freflect_2FConstructor_3B",
+"Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructor_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B",
+"Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructors_7C_28_29ALjava_2Flang_2Freflect_2FConstructor_3B",
+"Ljava_2Flang_2FClass_3B_7CgetDeclaredMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B",
+"Ljava_2Flang_2FClass_3B_7CgetDeclaredMethods_7C_28_29ALjava_2Flang_2Freflect_2FMethod_3B",
+"Ljava_2Flang_2FClass_3B_7CgetField_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B",
+"Ljava_2Flang_2FClass_3B_7CgetFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B",
+"Ljava_2Flang_2FClass_3B_7CgetInstanceMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B",
+"Ljava_2Flang_2FClass_3B_7CgetInterfaces_7C_28_29ALjava_2Flang_2FClass_3B",
+"Ljava_2Flang_2FClass_3B_7CgetMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B",
+"Ljava_2Flang_2FClass_3B_7CgetMethods_7C_28_29ALjava_2Flang_2Freflect_2FMethod_3B",
+"Ljava_2Flang_2FClass_3B_7CgetModifiers_7C_28_29I",
+"Ljava_2Flang_2FClass_3B_7CgetName_7C_28_29Ljava_2Flang_2FString_3B",
+"Ljava_2Flang_2FClass_3B_7CgetPublicFieldsRecursive_7C_28Ljava_2Futil_2FList_3B_29V",
+"Ljava_2Flang_2FClass_3B_7CgetPublicMethodsInternal_7C_28Ljava_2Futil_2FList_3B_29V",
+"Ljava_2Flang_2FClass_3B_7CgetSuperclass_7C_28_29Ljava_2Flang_2FClass_3B",
+"Ljava_2Flang_2FClass_3B_7CisAssignableFrom_7C_28Ljava_2Flang_2FClass_3B_29Z",
+"Ljava_2Flang_2FClass_3B_7CisInterface_7C_28_29Z",
+"Ljava_2Flang_2FClass_3B_7CisPrimitive_7C_28_29Z",
+"Ljava_2Flang_2FClass_3B_7CisProxy_7C_28_29Z",
+"Ljava_2Flang_2FClass_3B_7CisMemberClass_7C_28_29Z",
+"Ljava_2Flang_2FClass_3B_7CisLocalClass_7C_28_29Z",
+"Ljava_2Flang_2Fref_2FReference_3B_7Cget_7C_28_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Fref_2FReference_3B_7Cclear_7C_28_29V", diff --git a/src/mrt/maplert/include/white_list.def b/src/mrt/maplert/include/white_list.def new file mode 100644 index 0000000000..7ea27fede3 --- /dev/null +++ b/src/mrt/maplert/include/white_list.def @@ -0,0 +1,496 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +CLASS_PREFIX(Ljava_2Flang_2FOutOfMemoryError_3B) +CLASS_PREFIX(Ljava_2Flang_2FThreadGroup_3B) +CLASS_PREFIX(Ljava_2Flang_2FThread_3B) +CLASS_PREFIX(Llibcore_2Futil_2FEmptyArray_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_3B) +CLASS_PREFIX(Ljava_2Flang_2FFloat_3B) +CLASS_PREFIX(Ljava_2Fnio_2FDirectByteBuffer_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FVMRuntime_3B) +CLASS_PREFIX(Ljava_2Futil_2FProperties_3B) +CLASS_PREFIX(Ljava_2Flang_2FDouble_3B) +CLASS_PREFIX(Ljava_2Flang_2FMath_3B) +CLASS_PREFIX(Llibcore_2Fio_2FLibcore_3B) +CLASS_PREFIX(Llibcore_2Ficu_2FICU_3B) +CLASS_PREFIX(Ljava_2Flang_2FAndroidHardcodedSystemProperties_3B) +CLASS_PREFIX(Ljava_2Flang_2Fref_2FReferenceQueue_3B) +CLASS_PREFIX(Ljava_2Futil_2FArrayList_3B) +CLASS_PREFIX(Ljava_2Flang_2FRuntime_3B) +CLASS_PREFIX(Landroid_2Fsystem_2FOsConstants_3B) +CLASS_PREFIX(Ljava_2Flang_2FInteger_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FUnsafe_3B) +//CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FConcurrentHashMap_3B) +//CLASS_PREFIX(Lsun_2Futil_2Flocale_2FBaseLocale_3B) +CLASS_PREFIX(Lsun_2Futil_2Flocale_2FBaseLocale_24Key_3B) +//CLASS_PREFIX(Ljava_2Futil_2FLocale_3B) +CLASS_PREFIX(Lsun_2Futil_2Flocale_2FLanguageTag_3B) +CLASS_PREFIX(Lsun_2Futil_2Flocale_2FInternalLocaleBuilder_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FVersion_3B) +CLASS_PREFIX(Ljava_2Fio_2FFileDescriptor_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FCloseGuard_3B) +CLASS_PREFIX(Ljava_2Flang_2FEnum_3B) +CLASS_PREFIX(Llibcore_2Fio_2FIoTracker_24Mode_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicReferenceFieldUpdater_24AtomicReferenceFieldUpdaterImpl_3B) +CLASS_PREFIX(Ljava_2Fio_2FBufferedInputStream_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FVM_3B) +CLASS_PREFIX(Ljava_2Flang_2FSystem_3B) +CLASS_PREFIX(Ljava_2Fio_2FFileSystem_3B) +CLASS_PREFIX(Ljava_2Fio_2FUnixFileSystem_3B) +CLASS_PREFIX(Ljava_2Fio_2FFile_3B) +CLASS_PREFIX(Ljava_2Futil_2Fregex_2FPattern_3B) +CLASS_PREFIX(Ljava_2Fio_2FFile_24PathStatus_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B) +CLASS_PREFIX(Ljava_2Flang_2FThreadLocal_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FBlockGuard_3B) +CLASS_PREFIX(Ljava_2Flang_2FString_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FBaseDexClassLoader_3B) +CLASS_PREFIX(Ljava_2Flang_2FClassLoader_24SystemClassLoader_3B) +CLASS_PREFIX(Ljava_2Flang_2FByte_3B) +CLASS_PREFIX(Ljava_2Flang_2FInteger_24IntegerCache_3B) +CLASS_PREFIX(Ljava_2Futil_2FUUID_3B) +CLASS_PREFIX(Ljava_2Futil_2FHashSet_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fcs_2FStreamDecoder_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCharset_3B) +CLASS_PREFIX(Llibcore_2Ficu_2FNativeConverter_3B) 
+CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCodingErrorAction_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FCleaner_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteOrder_3B) +CLASS_PREFIX(Ljava_2Fnio_2FBits_3B) +CLASS_PREFIX(Ljava_2Fio_2FBufferedReader_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCoderResult_3B) +CLASS_PREFIX(Ljava_2Flang_2FLong_3B) +CLASS_PREFIX(Ljava_2Flang_2FShort_3B) +CLASS_PREFIX(Ljava_2Flang_2FCharacter_3B) +CLASS_PREFIX(Ljava_2Flang_2FBoolean_3B) +CLASS_PREFIX(Ljava_2Flang_2FStringBuffer_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fdalvik_2Fddmc_2FChunkHandler_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fdalvik_2Fddmc_2FDdmServer_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicBoolean_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCacheValue_24Strength_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCacheValue_3B) +CLASS_PREFIX(Ljava_2Futil_2FLocale_24NoImagePreloadHolder_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FULocale_24Category_3B) +CLASS_PREFIX(Ljava_2Futil_2FLocale_24Category_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FULocale_24JDKLocaleHelper_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FULocale_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FDecimalFormatSymbols_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FNumberingSystem_3B) +CLASS_PREFIX(Ljava_2Futil_2FResourceBundle_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FUResourceBundle_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FConcurrentHashMap_24TreeBin_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FVersionInfo_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUDebug_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUResourceBundle_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUResourceBundle_24OpenType_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUResourceBundleReader_3B) +CLASS_PREFIX(Ljava_2Fnet_2FURI_3B) +CLASS_PREFIX(Ljava_2Futil_2Fzip_2FZipFile_3B) +CLASS_PREFIX(Ljava_2Futil_2Fjar_2FJarFile_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FStandardCharsets_3B) +CLASS_PREFIX(Ljava_2Futil_2FWeakHashMap_3B) +CLASS_PREFIX(Ljava_2Futil_2FArrayDeque_3B) +CLASS_PREFIX(Ljava_2Flang_2FVMClassLoader_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCharsetEncoder_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCharsetEncoderICU_3B) +CLASS_PREFIX(Ljava_2Futil_2FBitSet_3B) +CLASS_PREFIX(Lsun_2Fnet_2Fwww_2FParseUtil_3B) +CLASS_PREFIX(Ljava_2Fnet_2FURL_3B) +CLASS_PREFIX(Ljava_2Flang_2FCaseMapper_3B) +CLASS_PREFIX(Ljava_2Fnet_2FURLConnection_3B) +CLASS_PREFIX(Ljava_2Futil_2Fzip_2FInflater_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUConfig_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fchannels_2FFileChannel_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FFileChannelImpl_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FNativeThreadSet_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fchannels_2FFileChannel_24MapMode_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FFileChannelImpl_24Unmapper_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUBinary_24DatPackageReader_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUBinary_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteBufferAsCharBuffer_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUResourceBundleReader_24ResourceCache_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FUResourceBundle_24RootType_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FUResource_24Key_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCurrencyData_3B) +CLASS_PREFIX(Ljava_2Futil_2FTreeSet_3B) +CLASS_PREFIX(Ljava_2Futil_2FTreeMap_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FBMPSet_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FUnicodeSet_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FMeasureUnit_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FLocaleIDs_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FCurrency_3B) 
+CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Flocale_2FBaseLocale_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FUtility_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FCurrencyMetaInfo_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FCurrencyMetaInfo_24CurrencyFilter_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCurrencyData_24CurrencySpacingInfo_24SpacingType_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCurrencyData_24CurrencySpacingInfo_24SpacingPattern_3B) +CLASS_PREFIX(Ljava_2Flang_2FVoid_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCurrencyData_24CurrencySpacingInfo_3B) +CLASS_PREFIX(Ljava_2Flang_2FThread_24State_3B) +CLASS_PREFIX(Llibcore_2Freflect_2FAnnotationMember_24DefaultValues_3B) +CLASS_PREFIX(Llibcore_2Freflect_2FAnnotationMember_3B) +CLASS_PREFIX(Ljava_2Flang_2Fannotation_2FAnnotation_3B) +CLASS_PREFIX(Llibcore_2Freflect_2FAnnotationFactory_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicLong_3B) +//CLASS_PREFIX(Ljava_2Flang_2Freflect_2FProxy_24ProxyClassFactory_3B) +CLASS_PREFIX(Ljava_2Flang_2Freflect_2FProxy_3B) +//CLASS_PREFIX(Ljava_2Flang_2Freflect_2FWeakCache_24CacheKey_3B) +CLASS_PREFIX(Ljava_2Flang_2Freflect_2FWeakCache_24Factory_3B) +CLASS_PREFIX(Ljava_2Futil_2FTimSort_3B) +CLASS_PREFIX(Ljava_2Flang_2Freflect_2FMethod_3B) +CLASS_PREFIX(Ljava_2Futil_2FHashMap_24TreeNode_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24EmptyIterator_3B) +CLASS_PREFIX(Landroid_2Ficu_2Flang_2FUScript_24ScriptUsage_3B) +CLASS_PREFIX(Landroid_2Ficu_2Flang_2FUScript_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteBufferAsIntBuffer_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FUPropertyAliases_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FBytesTrie_24Result_3B) +CLASS_PREFIX(Landroid_2Ficu_2Flang_2FUScript_24ScriptMetadata_3B) +CLASS_PREFIX(Lorg_2Fxmlpull_2Fv1_2FXmlPullParser_3B) +//CLASS_PREFIX(Lorg_2Fkxml2_2Fio_2FKXmlParser_3B) +//CLASS_PREFIX(Lorg_2Fkxml2_2Fio_2FKXmlParser_24ValueContext_3B) +CLASS_PREFIX(Ljava_2Futil_2Fregex_2FMatcher_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFloatingDecimal_24ExceptionalBinaryToASCIIBuffer_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFloatingDecimal_24BinaryToASCIIBuffer_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFloatingDecimal_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFloatingDecimal_24ASCIIToBinaryBuffer_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCaseMapImpl_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FTrie_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FCharTrie_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUCache_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUResourceBundleReader_24ResourceCache_24Level_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FICUService_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FStringPrep_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FStringPrepDataReader_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FTrie2_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FTrie2_24ValueWidth_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FUCharacterProperty_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FUBiDiProps_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FIDNA2003_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FNorm2AllModes_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FNormalizer2Impl_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FNorm2AllModes_24NFKCSingleton_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FPatternProps_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FStandardPlural_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FPluralRulesLoader_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2FUCaseProps_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fmath_2FMathContext_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fmath_2FBigDecimal_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FBreakIterator_3B) 
+CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Flocks_2FReentrantReadWriteLock_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Flocks_2FAbstractQueuedSynchronizer_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FBreakIteratorFactory_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FCaseMap_24Upper_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FNumberFormat_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FDecimalFormat_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FDisplayContext_24Type_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FDisplayContext_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FNormalizer_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FNormalizer_24NFKDModeImpl_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FNumberFormat_24Field_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FPluralRules_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FPluralRules_24Operand_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FPluralRules_24PluralType_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FPluralRules_24SampleType_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FPluralRules_24SimpleTokenizer_3B) +//CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FRBBIDataWrapper_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FRuleBasedBreakIterator_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FRuleBasedBreakIterator_24LookAheadResults_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FStringPrepParseException_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FTimeZoneNames_24NameType_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FCurrency_24CurrencyUsage_3B) +CLASS_PREFIX(Ljava_2Futil_2FEnumMap_3B) +//CLASS_PREFIX(Lsun_2Fnet_2Fspi_2FDefaultProxySelector_3B) +CLASS_PREFIX(Ljava_2Fnet_2FProxySelector_3B) +CLASS_PREFIX(Ljavax_2Fnet_2Fssl_2FSSLSocketFactory_3B) +CLASS_PREFIX(Landroid_2Fsystem_2FUnixSocketAddress_3B) +CLASS_PREFIX(Ljava_2Flang_2FLong_24LongCache_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLevel_24KnownLevel_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLevel_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FHandler_3B) +CLASS_PREFIX(Ljava_2Flang_2Fref_2FReference_3B) +CLASS_PREFIX(Ljava_2Flang_2Fref_2FWeakReference_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FTimeUnit_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FCopyOnWriteArrayList_3B) +CLASS_PREFIX(Llibcore_2Fnet_2Fevent_2FNetworkEventDispatcher_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FAbstractExecutorService_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FThreadPoolExecutor_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicReference_3B) +CLASS_PREFIX(Ljava_2Fnet_2FHttpURLConnection_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FDebug_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FProvider_3B) +CLASS_PREFIX(Ljava_2Futil_2FCalendar_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FSocketTagger_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FVMDebug_3B) +CLASS_PREFIX(Ljava_2Fio_2FConsole_3B) +CLASS_PREFIX(Ljava_2Fio_2FObjectStreamConstants_3B) +CLASS_PREFIX(Ljava_2Fio_2FObjectInputStream_3B) +CLASS_PREFIX(Ljava_2Fio_2FObjectStreamClass_3B) +CLASS_PREFIX(Ljava_2Flang_2FByte_24ByteCache_3B) +CLASS_PREFIX(Ljava_2Flang_2FCharacter_24CharacterCache_3B) +CLASS_PREFIX(Ljava_2Flang_2FCharacter_24UnicodeBlock_3B) +CLASS_PREFIX(Ljava_2Flang_2FClass_24Caches_3B) +CLASS_PREFIX(Ljava_2Flang_2Fref_2FFinalizerReference_3B) +CLASS_PREFIX(Ljava_2Flang_2FDaemons_24FinalizerDaemon_3B) +CLASS_PREFIX(Ljava_2Flang_2FDaemons_24FinalizerWatchdogDaemon_3B) +CLASS_PREFIX(Ljava_2Flang_2FDaemons_24HeapTaskDaemon_3B) +CLASS_PREFIX(Ljava_2Flang_2FDaemons_24ReferenceQueueDaemon_3B) +CLASS_PREFIX(Ljava_2Futil_2FRandom_3B) +CLASS_PREFIX(Ljava_2Flang_2FMath_24RandomNumberGeneratorHolder_3B) +CLASS_PREFIX(Ljava_2Flang_2FPackage_3B) 
+CLASS_PREFIX(Ljava_2Flang_2FProcessBuilder_3B) +CLASS_PREFIX(Ljava_2Flang_2FProcessEnvironment_3B) +CLASS_PREFIX(Ljava_2Flang_2FShort_24ShortCache_3B) +CLASS_PREFIX(Ljava_2Flang_2FStrictMath_3B) +CLASS_PREFIX(Ljava_2Flang_2FThread_24Caches_3B) +CLASS_PREFIX(Ljava_2Flang_2FThrowable_24SentinelHolder_3B) +CLASS_PREFIX(Ljava_2Flang_2FUNIXProcess_24ProcessReaperThreadFactory_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FSynchronousQueue_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FSynchronousQueue_24TransferStack_3B) +CLASS_PREFIX(Ljava_2Flang_2FUNIXProcess_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FMethodHandleStatics_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FCallSite_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FMethodHandle_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FMethodHandles_24Lookup_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FMethodType_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FMethodTypeForm_3B) +CLASS_PREFIX(Ljava_2Flang_2Freflect_2FMethod_241_3B) +CLASS_PREFIX(Llibcore_2Fmath_2FMathUtils_3B) +CLASS_PREFIX(Ljava_2Fmath_2FBigInt_3B) +CLASS_PREFIX(Ljava_2Fmath_2FBigInteger_3B) +CLASS_PREFIX(Ljava_2Fmath_2FMultiplication_3B) +CLASS_PREFIX(Ljava_2Fmath_2FBigDecimal_3B) +CLASS_PREFIX(Ljava_2Fmath_2FRoundingMode_3B) +CLASS_PREFIX(Ljava_2Fnet_2FInet6AddressImpl_3B) +CLASS_PREFIX(Ljava_2Fnet_2FInetAddress_3B) +CLASS_PREFIX(Ljava_2Fnet_2FInet4Address_3B) +CLASS_PREFIX(Ljava_2Fnet_2FInet6Address_3B) +CLASS_PREFIX(Ljava_2Fnet_2FInetSocketAddress_3B) +CLASS_PREFIX(Ljava_2Fnet_2FNetworkInterface_3B) +CLASS_PREFIX(Ljava_2Fnet_2FProxy_24Type_3B) +CLASS_PREFIX(Ljava_2Fnet_2FProxy_3B) +CLASS_PREFIX(Ljava_2Fnet_2FServerSocket_3B) +CLASS_PREFIX(Ljava_2Fnet_2FSocket_3B) +CLASS_PREFIX(Ljava_2Fnet_2FSocksSocketImpl_3B) +CLASS_PREFIX(Ljava_2Fnet_2FURLEncoder_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteBufferAsDoubleBuffer_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteBufferAsFloatBuffer_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteBufferAsLongBuffer_3B) +CLASS_PREFIX(Ljava_2Fnio_2FByteBufferAsShortBuffer_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fchannels_2Fspi_2FAbstractSelectableChannel_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fchannels_2FSocketChannel_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCharsetDecoder_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FCryptoPrimitive_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FKeyFactory_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FProvider_24Service_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FSecureRandom_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FSecurity_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FSignature_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2FSignature_24Delegate_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FObjectIdentifier_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2Fcert_2FX509CertSelector_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2Fspec_2FECPoint_3B) +CLASS_PREFIX(Ljava_2Ftext_2FAttributedCharacterIterator_24Attribute_3B) +CLASS_PREFIX(Ljava_2Ftext_2FDateFormat_24Field_3B) +CLASS_PREFIX(Ljava_2Ftext_2FDateFormatSymbols_3B) +CLASS_PREFIX(Ljava_2Ftext_2FNumberFormat_3B) +CLASS_PREFIX(Ljava_2Ftext_2FDecimalFormat_3B) +CLASS_PREFIX(Ljava_2Ftext_2FDecimalFormatSymbols_3B) +CLASS_PREFIX(Ljava_2Ftext_2FDontCareFieldPosition_3B) +CLASS_PREFIX(Ljava_2Ftext_2FNormalizer_24Form_3B) +CLASS_PREFIX(Ljava_2Futil_2FEnumSet_3B) +CLASS_PREFIX(Ljava_2Ftext_2FSimpleDateFormat_3B) +CLASS_PREFIX(Ljava_2Futil_2FArrays_24NaturalOrder_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24CopiesList_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24EmptyEnumeration_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24EmptyListIterator_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24ReverseComparator_3B) 
+CLASS_PREFIX(Ljava_2Futil_2FCollections_24ReverseComparator2_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24UnmodifiableNavigableMap_3B) +CLASS_PREFIX(Ljava_2Futil_2FCollections_24UnmodifiableNavigableSet_3B) +CLASS_PREFIX(Ljava_2Futil_2FComparableTimSort_3B) +CLASS_PREFIX(Ljava_2Futil_2FComparators_24NaturalOrderComparator_3B) +CLASS_PREFIX(Ljava_2Futil_2FCurrency_3B) +CLASS_PREFIX(Lsun_2Futil_2Fcalendar_2FCalendarSystem_3B) +CLASS_PREFIX(Ljava_2Futil_2FDate_3B) +CLASS_PREFIX(Ljava_2Futil_2FFormatter_24Flags_3B) +CLASS_PREFIX(Ljava_2Futil_2FFormatter_24FormatSpecifier_3B) +CLASS_PREFIX(Ljava_2Futil_2FGregorianCalendar_3B) +CLASS_PREFIX(Ljava_2Futil_2FLocale_24FilteringMode_3B) +CLASS_PREFIX(Ljava_2Futil_2FSimpleTimeZone_3B) +CLASS_PREFIX(Ljava_2Futil_2FSpliterators_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FConcurrentLinkedDeque_24Node_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FConcurrentLinkedDeque_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FConcurrentLinkedQueue_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FForkJoinTask_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FCountedCompleter_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FExecutors_24DefaultThreadFactory_3B) +// CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FForkJoinPool_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FFutureTask_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FPriorityBlockingQueue_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FScheduledThreadPoolExecutor_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FSynchronousQueue_24TransferStack_24SNode_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FThreadLocalRandom_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicReferenceArray_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Flocks_2FAbstractQueuedSynchronizer_24Node_3B) +CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2Flocks_2FLockSupport_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLogManager_24LoggerContext_3B) +//CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLogManager_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLogManager_242_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLogRecord_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLogger_3B) +CLASS_PREFIX(Ljava_2Futil_2Flogging_2FLoggingProxyImpl_3B) +CLASS_PREFIX(Ljava_2Futil_2Fprefs_2FPreferences_3B) +CLASS_PREFIX(Ljava_2Futil_2Fprefs_2FAbstractPreferences_3B) +//CLASS_PREFIX(Ljava_2Futil_2Fprefs_2FFileSystemPreferences_3B) +CLASS_PREFIX(Ljava_2Futil_2Fregex_2FPatternSyntaxException_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FAbstractPipeline_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FStreamOpFlag_24Type_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FStreamOpFlag_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FStreamShape_3B) +CLASS_PREFIX(Ljava_2Futil_2Fzip_2FAdler32_3B) +CLASS_PREFIX(Ljava_2Futil_2Fzip_2FCRC32_3B) +CLASS_PREFIX(Ljava_2Futil_2Fzip_2FDeflater_3B) +CLASS_PREFIX(Ljavax_2Fcrypto_2FCipher_24InitType_3B) +CLASS_PREFIX(Ljavax_2Fcrypto_2FCipher_24NeedToSet_3B) +CLASS_PREFIX(Ljavax_2Fnet_2Fssl_2FHttpsURLConnection_3B) +CLASS_PREFIX(Ljavax_2Fnet_2Fssl_2FSNIServerName_3B) +CLASS_PREFIX(Ljavax_2Fnet_2Fssl_2FSSLServerSocketFactory_3B) +CLASS_PREFIX(Ljavax_2Fsecurity_2Fcert_2FX509Certificate_3B) +CLASS_PREFIX(Llibcore_2Ficu_2FLocaleData_3B) +//CLASS_PREFIX(Llibcore_2Fio_2FDropBox_3B) +//CLASS_PREFIX(Llibcore_2Fio_2FEventLogger_3B) +CLASS_PREFIX(Llibcore_2Fnet_2FNetworkSecurityPolicy_3B) +CLASS_PREFIX(Llibcore_2Freflect_2FListOfTypes_3B) +CLASS_PREFIX(Llibcore_2Freflect_2FTypes_3B) +CLASS_PREFIX(Llibcore_2Futil_2FZoneInfo_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2FExpatParser_3B) 
+CLASS_PREFIX(Lorg_2Fjson_2FJSONObject_3B) +CLASS_PREFIX(Lorg_2Fjson_2FJSONStringer_24Scope_3B) +CLASS_PREFIX(Lsun_2Finvoke_2Futil_2FVerifyAccess_3B) +CLASS_PREFIX(Lsun_2Finvoke_2Futil_2FWrapper_24Format_3B) +CLASS_PREFIX(Lsun_2Finvoke_2Futil_2FWrapper_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFDBigInteger_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFormattedFloatingDecimal_3B) +CLASS_PREFIX(Lsun_2Fmisc_2FFormattedFloatingDecimal_24Form_3B) +CLASS_PREFIX(Lsun_2Fnet_2Fspi_2FDefaultProxySelector_24NonProxyInfo_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FDatagramChannelImpl_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FFileLockImpl_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FIOUtil_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FNet_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FServerSocketChannelImpl_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FSharedFileLockTable_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FSocketChannelImpl_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FUtil_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fch_2FUtil_24BufferCache_3B) +CLASS_PREFIX(Lsun_2Fnio_2Fcs_2FStreamEncoder_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fjca_2FProviderConfig_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fjca_2FProviderList_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FCertificateExtensions_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fpkcs_2FPKCS9Attribute_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FDisabledAlgorithmConstraints_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FAlgorithmDecomposer_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FDisabledAlgorithmConstraints_24Constraints_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fpkcs_2FSignerInfo_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2FX509Factory_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FAdaptableX509CertSelector_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FDisabledAlgorithmConstraints_24Constraint_24Operator_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FBasicChecker_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FConstraintsChecker_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FKeyChecker_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FPKIX_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FPKIXCertPathValidator_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FPKIXMasterCertPathValidator_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fprovider_2Fcertpath_2FPolicyChecker_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FBitArray_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Futil_2FDerOutputStream_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FAVA_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FX500Name_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FAVAKeyword_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FAccessDescription_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FAlgorithmId_3B) +CLASS_PREFIX(Ljava_2Fsecurity_2Fcert_2FCRLReason_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FCRLReasonCodeExtension_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FDistributionPoint_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FExtendedKeyUsageExtension_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FInhibitAnyPolicyExtension_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FNetscapeCertTypeExtension_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FPKIXExtensions_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FOIDMap_3B) +CLASS_PREFIX(Lsun_2Fsecurity_2Fx509_2FX509CertInfo_3B) +CLASS_PREFIX(Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B) +CLASS_PREFIX(Lsun_2Futil_2Flogging_2FLoggingSupport_3B) +CLASS_PREFIX(Lsun_2Futil_2Flogging_2FPlatformLogger_24Level_3B) +CLASS_PREFIX(Lsun_2Futil_2Flogging_2FPlatformLogger_3B) +CLASS_PREFIX(Ljava_2Fio_2FBufferedWriter_3B) +//CLASS_PREFIX(Lsun_2Fsecurity_2Fjca_2FProviders_3B) 
+//CLASS_PREFIX(Ljava_2Futil_2Fconcurrent_2FCompletableFuture_3B) +CLASS_PREFIX(Ljava_2Futil_2FScanner_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FCollectors_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FNodes_3B) +CLASS_PREFIX(Ljava_2Futil_2Fstream_2FReferencePipeline_3B) +CLASS_PREFIX(Ljava_2Futil_2FTaskQueue_3B) +CLASS_PREFIX(Ljava_2Futil_2FTimer_3B) +CLASS_PREFIX(Ljava_2Futil_2FTimerThread_3B) +//CLASS_PREFIX(Ljavax_2Fcrypto_2FJceSecurity_3B) +CLASS_PREFIX(Ljavax_2Fnet_2Fssl_2FSNIHostName_3B) +CLASS_PREFIX(Ljavax_2Fxml_2Fparsers_2FFilePathToURI_3B) +//CLASS_PREFIX(Llibcore_2Ficu_2FTimeZoneNames_3B) +CLASS_PREFIX(Llibcore_2Futil_2FHexEncoding_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2Fdom_2FCommentImpl_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2Fdom_2FDocumentImpl_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2Fdom_2FDocumentTypeImpl_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2Fdom_2FElementImpl_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2Fdom_2FTextImpl_3B) +CLASS_PREFIX(Lorg_2Fapache_2Fharmony_2Fxml_2Fparsers_2FDocumentBuilderImpl_3B) +CLASS_PREFIX(Lsun_2Fnet_2FNetProperties_3B) +CLASS_PREFIX(Lsun_2Fnet_2FProgressMonitor_3B) +CLASS_PREFIX(Lsun_2Fnet_2Fwww_2Fprotocol_2Ffile_2FFileURLConnection_3B) +CLASS_PREFIX(Lsun_2Fnio_2Ffs_2FNativeBuffer_3B) +CLASS_PREFIX(Lsun_2Fnio_2Ffs_2FNativeBuffers_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationData_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationDataReader_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationFastLatin_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationLoader_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationRoot_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationSettings_3B) +CLASS_PREFIX(Landroid_2Ficu_2Fimpl_2Fcoll_2FCollationTailoring_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FCollator_3B) +//CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FDigitList_3B) +CLASS_PREFIX(Landroid_2Ficu_2Ftext_2FRuleBasedCollator_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FBytesTrie_3B) +CLASS_PREFIX(Landroid_2Ficu_2Futil_2FTimeUnit_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FPathClassLoader_3B) +CLASS_PREFIX(Ljava_2Ftext_2FMessageFormat_3B) +CLASS_PREFIX(Llibcore_2Fio_2FStreams_3B) +CLASS_PREFIX(Lsun_2Fnio_2Ffs_2FUnixNativeDispatcher_3B) +CLASS_PREFIX(Lsun_2Fnio_2Ffs_2FUnixPath_3B) +CLASS_PREFIX(Lsun_2Fnio_2Ffs_2FUtil_3B) +CLASS_PREFIX(Lsun_2Futil_2Fcalendar_2FGregorian_3B) +CLASS_PREFIX(Ljava_2Flang_2FInheritableThreadLocal_3B) +CLASS_PREFIX(Ljava_2Flang_2Fref_2FPhantomReference_3B) +CLASS_PREFIX(Ljava_2Fnio_2Fcharset_2FCharsetDecoderICU_3B) +CLASS_PREFIX(Ljava_2Fnio_2Ffile_2FFiles_3B) +CLASS_PREFIX(Ljava_2Futil_2FIdentityHashMap_3B) +CLASS_PREFIX(Ljava_2Futil_2FLinkedHashSet_3B) +CLASS_PREFIX(Ljava_2Futil_2FRegularEnumSet_3B) +//CLASS_PREFIX(Llibcore_2Futil_2FZoneInfoDB_3B) +//CLASS_PREFIX(Ljava_2Flang_2FBootClassLoader_3B) +CLASS_PREFIX(Ljava_2Flang_2Finvoke_2FMethodType_3B) +CLASS_PREFIX(Ldalvik_2Fsystem_2FEmulatedStackFrame_3B) +CLASS_PREFIX(Ljava_2Flang_2FStringFactory_3B) +CLASS_PREFIX(Ljava_2Flang_2FNumber_3B) +CLASS_PREFIX(Ljava_2Futil_2FHashMap_3B) diff --git a/src/mrt/maplert/linker/mapleArm32lld.so.lds b/src/mrt/maplert/linker/mapleArm32lld.so.lds new file mode 100644 index 0000000000..4231364886 --- /dev/null +++ b/src/mrt/maplert/linker/mapleArm32lld.so.lds @@ -0,0 +1,438 @@ +/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", + "elf32-littlearm") +OUTPUT_ARCH(arm) +SEARCH_DIR("=/usr/arm-linux-gnueabihf/lib"); SEARCH_DIR("=/lib/arm-linux-gnueabihf"); SEARCH_DIR("=/usr/lib/arm-linux-gnueabihf"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib"); SEARCH_DIR("=/usr/local/arm-linux-gnueabihf/lib"); +PHDRS +{ + text PT_LOAD FILEHDR PHDRS; + data PT_LOAD; + dynamic PT_DYNAMIC; + note PT_NOTE; + gnu_eh_frame PT_GNU_EH_FRAME; + gnu_stack PT_GNU_STACK; + gnu_relro PT_GNU_RELRO; + /* headers PT_PHDR PHDRS; */ /* Reserve segment for lld */ + /* interp PT_INTERP; */ /* Reserve segment for lld */ + thread_local PT_TLS; /* Reserve segment for lld */ +} + +SECTIONS +{ + PROVIDE_HIDDEN(__maple_start__ = .); + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = SEGMENT_START("text-segment", 0)); . = SEGMENT_START("text-segment", 0) + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : + { + PROVIDE_HIDDEN (__linkerHashSo_begin = .); + KEEP (*(.note.gnu.build-id)) + PROVIDE_HIDDEN (__linkerHashSo_end = .); + } :text :note + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.ifunc) + } + .rela.plt : + { + *(.rela.plt) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .init : + { + KEEP (*(.init)) + } =0 + .plt : ALIGN(16) { *(.plt) *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. 
*/ + *(.gnu.warning) + } =0 + .java_text : + { + PROVIDE_HIDDEN (java_text_begin = .); + KEEP (*(.java_text)) + PROVIDE_HIDDEN (java_text_end = .); + } + .fini : + { + KEEP (*(.fini)) + } =0 + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : + { + *(.rodata .rodata.* .gnu.linkonce.r.*) + } + .rometadata : + { + mfile_rometadata_method_start = .; + KEEP (*(.rometadata.method)) + mfile_rometadata_method_end = .; + + mfile_rometadata_field_start = .; + KEEP (*(.rometadata.field)) + mfile_rometadata_field_end = .; + } + .romuidtab : + { + mfile_romuidtab_start = .; + KEEP (*(.__muid_ro_func_def_orig_tab)) + KEEP (*(.__muid_ro_data_def_orig_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_inf_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_def_muid_tab)) + /* madvised */ + KEEP (*(.__muid_ro_data_def_muid_tab)) + KEEP (*(.__muid_ro_func_undef_muid_tab)) + KEEP (*(.__muid_ro_data_undef_muid_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_muid_idx_tab)) + mfile_romuidtab_end = .; + } + .rodata1 : { *(.rodata1) } + .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) } :text :gnu_eh_frame + .eh_frame : ONLY_IF_RO + { + PROVIDE_HIDDEN ( __eh_frame_start = .); + KEEP (*(.eh_frame)) *(.eh_frame.*) + PROVIDE_HIDDEN ( __eh_frame_end = .); + } :text + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. */ + . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) } + .gnu_extab : ONLY_IF_RW { *(.gnu_extab) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } :thread_local + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } :thread_local + .dynamic : { *(.dynamic) } :data :gnu_relro :dynamic + .preinit_array : + { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } + .init_array : + { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(.init_array.* .ctors.*)) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors)) + PROVIDE_HIDDEN (__init_array_end = .); + } :data :gnu_relro + .fini_array : + { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(.fini_array.* .dtors.*)) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors)) + PROVIDE_HIDDEN (__fini_array_end = .); + } :data :gnu_relro + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin?.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. 
+ The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin?.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .got : { *(.got) *(.igot) } :data :gnu_relro + . = DATA_SEGMENT_RELRO_END (24, .); + .got.plt : { *(.got.plt) *(.igot.plt) } :data + .data : + { + PROVIDE_HIDDEN (__data_section_begin = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + PROVIDE_HIDDEN (__data_section_end = .); + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + /* maple specific sections */ + . = ALIGN( 8 ); + .maple_global_variable : + { + KEEP (*(.maple_global_variable)) + } + + . = ALIGN( 8 ); + .maple_literal_string : + { + KEEP (*(.maple_literal_string)) + } + + . = ALIGN( 8 ); + .maple.gcrootsmap : + { + KEEP (*(.maple.gcrootsmap)) + } + + . = ALIGN( 8 ); + .reg_jni_tab : + { + PROVIDE_HIDDEN (__reg_jni_tab_begin = .); + KEEP (*(.reg_jni_tab)) + PROVIDE_HIDDEN (__reg_jni_tab_end = .); + } + + . = ALIGN( 8 ); + .reg_jni_func_tab : + { + PROVIDE_HIDDEN (__reg_jni_func_tab_begin = .); + KEEP (*(.reg_jni_func_tab)) + PROVIDE_HIDDEN (__reg_jni_func_tab_end = .); + } + + . = ALIGN( 8 ); + .bb_profile_strtab : + { + PROVIDE_HIDDEN (__bb_profile_strtab_begin = .); + KEEP (*(.__bb_profile_strtab)) + PROVIDE_HIDDEN (__bb_profile_strtab_end = .); + } + + . = ALIGN( 8 ); + .bb_profile_tab : + { + PROVIDE_HIDDEN (__bb_profile_tab_begin = .); + KEEP (*(.__bb_profile_tab)) + PROVIDE_HIDDEN (__bb_profile_tab_end = .); + } + + . = ALIGN( 8 ); + .muid_tab : + { + PROVIDE_HIDDEN (__muid_tab_start = .); + + PROVIDE_HIDDEN (__decouple_start__ = .); + KEEP (*(.__decouple)) + PROVIDE_HIDDEN (__decouple_end__ = .); + + PROVIDE_HIDDEN (__muid_range_tab_begin__ = .); + KEEP (*(.__muid_range_tab)) + PROVIDE_HIDDEN (__muid_range_tab_end__ = .); + + PROVIDE_HIDDEN (__muid_conststr_start__ = .); + KEEP (*(.__muid_conststr)) + PROVIDE_HIDDEN (__muid_conststr_end__ = .); + + KEEP (*(.__muid_func_def_tab)) + KEEP (*(.__muid_func_undef_tab)) + KEEP (*(.__muid_data_def_tab)) + KEEP (*(.__muid_data_undef_tab)) + KEEP (*(.__muid_itab)) + KEEP (*(.__muid_vtab)) + KEEP (*(.__muid_vtab_offset_tab)) + KEEP (*(.__muid_field_offset_tab)) + KEEP (*(.__muid_superclass)) + KEEP (*(.__muid_offset_value_table)) + KEEP (*(.__muid_local_classinfo_tab)) + + PROVIDE_HIDDEN (__muid_tab_end = .); + } + + . = ALIGN( 8 ); + .mpl_version : + { + PROVIDE_HIDDEN (__compilerVersionNumTab_begin__ = .); + KEEP (*(.__compilerVersionNumTab)) + PROVIDE_HIDDEN (__compilerVersionNumTab_end__ = .); + + KEEP (*(.__sourceMuidTab)) + + PROVIDE_HIDDEN (__compiler_mfile_status_begin__ = .); + KEEP (*(.__compiler_mfile_status)) + PROVIDE_HIDDEN (__compiler_mfile_status_end__ = .); + } + + . = ALIGN( 8 ); + .profile_tab : + { + KEEP (*(.__profile_func_tab)) + } + + . = ALIGN( 8 ); + .classmetadata : + { + KEEP (*(.__muid_classmetadata)) + } + + . = ALIGN( 8 ); + .classmetadata_bucket : + { + PROVIDE_HIDDEN (__muid_classmetadata_bucket_begin = .); + *(.__muid_classmetadata_bucket) + PROVIDE_HIDDEN (__muid_classmetadata_bucket_end = .); + } + + .refl_strtab : + { + . 
= ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_strtab_start__ = .); + KEEP (*(.reflection_strtab)) + PROVIDE_HIDDEN(__reflection_strtab_end__ = .); + } + + .refl_strtab.hot : + { + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_start_hot_strtab_start__ = .); + KEEP (*(.reflection_start_hot_strtab)) + PROVIDE_HIDDEN(__reflection_start_hot_strtab_end__ = .); + + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_both_hot_strtab_start__ = .); + KEEP (*(.reflection_both_hot_strtab)) + PROVIDE_HIDDEN(__reflection_both_hot_strtab_end__ = .); + + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_run_hot_strtab_start__ = .); + KEEP (*(.reflection_run_hot_strtab)) + PROVIDE_HIDDEN(__reflection_run_hot_strtab_end__ = .); + } + + . = ALIGN( 4096 ); + __bss_start = . ; __bss_start__ = . ; + .bss : + { + PROVIDE_HIDDEN(__bss_section_begin = .); + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. + FIXME: Why do we need it? When there is no .bss section, we don't + pad the .data section. */ + . = ALIGN(. != 0 ? 64 / 8 : 1); + PROVIDE_HIDDEN(__bss_section_end = .); + } + _bss_end__ = . ; __bss_end__ = . ; + + . = ALIGN(64 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(64 / 8); + __end__ = .; + PROVIDE_HIDDEN(__maple_end__ = .); + _end = .; PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .maple_java_debug_aranges 0 : { KEEP(*(.maple_java_debug_aranges)) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .maple_java_debug_info 0 : { KEEP(*(.maple_java_debug_info)) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .maple_java_debug_abbrev 0 : { KEEP(*(.maple_java_debug_abbrev)) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .maple_java_debug_line 0 : { KEEP(*(.maple_java_debug_line)) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + .maple_java_debug_ranges 0 : { KEEP(*(.maple_java_debug_ranges)) } + /* DWARF Extension. 
*/ + .debug_macro 0 : { *(.debug_macro) } + .ARM.attributes 0 : { KEEP (*(.ARM.attributes)) KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) } +} diff --git a/src/mrt/maplert/linker/mapleld.so.lds b/src/mrt/maplert/linker/mapleld.so.lds new file mode 100644 index 0000000000..1e67683722 --- /dev/null +++ b/src/mrt/maplert/linker/mapleld.so.lds @@ -0,0 +1,409 @@ +/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", + "elf64-littleaarch64") +OUTPUT_ARCH(aarch64) +SEARCH_DIR("=/usr/local/lib/aarch64-linux-gnu"); SEARCH_DIR("=/lib/aarch64-linux-gnu"); SEARCH_DIR("=/usr/lib/aarch64-linux-gnu"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib"); SEARCH_DIR("=/usr/aarch64-linux-gnu/lib"); +SECTIONS +{ + PROVIDE_HIDDEN(__maple_start__ = .); + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = SEGMENT_START("text-segment", 0)); . = SEGMENT_START("text-segment", 0) + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : + { + PROVIDE_HIDDEN (__linkerHashSo_begin = .); + KEEP (*(.note.gnu.build-id)) + PROVIDE_HIDDEN (__linkerHashSo_end = .); + } + .hash : { *(.hash) } + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.ifunc) + } + .rela.plt : + { + *(.rela.plt) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .init : + { + KEEP (*(.init)) + } =0 + .plt : ALIGN(16) { *(.plt) *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. 
*/ + *(.gnu.warning) + } =0 + .java_text : + { + PROVIDE_HIDDEN (java_text_begin = .); + KEEP (*(.java_text)) + PROVIDE_HIDDEN (java_text_end = .); + } + .fini : + { + KEEP (*(.fini)) + } =0 + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : + { + *(.rodata .rodata.* .gnu.linkonce.r.*) + } + .rometadata : + { + mfile_rometadata_method_start = .; + KEEP (*(.rometadata.method)) + mfile_rometadata_method_end = .; + + mfile_rometadata_field_start = .; + KEEP (*(.rometadata.field)) + mfile_rometadata_field_end = .; + } + .romuidtab : + { + mfile_romuidtab_start = .; + KEEP (*(.__muid_ro_func_def_orig_tab)) + KEEP (*(.__muid_ro_data_def_orig_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_inf_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_def_muid_tab)) + /* madvised */ + KEEP (*(.__muid_ro_data_def_muid_tab)) + KEEP (*(.__muid_ro_func_undef_muid_tab)) + KEEP (*(.__muid_ro_data_undef_muid_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_muid_idx_tab)) + mfile_romuidtab_end = .; + } + .rodata1 : { *(.rodata1) } + .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) } + .eh_frame : ONLY_IF_RO + { + PROVIDE_HIDDEN ( __eh_frame_start = .); + KEEP (*(.eh_frame)) *(.eh_frame.*) + PROVIDE_HIDDEN ( __eh_frame_end = .); + } + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. */ + . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) } + .gnu_extab : ONLY_IF_RW { *(.gnu_extab) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } + .preinit_array : + { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } + .init_array : + { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(.init_array.* .ctors.*)) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors)) + PROVIDE_HIDDEN (__init_array_end = .); + } + .fini_array : + { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(.fini_array.* .dtors.*)) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors)) + PROVIDE_HIDDEN (__fini_array_end = .); + } + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin?.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. 
+ The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin?.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .dynamic : { *(.dynamic) } + .got : { *(.got) *(.igot) } + . = DATA_SEGMENT_RELRO_END (24, .); + .got.plt : { *(.got.plt) *(.igot.plt) } + .data : + { + PROVIDE_HIDDEN (__data_section_begin = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + PROVIDE_HIDDEN (__data_section_end = .); + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + /* maple specific sections */ + . = ALIGN( 8 ); + .maple.gcrootsmap : + { + KEEP (*(.maple.gcrootsmap)) + } + + . = ALIGN( 8 ); + .reg_jni_tab : + { + PROVIDE_HIDDEN (__reg_jni_tab_begin = .); + KEEP (*(.reg_jni_tab)) + PROVIDE_HIDDEN (__reg_jni_tab_end = .); + } + + . = ALIGN( 8 ); + .reg_jni_func_tab : + { + PROVIDE_HIDDEN (__reg_jni_func_tab_begin = .); + KEEP (*(.reg_jni_func_tab)) + PROVIDE_HIDDEN (__reg_jni_func_tab_end = .); + } + + . = ALIGN( 8 ); + .bb_profile_strtab : + { + PROVIDE_HIDDEN (__bb_profile_strtab_begin = .); + KEEP (*(.__bb_profile_strtab)) + PROVIDE_HIDDEN (__bb_profile_strtab_end = .); + } + + . = ALIGN( 8 ); + .bb_profile_tab : + { + PROVIDE_HIDDEN (__bb_profile_tab_begin = .); + KEEP (*(.__bb_profile_tab)) + PROVIDE_HIDDEN (__bb_profile_tab_end = .); + } + + . = ALIGN( 8 ); + .muid_tab : + { + PROVIDE_HIDDEN (__muid_tab_start = .); + + PROVIDE_HIDDEN (__decouple_start__ = .); + KEEP (*(.__decouple)) + PROVIDE_HIDDEN (__decouple_end__ = .); + + PROVIDE_HIDDEN (__muid_range_tab_begin__ = .); + KEEP (*(.__muid_range_tab)) + PROVIDE_HIDDEN (__muid_range_tab_end__ = .); + + PROVIDE_HIDDEN (__muid_conststr_start__ = .); + KEEP (*(.__muid_conststr)) + PROVIDE_HIDDEN (__muid_conststr_end__ = .); + + KEEP (*(.__muid_func_def_tab)) + KEEP (*(.__muid_func_undef_tab)) + KEEP (*(.__muid_data_def_tab)) + KEEP (*(.__muid_data_undef_tab)) + KEEP (*(.__muid_itab)) + KEEP (*(.__muid_vtab)) + KEEP (*(.__muid_vtab_offset_tab)) + KEEP (*(.__muid_field_offset_tab)) + KEEP (*(.__muid_superclass)) + + PROVIDE_HIDDEN (__muid_tab_end = .); + } + + . = ALIGN( 8 ); + .mpl_version : + { + PROVIDE_HIDDEN (__compilerVersionNumTab_begin__ = .); + KEEP (*(.__compilerVersionNumTab)) + PROVIDE_HIDDEN (__compilerVersionNumTab_end__ = .); + + KEEP (*(.__sourceMuidTab)) + + PROVIDE_HIDDEN (__compiler_mfile_status_begin__ = .); + KEEP (*(.__compiler_mfile_status)) + PROVIDE_HIDDEN (__compiler_mfile_status_end__ = .); + } + + . = ALIGN( 8 ); + .profile_tab : + { + KEEP (*(.__profile_func_tab)) + } + + . = ALIGN( 8 ); + .classmetadata : + { + KEEP (*(.__muid_classmetadata)) + } + + . = ALIGN( 8 ); + .classmetadata_bucket : + { + PROVIDE_HIDDEN (__muid_classmetadata_bucket_begin = .); + *(.__muid_classmetadata_bucket) + PROVIDE_HIDDEN (__muid_classmetadata_bucket_end = .); + } + + .refl_strtab : + { + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_strtab_start__ = .); + KEEP (*(.reflection_strtab)) + PROVIDE_HIDDEN(__reflection_strtab_end__ = .); + } + + .refl_strtab.hot : + { + . 
= ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_start_hot_strtab_start__ = .); + KEEP (*(.reflection_start_hot_strtab)) + PROVIDE_HIDDEN(__reflection_start_hot_strtab_end__ = .); + + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_both_hot_strtab_start__ = .); + KEEP (*(.reflection_both_hot_strtab)) + PROVIDE_HIDDEN(__reflection_both_hot_strtab_end__ = .); + + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_run_hot_strtab_start__ = .); + KEEP (*(.reflection_run_hot_strtab)) + PROVIDE_HIDDEN(__reflection_run_hot_strtab_end__ = .); + } + + . = ALIGN( 4096 ); + __bss_start = . ; __bss_start__ = . ; + .bss : + { + PROVIDE_HIDDEN(__bss_section_begin = .); + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. + FIXME: Why do we need it? When there is no .bss section, we don't + pad the .data section. */ + . = ALIGN(. != 0 ? 64 / 8 : 1); + PROVIDE_HIDDEN(__bss_section_end = .); + } + _bss_end__ = . ; __bss_end__ = . ; + + . = ALIGN(64 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(64 / 8); + __end__ = .; + PROVIDE_HIDDEN(__maple_end__ = .); + _end = .; PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .maple_java_debug_aranges 0 : { KEEP(*(.maple_java_debug_aranges)) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .maple_java_debug_info 0 : { KEEP(*(.maple_java_debug_info)) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .maple_java_debug_abbrev 0 : { KEEP(*(.maple_java_debug_abbrev)) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .maple_java_debug_line 0 : { KEEP(*(.maple_java_debug_line)) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + .maple_java_debug_ranges 0 : { KEEP(*(.maple_java_debug_ranges)) } + /* DWARF Extension. 
*/ + .debug_macro 0 : { *(.debug_macro) } + .ARM.attributes 0 : { KEEP (*(.ARM.attributes)) KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) } +} diff --git a/src/mrt/maplert/linker/maplelld.so.lds b/src/mrt/maplert/linker/maplelld.so.lds new file mode 100644 index 0000000000..57dd331c50 --- /dev/null +++ b/src/mrt/maplert/linker/maplelld.so.lds @@ -0,0 +1,450 @@ +/* Script for -pie -z combreloc: position independent executable, combine & sort relocs */ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", + "elf64-littleaarch64") +OUTPUT_ARCH(aarch64) +SEARCH_DIR("=/usr/local/lib/aarch64-linux-gnu"); SEARCH_DIR("=/lib/aarch64-linux-gnu"); SEARCH_DIR("=/usr/lib/aarch64-linux-gnu"); SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib"); SEARCH_DIR("=/usr/aarch64-linux-gnu/lib"); +PHDRS +{ + text PT_LOAD FILEHDR PHDRS; + data PT_LOAD; + dynamic PT_DYNAMIC; + note PT_NOTE; + gnu_eh_frame PT_GNU_EH_FRAME; + gnu_stack PT_GNU_STACK; + gnu_relro PT_GNU_RELRO; + /* headers PT_PHDR PHDRS; */ /* Reserve segment for lld */ + /* interp PT_INTERP; */ /* Reserve segment for lld */ + thread_local PT_TLS; /* Reserve segment for lld */ +} + +SECTIONS +{ + PROVIDE_HIDDEN(__maple_start__ = .); + /* Read-only sections, merged into text segment: */ + PROVIDE (__executable_start = SEGMENT_START("text-segment", 0)); . = SEGMENT_START("text-segment", 0) + SIZEOF_HEADERS; + .interp : { *(.interp) } + .note.gnu.build-id : + { + PROVIDE_HIDDEN (__linkerHashSo_begin = .); + KEEP (*(.note.gnu.build-id)) + PROVIDE_HIDDEN (__linkerHashSo_end = .); + } :text :note + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rela.dyn : + { + *(.rela.init) + *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) + *(.rela.fini) + *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) + *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) + *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) + *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) + *(.rela.ctors) + *(.rela.dtors) + *(.rela.got) + *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) + *(.rela.ifunc) + } + .rela.plt : + { + *(.rela.plt) + PROVIDE_HIDDEN (__rela_iplt_start = .); + *(.rela.iplt) + PROVIDE_HIDDEN (__rela_iplt_end = .); + } + .init : + { + KEEP (*(.init)) + } =0 + .plt : ALIGN(16) { *(.plt) *(.iplt) } + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + /* .gnu.warning sections are handled specially by elf32.em. 
*/ + *(.gnu.warning) + } =0 + .java_text : + { + PROVIDE_HIDDEN (java_text_begin = .); + KEEP (*(.java_text)) + PROVIDE_HIDDEN (java_text_end = .); + } + .fini : + { + KEEP (*(.fini)) + } =0 + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + .rodata : + { + *(.rodata .rodata.* .gnu.linkonce.r.*) + } + .rometadata : + { + mfile_rometadata_method_start = .; + KEEP (*(.rometadata.method)) + mfile_rometadata_method_end = .; + + mfile_rometadata_field_start = .; + KEEP (*(.rometadata.field)) + mfile_rometadata_field_end = .; + } + .romuidtab : + { + mfile_romuidtab_start = .; + KEEP (*(.__muid_ro_func_def_orig_tab)) + KEEP (*(.__muid_ro_data_def_orig_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_inf_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_def_muid_tab)) + /* madvised */ + KEEP (*(.__muid_ro_data_def_muid_tab)) + KEEP (*(.__muid_ro_func_undef_muid_tab)) + KEEP (*(.__muid_ro_data_undef_muid_tab)) + /* madvised */ + KEEP (*(.__muid_ro_func_muid_idx_tab)) + mfile_romuidtab_end = .; + } + .rodata1 : { *(.rodata1) } + .eh_frame_hdr : { *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) } :text :gnu_eh_frame + .eh_frame : ONLY_IF_RO + { + PROVIDE_HIDDEN ( __eh_frame_start = .); + KEEP (*(.eh_frame)) *(.eh_frame.*) + PROVIDE_HIDDEN ( __eh_frame_end = .); + } :text + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table + .gcc_except_table.*) } + .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } + /* These sections are generated by the Sun/Oracle C++ compiler. */ + .exception_ranges : ONLY_IF_RO { *(.exception_ranges + .exception_ranges*) } + + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. */ + . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); + /* Exception handling */ + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) } + .gnu_extab : ONLY_IF_RW { *(.gnu_extab) } + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } + /* Thread Local Storage sections */ + .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } :thread_local + .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } :thread_local + .dynamic : { *(.dynamic) } :data :gnu_relro :dynamic + .preinit_array : + { + PROVIDE_HIDDEN (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE_HIDDEN (__preinit_array_end = .); + } + .init_array : + { + PROVIDE_HIDDEN (__init_array_start = .); + KEEP (*(.init_array.* .ctors.*)) + KEEP (*(.init_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .ctors)) + PROVIDE_HIDDEN (__init_array_end = .); + } :data :gnu_relro + .fini_array : + { + PROVIDE_HIDDEN (__fini_array_start = .); + KEEP (*(.fini_array.* .dtors.*)) + KEEP (*(.fini_array EXCLUDE_FILE (*crtbegin.o *crtbegin?.o *crtend.o *crtend?.o ) .dtors)) + PROVIDE_HIDDEN (__fini_array_end = .); + } :data :gnu_relro + .ctors : + { + /* gcc uses crtbegin.o to find the start of + the constructors, so we make sure it is + first. Because this is a wildcard, it + doesn't matter if the user does not + actually link against crtbegin.o; the + linker won't look for a file to match a + wildcard. The wildcard also means that it + doesn't matter which directory crtbegin.o + is in. */ + KEEP (*crtbegin.o(.ctors)) + KEEP (*crtbegin?.o(.ctors)) + /* We don't want to include the .ctor section from + the crtend.o file until after the sorted ctors. 
+ The .ctor section from the crtend file contains the + end of ctors marker and it must be last */ + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } + .dtors : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*crtbegin?.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } + .jcr : { KEEP (*(.jcr)) } + .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro .data.rel.ro.* .gnu.linkonce.d.rel.ro.*) } + .got : { *(.got) *(.igot) } :data :gnu_relro + . = DATA_SEGMENT_RELRO_END (24, .); + .got.plt : { *(.got.plt) *(.igot.plt) } :data + .data : + { + PROVIDE_HIDDEN (__data_section_begin = .); + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + PROVIDE_HIDDEN (__data_section_end = .); + } + .data1 : { *(.data1) } + _edata = .; PROVIDE (edata = .); + /* maple specific sections */ + . = ALIGN( 8 ); + .maple_global_variable : + { + KEEP (*(.maple_global_variable)) + } + + . = ALIGN( 8 ); + .maple_literal_string : + { + KEEP (*(.maple_literal_string)) + } + + . = ALIGN( 8 ); + .maple.gcrootsmap : + { + KEEP (*(.maple.gcrootsmap)) + } + + . = ALIGN( 8 ); + .reg_jni_tab : + { + PROVIDE_HIDDEN (__reg_jni_tab_begin = .); + KEEP (*(.reg_jni_tab)) + PROVIDE_HIDDEN (__reg_jni_tab_end = .); + } + + . = ALIGN( 8 ); + .reg_jni_func_tab : + { + PROVIDE_HIDDEN (__reg_jni_func_tab_begin = .); + KEEP (*(.reg_jni_func_tab)) + PROVIDE_HIDDEN (__reg_jni_func_tab_end = .); + } + + . = ALIGN( 8 ); + .bb_profile_strtab : + { + PROVIDE_HIDDEN (__bb_profile_strtab_begin = .); + KEEP (*(.__bb_profile_strtab)) + PROVIDE_HIDDEN (__bb_profile_strtab_end = .); + } + + . = ALIGN( 8 ); + .bb_profile_tab : + { + PROVIDE_HIDDEN (__bb_profile_tab_begin = .); + KEEP (*(.__bb_profile_tab)) + PROVIDE_HIDDEN (__bb_profile_tab_end = .); + } + + . = ALIGN( 8 ); + .muid_tab : + { + PROVIDE_HIDDEN (__muid_tab_start = .); + + PROVIDE_HIDDEN (__decouple_start__ = .); + KEEP (*(.__decouple)) + PROVIDE_HIDDEN (__decouple_end__ = .); + + PROVIDE_HIDDEN (__muid_range_tab_begin__ = .); + KEEP (*(.__muid_range_tab)) + PROVIDE_HIDDEN (__muid_range_tab_end__ = .); + + PROVIDE_HIDDEN (__muid_conststr_start__ = .); + KEEP (*(.__muid_conststr)) + PROVIDE_HIDDEN (__muid_conststr_end__ = .); + + KEEP (*(.__muid_func_def_tab)) + KEEP (*(.__muid_func_undef_tab)) + KEEP (*(.__muid_data_def_tab)) + KEEP (*(.__muid_data_undef_tab)) + KEEP (*(.__muid_vtab_offset_tab)) + KEEP (*(.__muid_field_offset_tab)) + KEEP (*(.__muid_superclass)) + KEEP (*(.__muid_offset_value_table)) + KEEP (*(.__muid_local_classinfo_tab)) + + PROVIDE_HIDDEN (__muid_tab_end = .); + } + + . = ALIGN( 8 ); + .muid_itab_conflict : + { + KEEP (*(.__muid_itab_conflict)) + KEEP (*(.__muid_cold_itab_conflict)) + } + + . = ALIGN( 8 ); + .muid_vtab_and_itab : + { + KEEP (*(.__muid_vtab_and_itab)) + KEEP (*(.__muid_cold_vtab_and_itab)) + } + + . = ALIGN( 8 ); + .mpl_version : + { + PROVIDE_HIDDEN (__compilerVersionNumTab_begin__ = .); + KEEP (*(.__compilerVersionNumTab)) + PROVIDE_HIDDEN (__compilerVersionNumTab_end__ = .); + + KEEP (*(.__sourceMuidTab)) + + PROVIDE_HIDDEN (__compiler_mfile_status_begin__ = .); + KEEP (*(.__compiler_mfile_status)) + PROVIDE_HIDDEN (__compiler_mfile_status_end__ = .); + } + + . = ALIGN( 8 ); + .profile_tab : + { + KEEP (*(.__profile_func_tab)) + } + + . = ALIGN( 8 ); + .classmetadata : + { + KEEP (*(.__muid_classmetadata)) + } + + . 
= ALIGN( 8 ); + .classmetadata_bucket : + { + PROVIDE_HIDDEN (__muid_classmetadata_bucket_begin = .); + *(.__muid_classmetadata_bucket) + PROVIDE_HIDDEN (__muid_classmetadata_bucket_end = .); + } + + .refl_strtab : + { + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_strtab_start__ = .); + KEEP (*(.reflection_strtab)) + PROVIDE_HIDDEN(__reflection_strtab_end__ = .); + } + + .refl_strtab.hot : + { + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_start_hot_strtab_start__ = .); + KEEP (*(.reflection_start_hot_strtab)) + PROVIDE_HIDDEN(__reflection_start_hot_strtab_end__ = .); + + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_both_hot_strtab_start__ = .); + KEEP (*(.reflection_both_hot_strtab)) + PROVIDE_HIDDEN(__reflection_both_hot_strtab_end__ = .); + + . = ALIGN( 8 ); + PROVIDE_HIDDEN(__reflection_run_hot_strtab_start__ = .); + KEEP (*(.reflection_run_hot_strtab)) + PROVIDE_HIDDEN(__reflection_run_hot_strtab_end__ = .); + } + + . = ALIGN( 4096 ); + __bss_start = . ; __bss_start__ = . ; + .bss : + { + PROVIDE_HIDDEN(__bss_section_begin = .); + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + /* Align here to ensure that the .bss section occupies space up to + _end. Align after .bss to ensure correct alignment even if the + .bss section disappears because there are no input sections. + FIXME: Why do we need it? When there is no .bss section, we don't + pad the .data section. */ + . = ALIGN(. != 0 ? 64 / 8 : 1); + PROVIDE_HIDDEN(__bss_section_end = .); + } + _bss_end__ = . ; __bss_end__ = . ; + + . = ALIGN(64 / 8); + . = SEGMENT_START("ldata-segment", .); + . = ALIGN(64 / 8); + __end__ = .; + PROVIDE_HIDDEN(__maple_end__ = .); + _end = .; PROVIDE (end = .); + . = DATA_SEGMENT_END (.); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .maple_java_debug_aranges 0 : { KEEP(*(.maple_java_debug_aranges)) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } + .maple_java_debug_info 0 : { KEEP(*(.maple_java_debug_info)) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .maple_java_debug_abbrev 0 : { KEEP(*(.maple_java_debug_abbrev)) } + .debug_line 0 : { *(.debug_line .debug_line.* .debug_line_end ) } + .maple_java_debug_line 0 : { KEEP(*(.maple_java_debug_line)) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* DWARF 3 */ + .debug_pubtypes 0 : { *(.debug_pubtypes) } + .debug_ranges 0 : { *(.debug_ranges) } + .maple_java_debug_ranges 0 : { KEEP(*(.maple_java_debug_ranges)) } + /* DWARF Extension. 
*/ + .debug_macro 0 : { *(.debug_macro) } + .ARM.attributes 0 : { KEEP (*(.ARM.attributes)) KEEP (*(.gnu.attributes)) } + .note.gnu.arm.ident 0 : { KEEP (*(.note.gnu.arm.ident)) } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) } +} diff --git a/src/mrt/maplert/linker/unified.macros.whitelist b/src/mrt/maplert/linker/unified.macros.whitelist new file mode 100644 index 0000000000..fcf6929731 --- /dev/null +++ b/src/mrt/maplert/linker/unified.macros.whitelist @@ -0,0 +1,40 @@ +Ljava_2Flang_2FString_24CaseInsensitiveComparator +Ljava_2Futil_2FSpliterators_24DoubleArraySpliterator +Ljava_2Flang_2FClass +Ljava_2Flang_2FString +Ljava_2Flang_2FObject +Ljava_2Flang_2Freflect_2FExecutable_3B +Ljava_2Flang_2Freflect_2FConstructor +Ljava_2Flang_2Freflect_2FMethod +Ljava_2Flang_2Freflect_2FField +Ljava_2Flang_2Freflect_2FAccessibleObject_3B +Ljava_2Flang_2Fref_2FReference +Ljava_2Flang_2Freflect_2FParameter +Ljava_2Flang_2FThrowable +Ljava_2Flang_2FCloneable +Ljava_2Fio_2FSerializable +Ljava_2Flang_2Freflect_2FType +Ljava_2Flang_2FCharacter +Ljava_2Flang_2FInteger +Ljava_2Flang_2FThreadGroup +Ljava_2Flang_2FThread_24State +Ljava_2Flang_2FThread +Ljava_2Flang_2FNullPointerException +Ljava_2Flang_2FArithmeticException +Ljava_2Flang_2FInterruptedException +Ljava_2Flang_2FClassCastException +Ljava_2Flang_2FArrayIndexOutOfBoundsException +Ljava_2Flang_2FUnsatisfiedLinkError +Llibcore_2Freflect_2FAnnotationMember +Llibcore_2Freflect_2FAnnotationFactory +Ljava_2Fnio_2FDirectByteBuffer +Ljava_2Flang_2Fannotation_2FAnnotation +Ljava_2Fnio_2FBuffer +Ljava_2Flang_2Finvoke_2FMethodType +Ldalvik_2Fsystem_2FEmulatedStackFrame +Ljava_2Flang_2FStringFactory +Ljava_2Flang_2Finvoke_2FArrayElementVarHandle +Ljava_2Flang_2Finvoke_2FByteArrayViewVarHandle +Ljava_2Flang_2Finvoke_2FByteBufferViewVarHandle +Ljava_2Flang_2Finvoke_2FFieldVarHandle +Ljava_2Flang_2Finvoke_2FVarHandle diff --git a/src/mrt/maplert/public-headers/java2c_rule.h b/src/mrt/maplert/public-headers/java2c_rule.h new file mode 100644 index 0000000000..847780777a --- /dev/null +++ b/src/mrt/maplert/public-headers/java2c_rule.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_JAVA2C_RULE_H_ +#define MAPLE_JAVA2C_RULE_H_ +#include + +#include "jni.h" +#include "mobject.h" +#include "exception/mrt_exception.h" + +namespace maplert { +#define INIT_ARGS + +#define j2cRule(INIT_ARGS) j2cRule + +class Java2CRule { + public: + Java2CRule() = default; + ~Java2CRule() = default; + void Prologue() const { } + // when a java function exit, we need to call it + void Epilogue() const { + if (MRT_HasPendingException()) { + MRT_CheckThrowPendingExceptionRet(); + } + } +}; +} // namespace maplert + +#endif //MAPLE_JAVA2C_RULE_H_ diff --git a/src/mrt/maplert/public-headers/jsan.h b/src/mrt/maplert/public-headers/jsan.h new file mode 100644 index 0000000000..b904afb45e --- /dev/null +++ b/src/mrt/maplert/public-headers/jsan.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef JSAN_H +#define JSAN_H + + +#include +#include +#include +#include "jni.h" +#include "mrt_mm_config_common.h" + +namespace maplert { +typedef uintptr_t address_t; + +#define JSAN_ADD_OBJ(objAddr, objSize) +#define JSAN_ADD_CLASS_METADATA(objAddr) +#define JSAN_FREE(obj, RealFreeFunc, internalSize) internalSize = RealFreeFunc(obj) +#define JSAN_CHECK_OBJ(obj) + +static inline void JsanliteFree(address_t) { + return; +} + +static inline void JsanliteError(address_t) { + return; +} + +static inline size_t JsanliteGetPayloadSize(size_t) { + return 0; +}; + +static inline void JsanliteInit() { + return; +} +} // namespace maplert + +#endif diff --git a/src/mrt/maplert/public-headers/mrt_api_common.h b/src/mrt/maplert/public-headers/mrt_api_common.h new file mode 100644 index 0000000000..8ef9c5fc0d --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_api_common.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_API_COMMON_H +#define MRT_API_COMMON_H + +#define MRT_EXPORT __attribute__((visibility("default"))) + +#endif diff --git a/src/mrt/maplert/public-headers/mrt_array_api.h b/src/mrt/maplert/public-headers/mrt_array_api.h new file mode 100644 index 0000000000..b2e0a8af72 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_array_api.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_LIB_CORE_MPL_ARRAY_H_ +#define MAPLE_LIB_CORE_MPL_ARRAY_H_ + +#include "mrt_api_common.h" +#include "securec.h" +#include "primitive.h" +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// PrimitiveArray +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJboolean(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJbyte(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJchar(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJdouble(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJfloat(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJint(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJlong(void); +MRT_EXPORT jclass MRT_GetPrimitiveArrayClassJshort(void); + +MRT_EXPORT jobject MRT_NewArray(jint length, jclass elementClass, jint componentSize); +MRT_EXPORT jobject MRT_NewPrimitiveArray(jint length, maple::Primitive::Type pType, + jint componentSize, jboolean isJNI = JNI_FALSE); +MRT_EXPORT jobject MRT_NewObjArray(const jint length, const jclass elementClass, const jobject initialElement); + +MRT_EXPORT jint MRT_GetArrayElementCount(jarray ja); +MRT_EXPORT jobject MRT_GetObjectArrayElement(jobjectArray javaArray, jsize index, jboolean maintainRC); +MRT_EXPORT void MRT_SetObjectArrayElement(jobjectArray javaArray, jsize index, + jobject javaValue, jboolean maintainRC); + +// openjdk for primitiveArrayElement +MRT_EXPORT jobject MRT_GetArrayElement(jobjectArray javaArray, jsize index, jboolean maintain); +MRT_EXPORT void MRT_SetArrayElement(jobjectArray javaArray, jsize index, jobject javaValue); +MRT_EXPORT jvalue MRT_GetPrimitiveArrayElement(jarray arr, jint index, char arrType); +MRT_EXPORT void MRT_SetPrimitiveArrayElement(jarray arr, jint index, jvalue value, char arrType); +MRT_EXPORT jboolean MRT_TypeWidenConvertCheck(char currentType, char wideType, const jvalue &srcVal, jvalue &dstVal); +MRT_EXPORT jboolean MRT_TypeWidenConvertCheckObject(jobject val); +MRT_EXPORT char MRT_GetPrimitiveType(jclass clazz); +MRT_EXPORT char MRT_GetPrimitiveTypeFromBoxType(jclass clazz); + +MRT_EXPORT jboolean MRT_IsArray(jobject javaArray); +MRT_EXPORT jboolean MRT_IsObjectArray(jobject javaArray); +MRT_EXPORT jboolean MRT_IsPrimitveArray(jobject javaArray); +MRT_EXPORT jboolean MRT_IsMultiDimArray(jobject javaArray); + +MRT_EXPORT void *MRT_JavaArrayToCArray(jarray javaArray); +MRT_EXPORT jint MRT_GetArrayContentOffset(void); +MRT_EXPORT jobject MRT_RecursiveCreateMultiArray(const jclass arrayClass, + const jint currentDimension, const jint dimensions, jint *dimArray); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif // MAPLE_LIB_CORE_MPL_ARRAY_H_ diff --git a/src/mrt/maplert/public-headers/mrt_class_api.h b/src/mrt/maplert/public-headers/mrt_class_api.h new file mode 100644 index 0000000000..f012c4969d --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_class_api.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_LIB_CORE_MPL_CLASS_H_ +#define MAPLE_LIB_CORE_MPL_CLASS_H_ + +#include "mrt_api_common.h" +#include "jni.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +MRT_EXPORT char *MRT_ReflectGetClassCharName(const jclass klass); +MRT_EXPORT jint MRT_ReflectGetObjSize(const jclass klass); +MRT_EXPORT jboolean MRT_ReflectClassIsPrimitive(jclass klass); +MRT_EXPORT jstring MRT_ReflectClassGetName(jclass klass); + +MRT_EXPORT jclass MRT_GetClassObject(void); +MRT_EXPORT jclass MRT_GetClassClass(void); +MRT_EXPORT jclass MRT_GetClassString(void); +MRT_EXPORT bool MRT_ClassIsSuperClassValid(jclass clazz); + +MRT_EXPORT bool MRT_ClassInitialized(jclass classInfo); +MRT_EXPORT void MRT_InitProtectedMemoryForClinit(); +MRT_EXPORT void MRT_DumpClassClinit(std::ostream &os); +MRT_EXPORT int64_t MRT_ClinitGetTotalTime(); +MRT_EXPORT int64_t MRT_ClinitGetTotalCount(); +MRT_EXPORT void MRT_ClinitResetStats(); +MRT_EXPORT void MRT_ClinitEnableCount(bool enable); +MRT_EXPORT void MRT_BootstrapClinit(void); +MRT_EXPORT void MRT_BootstrapWellKnown(); + +MRT_EXPORT void MRT_SaveProfile(const std::string &path, bool isSystemServer); +MRT_EXPORT void MRT_EnableMetaProfile(); +MRT_EXPORT void MRT_DisableMetaProfile(); +MRT_EXPORT void MRT_ClearMetaProfile(); + +// Returns the component size of the specified array class. +MRT_EXPORT size_t MRT_ReflectClassGetComponentSize(jclass klass); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif +#endif //MAPLE_LIB_CORE_MPL_CLASS_H_ diff --git a/src/mrt/maplert/public-headers/mrt_classloader_api.h b/src/mrt/maplert/public-headers/mrt_classloader_api.h new file mode 100644 index 0000000000..580f6ca38a --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_classloader_api.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_CLASS_CLASSLOADER_API_H_ +#define MRT_CLASS_CLASSLOADER_API_H_ + +#include "jni.h" +#include "mrt_api_common.h" +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif +// C interfaces +MRT_EXPORT bool MRT_IsClassInitialized(jclass klass); +MRT_EXPORT jobject MRT_GetBootClassLoader(); +MRT_EXPORT jobject MRT_GetClassLoader(jclass klass); +MRT_EXPORT jclass MRT_GetClassByClassLoader(jobject classLoader, const std::string className); +MRT_EXPORT jclass MRT_GetClassByContextClass(jclass klass, const std::string className); +MRT_EXPORT jclass MRT_GetClassByContextObject(jobject obj, const std::string className); +MRT_EXPORT jclass MRT_GetNativeContexClass(); // Get the class loader by current native context +MRT_EXPORT jobject MRT_GetNativeContexClassLoader(); // Get the contextclass by current native context +MRT_EXPORT jclass MRT_GetClass(jclass klass, const std::string className); +MRT_EXPORT void MRT_RegisterDynamicClass(jobject classLoader, jclass klass); +MRT_EXPORT void MRT_UnregisterDynamicClass(jobject classLoader, jclass klass); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif +#endif // MRT_CLASS_LOCATOR_MGR_H_ diff --git a/src/mrt/maplert/public-headers/mrt_compiler_api.h b/src/mrt/maplert/public-headers/mrt_compiler_api.h new file mode 100644 index 0000000000..0f329555de --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_compiler_api.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_COMPILER_API_H +#define MRT_COMPILER_API_H +#include "cinterface.h" +#include "exception/eh_personality.h" +#include "metadata_layout.h" +#include "mrt_api_common.h" +#include "mclass.h" +namespace maplert { +// MCC_ prefixed function is external interface, the compiler guarantees parameter safety +// when calling such interface during the compilation phase. 
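As a rough orientation only: these MCC_ entry points are meant to be reached from compiler-emitted code rather than called by hand, so the sketch below is a hypothetical illustration of the calling pattern (the wrapper name and the null-check shape are invented for this example, not actual generated output), using two of the entry points declared further down:

    // Hypothetical shape of compiler-emitted code for an RC-aware field load guarded by a null check.
    maplert::address_t LoadFieldOrThrow(maplert::address_t obj, maplert::address_t *fieldAddr) {
      if (obj == 0) {
        maplert::MCC_ThrowNullPointerException();  // raises the exception and unwinds
      }
      // RC-aware fast-path field load declared below in this header.
      return maplert::MCC_LoadRefField_NaiveRCFast(obj, fieldAddr);
    }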
+extern "C" { +// exception +MRT_EXPORT void MCC_ThrowNullArrayNullPointerException(); +MRT_EXPORT void MCC_CheckThrowPendingException(); +MRT_EXPORT void MCC_ThrowNullPointerException(); +MRT_EXPORT void MCC_ThrowArithmeticException(); +MRT_EXPORT void MCC_ThrowInterruptedException(); +MRT_EXPORT void MCC_ThrowClassCastException(const char *msg); +MRT_EXPORT void MCC_ThrowArrayIndexOutOfBoundsException(const char *msg); +MRT_EXPORT void MCC_ThrowUnsatisfiedLinkError(); +MRT_EXPORT void MCC_ThrowSecurityException(); +MRT_EXPORT void MCC_ThrowExceptionInInitializerError(MrtClass cause); +MRT_EXPORT void MCC_ThrowNoClassDefFoundError(MrtClass classInfo); +MRT_EXPORT void MCC_ThrowStringIndexOutOfBoundsException(); +MRT_EXPORT void *MCC_JavaBeginCatch(const _Unwind_Exception *unwind_exception); +MRT_EXPORT void MCC_ThrowException(MrtClass obj); +MRT_EXPORT void MCC_ThrowPendingException(); +MRT_EXPORT void MCC_RethrowException(MrtClass obj); + +// fast stack unwind +MRT_EXPORT void MCC_SetRiskyUnwindContext(uint32_t *pc, void *fp); +MRT_EXPORT void MCC_SetReliableUnwindContext(); + +// decouple for lazybinding, called while visiting offset table at first time +MRT_EXPORT int32_t MCC_FixOffsetTableVtable(uint32_t offsetVal, char *offsetEntry); +MRT_EXPORT int32_t MCC_FixOffsetTableField(uint32_t offsetVal, char *offsetEntry); + +// string +MRT_EXPORT bool MCC_String_Equals_NotallCompress(jstring thisStr, jstring anotherStr); +MRT_EXPORT jstring MCC_CStrToJStr(const char *ca, jint len); +MRT_EXPORT jstring MCC_GetOrInsertLiteral(jstring literal); +MRT_EXPORT jstring MCC_StringAppend(uint64_t toStringFlag, ...); +MRT_EXPORT jstring MCC_StringAppend_StringString(jstring strObj1, jstring strObj2); +MRT_EXPORT jstring MCC_StringAppend_StringInt(jstring strObj1, jint intValue); +MRT_EXPORT jstring MCC_StringAppend_StringJcharString(jstring strObj1, uint16_t charValue, jstring strObj2); + +// mclasslocatormanager +MRT_EXPORT jclass MCC_GetClass(jclass klass, const char *className); +MRT_EXPORT jobject MCC_GetCurrentClassLoader(jobject obj); + +// class init +MRT_EXPORT void MCC_PreClinitCheck(ClassMetadata &classInfo __attribute__((unused))); +MRT_EXPORT void MCC_PostClinitCheck(ClassMetadata &classInfo __attribute__((unused))); + +// reflection class +MRT_EXPORT void MCC_Array_Boundary_Check(jobjectArray javaArray, jint index); +MRT_EXPORT void MCC_Reflect_ThrowCastException(const jclass sourceInfo, jobject targetObject, jint dim); +MRT_EXPORT void MCC_Reflect_Check_Casting_Array(jclass sourceClass, jobject targetObject, jint arrayDim); +MRT_EXPORT void MCC_ThrowCastException(jclass targetClass, jobject castObj); +MRT_EXPORT void MCC_Reflect_Check_Arraystore(jobject arrayObject, jobject elemObject); +MRT_EXPORT void MCC_Reflect_Check_Casting_NoArray(jclass sourceClass, jobject targetClass); +MRT_EXPORT jboolean MCC_Reflect_IsInstance(jobject, jobject); + +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItabSlow64(const MObject *obj, uintptr_t hashCode, + uintptr_t secondHashCode, const char *signature); +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItabSlow32(const MObject *obj, uint32_t hashCode, + uint32_t secondHashCode, const char *signature); +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItabInlineCache(uint64_t* cacheEntryAddr, const MClass *klass, uint32_t hashCode, + uint32_t secondHashCode, const char *signature); +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItabSecondHash64(const uintptr_t *itab, uintptr_t hashCode, + const char *signature); +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItabSecondHash32(const uint32_t 
*itab, uint32_t hashCode, + const char *signature); +#if defined(__arm__) +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItab(const uint32_t *itab, const char *signature, uint32_t hashCode); +#else // ~__arm__ +MRT_EXPORT uintptr_t MCC_getFuncPtrFromItab(const uint32_t *itab, uint32_t hashCode, const char *signature); +#endif // ~__arm__ +MRT_EXPORT uintptr_t MCC_getFuncPtrFromVtab64(const MObject *obj, uint32_t offset); +MRT_EXPORT uintptr_t MCC_getFuncPtrFromVtab32(const MObject *obj, uint32_t offset); + +MRT_EXPORT void MCC_ArrayMap_String_Int_put(jstring key, jint value); +MRT_EXPORT jint MCC_ArrayMap_String_Int_size(); +MRT_EXPORT jint MCC_ArrayMap_String_Int_getOrDefault(jstring key, jint defaultValue); +MRT_EXPORT void MCC_ArrayMap_String_Int_clear(); + +// reference api +MRT_EXPORT void MCC_CheckObjAllocated(address_t obj); +MRT_EXPORT address_t MCC_IncRef_NaiveRCFast(address_t obj); +MRT_EXPORT address_t MCC_LoadRefField_NaiveRCFast(address_t obj, address_t *filedAddress); +MRT_EXPORT address_t MCC_DecRef_NaiveRCFast(address_t obj); +MRT_EXPORT void MCC_IncDecRef_NaiveRCFast(address_t incAddr, address_t decAddr); +MRT_EXPORT void MCC_IncDecRefReset(address_t incAddr, address_t *decAddr); +MRT_EXPORT void MCC_DecRefResetPair(address_t *incAddr, address_t *decAddr); +MRT_EXPORT void MCC_CleanupLocalStackRef_NaiveRCFast(address_t *localStart, size_t count); +MRT_EXPORT void MCC_CleanupLocalStackRefSkip_NaiveRCFast(address_t *localStart, size_t count, size_t skip); +MRT_EXPORT void MCC_CleanupNonescapedVar(address_t obj); +MRT_EXPORT void MCC_ClearLocalStackRef(address_t *var); +MRT_EXPORT void MCC_WriteRefFieldStaticNoInc(address_t *field, address_t value); +MRT_EXPORT void MCC_WriteRefFieldStaticNoDec(address_t *field, address_t value); +MRT_EXPORT void MCC_WriteRefFieldStaticNoRC(address_t *field, address_t value); +MRT_EXPORT void MCC_WriteVolatileStaticFieldNoInc(address_t *objAddr, address_t value); +MRT_EXPORT void MCC_WriteVolatileStaticFieldNoDec(address_t *objAddr, address_t value); +MRT_EXPORT void MCC_WriteVolatileStaticFieldNoRC(address_t *objAddr, address_t value); +MRT_EXPORT void MCC_WriteRefFieldNoRC(address_t obj, address_t *field, address_t value); +MRT_EXPORT void MCC_WriteRefFieldNoDec(address_t obj, address_t *field, address_t value); +MRT_EXPORT void MCC_WriteRefFieldNoInc(address_t obj, address_t *field, address_t value); +MRT_EXPORT void MCC_WriteVolatileFieldNoInc(address_t obj, address_t *objAddr, address_t value); +MRT_EXPORT void MCC_WriteWeakField(address_t obj, address_t *fieldAddr, address_t value); +MRT_EXPORT address_t MCC_LoadWeakField(address_t obj, address_t *fieldAddr); +MRT_EXPORT void MCC_WriteVolatileWeakField(address_t obj, address_t *fieldAddr, address_t value); +MRT_EXPORT address_t MCC_LoadVolatileWeakField(address_t obj, address_t *fieldAddr); +MRT_EXPORT void MCC_CleanupLocalStackRef(const address_t *localStart, size_t count); +MRT_EXPORT void MCC_CleanupLocalStackRefSkip(const address_t *localStart, size_t count, size_t skip); +MRT_EXPORT address_t MCC_NewObj(size_t size, size_t align); +MRT_EXPORT address_t MCC_LoadRefStatic(address_t *fieldAddr); +MRT_EXPORT address_t MCC_LoadVolatileStaticField(address_t *fieldAddr); +MRT_EXPORT address_t MCC_LoadReferentField(address_t obj, address_t *fieldAddr); +MRT_EXPORT void MCC_WriteRefFieldStatic(address_t *field, address_t value); +MRT_EXPORT void MCC_WriteVolatileStaticField(address_t *objAddr, address_t value); +MRT_EXPORT address_t MCC_LoadVolatileField(address_t obj, address_t *fieldAddr); 
+MRT_EXPORT void MCC_PreWriteRefField(address_t obj); +MRT_EXPORT void MCC_WriteRefField(address_t obj, address_t *field, address_t value); +MRT_EXPORT void MCC_WriteVolatileField(address_t obj, address_t *objAddr, address_t value); +MRT_EXPORT void MCC_WriteReferent(address_t obj, address_t value); +MRT_EXPORT void MCC_InitializeLocalStackRef(address_t *localStart, size_t count); +MRT_EXPORT void MCC_RunFinalization(); + +// Call fast native function with no more than 8 args. +MRT_EXPORT void *MCC_CallFastNative(...); +// Call fast native function with arbitrary number of args. +MRT_EXPORT void *MCC_CallFastNativeExt(...); +// Call slow native function with no more than 8 args. +MRT_EXPORT void *MCC_CallSlowNative0(...); +MRT_EXPORT void *MCC_CallSlowNative1(...); +MRT_EXPORT void *MCC_CallSlowNative2(...); +MRT_EXPORT void *MCC_CallSlowNative3(...); +MRT_EXPORT void *MCC_CallSlowNative4(...); +MRT_EXPORT void *MCC_CallSlowNative5(...); +MRT_EXPORT void *MCC_CallSlowNative6(...); +MRT_EXPORT void *MCC_CallSlowNative7(...); +MRT_EXPORT void *MCC_CallSlowNative8(...); +// Call slow native function with arbitrary number of args. +MRT_EXPORT void *MCC_CallSlowNativeExt(...); +MRT_EXPORT void MCC_RecordStaticField(address_t *field, const char *name); + +// libs fast +MRT_EXPORT void MCC_SyncExitFast(address_t obj); +MRT_EXPORT void MCC_SyncEnterFast2(address_t obj); +MRT_EXPORT void MCC_SyncEnterFast0(address_t obj); +MRT_EXPORT void MCC_SyncEnterFast1(address_t obj); +MRT_EXPORT void MCC_SyncEnterFast3(address_t obj); +MRT_EXPORT JNIEnv *MCC_PreNativeCall(jobject caller); +MRT_EXPORT void MCC_PostNativeCall(JNIEnv *env); + +// libs +MRT_EXPORT int32_t MCC_DexArrayLength(const void *p); +MRT_EXPORT void MCC_DexArrayFill(void *d, void *s, int32_t len); +MRT_EXPORT void *MCC_DexCheckCast(void *i, void *c); +MRT_EXPORT void *MCC_GetReferenceToClass(void *c); +MRT_EXPORT bool MCC_DexInstanceOf(void *i, void *c); +MRT_EXPORT void MCC_DexInterfaceCall(void *dummy); +MRT_EXPORT bool MCC_IsAssignableFrom(jclass subClass, jclass superClass); +#if defined(__aarch64__) +#define POLYRETURNTYPE jvalue +#elif defined(__arm__) +#define POLYRETURNTYPE jlong +#endif + +MRT_EXPORT POLYRETURNTYPE MCC_DexPolymorphicCall(jstring calleeName, + jstring protoString, int paramNum, jobject methodHandle, ...); + +MRT_EXPORT int32_t MCC_JavaArrayLength(const void *p); +MRT_EXPORT void MCC_JavaArrayFill(void *d, void *s, int32_t len); +MRT_EXPORT void *MCC_JavaCheckCast(void *i, void *c); +MRT_EXPORT bool MCC_JavaInstanceOf(void *i, void *c); +MRT_EXPORT void MCC_JavaInterfaceCall(void *dummy); +MRT_EXPORT POLYRETURNTYPE MCC_JavaPolymorphicCall(jstring calleeName, + jstring protoString, int paramNum, jobject methodHandle, ...); + +// chelper +MRT_EXPORT void MCC_SetJavaClass(address_t objaddr, address_t klass); +MRT_EXPORT void MCC_SetObjectPermanent(address_t objaddr); +MRT_EXPORT address_t MCC_NewObj_fixed_class(address_t klass); +MRT_EXPORT address_t MCC_Reflect_ThrowInstantiationError(); +MRT_EXPORT address_t MCC_NewObj_flexible_cname(size_t elemSize, size_t nElems, + const char *classNameOrclassObj, + address_t callerObj, unsigned long isClassObj); +MRT_EXPORT address_t MCC_NewObject(address_t klass); +MRT_EXPORT address_t MCC_NewArray8(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewArray16(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewArray32(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewArray64(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewArray(size_t 
nElems, const char *descriptor, address_t callerObj); +MRT_EXPORT address_t MCC_NewPermanentObject(address_t klass); +MRT_EXPORT address_t MCC_NewPermanentArray(size_t elemSize, size_t nElems, + const char *classNameOrclassObj, address_t callerObj, + unsigned long isClassObj); +MRT_EXPORT address_t MCC_NewPermObject(address_t klass); +MRT_EXPORT address_t MCC_NewPermArray8(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewPermArray16(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewPermArray32(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewPermArray64(size_t nElems, address_t klass); +MRT_EXPORT address_t MCC_NewPermArray(size_t nElems, const char *descriptor, address_t callerObj); + +// jsan +MRT_EXPORT void MCC_CheckObjMem(void *obj); + +MRT_EXPORT void MCC_CheckRefCount(address_t obj, uint32_t index); + +// native binding +MRT_EXPORT jobject MCC_CannotFindNativeMethod(const char *signature); +MRT_EXPORT jstring MCC_CannotFindNativeMethod_S(const char *signature); +MRT_EXPORT jobject MCC_CannotFindNativeMethod_A(const char *signature); +MRT_EXPORT void *MCC_FindNativeMethodPtr(uintptr_t **regFuncTabAddr); +MRT_EXPORT void *MCC_FindNativeMethodPtrWithoutException(uintptr_t **regFuncTabAddr); +MRT_EXPORT void *MCC_DummyNativeMethodPtr(); +MRT_EXPORT jobject MCC_DecodeReference(jobject obj); + +// deferredaccess +MRT_EXPORT void MCC_DeferredClinitCheck(address_t daiClass, const MObject *caller, const MString *className); +MRT_EXPORT MClass *MCC_DeferredConstClass(address_t daiClass, const MObject *caller, const MString *className); +MRT_EXPORT bool MCC_DeferredInstanceOf(address_t daiClass, const MObject *caller, const MString *className, + const MObject *obj); +MRT_EXPORT MObject *MCC_DeferredCheckCast(address_t daiClass, const MObject *caller, const MString *className, + MObject *obj); +MRT_EXPORT MObject *MCC_DeferredNewInstance(address_t daiClass, const MObject *caller, const MString *className); +MRT_EXPORT MObject *MCC_DeferredNewArray(address_t daiClass, const MObject *caller, const MString *arrayTypeName, + uint32_t length); +MRT_EXPORT MObject *MCC_DeferredFillNewArray(address_t daiClass, const MObject *caller, + const MString *arrayTypeName, uint32_t length, ...); +MRT_EXPORT jvalue MCC_DeferredLoadField(address_t daiField, MObject *caller, const MString *className, + const MString *fieldName, const MString *fieldTypeName, const MObject *obj); +MRT_EXPORT void MCC_DeferredStoreField(address_t daiField, MObject *caller, const MString *className, + const MString *fieldName, const MString *fieldTypeName, MObject *obj, + jvalue value); +MRT_EXPORT jvalue MCC_DeferredInvoke(address_t daiMethod, int kind, const char *className, const char *methodName, + const char *signature, MObject *obj, ...); + +// type conversion, these two function is used to convert java double or java float to java long on arm32 platform +MRT_EXPORT int64_t MCC_JDouble2JLong(double num); +MRT_EXPORT int64_t MCC_JFloat2JLong(float num); + +// profile +MRT_EXPORT void MCC_SaveProfile(); + +} // extern "C" +} // maplert +#endif diff --git a/src/mrt/maplert/public-headers/mrt_exception_api.h b/src/mrt/maplert/public-headers/mrt_exception_api.h new file mode 100644 index 0000000000..72b46d11ff --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_exception_api.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_MRT_EXCEPTION_H_ +#define MAPLE_MRT_EXCEPTION_H_ + +#include +#include "mrt_api_common.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +MRT_EXPORT void MRT_ClearThrowingException(); + +// null if no pending exception. if return value is not null, an exception is +// raised earlier, and should be handled or rethrown some time later. +MRT_EXPORT bool MRT_FaultHandler(int sig, siginfo_t *info, ucontext_t *context, bool isFromJava); +MRT_EXPORT bool MRT_FaultDebugHandler(int sig, siginfo_t *info, void *context); + + +typedef void (*RealHandler1)(const void*); +MRT_EXPORT bool MRT_PrepareToHandleJavaSignal( + ucontext_t *ucontext, RealHandler1 handler, void *arg1, RealHandler1 rhandler = nullptr, + void *r1 = nullptr, int returnPCOffset = 1); + +MRT_EXPORT void MRT_ThrowExceptionSafe(jobject ex); // check whether ex is throwable +MRT_EXPORT void MRT_ThrowExceptionUnsafe(jobject ex); // do not check, throw anything +MRT_EXPORT void MRT_ClearPendingException(); +MRT_EXPORT jobject MRT_PendingException(); + +MRT_EXPORT void MRT_DumpException(jthrowable ex, std::string *exception_stack = nullptr); +MRT_EXPORT void MRT_DumpExceptionForLog(jthrowable ex); +MRT_EXPORT void MRT_DumpExceptionTypeCount(std::ostream &os); +MRT_EXPORT void MRT_DumpExceptionStack(std::ostream &os); +MRT_EXPORT void MRT_DumpNativeExceptionTypeCount(std::ostream &os); +MRT_EXPORT void MRT_DumpNativeExceptionStack(std::ostream &os); +MRT_EXPORT void MRT_CheckException(bool ok, std::string msg = ""); +MRT_EXPORT void MRT_ThrowNewExceptionUnw(const char *className, const char *msg = "unknown reason"); +MRT_EXPORT void MRT_ThrowNewExceptionRet(const char *className, const char *msg = "unknown reason"); +// check pending exception and raise it if existed. +// MRT_CheckThrowPendingExceptionUnw is used only in runtime functions called +// directly by java code, for example, fast path of some jni methods. +// MRT_CheckThrowPendingExceptionUnw is different from MRT_CheckThrowPendingExceptionUnw +MRT_EXPORT void MRT_CheckThrowPendingExceptionUnw(); +MRT_EXPORT void MRT_ThrowNullPointerExceptionUnw(); // when throwing NPE in signal handler +MRT_EXPORT void MRT_ThrowArrayIndexOutOfBoundsException(int32_t length, int32_t index); +MRT_EXPORT void MRT_ThrowArrayIndexOutOfBoundsExceptionUnw(int32_t length, int32_t index); + +// The function at the end of Ret is a fast exception handling interface, +// which must be used to ensure that it is a tail function. 
+MRT_EXPORT void MRT_CheckThrowPendingExceptionRet(); + +// Unwind +MRT_EXPORT void MRT_SetReliableUnwindContextStatus(); +MRT_EXPORT void MRT_SetIgnoredUnwindContextStatus(); +MRT_EXPORT void MRT_SetRiskyUnwindContext(const uint32_t *pc, void *fp); +MRT_EXPORT void MRT_UpdateLastJavaFrame(const uint32_t *pc, void *fp); +MRT_EXPORT void MRT_UpdateLastUnwindContextIfReliable(const uint32_t *pc, void *fp); + +// exception +MRT_EXPORT void MRT_GetHandlerCatcherArgs(struct HandlerCatcherArgs *pArgs); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif //MAPLE_MRT_EXCEPTION_H_ diff --git a/src/mrt/maplert/public-headers/mrt_fields_api.h b/src/mrt/maplert/public-headers/mrt_fields_api.h new file mode 100644 index 0000000000..3bc56ea882 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_fields_api.h @@ -0,0 +1,367 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_LIB_CORE_MPL_FILEDS_H_ +#define MAPLE_LIB_CORE_MPL_FILEDS_H_ + +#include +#include + +#include "jsan.h" +#include "gc_config.h" +#include "address.h" +#include "mrt_api_common.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// Load and store operations for fields +MRT_EXPORT address_t MRT_LoadRefField(address_t obj, address_t *fieldAddr); +MRT_EXPORT void MRT_WriteRefField(address_t obj, address_t *field, address_t value); +MRT_EXPORT address_t MRT_LoadVolatileField(address_t obj, address_t *fieldAddr); +MRT_EXPORT void MRT_WriteVolatileField(address_t obj, address_t *objAddr, address_t value); +MRT_EXPORT void MRT_WriteVolatileStaticField(address_t *fieldAddr, address_t value); +MRT_EXPORT void MRT_WriteRefFieldStatic(address_t *field, address_t value); + +// used for field with rc_weak_field annotation +MRT_EXPORT void MRT_WriteWeakField(address_t obj, address_t *field, address_t value, bool isVolatile); +MRT_EXPORT address_t MRT_LoadWeakField(address_t obj, address_t *field, bool isVolatile); +MRT_EXPORT void MRT_WriteReferentField(address_t obj, address_t *fieldAddr, address_t value, + bool isResurrectWeak); +MRT_EXPORT address_t MRT_LoadReferentField(address_t obj, address_t *fieldAddr); + +static inline bool MRT_LoadJBoolean(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline bool MRT_LoadJBooleanVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline int8_t MRT_LoadJByte(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline int8_t MRT_LoadJByteVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline int16_t MRT_LoadJShort(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline int16_t MRT_LoadJShortVolatile(address_t obj, size_t offset) { + 
JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline uint16_t MRT_LoadJChar(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline uint16_t MRT_LoadJCharVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline int32_t MRT_LoadJInt(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline int32_t MRT_LoadJIntVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline int64_t MRT_LoadJLong(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline int64_t MRT_LoadJLongVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline float MRT_LoadJFloat(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline float MRT_LoadJFloatVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline double MRT_LoadJDouble(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return *reinterpret_cast(obj + offset); +} + +static inline double MRT_LoadJDoubleVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast*>(obj + offset)->load(std::memory_order_seq_cst); +} + +static inline uintptr_t MRT_LoadJObject(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + return reinterpret_cast(LoadRefField(obj, offset)); +} + +static inline uintptr_t MRT_LoadJObjectInc(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + uintptr_t res = reinterpret_cast(MRT_LoadRefField(obj, reinterpret_cast(obj + offset))); + return res; +} + +static inline uintptr_t MRT_LoadJObjectIncVolatile(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + uintptr_t res = reinterpret_cast(MRT_LoadVolatileField(obj, reinterpret_cast(obj + offset))); + return res; +} + +static inline uintptr_t MRT_LoadJObjectIncReferent(address_t obj, size_t offset) { + JSAN_CHECK_OBJ(obj); + uintptr_t res = reinterpret_cast(MRT_LoadReferentField(obj, reinterpret_cast(obj + offset))); + return res; +} + +static inline void MRT_StoreJBoolean(address_t obj, size_t offset, bool value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJBooleanVolatile(address_t obj, size_t offset, bool value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreJByte(address_t obj, size_t offset, int8_t value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJByteVolatile(address_t obj, size_t offset, int8_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreJShort(address_t obj, size_t offset, int16_t value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJShortVolatile(address_t obj, size_t offset, int16_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + 
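// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the accessors in this header all
// follow the pattern below; it is spelled out once for int32_t because the
// generated diff text dropped the template arguments inside reinterpret_cast
// and std::atomic. Demo* names are hypothetical, and <atomic> is assumed to be
// among the headers included at the top of this file.
static inline int32_t DemoLoadJIntPlain(address_t obj, size_t offset) {
  return *reinterpret_cast<int32_t*>(obj + offset);  // plain, non-atomic read
}

static inline int32_t DemoLoadJIntVolatile(address_t obj, size_t offset) {
  return reinterpret_cast<std::atomic<int32_t>*>(obj + offset)->load(std::memory_order_seq_cst);
}

static inline void DemoStoreJIntVolatile(address_t obj, size_t offset, int32_t value) {
  reinterpret_cast<std::atomic<int32_t>*>(obj + offset)->store(value, std::memory_order_seq_cst);
}
// ---------------------------------------------------------------------------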
+static inline void MRT_StoreJChar(address_t obj, size_t offset, uint16_t value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJCharVolatile(address_t obj, size_t offset, uint16_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreJInt(address_t obj, size_t offset, int32_t value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJIntVolatile(address_t obj, size_t offset, int32_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreOrderedJInt(address_t obj, size_t offset, int32_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_release); +} + +static inline void MRT_StoreJLong(address_t obj, size_t offset, int64_t value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJLongVolatile(address_t obj, size_t offset, int64_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreOrderedJLong(address_t obj, size_t offset, int64_t value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_release); +} + + +static inline void MRT_StoreJFloat(address_t obj, size_t offset, float value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJFloatVolatile(address_t obj, size_t offset, float value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreJDouble(address_t obj, size_t offset, double value) { + JSAN_CHECK_OBJ(obj); + *reinterpret_cast(obj + offset) = value; +} + +static inline void MRT_StoreJDoubleVolatile(address_t obj, size_t offset, double value) { + JSAN_CHECK_OBJ(obj); + reinterpret_cast*>(obj + offset)->store(value, std::memory_order_seq_cst); +} + +static inline void MRT_StoreJObject(address_t obj, size_t offset, uintptr_t value) { + JSAN_CHECK_OBJ(obj); + MRT_WriteRefField(obj, reinterpret_cast(obj + offset), reinterpret_cast(value)); +} + +static inline void MRT_StoreJObjectStatic(address_t *addr, uintptr_t value) { + JSAN_CHECK_OBJ(*addr); + MRT_WriteRefFieldStatic(addr, reinterpret_cast(value)); +} + +static inline void MRT_StoreMetaJobject(address_t obj, size_t offset, uintptr_t value) { + JSAN_CHECK_OBJ(obj); + StoreRefField(obj, offset, reinterpret_cast(value)); +} + +static inline void MRT_StoreJObjectNoRc(address_t obj, size_t offset, uintptr_t value) { + JSAN_CHECK_OBJ(obj); + StoreRefField(obj, offset, reinterpret_cast(value)); +} + +static inline void MRT_StoreJObjectVolatile(address_t obj, size_t offset, uintptr_t value) { + JSAN_CHECK_OBJ(obj); + MRT_WriteVolatileField(obj, reinterpret_cast(obj + offset), reinterpret_cast(value)); +} + +static inline void MRT_StoreJObjectVolatileStatic(address_t *addr, uintptr_t value) { + JSAN_CHECK_OBJ(*addr); + MRT_WriteVolatileStaticField(addr, reinterpret_cast(value)); +} + +static inline void MRT_WriteReferent(address_t obj, size_t offset, uintptr_t value, bool isResurrectWeak) { + JSAN_CHECK_OBJ(obj); + MRT_WriteReferentField(obj, reinterpret_cast(obj + offset), + reinterpret_cast(value), isResurrectWeak); +} + +#define MRT_LOAD_JBOOLEAN(obj, offset) MRT_LoadJBoolean(reinterpret_cast(obj), 
offset) +#define MRT_LOAD_JBYTE(obj, offset) MRT_LoadJByte(reinterpret_cast(obj), offset) +#define MRT_LOAD_JSHORT(obj, offset) MRT_LoadJShort(reinterpret_cast(obj), offset) +#define MRT_LOAD_JCHAR(obj, offset) MRT_LoadJChar(reinterpret_cast(obj), offset) +#define MRT_LOAD_JINT(obj, offset) MRT_LoadJInt(reinterpret_cast(obj), offset) +#define MRT_LOAD_JLONG(obj, offset) MRT_LoadJLong(reinterpret_cast(obj), offset) +#define MRT_LOAD_JFLOAT(obj, offset) MRT_LoadJFloat(reinterpret_cast(obj), offset) +#define MRT_LOAD_JDOUBLE(obj, offset) MRT_LoadJDouble(reinterpret_cast(obj), offset) + +#define MRT_LOAD_JBOOLEAN_VOLATILE(obj, offset) MRT_LoadJBooleanVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JBYTE_VOLATILE(obj, offset) MRT_LoadJByteVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JSHORT_VOLATILE(obj, offset) MRT_LoadJShortVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JCHAR_VOLATILE(obj, offset) MRT_LoadJCharVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JINT_VOLATILE(obj, offset) MRT_LoadJIntVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JLONG_VOLATILE(obj, offset) MRT_LoadJLongVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JFLOAT_VOLATILE(obj, offset) MRT_LoadJFloatVolatile(reinterpret_cast(obj), offset) +#define MRT_LOAD_JDOUBLE_VOLATILE(obj, offset) MRT_LoadJDoubleVolatile(reinterpret_cast(obj), offset) + +// not used in mrt, native(android-mrt) should use defines in +// maplert/public-headers/mrt_fields_api.h +// MRT_LOAD_JOBJECT_INC load object and IncRef +#define MRT_LOAD_JOBJECT(obj, offset) \ + MRT_LoadJObject(reinterpret_cast(obj), offset) trigger_compiler_err // should not use +#define MRT_LOAD_JOBJECT_INC(obj, offset) \ + MRT_LoadJObjectInc(reinterpret_cast(obj), offset) // used in runtime code (maplert) +#define MRT_LOAD_JOBJECT_INC_VOLATILE(obj, offset) \ + MRT_LoadJObjectIncVolatile(reinterpret_cast(obj), offset) // used in runtime code (maplert) +#define MRT_LOAD_JOBJECT_INC_REFERENT(obj, offset) \ + MRT_LoadJObjectIncReferent(reinterpret_cast(obj), offset) // used in runtime code (maplert) +#define MRT_LOAD_META_JOBJECT(obj, offset) \ + MRT_LoadJObject(reinterpret_cast(obj), offset) // used in runtime code (maplert) + +// ONLY use in mrt_array.cpp for unsafe array copying. 
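// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the intended shape of a caller of
// the two unsafe macros defined immediately below. One reference slot is
// copied without IncRef on the load and without any RC update or write barrier
// on the store, so the caller must keep both arrays alive for the duration.
// DemoUnsafeCopyRefSlot is a hypothetical name.
static inline void DemoUnsafeCopyRefSlot(address_t src, address_t dst, size_t offset) {
  uintptr_t ref = __UNSAFE_MRT_LOAD_JOBJECT_NOINC(src, offset);  // no IncRef
  __UNSAFE_MRT_STORE_JOBJECT_NORC(dst, offset, ref);             // no RC update, no barrier
}
// ---------------------------------------------------------------------------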
+#define __UNSAFE_MRT_LOAD_JOBJECT_NOINC(obj, offset) MRT_LoadJObject(reinterpret_cast(obj), offset) +#define __UNSAFE_MRT_STORE_JOBJECT_NORC(obj, offset, value) \ + MRT_StoreJObjectNoRc(reinterpret_cast(obj), offset, reinterpret_cast(value)) + +#define MRT_STORE_JBOOLEAN(obj, offset, value) MRT_StoreJBoolean(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JBYTE(obj, offset, value) MRT_StoreJByte(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JSHORT(obj, offset, value) MRT_StoreJShort(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JCHAR(obj, offset, value) MRT_StoreJChar(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JINT(obj, offset, value) MRT_StoreJInt(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JLONG(obj, offset, value) MRT_StoreJLong(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JFLOAT(obj, offset, value) MRT_StoreJFloat(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JDOUBLE(obj, offset, value) MRT_StoreJDouble(reinterpret_cast(obj), offset, value) +#define MRT_STORE_ORDERED_JINT(obj, offset, value) \ + MRT_StoreOrderedJInt(reinterpret_cast(obj), offset, value) +#define MRT_STORE_ORDERED_JLONG(obj, offset, value) \ + MRT_StoreOrderedJLong(reinterpret_cast(obj), offset, value) + +#define MRT_STORE_JBOOLEAN_VOLATILE(obj, offset, value) \ + MRT_StoreJBooleanVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JBYTE_VOLATILE(obj, offset, value) \ + MRT_StoreJByteVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JSHORT_VOLATILE(obj, offset, value) \ + MRT_StoreJShortVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JCHAR_VOLATILE(obj, offset, value) \ + MRT_StoreJCharVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JINT_VOLATILE(obj, offset, value) \ + MRT_StoreJIntVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JLONG_VOLATILE(obj, offset, value) \ + MRT_StoreJLongVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JFLOAT_VOLATILE(obj, offset, value) \ + MRT_StoreJFloatVolatile(reinterpret_cast(obj), offset, value) +#define MRT_STORE_JDOUBLE_VOLATILE(obj, offset, value) \ + MRT_StoreJDoubleVolatile(reinterpret_cast(obj), offset, value) + +#define MRT_STORE_JOBJECT(obj, offset, value) \ + MRT_StoreJObject(reinterpret_cast(obj), offset, reinterpret_cast(value)) + +#define MRT_STORE_JOBJECT_STATIC(addr, value) \ + MRT_StoreJObjectStatic(addr, reinterpret_cast(value)) + +#define MRT_STORE_META_JOBJECT(obj, offset, value) \ + MRT_StoreMetaJobject(reinterpret_cast(obj), offset, reinterpret_cast(value)) + +#define MRT_STORE_JOBJECT_VOLATILE(obj, offset, value) \ + MRT_StoreJObjectVolatile(reinterpret_cast(obj), offset, reinterpret_cast(value)) + +#define MRT_STORE_JOBJECT_VOLATILE_STATIC(addr, value) \ + MRT_StoreJObjectVolatileStatic(addr, reinterpret_cast(value)) + +#define MRT_WRITE_REFERENT(obj, offset, value, isResurrectWeak) \ + MRT_WriteReferent(reinterpret_cast(obj), offset, reinterpret_cast(value), isResurrectWeak) + +#ifdef __cplusplus +} // namespace maplert +} // extern "C" +#endif + +#endif //MAPLE_LIB_CORE_MPL_FILEDS_H_ + diff --git a/src/mrt/maplert/public-headers/mrt_libs_api.h b/src/mrt/maplert/public-headers/mrt_libs_api.h new file mode 100644 index 0000000000..146efb66dd --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_libs_api.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_LIBS_H_ +#define MAPLE_RUNTIME_LIBS_H_ + +#include "mrt_api_common.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +MRT_EXPORT void MRT_DumpMethodUse(std::ostream &os); + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif +#endif diff --git a/src/mrt/maplert/public-headers/mrt_linker_api.h b/src/mrt/maplert/public-headers/mrt_linker_api.h new file mode 100644 index 0000000000..9be9d1d4a0 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_linker_api.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_CLASS_LINKER_API_H_ +#define MRT_CLASS_LINKER_API_H_ + +#include "mrt_api_common.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif +MRT_EXPORT bool MRT_LinkerIsJavaText(const void *addr); +MRT_EXPORT void *MRT_LinkerGetSymbolAddr(void *handle, const char *symbol, bool isFunction); +MRT_EXPORT void MRT_LinkerSetCachePath(const char *path); +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif +#endif // MRT_CLASS_LOCATOR_MGR_H_ diff --git a/src/mrt/maplert/public-headers/mrt_mm_config_common.h b/src/mrt/maplert/public-headers/mrt_mm_config_common.h new file mode 100644 index 0000000000..730811666c --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_mm_config_common.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MM_CONFIG_COMMON_H +#define MAPLE_RUNTIME_MM_CONFIG_COMMON_H + +#ifndef CONFIG_JSAN +#define CONFIG_JSAN 0 +#endif + +// By default, enable unit test code on non-android +// platform (for example: qemu on host linux). +#ifndef MRT_UNIT_TEST +#ifndef __ANDROID__ +#define MRT_UNIT_TEST 1 +#endif +#endif + +namespace maplert { +// Use this inline function to "use" a variable so that the compiler will not +// complain about unused variables. This is especially useful when implementing +// gc-only where functions like RC_LOCAL_INC_REF and RC_LOCAL_DEC_REF do not +// do anything. 
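// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of this patch): in a gc-only configuration
// where the RC helpers mentioned above expand to nothing, otherwise-unused
// operands can be routed through MRT_DummyUse to keep -Wunused-variable quiet.
// DemoGcOnlyPath is a hypothetical caller and assumes it is compiled after the
// definition of MRT_DummyUse that follows below.
static inline void DemoGcOnlyPath(int unusedFlag, void *unusedObj) {
  MRT_DummyUse(unusedFlag, unusedObj);  // counts as a use, generates no code
}
// ---------------------------------------------------------------------------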
+template +__attribute__((always_inline)) inline void MRT_DummyUse(Types&& ... args __attribute__((unused))) {} +} // namespace maplert + + +#endif //MAPLE_RUNTIME_MMCONFIG_COMMON_H diff --git a/src/mrt/maplert/public-headers/mrt_monitor_api.h b/src/mrt/maplert/public-headers/mrt_monitor_api.h new file mode 100644 index 0000000000..a1a373bcc7 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_monitor_api.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_RUNTIME_MONITOR_H_ +#define MAPLE_RUNTIME_MONITOR_H_ + +#include "mrt_api_common.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif +MRT_EXPORT jobject MRT_CloneJavaObject(jobject obj); +/* + * Returns memory used by the specified object. + * NOTICE: Includes the object header. + */ +MRT_EXPORT size_t MRT_SizeOfObject(jobject obj); + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif //MAPLE_RUNTIME_MONITOR_H_ diff --git a/src/mrt/maplert/public-headers/mrt_naming_api.h b/src/mrt/maplert/public-headers/mrt_naming_api.h new file mode 100644 index 0000000000..77ec7dfd17 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_naming_api.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MRT_NAMEMANGLER_API_H_ +#define MRT_NAMEMANGLER_API_H_ + +#include +#include "mrt_api_common.h" + +// The namespace is shared between RT and compiler +namespace namemanglerapi { +MRT_EXPORT std::string MangleForJni(const std::string &s); +MRT_EXPORT std::string MangleForJniDex(const std::string &s); +} + +#endif //MRT_NAMEMANGLER_API_H_ diff --git a/src/mrt/maplert/public-headers/mrt_poisonstack.h b/src/mrt/maplert/public-headers/mrt_poisonstack.h new file mode 100644 index 0000000000..69430ada77 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_poisonstack.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_POISIONSTACK_H_ +#define MAPLEALL_MAPLERT_JAVA_ANDROID_MRT_INCLUDE_MRT_POISIONSTACK_H_ + +#include +#include "mrt_api_common.h" +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif // __cplusplus + +MRT_EXPORT void MRT_InitPoisonStack(uintptr_t framePtr); + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif // __cplusplus +#endif diff --git a/src/mrt/maplert/public-headers/mrt_primitive_api.h b/src/mrt/maplert/public-headers/mrt_primitive_api.h new file mode 100644 index 0000000000..f7e699cd7e --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_primitive_api.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_LIB_CORE_MPL_PRIMITIVE_H_ +#define MAPLE_LIB_CORE_MPL_PRIMITIVE_H_ + +#include "mrt_api_common.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// PrimitiveClass +MRT_EXPORT jclass MRT_GetPrimitiveClassJboolean(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJbyte(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJchar(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJdouble(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJfloat(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJint(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJlong(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassJshort(void); +MRT_EXPORT jclass MRT_GetPrimitiveClassVoid(void); + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif //MAPLE_LIB_CORE_MPL_PRIMITIVE_H_ diff --git a/src/mrt/maplert/public-headers/mrt_public_api.h b/src/mrt/maplert/public-headers/mrt_public_api.h new file mode 100644 index 0000000000..c0d01acf63 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_public_api.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MRT_PUBLIC_API +#define MRT_PUBLIC_API +#include +#include "mrt_class_api.h" +#include "mrt_string_api.h" +#include "mrt_primitive_api.h" +#include "mrt_reflection_api.h" +#include "mrt_reference_api.h" +#include "mrt_array_api.h" +#include "mrt_monitor_api.h" +#include "mrt_fields_api.h" +#include "mrt_exception_api.h" +#include "mrt_thread_api.h" +#include "mrt_libs_api.h" +#include "mrt_naming_api.h" +#include "mrt_classloader_api.h" +#include "mrt_linker_api.h" +#endif diff --git a/src/mrt/maplert/public-headers/mrt_reference_api.h b/src/mrt/maplert/public-headers/mrt_reference_api.h new file mode 100644 index 0000000000..844d2141e7 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_reference_api.h @@ -0,0 +1,224 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_LIB_CORE_MPL_REFERENCE_H_ +#define MAPLE_LIB_CORE_MPL_REFERENCE_H_ + +#include + +// mrt/libmrtbase/include/gc_config.h +#include +#include +#include "mrt_api_common.h" +#include "jni.h" +#include "gc_config.h" +#include "gc_callback.h" +#include "gc_reason.h" +#include "gc_roots.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// cycle pattern valid flag +const int kCycleValid = 0; +const int kCycleNotValid = 1; +const int kCyclePermernant = 2; +// cycle pattern node flag +const unsigned int kCycleNodeSubClass = 0x1; +// cycle pattern edge flag +const unsigned int kCycleEdgeSkipMatch = 0x1; + +MRT_EXPORT void MRT_IncRef(address_t obj); +MRT_EXPORT void MRT_DecRef(address_t obj); +MRT_EXPORT void MRT_IncDecRef(address_t incObj, address_t decObj); +MRT_EXPORT void MRT_DecRefUnsync(address_t obj); +MRT_EXPORT void MRT_IncResurrectWeak(address_t obj); + +MRT_EXPORT void MRT_ReleaseObj(address_t obj); +MRT_EXPORT void MRT_CollectWeakObj(address_t obj); +MRT_EXPORT address_t MRT_IncRefNaiveRCFast(address_t obj); +MRT_EXPORT address_t MRT_DecRefNaiveRCFast(address_t obj); +MRT_EXPORT void MRT_IncDecRefNaiveRCFast(address_t incAddr, address_t decAddr); +// encoding the cycle pattern +MRT_EXPORT void MRT_GetCyclePattern(std::ostream& os); +MRT_EXPORT std::ostream *MRT_GetCycleLogFile(); + +// collector specific JNI interface +// Unsafe +MRT_EXPORT bool MRT_UnsafeCompareAndSwapObject(address_t obj, ssize_t offset, + address_t expectedValue, address_t newValue); +MRT_EXPORT address_t MRT_UnsafeGetObjectVolatile(address_t obj, ssize_t offset); +MRT_EXPORT address_t MRT_UnsafeGetObject(address_t obj, ssize_t offset); +MRT_EXPORT void MRT_UnsafePutObject(address_t obj, ssize_t offset, address_t newValue); +MRT_EXPORT void MRT_UnsafePutObjectVolatile(address_t obj, ssize_t offset, address_t newValue); +MRT_EXPORT void MRT_UnsafePutObjectOrdered(address_t obj, ssize_t offset, address_t newValue); + +// weak global reference decoding barrier +MRT_EXPORT void MRT_WeakRefGetBarrier(address_t referent); + +// get the reference count of an object. only for external usage. 
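// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the discipline implied by the
// naive-RC entry points declared above when a reference is cached outside the
// Java heap. DemoPinnedRef is a hypothetical holder type.
struct DemoPinnedRef {
  address_t ref = 0;
  void Set(address_t obj) {
    if (ref != 0) {
      MRT_DecRef(ref);  // drop the count taken for the previously cached object
    }
    if (obj != 0) {
      MRT_IncRef(obj);  // count the reference now held outside the heap
    }
    ref = obj;
  }
  ~DemoPinnedRef() {
    Set(0);  // release on destruction
  }
};
// ---------------------------------------------------------------------------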
+MRT_EXPORT uint32_t MRT_RefCount(address_t obj); + +MRT_EXPORT bool MRT_CheckHeapObj(uintptr_t obj); +MRT_EXPORT bool MRT_IsGarbage(address_t obj); +MRT_EXPORT bool MRT_IsValidOffHeapObject(jobject obj); + +MRT_EXPORT bool MRT_EnterSaferegion(bool rememberLastJavaFrame = true); +MRT_EXPORT bool MRT_LeaveSaferegion(); + +MRT_EXPORT address_t MRT_GetHeapLowerBound(); +MRT_EXPORT address_t MRT_GetHeapUpperBound(); +MRT_EXPORT bool MRT_IsValidObjAddr(address_t obj); + +// Keep these consistent with compiler-rt/include/cinterface.h +struct VMHeapParam { + size_t heapStartSize; + size_t heapSize; + size_t heapGrowthLimit; + size_t heapMinFree; + size_t heapMaxFree; + float heapTargetUtilization; + bool ignoreMaxFootprint; + bool gcOnly = false; + bool enableGCLog = false; + bool isZygote = false; +}; + +MRT_EXPORT bool MRT_GCInitGlobal(const VMHeapParam &vmHeapParam); +MRT_EXPORT bool MRT_GCFiniGlobal(); +MRT_EXPORT bool MRT_GCInitThreadLocal(bool isMain); +MRT_EXPORT bool MRT_GCFiniThreadLocal(); +MRT_EXPORT void MRT_GCStart(); + +// fork handlers. +MRT_EXPORT void MRT_GCPreFork(); +MRT_EXPORT void MRT_GCPostForkChild(bool isSystem); +MRT_EXPORT void MRT_GCPostForkCommon(bool isZygote); +MRT_EXPORT void MRT_ForkInGC(bool flag); +MRT_EXPORT void MRT_GCLogPostFork(); +MRT_EXPORT void MRT_RegisterNativeAllocation(size_t byte); +MRT_EXPORT void MRT_RegisterNativeFree(size_t byte); +MRT_EXPORT void MRT_NotifyNativeAllocation(); + +// returns number of GC occurred since this function was called last time and +// stop-the-world time maximum in ms. both counts are reset after the call +MRT_EXPORT void MRT_GetGcCounts(size_t &gcCount, uint64_t &maxGcMs); + +// returns info for memory leak identified by backup tracing +// average leak and peak leak memory size in Bytes +// both numbers are reset are the call +MRT_EXPORT void MRT_GetMemLeak(size_t &avgLeak, size_t &peakLeak); + +// returns the memory utilization rate (higher the better) +// and allocation +MRT_EXPORT void MRT_GetMemAlloc(float &util, size_t &abnormalCount); + +// returns number of RC abnormal such as rc counts +// increased from zero to one +MRT_EXPORT void MRT_GetRCParam(size_t &abnormalCount); + +inline unsigned long MRT_GetGCUsedHeapMemoryTotal() { + return 0; +}; + +MRT_EXPORT void MRT_ResetHeapStats(); +MRT_EXPORT size_t MRT_AllocSize(); +MRT_EXPORT size_t MRT_AllocCount(); +MRT_EXPORT size_t MRT_FreeSize(); +MRT_EXPORT size_t MRT_FreeCount(); +MRT_EXPORT size_t MRT_TotalMemory(); +// returns the max memory usage +MRT_EXPORT size_t MRT_MaxMemory(); +// returns the free memory within the limit +MRT_EXPORT size_t MRT_FreeMemory(); +MRT_EXPORT void MRT_SetHeapProfile(int hp); +// return all live instances of the given class +MRT_EXPORT void MRT_GetInstances(jclass klass, bool includeAssignable, + size_t max_count, std::vector &instances); + +MRT_EXPORT void MRT_DebugCleanup(); +MRT_EXPORT void MRT_RegisterGCRoots(address_t *gcroots[], size_t len); +MRT_EXPORT void MRT_RegisterRCCheckAddr(uint64_t *addr); + +MRT_EXPORT void MRT_SetReferenceProcessMode(bool immediate); +MRT_EXPORT void *MRT_ProcessReferences(void *args); +MRT_EXPORT void MRT_StopProcessReferences(bool doFinalizeOnStop = false); +MRT_EXPORT void MRT_WaitProcessReferencesStopped(); +MRT_EXPORT void MRT_WaitProcessReferencesStarted(); + +MRT_EXPORT void MRT_WaitGCStopped(); +MRT_EXPORT void MRT_CheckSaferegion(bool expect, const char *msg); + +// dump RC and GC information into stream os +MRT_EXPORT void MRT_DumpRCAndGCPerformanceInfo(std::ostream &os); + +// Enable/disable 
GC triggers +MRT_EXPORT void MRT_TriggerGC(maplert::GCReason reason); +MRT_EXPORT bool MRT_IsGcThread(); +MRT_EXPORT bool MRT_IsNaiveRCCollector(); + +MRT_EXPORT bool MRT_IsValidObjectAddress(address_t obj); +MRT_EXPORT void MRT_VisitAllocatedObjects(maple::rootObjectFunc func); + +// Alloc tracking +MRT_EXPORT void MRT_SetAllocRecordingCallback(std::function callback); + +// Trim memory +MRT_EXPORT void MRT_Trim(bool aggressive); +MRT_EXPORT void MRT_RequestTrim(); + +MRT_EXPORT void MRT_SetHwBlobClass(jclass cls); +MRT_EXPORT void MRT_SetSurfaceControlClass(jclass cls); +MRT_EXPORT void MRT_SetBinderProxyClass(jclass cls); + +// Udpate process state +MRT_EXPORT void MRT_UpdateProcessState(ProcessState processState, bool isSystemServer); + +// cycle pattern +MRT_EXPORT void MRT_DumpDynamicCyclePatterns(std::ostream &os, size_t limit); +MRT_EXPORT void MRT_SendCyclePatternJob(std::function job); +MRT_EXPORT void MRT_SetPeriodicSaveCpJob(std::function job); +MRT_EXPORT void MRT_SetPeriodicLearnCpJob(std::function job); +MRT_EXPORT bool MRT_IsCyclePatternUpdated(); + +// Set a callback function which is called after GC finished, but before +// starting the world. Useful for performing cleaning up after a GC. +MRT_EXPORT void MRT_DumpStaticField(std::ostream &os); +MRT_EXPORT void MRT_logRefqueuesSize(); + +// MRT_PreWriteRefField should be called before we directly write a reference field +// of an java heap object but not use write barrier such as MRT_WriteRefField(). +// This is required for concurrent marking. +MRT_EXPORT void MRT_PreWriteRefField(address_t obj); +MRT_EXPORT size_t MRT_GetNativeAllocBytes(); +MRT_EXPORT void MRT_SetNativeAllocBytes(size_t size); + +MRT_EXPORT address_t MRT_LoadVolatileField(address_t obj, address_t *fieldAddr); +MRT_EXPORT address_t MRT_LoadRefFieldCommon(address_t obj, address_t *fieldAddr); +MRT_EXPORT void MRT_ClassInstanceNum(std::map &objNameCntMp); + +// reference api +MRT_EXPORT address_t MRT_ReferenceGetReferent(address_t javaThis); +MRT_EXPORT void MRT_ReferenceClearReferent(address_t javaThis); +MRT_EXPORT void MRT_RunFinalization(); + + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif //MAPLE_LIB_CORE_MPL_REFERENCE_H_ diff --git a/src/mrt/maplert/public-headers/mrt_reflection_api.h b/src/mrt/maplert/public-headers/mrt_reflection_api.h new file mode 100644 index 0000000000..22c0b66f43 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_reflection_api.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_MRT_REFLECTION_API_H +#define MAPLE_MRT_REFLECTION_API_H + +#include "mrt_api_common.h" +#include "jni.h" +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +// bridging API between JNI jmethodID/jfieldID to Java Method/Field object +MRT_EXPORT jfieldID MRT_ReflectFromReflectedField(jobject fieldObj); +MRT_EXPORT jobject MRT_ReflectToReflectedField(jclass clazz, jfieldID fid); +MRT_EXPORT jmethodID MRT_ReflectFromReflectedMethod(jobject jobj); +MRT_EXPORT jobject MRT_ReflectToReflectedMethod(jclass clazz, jmethodID mid); + +MRT_EXPORT jclass MRT_ReflectClassForCharName(const char *className, bool init, + jobject ClassLoader, bool internalName = false); +MRT_EXPORT jclass MRT_ReflectGetObjectClass(jobject jobj); +MRT_EXPORT jobject MRT_ReflectGetClassLoader(jobject jobj); +MRT_EXPORT jint MRT_ReflectGetArrayIndexScaleForComponentType(jclass componentClassObj); + +MRT_EXPORT bool MRT_ClassSetGctib(jclass classObj, char *newBuffer, jint offset); +MRT_EXPORT uint32_t MRT_GetFieldOffset(jfieldID field); +MRT_EXPORT jboolean MRT_ReflectClassIsArray(jclass classObj); +MRT_EXPORT jboolean MRT_ReflectClassIsPrimitive(jclass classObj); +MRT_EXPORT jboolean MRT_ReflectIsString(const jclass classObj); +MRT_EXPORT jboolean MRT_ReflectIsClass(const jobject classObj); +MRT_EXPORT jboolean MRT_ReflectClassIsInterface(jclass classObj); +MRT_EXPORT jboolean MRT_ReflectIsInit(jclass classObj); +MRT_EXPORT jboolean MRT_ReflectInitialized(jclass classObj); +MRT_EXPORT jclass MRT_ReflectClassGetSuperClass(jclass clazz); +MRT_EXPORT jboolean MRT_ReflectIsInstanceOf(jobject jobj, jclass javaClass); +MRT_EXPORT jboolean MRT_ReflectClassIsAssignableFrom(jclass superClass, jclass subClass); +MRT_EXPORT uint32_t MRT_ReflectClassGetNumofFields(jclass cls); +MRT_EXPORT jclass MRT_ReflectClassGetComponentType(jclass classObj); +MRT_EXPORT jint MRT_ReflectClassGetAccessFlags(jclass classObj); +MRT_EXPORT jobject MRT_ReflectClassGetDeclaredMethods(jclass classObj, jboolean publicOnly); +MRT_EXPORT jobjectArray MRT_ReflectClassGetInterfaces(jclass classObj); +MRT_EXPORT jint MRT_ReflectClassGetModifiers(jclass classObj); +MRT_EXPORT jobject MRT_ReflectClassGetDeclaredFields(jclass classObj, jboolean publicOnly); +MRT_EXPORT jobject MRT_ReflectClassGetDeclaredAnnotations(jclass classObj); +MRT_EXPORT jobjectArray MRT_ReflectClassGetDeclaredClasses(jclass classObj); +MRT_EXPORT jclass MRT_ReflectClassGetDeclaringClass(jclass classObj); +MRT_EXPORT jobject MRT_ReflectClassGetDeclaredConstructors(jclass classObj, jboolean publicOnly); + +MRT_EXPORT bool MRT_IsMetaObject(jobject jobj); +MRT_EXPORT bool MRT_IsValidMethod(jmethodID jmid); +MRT_EXPORT bool MRT_IsValidField(jfieldID jfid); +MRT_EXPORT bool MRT_IsValidClass(jclass jclazz); + +MRT_EXPORT jobject MRT_ReflectAllocObject(const jclass javaClass, bool isJNI = false); +MRT_EXPORT jobject MRT_ReflectNewObjectA(const jclass javaClass, const jmethodID mid, + const jvalue *args, bool isJNI = false); + +MRT_EXPORT jfieldID MRT_ReflectClassGetFieldsPtr(jclass classObj); +MRT_EXPORT jfieldID MRT_ReflectClassGetIndexField(jfieldID head, int i); + +MRT_EXPORT jfieldID MRT_ReflectGetCharField(jclass classObj, const char *fieldName, const char *fieldType = nullptr); +MRT_EXPORT jfieldID MRT_ReflectGetStaticCharField(jclass classObj, const char *fieldName); +MRT_EXPORT jclass MRT_ReflectFieldGetDeclaringClass(jfieldID fieldMeta); +MRT_EXPORT jboolean MRT_ReflectFieldIsStatic(jfieldID fieldObj); +MRT_EXPORT char 
*MRT_ReflectFieldGetCharFieldName(jfieldID obj); +MRT_EXPORT char *MRT_ReflectFieldGetTypeName(jfieldID fieldMeta); +MRT_EXPORT jclass MRT_ReflectFieldGetType(jfieldID fieldMeta); +MRT_EXPORT jint MRT_ReflectFieldGetOffset(jobject fieldMeta); + +MRT_EXPORT jboolean MRT_ReflectGetFieldjboolean(jfieldID fieldObj, jobject obj); +MRT_EXPORT jbyte MRT_ReflectGetFieldjbyte(jfieldID fieldObj, jobject obj); +MRT_EXPORT jchar MRT_ReflectGetFieldjchar(jfieldID fieldObj, jobject obj); +MRT_EXPORT jdouble MRT_ReflectGetFieldjdouble(jfieldID fieldObj, jobject obj); +MRT_EXPORT jfloat MRT_ReflectGetFieldjfloat(jfieldID fieldObj, jobject obj); +MRT_EXPORT jint MRT_ReflectGetFieldjint(jfieldID fieldObj, jobject obj); +MRT_EXPORT jlong MRT_ReflectGetFieldjlong(jfieldID fieldObj, jobject obj); +MRT_EXPORT jshort MRT_ReflectGetFieldjshort(jfieldID fieldObj, jobject obj); +MRT_EXPORT jobject MRT_ReflectGetFieldjobject(jfieldID fieldObj, jobject obj); + +MRT_EXPORT void MRT_ReflectSetFieldjboolean(jfieldID fieldObj, jobject obj, jboolean value); +MRT_EXPORT void MRT_ReflectSetFieldjbyte(jfieldID fieldObj, jobject obj, jbyte value); +MRT_EXPORT void MRT_ReflectSetFieldjchar(jfieldID fieldObj, jobject obj, jchar value); +MRT_EXPORT void MRT_ReflectSetFieldjshort(jfieldID fieldObj, jobject obj, jshort value); +MRT_EXPORT void MRT_ReflectSetFieldjint(jfieldID fieldObj, jobject obj, jint value); +MRT_EXPORT void MRT_ReflectSetFieldjfloat(jfieldID fieldObj, jobject obj, jfloat value); +MRT_EXPORT void MRT_ReflectSetFieldjlong(jfieldID fieldObj, jobject obj, jlong value); +MRT_EXPORT void MRT_ReflectSetFieldjdouble(jfieldID fieldObj, jobject obj, jdouble value); +MRT_EXPORT void MRT_ReflectSetFieldjobject(jfieldID fieldObj, jobject obj, jobject value); + +// function for dump heap +MRT_EXPORT jboolean MRT_ReflectGetFieldjbooleanUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jbyte MRT_ReflectGetFieldjbyteUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jchar MRT_ReflectGetFieldjcharUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jdouble MRT_ReflectGetFieldjdoubleUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jfloat MRT_ReflectGetFieldjfloatUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jint MRT_ReflectGetFieldjintUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jlong MRT_ReflectGetFieldjlongUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jshort MRT_ReflectGetFieldjshortUnsafe(jfieldID fieldObj, jobject obj); +MRT_EXPORT jobject MRT_ReflectGetFieldjobjectUnsafe(jfieldID fieldObj, jobject obj); + +MRT_EXPORT jmethodID MRT_ReflectGetCharMethod(jclass classObj, const char *methodName, const char *signatureName); +MRT_EXPORT jmethodID MRT_ReflectGetStaticCharMethod(jclass classObj, const char *methodName, + const char *signatureName); +MRT_EXPORT jmethodID MRT_ReflectGetMethodFromMethodID(jclass clazz, jmethodID methodID, const char *signature); +MRT_EXPORT char *MRT_ReflectGetMethodName(jmethodID mid); +MRT_EXPORT jint MRT_ReflectGetMethodArgsize(jmethodID mid); +MRT_EXPORT void MRT_ReflectGetMethodArgsType(const char *signame, const jint argsize, char *shorty); +MRT_EXPORT char *MRT_ReflectGetMethodSig(jmethodID mid); +MRT_EXPORT jclass MRT_ReflectMethodGetDeclaringClass(jmethodID methodId); +MRT_EXPORT jboolean MRT_ReflectMethodIsStatic(jmethodID methodMeta); +MRT_EXPORT jboolean MRT_ReflectMethodIsConstructor(jmethodID methodMeta); +MRT_EXPORT jobjectArray MRT_ReflectMethodGetExceptionTypes(jobject methodObj); +MRT_EXPORT jobject MRT_ReflectMethodGetDefaultValue(jobject methodObj); 
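// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): a typical pairing of the field
// lookup and typed getter entry points declared above. DemoReadIntField is a
// hypothetical helper, and the "I" type descriptor passed to the lookup is an
// assumption about the expected field-type string.
static inline jint DemoReadIntField(jclass cls, jobject instance, const char *name) {
  jfieldID fid = MRT_ReflectGetCharField(cls, name, "I");
  if (fid == nullptr) {
    return 0;  // a real caller would raise NoSuchFieldError here
  }
  return MRT_ReflectGetFieldjint(fid, instance);
}
// ---------------------------------------------------------------------------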
+MRT_EXPORT jobject MRT_ReflectMethodGetAnnotationNative(jobject executable, jint index, jclass annoClass); +MRT_EXPORT void MRT_ReflectMethodForward(jobject from, jobject to); +MRT_EXPORT jmethodID MRT_ReflectMethodClone(jmethodID methodObj); + +#ifndef TYIPLE_MRT_REFLECT_INVOKE_DECL +#define TYIPLE_MRT_REFLECT_INVOKE_DECL(TYPE) \ +MRT_EXPORT TYPE MRT_ReflectInvokeMethodA##TYPE(jobject obj, const jmethodID mid, const jvalue *args); \ +MRT_EXPORT TYPE MRT_ReflectInvokeMethodAZ##TYPE(jobject obj, const jmethodID mid, const jvalue *args, \ + uintptr_t calleeFuncAddr); +#endif + +// invoke a slow method +TYIPLE_MRT_REFLECT_INVOKE_DECL(void) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jboolean) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jbyte) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jchar) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jdouble) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jfloat) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jint) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jlong) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jobject) +TYIPLE_MRT_REFLECT_INVOKE_DECL(jshort) + +// proxy interface api +MRT_EXPORT jclass MRT_ReflectProxyGenerateProxy(jstring name, jobjectArray interfaces, jobject loader, + jobjectArray methods, jobjectArray throws); +MRT_EXPORT jobject MRT_ReflectConstructorNewInstance0(jobject javaMethod, jobjectArray javaArgs); +MRT_EXPORT jobject MRT_ReflectConstructorNewInstanceFromSerialization(jclass ctorClass, const jclass allocClass); +// Executable interface api +MRT_EXPORT jobject MRT_ReflectExecutableGetSignatureAnnotation(jobject methodObj); +MRT_EXPORT jobject MRT_ReflectExecutableGetParameterAnnotationsNative(jobject method); +MRT_EXPORT jboolean MRT_ReflectExecutableIsAnnotationPresentNative(jobject methodObj, jclass annoObj); +MRT_EXPORT jint MRT_ReflectExecutableCompareMethodParametersInternal(jobject obj1, jobject obj2); +MRT_EXPORT jobjectArray MRT_ReflectExecutableGetDeclaredAnnotationsNative(jobject methodObj); +MRT_EXPORT jobject MRT_ReflectExecutableGetAnnotationNative(jobject methodObj, jclass annoClass); +MRT_EXPORT jobject MRT_ReflectExecutableGetParameters0(jobject thisObj); +MRT_EXPORT jobjectArray MRT_ReflectExecutableGetParameterTypesInternal(jobject methodObj); +MRT_EXPORT jint MRT_ReflectExecutableGetParameterCountInternal(jobject methodObj); +MRT_EXPORT jclass MRT_ReflectExecutableGetMethodReturnTypeInternal(jobject methodObj); +MRT_EXPORT jstring MRT_ReflectExecutableGetMethodNameInternal(jobject methodObj); +// MethodHandleImpl interface api +MRT_EXPORT jobject MRT_MethodHandleImplGetMemberInternal(jobject methodHandle); +// openjdk +MRT_EXPORT jclass MRT_ReflectGetOrCreateArrayClassObj(jclass elementClass); + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif // MAPLE_MRT_REFLECTION_API_H diff --git a/src/mrt/maplert/public-headers/mrt_string_api.h b/src/mrt/maplert/public-headers/mrt_string_api.h new file mode 100644 index 0000000000..4a09731cd6 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_string_api.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_LIB_CORE_MPL_STRING_H_ +#define MAPLE_LIB_CORE_MPL_STRING_H_ + +#include +#include "mrt_api_common.h" +#include "jni.h" + +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif +MRT_EXPORT jint MRT_StringGetStringLength(jstring jstr); +MRT_EXPORT void MRT_ScanLiteralPoolRoots(std::function visitRoot); +MRT_EXPORT void MRT_ReleaseStringUTFChars(jstring jstr, const char *chars); +MRT_EXPORT void MRT_ReleaseStringChars(jstring jstr, const jchar *chars); +MRT_EXPORT jstring MRT_NewHeapJStr(const jchar *ca, jint len, bool isJNI = false); +MRT_EXPORT bool MRT_IsStrCompressed(const jstring jstr); +MRT_EXPORT char *MRT_GetStringContentsPtrRaw(jstring jstr); +MRT_EXPORT jchar *MRT_GetStringContentsPtrCopy(jstring jstr, jboolean *isCopy); +MRT_EXPORT size_t MRT_GetStringObjectSize(jstring jstr); +// only for MUTF-8 string JNI +MRT_EXPORT jstring MRT_NewStringMUTF(const char *inMutf, size_t inMutfLen, bool isJNI = false); +MRT_EXPORT jsize MRT_GetStringMUTFLength(jstring jstr); +MRT_EXPORT char *MRT_GetStringMUTFChars(jstring jstr, jboolean *isCopy); +MRT_EXPORT void MRT_GetStringMUTFRegion(jstring jstr, jsize start, jsize length, char *buf); + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif // MAPLE_LIB_CORE_MPL_STRING_H_ diff --git a/src/mrt/maplert/public-headers/mrt_thread_api.h b/src/mrt/maplert/public-headers/mrt_thread_api.h new file mode 100644 index 0000000000..c841273bf6 --- /dev/null +++ b/src/mrt/maplert/public-headers/mrt_thread_api.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_MRT_CLINIT_H_ +#define MAPLE_MRT_CLINIT_H_ + +#include "mrt_api_common.h" +#ifdef __cplusplus +namespace maplert { +extern "C" { +#endif + +#ifdef __cplusplus +} // extern "C" +} // namespace maplert +#endif + +#endif //MAPLE_MRT_CLINIT_H_ diff --git a/src/mrt/maplert/src/deferredaccess.cpp b/src/mrt/maplert/src/deferredaccess.cpp new file mode 100644 index 0000000000..a40a80a74a --- /dev/null +++ b/src/mrt/maplert/src/deferredaccess.cpp @@ -0,0 +1,470 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "deferredaccess.h" +#include "fieldmeta_inline.h" +#include "exception/mrt_exception.h" + +namespace maplert { +using namespace deferredaccess; +// implement MCC interface function +extern "C" { +MClass *MCC_DeferredConstClass(address_t daiClass, const MObject *caller, const MString *className) { + deferredaccess::DaiClass *dclass = reinterpret_cast(daiClass); + if (dclass != nullptr && dclass->daiClass != nullptr) { + return dclass->daiClass; + } + + if (className == nullptr) { + MRT_ThrowNullPointerExceptionUnw(); + return nullptr; + } + std::string descriptor = className->GetChars(); + MClass *classObject = deferredaccess::GetConstClass(*caller, descriptor.c_str()); + if (classObject != nullptr) { + if (dclass != nullptr) { + dclass->daiClass = classObject; + } + return classObject; + } + MRT_CheckThrowPendingExceptionUnw(); + return nullptr; +} + +void MCC_DeferredClinitCheck(address_t daiClass, const MObject *caller, const MString *className) { + MClass *classObject = MCC_DeferredConstClass(daiClass, caller, className); + DCHECK(classObject != nullptr) << "MCC_DeferredClinitCheck: classObject is nullptr!" << maple::endl; + deferredaccess::ClinitCheck(*classObject); +} + +bool MCC_DeferredInstanceOf(address_t daiClass, const MObject *caller, const MString *className, const MObject *obj) { + if (obj == nullptr) { + return false; + } + MClass *classObject = MCC_DeferredConstClass(daiClass, caller, className); + DCHECK(classObject != nullptr) << "MCC_DeferredInstanceOf: classObject is nullptr!" << maple::endl; + bool res = deferredaccess::IsInstanceOf(*classObject, *obj); + return res; +} + +MObject *MCC_DeferredCheckCast(address_t daiClass, const MObject *caller, const MString *className, MObject *obj) { + MClass *classObject = MCC_DeferredConstClass(daiClass, caller, className); + DCHECK(classObject != nullptr) << "MCC_DeferredCheckCast: classObject is nullptr!" << maple::endl; + if (obj == nullptr) { + return obj; + } + if (classObject == nullptr) { + return nullptr; + } + bool res = deferredaccess::IsInstanceOf(*classObject, *obj); + if (res) { + RC_LOCAL_INC_REF(obj); + return obj; + } + + { + // Cast Fail, ThrowClassCastException + MClass *targerObjClass = obj->GetClass(); + std::string sourceName; + std::string targerName; + classObject->GetBinaryName(sourceName); + targerObjClass->GetBinaryName(targerName); + std::ostringstream msg; + msg << targerName << " cannot be cast to " << sourceName; + MRT_ThrowNewException("java/lang/ClassCastException", msg.str().c_str()); + } + MRT_CheckThrowPendingExceptionUnw(); + return nullptr; +} + +MObject *MCC_DeferredNewInstance(address_t daiClass, const MObject *caller, const MString *className) { + MClass *classObject = MCC_DeferredConstClass(daiClass, caller, className); + if (classObject == nullptr) { + return nullptr; + } + MObject *object = deferredaccess::NewInstance(*classObject); + return object; +} + +MObject *MCC_DeferredNewArray(address_t daiClass, const MObject *caller, + const MString *arrayTypeName, uint32_t length) { + MClass *classObject = MCC_DeferredConstClass(daiClass, caller, arrayTypeName); + if (classObject == nullptr) { + return nullptr; + } + MArray *arrayObject = deferredaccess::NewArray(*classObject, length); + return arrayObject; +} + +MObject *MCC_DeferredFillNewArray(address_t daiClass, const MObject *caller, const MString *arrayTypeName, + uint32_t length, ...) 
{ + MClass *classObject = MCC_DeferredConstClass(daiClass, caller, arrayTypeName); + if (classObject == nullptr) { + return nullptr; + } + va_list args; + va_start(args, length); + MArray *arrayObject = deferredaccess::NewArray(*classObject, length, args); + va_end(args); + return arrayObject; +} + +jvalue MCC_DeferredLoadField(address_t daiField, MObject *caller, const MString *className, + const MString *fieldName, const MString *fieldTypeName, const MObject *obj) { + DCHECK(caller != 0); + DCHECK(fieldName != 0); + DCHECK(fieldTypeName != 0); + jvalue result; + result.l = 0UL; + MClass *classObject = MCC_DeferredConstClass(0, caller, className); + if (classObject == nullptr) { + return result; + } + deferredaccess::DaiField *dField = reinterpret_cast(daiField); + FieldMeta *fieldMeta = nullptr; + if (dField == nullptr || dField->fieldMeta == nullptr) { + fieldMeta = deferredaccess::InitDaiField(dField, *classObject, *fieldName, *fieldTypeName); + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + return result; + } + } else { + fieldMeta = dField->fieldMeta; + } + + CHECK(fieldMeta != nullptr) << "fieldMeta is nullptr!"; + if (!deferredaccess::CheckFieldAccess(*caller, *fieldMeta, obj)) { + MRT_CheckThrowPendingExceptionUnw(); + return result; + } + + result = deferredaccess::LoadField(*fieldMeta, obj); + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + } + return result; +} + +void MCC_DeferredStoreField(address_t daiField, MObject *caller, const MString *className, + const MString *fieldName, const MString *fieldTypeName, MObject *obj, jvalue value) { + MClass *classObject = MCC_DeferredConstClass(0, caller, className); + if (classObject == nullptr) { + return; + } + DCHECK(caller != nullptr); + DCHECK(fieldName != nullptr); + DCHECK(fieldTypeName != nullptr); + deferredaccess::DaiField *dField = reinterpret_cast(daiField); + FieldMeta *fieldMeta = nullptr; + if (dField == nullptr || dField->fieldMeta == nullptr) { + fieldMeta = deferredaccess::InitDaiField(dField, *classObject, *fieldName, *fieldTypeName); + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + return; + } + } else { + fieldMeta = dField->fieldMeta; + } + + CHECK(fieldMeta != nullptr) << "fieldMeta is nullptr!"; + if (!deferredaccess::CheckFieldAccess(*caller, *fieldMeta, obj)) { + MRT_CheckThrowPendingExceptionUnw(); + return; + } + + deferredaccess::StoreField(*fieldMeta, obj, value); + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + } +} + +// compiler invoke MCC_DeferredInvoke, args order: +// (address_t daiMethod, int32_t kind, const char *className, const char *methodName, +// const char *signature, MObject *obj, ...) 
{ +extern "C" int64_t EnterDeferredInvoke(intptr_t *stack) { + CHECK(stack != nullptr); + deferredaccess::DaiMethod *dMethod = reinterpret_cast(stack[kDaiMethod]); + deferredaccess::DeferredInvokeType invokeType = static_cast(stack[kInvokeType]); + const char *className = reinterpret_cast(stack[kClassName]); + const char *methodName = reinterpret_cast(stack[kMethodName]); + const char *signature = reinterpret_cast(stack[kSignature]); + MObject *obj = reinterpret_cast(stack[kThisObj]); + jvalue result; + result.l = 0UL; + if (UNLIKELY(obj == nullptr)) { + MRT_ThrowNewExceptionUnw("java/lang/NullPointerException", "Attempt to invoke method on a null object reference"); + return result.j; + } + + MethodMeta *methodMeta = nullptr; + if (dMethod == nullptr || dMethod->methodMeta == nullptr) { + MClass *classObject = deferredaccess::GetConstClass(*obj, className); + if (classObject == nullptr) { + MRT_CheckThrowPendingExceptionUnw(); + return result.j; + } + methodMeta = deferredaccess::InitDaiMethod(invokeType, dMethod, *classObject, methodName, signature); + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + return result.j; + } + } else { + methodMeta = dMethod->methodMeta; + } + { + DecodeStackArgs stackArgs(stack); + std::string prefix("IIIIIL"); + methodMeta->BuildJValuesArgsFromStackMemeryPrefixSigNature(stackArgs, prefix); + result = deferredaccess::Invoke(invokeType, methodMeta, obj, &(stackArgs.GetData()[prefix.length()])); + } + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + } + return result.j; +} +} // extern "C" + +// implement deferredaccess functions +MClass *deferredaccess::GetConstClass(const MObject &caller, const char *descriptor) { + MClass *classObject = MClass::GetClassFromDescriptor(&caller, descriptor); + return classObject; +} + +void deferredaccess::ClinitCheck(const MClass &classObject) { + if (!classObject.InitClassIfNeeded()) { + // exception + MRT_CheckThrowPendingExceptionUnw(); + } +} + +bool deferredaccess::IsInstanceOf(const MClass &classObject, const MObject &obj) { + return obj.IsInstanceOf(classObject); +} + +MObject *deferredaccess::NewInstance(const MClass &classObject) { + MObject *o = MObject::NewObject(classObject); + return o; +} + +MArray *deferredaccess::NewArray(const MClass &classObject, uint32_t length) { + MClass *componentClass = classObject.GetComponentClass(); + DCHECK(componentClass != nullptr) << "deferredaccess::NewArray: componentClass is nullptr!" 
<< maple::endl; + bool isPrimitive = componentClass->IsPrimitiveClass(); + CHECK(isPrimitive == false) << " always New Object Array, but get " << + classObject.GetName() << maple::endl; + MArray *arrayObj = MArray::NewObjectArrayComponentClass(length, *componentClass); + return arrayObj; +} + +MArray *deferredaccess::NewArray(const MClass &classObject, uint32_t length, va_list initialElement) { + MArray *mArrayObj = NewArray(classObject, length); + CHECK(mArrayObj->IsObjectArray()) << " always ObjectArray, but get " << + classObject.GetName() << maple::endl; + for (uint32_t i = 0; i < length; ++i) { + MObject *value = reinterpret_cast(va_arg(initialElement, jobject)); + mArrayObj->SetObjectElement(i, value); + } + return mArrayObj; +} + +jvalue deferredaccess::LoadField(const FieldMeta &fieldMeta, const MObject *obj) { + jvalue result; + result.l = 0UL; + if (!fieldMeta.IsStatic() && obj == nullptr) { + std::string msg = "Attempt to read from field "; + std::string fieldFullName = fieldMeta.GetFullName(fieldMeta.GetDeclaringclass(), true); + msg += "'" + fieldFullName + "' " + "on a null object reference"; + MRT_ThrowNewException("java/lang/NullPointerException", msg.c_str()); + return result; + } + const char *fieldMetaTypeName = fieldMeta.GetTypeName(); + DCHECK(fieldMetaTypeName != nullptr) << "fieldMetaTypeName cannot be nullptr." << maple::endl; + char srcType = fieldMetaTypeName[0]; + switch (srcType) { + case 'Z': + result.z = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'B': + result.b = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'C': + result.c = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'S': + result.s = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'I': + result.i = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'J': + result.j = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'F': + result.f = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + case 'D': + result.d = fieldMeta.GetPrimitiveValue(obj, srcType); + break; + default: + result.l = reinterpret_cast(fieldMeta.GetObjectValue(obj)); + } + return result; +} + +void deferredaccess::StoreField(const FieldMeta &fieldMeta, MObject *obj, jvalue value) { + if (!fieldMeta.IsStatic() && obj == nullptr) { + std::string msg = "Attempt to write to field "; + std::string fieldFullName = fieldMeta.GetFullName(fieldMeta.GetDeclaringclass(), true); + msg += "'" + fieldFullName + "' " + "on a null object reference"; + MRT_ThrowNewException("java/lang/NullPointerException", msg.c_str()); + return; + } + const char *fieldMetaTypeName = fieldMeta.GetTypeName(); + DCHECK(fieldMetaTypeName != nullptr) << "fieldMetaTypeName cannot be nullptr." << maple::endl; + DCHECK(obj != nullptr) << "obj cannot be nullptr." 
<< maple::endl; + char srcType = fieldMetaTypeName[0]; + switch (srcType) { + case 'Z': + fieldMeta.SetPrimitiveValue(obj, srcType, value.z); + break; + case 'B': + fieldMeta.SetPrimitiveValue(obj, srcType, value.b); + break; + case 'C': + fieldMeta.SetPrimitiveValue(obj, srcType, value.c); + break; + case 'S': + fieldMeta.SetPrimitiveValue(obj, srcType, value.s); + break; + case 'I': + fieldMeta.SetPrimitiveValue(obj, srcType, value.i); + break; + case 'J': + fieldMeta.SetPrimitiveValue(obj, srcType, value.j); + break; + case 'F': + fieldMeta.SetPrimitiveValue(obj, srcType, value.f); + break; + case 'D': + fieldMeta.SetPrimitiveValue(obj, srcType, value.d); + break; + default: + fieldMeta.SetObjectValue(obj, reinterpret_cast(value.l)); + } +} + +FieldMeta *deferredaccess::InitDaiField(DaiField *daiField, const MClass &classObject, + const MString &fieldName, const MString &fieldTypeName) { + std::string fName = fieldName.GetChars(); + std::string tName = fieldTypeName.GetChars(); + FieldMeta *fieldMeta = classObject.GetField(fName.c_str(), tName.c_str()); + if (UNLIKELY(fieldMeta == nullptr)) { + std::string msg = "No field "; + msg += fName + std::string(" of type ") + tName + + " in class " + classObject.GetName() + " or its superclasses"; + MRT_ThrowNewException("java/lang/NoSuchFieldError", msg.c_str()); + } else { + if (daiField != nullptr) { + daiField->fieldMeta = fieldMeta; + } + } + return fieldMeta; +} + +MethodMeta *deferredaccess::InitDaiMethod(DeferredInvokeType invokeType, DaiMethod *daiMethod, const MClass &classObj, + const char *methodName, const char *signatureName) { + MethodMeta *methodMeta = nullptr; + if (invokeType == kDirectCall) { + methodMeta = classObj.GetDeclaredMethod(methodName, signatureName); + } else { + // invokeType: kVirtualCall, kInterfaceCall, kSuperCall, kStaticCall + methodMeta = classObj.GetMethod(methodName, signatureName); + } + + if (UNLIKELY(methodMeta == nullptr)) { + std::string msg = "No method "; + msg += methodName + std::string(signatureName) + " in class " + classObj.GetName() + " or its superclasses"; + MRT_ThrowNewException("java/lang/NoSuchMethodError", msg.c_str()); + } else { + if (daiMethod != nullptr) { + daiMethod->methodMeta = methodMeta; + } + } + return methodMeta; +} + +jvalue deferredaccess::Invoke(DeferredInvokeType invokeType, const MethodMeta *methodMeta, MObject *obj, + jvalue args[]) { + jvalue result; + result.l = 0UL; + DCHECK(obj != nullptr) << "deferredaccess::Invoke: obj is nullptr!" << maple::endl; + CHECK(methodMeta != nullptr) << "deferredaccess::Invoke: methodMeta must not be nullptr!" << maple::endl; + if (!methodMeta->IsDirectMethod() && ((invokeType == kVirtualCall) || (invokeType == kInterfaceCall))) { + methodMeta = obj->GetClass()->GetMethod(methodMeta->GetName(), methodMeta->GetSignature()); + } + CHECK(methodMeta != nullptr) << "deferredaccess::Invoke: can not find method: " << + obj->GetClass()->GetName() << maple::endl; + char *retTypeName = methodMeta->GetReturnTypeName(); + DCHECK(retTypeName != nullptr) << "deferredaccess::retTypeName is nullptr!" 
<< maple::endl; + char retType = retTypeName[0]; + switch (retType) { + case 'Z': + result.z = methodMeta->Invoke(obj, args); + break; + case 'B': + result.b = methodMeta->Invoke(obj, args); + break; + case 'C': + result.c = methodMeta->Invoke(obj, args); + break; + case 'S': + result.s = methodMeta->Invoke(obj, args); + break; + case 'I': + result.i = methodMeta->Invoke(obj, args); + break; + case 'J': + result.j = methodMeta->Invoke(obj, args); + break; + case 'F': + result.f = methodMeta->Invoke(obj, args); + break; + case 'D': + result.d = methodMeta->Invoke(obj, args); + break; + default: + result.l = methodMeta->Invoke(obj, args); + } + return result; +} + +bool deferredaccess::CheckFieldAccess(MObject &caller, const FieldMeta &fieldMeta, const MObject *obj) { + uint32_t modifier = fieldMeta.GetMod(); + MClass *declaringClass = fieldMeta.GetDeclaringclass(); + obj = fieldMeta.IsStatic() ? declaringClass : obj; + MClass *callerClass = + caller.GetClass() == WellKnown::GetMClassClass() ? static_cast(&caller) : caller.GetClass(); + if (!reflection::VerifyAccess(obj, declaringClass, modifier, callerClass, 0)) { + std::string callerStr, declearClassStr; + callerClass->GetTypeName(callerStr); + declaringClass->GetTypeName(declearClassStr); + std::ostringstream msg; + msg << "Field '" << declearClassStr << "." << fieldMeta.GetName() << + "' is inaccessible to class '" << callerStr <<"'"; + MRT_ThrowNewException("java/lang/IllegalAccessError", msg.str().c_str()); + return false; + } + return true; +} +} diff --git a/src/mrt/maplert/src/fieldmeta.cpp b/src/mrt/maplert/src/fieldmeta.cpp new file mode 100644 index 0000000000..dd46bd355e --- /dev/null +++ b/src/mrt/maplert/src/fieldmeta.cpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fieldmeta.h" +#include "mclass_inline.h" +#include "fieldmeta_inline.h" +#include "namemangler.h" +namespace maplert { +std::mutex FieldMetaCompact::resolveMutex; +void FieldMetaCompact::SetCompactFieldMetaOffset(const MClass &cls, uint32_t index, int32_t fieldOffset) { + FieldMetaCompact *leb = cls.GetCompactFields(); + uint32_t numOfField = cls.GetNumOfFields(); + for (uint32_t i = 0; i < numOfField; ++i) { + if (i == index) { + leb->SetBitOffset(static_cast(fieldOffset)); + return; + } + const uint8_t *pLeb = leb->GetpCompact(); + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + leb = reinterpret_cast(const_cast(pLeb)); + } + LOG(FATAL) << "FieldMetaCompact::GetCompactFieldMetasOffsetAddr: " << cls.GetName() << + ", index:" << index << maple::endl; +} + +FieldMetaCompact *FieldMetaCompact::DecodeCompactFieldMetasToVector( + const MClass &cls, uint32_t vecSize, char **typeNameVec, char **fieldNameVec, size_t *offsetVec, + uint32_t *modifierVec, FieldMetaCompact **fieldMetaCompact, char **annotationVec) { + uint32_t numOfField = cls.GetNumOfFields(); + if (numOfField == 0 || numOfField > vecSize) { + return nullptr; + } + FieldMetaCompact *leb = cls.GetCompactFields(); + for (uint32_t i = 0; i < numOfField; ++i) { + if (fieldMetaCompact != nullptr) { + fieldMetaCompact[i] = leb; + } + const uint8_t *pLeb = leb->GetpCompact(); + uint32_t modifier = namemangler::GetUnsignedLeb128Decode(&pLeb); + uintptr_t offset = leb->GetOffsetOrAddress(modifier::IsStatic(modifier)); + if (offsetVec != nullptr) { + offsetVec[i] = offset; + } + + if (modifierVec != nullptr) { + modifierVec[i] = modifier; + } + uint32_t typeNameIndex = namemangler::GetUnsignedLeb128Decode(&pLeb); + char *typeName = LinkerAPI::Instance().GetCString(cls.AsJclass(), typeNameIndex); + if (typeNameVec != nullptr) { + typeNameVec[i] = typeName; + } + uint32_t fieldNameIndex = namemangler::GetUnsignedLeb128Decode(&pLeb); + char *fieldName = LinkerAPI::Instance().GetCString(cls.AsJclass(), fieldNameIndex); + if (fieldNameVec != nullptr) { + fieldNameVec[i] = fieldName; + } + uint32_t annotationIndex = namemangler::GetUnsignedLeb128Decode(&pLeb); + char *annotation = LinkerAPI::Instance().GetCString(cls.AsJclass(), annotationIndex); + if (annotationVec != nullptr) { + annotationVec[i] = annotation; + } + leb = reinterpret_cast(const_cast(pLeb)); + } + return cls.GetCompactFields(); +} + +FieldMeta *FieldMetaCompact::DecodeCompactFieldMetas(MClass &cls) { + uint32_t numOfField = cls.GetNumOfFields(); + if (numOfField == 0) { + return nullptr; + } + CHECK(numOfField <= (std::numeric_limits::max() / sizeof(FieldMeta))) << + "field count too large. 
numOfField " << numOfField << maple::endl; + { + std::lock_guard lock(resolveMutex); + FieldMeta *fields = cls.GetRawFieldMetas(); + if (!cls.IsCompactMetaFields()) { + return fields; + } + // Compact, need resolve + char *typeNameVec[numOfField]; + char *fieldNameVec[numOfField]; + char *annotationVec[numOfField]; + size_t offsetVec[numOfField]; + uint32_t modifierVec[numOfField]; + (void)DecodeCompactFieldMetasToVector(cls, numOfField, typeNameVec, fieldNameVec, offsetVec, + modifierVec, nullptr, annotationVec); +#ifndef USE_32BIT_REF + for (uint32_t i = 0; i < numOfField; ++i) { + char *fieldNameStr = fieldNameVec[i]; + char *annoStr = annotationVec[i]; + size_t fieldNameStrLen = strlen(fieldNameStr) + 1; + size_t annoStrLen = strlen(annoStr) + 1; + char *strBuffer = reinterpret_cast(MRT_AllocFromMeta(fieldNameStrLen + annoStrLen, kNativeStringData)); + char *fieldNameStrBuffer = strBuffer; + char *annoStrBuffer = strBuffer + fieldNameStrLen; + errno_t tmpResult1 = memcpy_s(fieldNameStrBuffer, fieldNameStrLen, fieldNameStr, fieldNameStrLen); + errno_t tmpResult2 = memcpy_s(annoStrBuffer, annoStrLen, annoStr, annoStrLen); + if (UNLIKELY(tmpResult1 != EOK || tmpResult2 != EOK)) { + LOG(FATAL) << "FieldMetaCompact::DecodeCompactFieldMetas : memcpy_s() failed" << maple::endl; + return nullptr; + } + fieldNameVec[i] = fieldNameStrBuffer; + annotationVec[i] = annoStrBuffer; + } +#endif + size_t size = sizeof(FieldMeta) * numOfField; + FieldMeta *fieldMetas = FieldMeta::Cast(MRT_AllocFromMeta(size, kFieldMetaData)); + for (uint32_t i = 0; i < numOfField; ++i) { + FieldMeta *newFieldMeta = fieldMetas + i; + newFieldMeta->FillFieldMeta(modifierVec[i], offsetVec[i], typeNameVec[i], modifier::kHashConflict, + static_cast(i), fieldNameVec[i], annotationVec[i], cls); + } + cls.SetFields(*fieldMetas); + return fieldMetas; + } +} + +uint8_t *FieldMetaCompact::GetpCompact() { + return &leb128Start; +} + +FieldOffset *FieldMetaCompact::GetFieldpOffset() const { + // Compact is always ro + FieldOffset *fieldOffset = pOffset.GetDataRef(); + return fieldOffset; +} + +uint32_t FieldMetaCompact::GetBitOffset() const { + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "FieldMetaCompact::GetBitOffset(): fieldOffset is nullptr!" << maple::endl; + return fieldOffset->GetBitOffset(); +} + +void FieldMetaCompact::SetBitOffset(uint32_t offset) { + FieldOffset *fieldOffset = GetFieldpOffset(); + fieldOffset->SetBitOffset(offset); +} + +uintptr_t FieldMetaCompact::GetOffsetOrAddress(bool isStatic) const { + uintptr_t offsetOrAddress; + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "FieldMetaCompact::GetOffsetOrAddress: fieldOffset is nullptr!" 
<< maple::endl; + if (isStatic) { + offsetOrAddress = fieldOffset->GetAddress(); + } else { + offsetOrAddress = fieldOffset->GetBitOffset(); + } + return offsetOrAddress; +} + +void FieldMeta::SetFieldName(const char *name) { + fieldName.SetDataRef(name); +} + +void FieldMeta::SetAnnotation(const char *fieldAnnotation) { + annotation.SetDataRef(fieldAnnotation); +} + +void FieldMeta::SetDeclaringClass(const MClass &dlClass) { + declaringClass.SetDataRef(&dlClass); +} + +std::string FieldMeta::GetFullName(const MClass *dclClass, bool needType) const { + char *name = GetName(); + DCHECK(name != nullptr) << "FieldMeta::GetFullName:name is nullptr" << maple::endl; + std::string fullName, declaringClassName; + if (dclClass != nullptr) { + dclClass->GetBinaryName(declaringClassName); + } + if (needType) { + std::string typeStr; + MClass *fieldType = GetType(); + if (fieldType != nullptr) { + fieldType->GetTypeName(typeStr); + } else { + typeStr = GetTypeName(); + } + fullName = typeStr + " "; + } + fullName += declaringClassName; + fullName += "."; + fullName += name; + return fullName; +} + +MObject *FieldMeta::GetSignatureAnnotation() const { + std::string annotationSet = GetAnnotation(); + if (annotationSet.empty()) { + return nullptr; + } + MObject *ret = AnnoParser::GetSignatureValue(annotationSet, GetDeclaringclass()); + return ret; +} + +void FieldMeta::FillFieldMeta(uint32_t modifier, size_t offset, const char *srcTypeName, uint16_t hash, + uint16_t fieldIndex, const char *name, const char *strAnnotation, const MClass &cls) { + SetMod(modifier); + SetOffsetOrAddress(offset); + SetTypeName(srcTypeName); + SetHashCode(hash); + SetIndex(fieldIndex); + SetFieldName(name); + SetAnnotation(strAnnotation); + SetDeclaringClass(cls); +} + +static MObject *ReflectGetField(const MObject *o, uint32_t modifier, size_t offset) { + const bool isVolatile = modifier::IsVolatile(modifier); + if (Collector::Instance().Type() == kNaiveRC) { + if (modifier::IsWeakRef(modifier)) { + if (UNLIKELY(offset == 0)) { + LOG(FATAL) << "static field is rc weak ref " << maple::endl; + } + return MObject::Cast(MRT_LoadWeakField(reinterpret_cast(o), + reinterpret_cast(reinterpret_cast(o) + offset), isVolatile)); + } else if(modifier::IsUnowned(modifier)) { + if (UNLIKELY(offset == 0)) { + LOG(FATAL) << "static field is rc unowned ref " << maple::endl; + } else if (UNLIKELY(isVolatile)) { + LOG(FATAL) << "volatile can not be rc unowned " << o->GetClass()->GetName() << " " << offset << maple::endl; + } + } + } + return o->LoadObject(offset, isVolatile); +} + +static void ReflectSetField(const MObject *o, uint32_t modifier, size_t offset, const MObject *newValue) { + const bool isVolatile = modifier::IsVolatile(modifier); + if (Collector::Instance().Type() == kNaiveRC) { + if (modifier::IsWeakRef(modifier)) { + if (UNLIKELY(offset == 0)) { + LOG(FATAL) << "static field is rc weak ref " << maple::endl; + } + MRT_WriteWeakField(reinterpret_cast(o), + reinterpret_cast(reinterpret_cast(o) + offset), + reinterpret_cast(newValue), isVolatile); + return; + } else if(modifier::IsUnowned(modifier)) { + if (UNLIKELY(offset == 0)) { + LOG(FATAL) << "static field is rc unowned ref " << maple::endl; + } else if (UNLIKELY(isVolatile)) { + LOG(FATAL) << "volatile can not be rc unowned " << o->GetClass()->GetName() << " " << offset << maple::endl; + } + o->StoreObjectNoRc(offset, newValue); + return; + } + } + o->StoreObject(offset, newValue, isVolatile); +} + +static void ReflectSetFieldStatic(uint32_t modifier, address_t *addr, const 
MObject *newValue) { + if (Collector::Instance().Type() == kNaiveRC) { + if (modifier::IsWeakRef(modifier)) { + LOG(FATAL) << "static field is rc weak ref " << maple::endl; + } else if(modifier::IsUnowned(modifier)) { + LOG(FATAL) << "static field is rc unowned ref " << maple::endl; + } + } + const bool isVolatile = modifier::IsVolatile(modifier); + if (isVolatile) { + MRT_STORE_JOBJECT_VOLATILE_STATIC(addr, newValue); + } else { + MRT_STORE_JOBJECT_STATIC(addr, newValue); + } +} + +void FieldMeta::SetObjectValue(MObject *o, const MObject *value, bool clinitCheck) const { + o = GetRealMObject(o, clinitCheck); + if (UNLIKELY(o == nullptr)) { + return; + } + uint32_t offset = GetOffset(); + uint32_t modifier = GetMod(); + if (IsStatic()) { + ReflectSetFieldStatic(modifier, reinterpret_cast(o), value); + } else { + ReflectSetField(o, modifier, offset, value); + } +} + +MObject *FieldMeta::GetObjectValue(const MObject *o) const { + o = GetRealMObject(o); + if (UNLIKELY(o == nullptr)) { + return nullptr; + } + uint32_t offset = GetOffset(); + uint32_t modifier = GetMod(); + return ReflectGetField(o, modifier, offset); +} + +void FieldMeta::SetOffsetOrAddress(size_t setOffset) { + if (IsStatic()) { + SetStaticAddr(setOffset); + } else { + SetBitOffset(static_cast(setOffset)); + } +} + +void FieldMeta::SetStaticAddr(uintptr_t addr) { + // static field address + DCHECK(IsStatic()) << "should be static field"; + FieldOffset *fieldOffset = GetFieldpOffset(); + DCHECK(fieldOffset != nullptr) << "fieldOffset is nullptr" << maple::endl; + fieldOffset->SetAddress(addr); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/itab_util.cpp b/src/mrt/maplert/src/itab_util.cpp new file mode 100644 index 0000000000..4cb0969a41 --- /dev/null +++ b/src/mrt/maplert/src/itab_util.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "itab_util.h" +#include +#include +#include + +namespace maple { +unsigned int DJBHash(const char *str) { + unsigned int hash = 5381; // initial value for DJB hash algorithm + while (*str) { + hash += (hash << 5) + (unsigned char)(*str++); // calculate the hash code of data + } + return (hash & 0x7FFFFFFF); +} + +unsigned int GetHashIndex(const char *name) { + unsigned int hashcode = DJBHash(name); + return (hashcode % kHashSize); +} + +struct CmpStr { + bool operator()(char const *a, char const *b) const { + return std::strcmp(a, b) < 0; + } +}; + +std::mutex mapLock; + +unsigned int GetSecondHashIndex(const char *name) { + std::lock_guard guard(mapLock); + unsigned int hashcode = DJBHash(name); + return hashcode % kItabSecondHashSize; +} +} // namespace maple diff --git a/src/mrt/maplert/src/java2c_rule.cpp b/src/mrt/maplert/src/java2c_rule.cpp new file mode 100644 index 0000000000..bfb190d395 --- /dev/null +++ b/src/mrt/maplert/src/java2c_rule.cpp @@ -0,0 +1,22 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "java2c_rule.h"
+
+#ifndef UNIT_TEST
+#include "exception/mrt_exception.h"
+#endif
+
+namespace maplert {
+} // namespace maplert
diff --git a/src/mrt/maplert/src/literalstrname.cpp b/src/mrt/maplert/src/literalstrname.cpp
new file mode 100644
index 0000000000..504e1dc1dd
--- /dev/null
+++ b/src/mrt/maplert/src/literalstrname.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "literalstrname.h"
+
+// The literal string name is shared between the maple compiler and the runtime, thus not in namespace maplert.
+// Note there is a macro kConstString "_C_STR_" in literalstrname.h
+// which needs to match the prefix used here.
+static std::string mplConstStr("_C_STR_00000000000000000000000000000000");
+const uint32_t kMaxBytesLength = 15;
+
+namespace {
+const char *kMplDigits = "0123456789abcdef";
+}
+
+// Return the hex string of bytes. The result is the combination of the prefix "_C_STR_" and the hex string of bytes.
+// The upper 4 bits and lower 4 bits of bytes[i] are transformed to hex form and stored separately in the hex string.
+std::string LiteralStrName::GetHexStr(const uint8_t *bytes, uint32_t len) {
+  if (bytes == nullptr) {
+    return std::string();
+  }
+  std::string str(mplConstStr, 0, (len << 1) + kConstStringLen);
+  for (unsigned i = 0; i < len; ++i) {
+    str[2 * i + kConstStringLen] = kMplDigits[(bytes[i] & 0xf0) >> 4]; // get the hex value of upper 4 bits of bytes[i]
+    str[2 * i + kConstStringLen + 1] = kMplDigits[bytes[i] & 0x0f]; // get the hex value of lower 4 bits of bytes[i]
+  }
+  return str;
+}
+
+// Return the hash code of data. The hash code is computed as
+// s[0] * 31 ^ (len - 1) + s[1] * 31 ^ (len - 2) + ... + s[len - 1],
+// where s[i] is the value of swapping the upper 8 bits and lower 8 bits of data[i].
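+//
+// Worked example (for illustration): for data = { u'A', u'B' } (0x0041, 0x0042), the
+// byte-swapped values are s[0] = 0x4100 and s[1] = 0x4200, so the hash is
+//   0x4100 * 31 + 0x4200 = 0x82100,
+// which is exactly what the loop below computes via hash = hash * 31 + s[i].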
+int32_t LiteralStrName::CalculateHashSwapByte(const char16_t *data, uint32_t len) { + uint32_t hash = 0; + const char16_t *end = data + len; + while (data < end) { + hash = (hash << 5) - hash; + char16_t val = *data++; + hash += (((val << 8) & 0xff00) | ((val >> 8) & 0xff)); + } + return static_cast(hash); +} + +std::string LiteralStrName::GetLiteralStrName(const uint8_t *bytes, uint32_t len) { + if (len <= kMaxBytesLength) { + return GetHexStr(bytes, len); + } + return ComputeMuid(bytes, len); +} + +std::string LiteralStrName::ComputeMuid(const uint8_t *bytes, uint32_t len) { + DigestHash digestHash = GetDigestHash(*bytes, len); + return GetHexStr(digestHash.bytes, kDigestHashLength); +} \ No newline at end of file diff --git a/src/mrt/maplert/src/marray.cpp b/src/mrt/maplert/src/marray.cpp new file mode 100644 index 0000000000..f47a08094e --- /dev/null +++ b/src/mrt/maplert/src/marray.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fast_alloc_inline.h" +#include "sizes.h" +#include "marray_inline.h" +namespace maplert { +void MArray::InitialObjectArray(const MObject *initialElement) const { + uint32_t length = GetLength(); + for (uint32_t i = 0; i < length; ++i) { + SetObjectElement(i, initialElement); + } +} + +MArray *MArray::NewPrimitiveArray(uint32_t length, const MClass &arrayClass, bool isJNI) { + uint32_t componentSize = arrayClass.GetComponentSize(); + MArray *array = MObject::Cast(MRT_TryNewArray(componentSize, static_cast(length), + arrayClass.AsUintptr())); + if (LIKELY(array != nullptr)) { + return array; + } + if (isJNI) { + array = MObject::Cast(MRT_NEW_PRIMITIVE_ARRAY_JNI(componentSize, length, &arrayClass, isJNI)); + } else { + array = MObject::Cast(MRT_NEW_PRIMITIVE_ARRAY(componentSize, length, &arrayClass)); + } + return array; +} + +// please supply the exponent (base 2) of the size of the elements (0, 1, 2, or 3) +template +MArray *MArray::NewPrimitiveArray(uint32_t length, const MClass &arrayClass, bool isJNI) { + MArray *array = MObject::Cast(MRT_TryNewArray(static_cast(length), + arrayClass.AsUintptr())); + if (LIKELY(array != nullptr)) { + return array; + } + return NewPrimitiveArray(length, arrayClass, isJNI); +} + +template MArray *MArray::NewPrimitiveArray(uint32_t length, const MClass&, bool); +template MArray *MArray::NewPrimitiveArray(uint32_t length, const MClass&, bool); +template MArray *MArray::NewPrimitiveArray(uint32_t length, const MClass&, bool); +template MArray *MArray::NewPrimitiveArray(uint32_t length, const MClass&, bool); + +MArray *MArray::NewPrimitiveArrayComponentClass(uint32_t length, const MClass &componentClass) { + MClass *arrayClass = WellKnown::GetCacheArrayClass(componentClass); + MArray *arrayObject = MArray::NewPrimitiveArray(length, *arrayClass); + return arrayObject; +} + +template +MArray *MArray::NewPrimitiveArrayComponentClass(uint32_t length, const MClass &componentClass) { + MClass *arrayClass = WellKnown::GetCacheArrayClass(componentClass); + MArray *arrayObject = 
MArray::NewPrimitiveArray(length, *arrayClass); + return arrayObject; +} + +template MArray *MArray::NewPrimitiveArrayComponentClass(uint32_t, const MClass&); +template MArray *MArray::NewPrimitiveArrayComponentClass(uint32_t, const MClass&); +template MArray *MArray::NewPrimitiveArrayComponentClass(uint32_t, const MClass&); +template MArray *MArray::NewPrimitiveArrayComponentClass(uint32_t, const MClass&); + +MArray *MArray::NewObjectArray(uint32_t length, const MClass &arrayClass) { + MArray *array = MObject::Cast(MRT_TryNewArray(static_cast(length), + arrayClass.AsUintptr())); + if (LIKELY(array != nullptr)) { + return array; + } + array = MObject::Cast(MRT_NEW_JOBJECT_ARRAY(length, &arrayClass)); + return array; +} + +MArray *MArray::NewObjectArrayComponentClass(uint32_t length, const MClass &componentClass) { + MClass *arrayJClass = maplert::WellKnown::GetCacheArrayClass(componentClass); + return MArray::NewObjectArray(length, *arrayJClass); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mclass.cpp b/src/mrt/maplert/src/mclass.cpp new file mode 100644 index 0000000000..c43129a4c5 --- /dev/null +++ b/src/mrt/maplert/src/mclass.cpp @@ -0,0 +1,441 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mclass.h" +#include "mclass_inline.h" +#include "fieldmeta_inline.h" +#include "mrt_cyclequeue.h" +#include "exception/mrt_exception.h" +#include +namespace maplert { +void MClass::GetPrettyClass(std::string &dstName) const { + std::string clsName; + GetBinaryName(clsName); + dstName = "java.lang.Class<"; + dstName += clsName; + dstName += ">"; +} + +void MClass::GetBinaryName(std::string &dstName) const { + char *className = GetName(); + if (IsProxy()) { + dstName = className; + } else { + ConvertDescriptorToBinaryName(className, dstName); + } +} + +void MClass::GetTypeName(std::string &dstName) const { + ConvertDescriptorToTypeName(GetName(), dstName); +} + +void MClass::ConvertDescriptorToBinaryName(const std::string &descriptor, std::string &binaryName) { + if (descriptor[0] != 'L' && descriptor[0] != '[') { + maple::Primitive::GetPrimitiveClassName(descriptor.c_str(), binaryName); + } else { + binaryName = descriptor; + std::replace(binaryName.begin(), binaryName.end(), '/', '.'); + size_t len = binaryName.length(); + // descriptor must > 2, like:LA; + if (binaryName[0] == 'L') { + binaryName = binaryName.substr(1, len - 2); // remove char 'L', ';' + } + } +} + +void MClass::ConvertDescriptorToTypeName(const std::string &descriptor, std::string &defineName) { + uint32_t dim = GetDimensionFromDescriptor(descriptor); + ConvertDescriptorToBinaryName(descriptor.substr(dim), defineName); + for (uint32_t i = 0; i < dim; ++i) { + defineName += "[]"; + } +} + +void MClass::GetDescriptor(std::string &dstName) const { + dstName = GetName(); +} + +bool MClass::IsInnerClass() const { + std::string annoStr = GetAnnotation(); + AnnoParser &parser = AnnoParser::ConstructParser(annoStr.c_str(), const_cast(this)); + int32_t loc = 
parser.Find(parser.GetInnerClassStr()); + delete &parser; + return loc != annoconstant::kNPos; +} + +std::string MClass::GetAnnotation() const { + char *annotation = GetRawAnnotation(); + return AnnotationUtil::GetAnnotationUtil(annotation); +} + +FieldMetaCompact *MClass::GetCompactFields() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + FieldMetaCompact *compactFields = classInfoRo->fields.GetCompactData(); + return compactFields; +} + +MethodMetaCompact *MClass::GetCompactMethods() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + MethodMetaCompact *methodsCompact = classInfoRo->methods.GetCompactData(); + return methodsCompact; +} + +bool MClass::IsCompactMetaMethods() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->methods.IsCompact(); +} + +bool MClass::IsCompactMetaFields() const { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + return classInfoRo->fields.IsCompact(); +} + +void MClass::GetDeclaredMethods(std::vector &methodsVector, bool publicOnly) const { + uint32_t numOfMethod = GetNumOfMethods(); + MethodMeta *methodS = GetMethodMetas(); + constexpr int kReserveTw = 20; + methodsVector.reserve(kReserveTw); + for (uint32_t i = 0; i < numOfMethod; ++i) { + MethodMeta *method = &methodS[i]; + if (method->IsConstructor()) { + continue; + } + if (!publicOnly || (method->IsPublic())) { + methodsVector.push_back(method); + } + } +} + +void MClass::GetDirectInterfaces(MClass *interfaceVector[], uint32_t size) const { + uint32_t numOfInterface = GetNumOfInterface(); + CHECK(size <= numOfInterface) << "buffer size is wrong." << maple::endl; + if (IsArrayClass()) { + interfaceVector[0] = WellKnown::arrayInterface[0]; + interfaceVector[1] = WellKnown::arrayInterface[1]; + } else { + MClass **supers = GetSuperClassArray(); + MClass **pInterfaces = IsInterface() ? 
supers : supers + 1; + for (uint32_t index = 0; index < numOfInterface; ++index) { + MClass *interface = ResolveSuperClass(pInterfaces + index); + interfaceVector[index] = interface; + } + } +} + +void MClass::GetSuperClassInterfaces(uint32_t numOfSuperClass, MClass **superArray, + std::vector &interfaceVector, uint32_t firstSuperClass) const { + std::vector tempVector; + for (uint32_t i = firstSuperClass; i < numOfSuperClass; ++i) { + MClass *super = ResolveSuperClass(superArray + i); + std::vector::iterator it = std::find(interfaceVector.begin(), interfaceVector.end(), super); + if (it == interfaceVector.end()) { + interfaceVector.push_back(super); + tempVector.push_back(super); + } + } + for (auto itf : tempVector) { + itf->GetInterfaces(interfaceVector); + } +} + +void MClass::GetInterfaces(std::vector &interfaceVector) const { + if (IsPrimitiveClass()) { + return; + } else if (IsArrayClass()) { + interfaceVector.push_back(WellKnown::arrayInterface[0]); + interfaceVector.push_back(WellKnown::arrayInterface[1]); + return; + } else { + uint32_t numOfSuperClass = GetNumOfSuperClasses(); + MClass **superArray = GetSuperClassArray(); + if (superArray == nullptr) { + return; + } else if (IsInterface()) { + GetSuperClassInterfaces(numOfSuperClass, superArray, interfaceVector, 0); + } else { // is class + GetSuperClassInterfaces(numOfSuperClass, superArray, interfaceVector, 1); + + if (numOfSuperClass > 0) { + MClass *info = GetSuperClass(); + if (info != nullptr) { + info->GetInterfaces(interfaceVector); + } + } + } + } +} + +MethodMeta *MClass::GetInterfaceMethod(const char *methodName, const char *signatureName) const { + MethodMeta *method = nullptr; + std::vector interfaceVector; + constexpr int kReserveTw = 20; + interfaceVector.reserve(kReserveTw); + GetInterfaces(interfaceVector); + MethodMeta *resultMethod = nullptr; + for (auto interface : interfaceVector) { + method = interface->GetDeclaredMethod(methodName, signatureName); + if (method != nullptr) { + if (resultMethod == nullptr) { + resultMethod = method; + } else if (resultMethod->GetDeclaringClass()->IsAssignableFrom(*method->GetDeclaringClass())) { + resultMethod = method; + } + } + } + return resultMethod; +} + +MethodMeta *MClass::GetMethod(const char *methodName, const char *signatureName) const { + MethodMeta *method = nullptr; + const MClass *superClass = this; + while (superClass != nullptr) { + method = superClass->GetDeclaredMethod(methodName, signatureName); + if (method != nullptr) { + return method; + } else { + superClass = superClass->GetSuperClass(); + } + } + return GetInterfaceMethod(methodName, signatureName); +} + +MethodMeta *MClass::GetMethodForVirtual(const MethodMeta &srcMethod) const { + MethodMeta *method = nullptr; + const MClass *superClass = this; + char *methodName = srcMethod.GetName(); + char *signatureName = srcMethod.GetSignature(); + while (superClass != nullptr) { + method = superClass->GetDeclaredMethod(methodName, signatureName); + if (method != nullptr && method->IsOverrideMethod(srcMethod)) { + return method; + } else { + superClass = superClass->GetSuperClass(); + } + } + return GetInterfaceMethod(methodName, signatureName); +} + +MethodMeta *MClass::GetDeclaredFinalizeMethod() const { + MethodMeta *methodS = GetMethodMetas(); + uint32_t numOfMethod = GetNumOfMethods(); + if (numOfMethod == 0) { + return nullptr; + } + MethodMeta *method = &methodS[numOfMethod - 1]; + return method->IsFinalizeMethod() ? 
method : nullptr; +} + +MethodMeta *MClass::GetFinalizeMethod() const { + MethodMeta *finalizeMethod = nullptr; + const MClass *superClass = this; + while (superClass != nullptr) { + finalizeMethod = superClass->GetDeclaredFinalizeMethod(); + if (finalizeMethod != nullptr) { + break; + } + superClass = superClass->GetSuperClass(); + } + return finalizeMethod; +} + +bool MClass::IsAssignableFromInterface(const MClass &cls) const { + if (cls.IsArrayClass()) { + // array class has 2 interface + return (this == WellKnown::arrayInterface[0]) ? true : (this == WellKnown::arrayInterface[1]); + } + CycleQueue interfaceQueue; + interfaceQueue.Push(&cls); + while (!interfaceQueue.Empty()) { + const MClass *tmpClass = interfaceQueue.Front(); + interfaceQueue.Pop(); + uint32_t numofsuperclass = tmpClass->GetNumOfSuperClasses(); + MClass **superArray = tmpClass->GetSuperClassArray(); + for (uint32_t i = 0; i < numofsuperclass; ++i) { + MClass *super = ResolveSuperClass(superArray + i); + if (super == this) { + return true; + } + interfaceQueue.Push(super); + } + } + return false; +} + +bool MClass::IsAssignableFromImpl(const MClass &cls) const { + if (&cls == this) { + return true; + } else if (WellKnown::GetMClassObject() == this) { + return !cls.IsPrimitiveClass(); + } else if (IsArrayClass()) { + return cls.IsArrayClass() && GetComponentClass()->IsAssignableFromImpl(*cls.GetComponentClass()); + } else if (IsInterface()) { + return IsAssignableFromInterface(cls); + } else if (!cls.IsInterface()) { + MClass **superClassArray = cls.GetSuperClassArrayPtr(); + MClass *super = cls.GetNumOfSuperClasses() == 0 ? nullptr : ResolveSuperClass(superClassArray); + while (super != nullptr) { + if (super == this) { + return true; + } + superClassArray = super->GetSuperClassArrayPtr(); + super = super->GetNumOfSuperClasses() == 0 ? 
nullptr : ResolveSuperClass(superClassArray); + } + } + return false; +} + +void MClass::SetName(const char *name) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->className.SetRef(name); +} + +void MClass::SetModifier(uint32_t newMod) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->mod = newMod; +} + +void MClass::SetGctib(uintptr_t newGctib) { + ClassMetadata *cls = GetClassMeta(); + cls->gctib.SetGctibRef(newGctib); +} + +void MClass::SetSuperClassArray(uintptr_t newValue) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->superclass.SetDataRef(newValue); +} + +void MClass::SetObjectSize(uint32_t newSize) { + ClassMetadata *cls = GetClassMeta(); + cls->sizeInfo.objSize = newSize; +} + +void MClass::SetItable(uintptr_t itab) { + ClassMetadata *cls = GetClassMeta(); + cls->iTable.SetDataRef(itab); +} + +void MClass::SetVtable(uintptr_t vtab) { + ClassMetadata *cls = GetClassMeta(); + cls->vTable.SetDataRef(vtab); +} + +void MClass::SetNumOfFields(uint32_t newValue) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->numOfFields = newValue; +} + +void MClass::SetNumOfMethods(uint32_t newValue) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->numOfMethods = newValue; +} + +void MClass::SetMethods(const MethodMeta &newMethods) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->methods.SetDataRef(&newMethods); +} + +void MClass::SetFields(const FieldMeta &newFields) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->fields.SetDataRef(&newFields); +} + +void MClass::SetInitStateRawValue(uintptr_t newValue) { + ClassMetadata *cls = GetClassMeta(); + cls->SetInitStateRawValue(newValue); +} + +MClass *MClass::NewMClass(uintptr_t newClsMem) { + if (newClsMem == 0) { + newClsMem = reinterpret_cast( + MRT_AllocFromMeta(sizeof(ClassMetadata) + sizeof(ClassMetadataRO), kClassMetaData)); + } + MClass *newMClass = MObject::Cast(newClsMem); + uintptr_t newClsRo = newClsMem + sizeof(ClassMetadata); + newMClass->SetClassMetaRoData(newClsRo); + MClass *mClassClass = WellKnown::GetMClassClass(); + // store java.lang.Class + newMClass->StoreObjectNoRc(0, mClassClass); + return newMClass; +} + +MClass *MClass::GetArrayClass(const MClass &componentClass) { + return maplert::WellKnown::GetCacheArrayClass(componentClass); +} + +MClass *MClass::GetClassFromDescriptorUtil(const MObject *context, const char *descriptor, bool throwException) { + CHECK(descriptor != nullptr); + // get context class + const MClass *contextClass = context != nullptr ? + (context->GetClass() == WellKnown::GetMClassClass() ? 
+ static_cast(context) : context->GetClass()) : nullptr; + MClass *cls = MClass::JniCast(MRT_GetClassByContextClass(*contextClass, descriptor)); + if (UNLIKELY(throwException && (cls == nullptr))) { + std::string msg = "Failed resolution of: "; + msg += descriptor; + MRT_ThrowNewException("java/lang/NoClassDefFoundError", msg.c_str()); + return nullptr; + } + return cls; +} + +MObject *MClass::GetSignatureAnnotation() const { + std::string annotationSet = GetAnnotation(); + if (annotationSet.empty()) { + return nullptr; + } + VLOG(reflect) << "Enter GetSignatureAnnotation, annoStr: " << annotationSet << maple::endl; + MObject *ret = AnnoParser::GetSignatureValue(annotationSet, const_cast(this)); + return ret; +} + +uint32_t MClass::GetArrayModifiers() const { + const MClass *componentClass = this; + while (componentClass->IsArrayClass()) { + componentClass = componentClass->GetComponentClass(); + } + uint32_t componentModifiers = componentClass->GetModifier() & 0xFFFF; + if (modifier::IsInterface(componentModifiers)) { + componentModifiers &= ~(modifier::GetInterfaceModifier() | modifier::GetStaticModifier()); + } + return modifier::GetAbstractModifier() | modifier::GetFinalModifier() | componentModifiers; +} + +MethodMeta *MClass::GetClinitMethodMeta() const { + uint32_t numOfMethods = GetNumOfMethods(); + MethodMeta *methodS = GetMethodMetas(); + for (uint32_t i = 0; i < numOfMethods; ++i) { + MethodMeta *method = methodS + i; + bool isConstructor = method->IsConstructor(); + bool isStatic = method->IsStatic(); + if (isConstructor && isStatic) { + return method; + } + } + return nullptr; +} + +void MClass::ResolveVtabItab() { + ClassMetadata *cls = GetClassMeta(); + uint8_t *itab = cls->iTable.GetDataRef(); + cls->iTable.SetDataRef(itab); + + uint8_t *vtab = cls->vTable.GetDataRef(); + cls->vTable.SetDataRef(vtab); +} + +void MClass::SetComponentClass(const MClass &klass) { + ClassMetadataRO *classInfoRo = GetClassMetaRo(); + classInfoRo->componentClass.SetDataRef(&klass); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/methodmeta.cpp b/src/mrt/maplert/src/methodmeta.cpp new file mode 100644 index 0000000000..2cdf661fcf --- /dev/null +++ b/src/mrt/maplert/src/methodmeta.cpp @@ -0,0 +1,872 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "methodmeta_inline.h" +#include "exception/mrt_exception.h" +namespace maplert { +using namespace annoconstant; +std::mutex MethodMetaCompact::resolveMutex; +bool MethodMetaBase::IsMethodMetaCompact() const { + return (compactMetaFlag & modifier::kMethodMetaCompact) == modifier::kMethodMetaCompact; +} + +char *MethodMetaBase::GetName() const { + __MRT_Profile_MethodMeta(this); + bool isMethodMetaCompact = IsMethodMetaCompact(); + char *name = nullptr; + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + name = methodMetaCompact->GetName(); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + name = methodMeta->GetName(); + } + return name; +} + +void MethodMetaBase::GetSignature(std::string &signature) const { + bool isMethodMetaCompact = IsMethodMetaCompact(); + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + methodMetaCompact->GetSignature(signature); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + signature = methodMeta->GetSignature(); + } +} + +MClass *MethodMetaBase::GetDeclaringClass() const { + bool isMethodMetaCompact = IsMethodMetaCompact(); + MClass *dclClass = nullptr; + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + dclClass = methodMetaCompact->GetDeclaringClass(); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + dclClass = methodMeta->GetDeclaringClass(); + } + return dclClass; +} + +uint16_t MethodMetaBase::GetFlag() const { + bool isMethodMetaCompact = IsMethodMetaCompact(); + uint16_t flag = 0; + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + flag = methodMetaCompact->GetFlag(); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + flag = methodMeta->GetFlag(); + } + return flag; +} + +uint32_t MethodMetaBase::GetMod() const { + bool isMethodMetaCompact = IsMethodMetaCompact(); + uint32_t mod = 0; + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + mod = methodMetaCompact->GetMod(); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + mod = methodMeta->GetMod(); + } + return mod; +} + +int16_t MethodMetaBase::GetVtabIndex() const { + bool isMethodMetaCompact = IsMethodMetaCompact(); + int16_t vtabIndex = 0; + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + vtabIndex = methodMetaCompact->GetVtabIndex(); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + vtabIndex = methodMeta->GetVtabIndex(); + } + return vtabIndex; +} + +uintptr_t MethodMetaBase::GetFuncAddress() const { + bool isMethodMetaCompact = IsMethodMetaCompact(); + uintptr_t funcAddr = 0; + if (isMethodMetaCompact) { + const MethodMetaCompact *methodMetaCompact = reinterpret_cast(this); + funcAddr = methodMetaCompact->GetFuncAddress(); + } else { + const MethodMeta *methodMeta = reinterpret_cast(this); + funcAddr = methodMeta->GetFuncAddress(); + } + return funcAddr; +} + +int16_t MethodMetaCompact::GetVtabIndex() const { + return vtabIndex; +} + +uint16_t MethodMetaCompact::GetFlag() const { + return compactMetaFlag; +} + +int32_t MethodMetaCompact::GetDefTabIndex() const { + MethodAddress *pMethodAddress = GetpMethodAddress(); + if (pMethodAddress == nullptr) { + return -1; + } + return pMethodAddress->GetDefTabIndex(); +} + +MethodAddress *MethodMetaCompact::GetpMethodAddress() 
const {
+  if (IsAbstract()) {
+    return nullptr;
+  }
+  uintptr_t value = reinterpret_cast(&leb128Start)->GetDataRef();
+  // the compiler adds 2 as a flag; subtract 2 here to remove it
+  return reinterpret_cast<MethodAddress*>(value - 2);
+}
+
+uintptr_t MethodMetaCompact::GetFuncAddress() const {
+  if (IsAbstract()) {
+    return 0;
+  }
+  MethodAddress *pMethodAddress = GetpMethodAddress();
+  if (pMethodAddress == nullptr) {
+    return 0;
+  }
+  return pMethodAddress->GetAddr();
+}
+
+void MethodMetaCompact::SetFuncAddress(uintptr_t address) {
+  MethodAddress *pMethodAddress = GetpMethodAddress();
+  if (pMethodAddress != nullptr) {
+    pMethodAddress->SetAddr(address);
+  }
+}
+
+bool MethodMetaCompact::IsAbstract() const {
+  uint16_t flag = GetFlag();
+  return ((flag & modifier::kMethodAbstract) == modifier::kMethodAbstract);
+}
+
+const uint8_t *MethodMetaCompact::GetpCompact() const {
+  return &leb128Start;
+}
+
+char *MethodMetaCompact::GetName() const {
+  const uint8_t *pLeb = GetpCompact();
+  pLeb = IsAbstract() ? pLeb : (pLeb + sizeof(int32_t));
+  // ignore declaring class index
+  (void)namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // modifier
+  (void)namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // methodname
+  uint32_t methodNameIndex = namemangler::GetUnsignedLeb128Decode(&pLeb);
+  char *method = LinkerAPI::Instance().GetCString(reinterpret_cast<jclass>(GetDeclaringClass()), methodNameIndex);
+  __MRT_Profile_CString(method);
+  return method;
+}
+
+void MethodMetaCompact::GetSignature(std::string &signature) const {
+  const uint8_t *pLeb = GetpCompact();
+  pLeb = IsAbstract() ? pLeb : (pLeb + sizeof(int32_t));
+  // ignore declaring class index
+  (void)namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // modifier
+  uint32_t modifier = namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // methodname
+  (void)namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // args size
+  uint32_t argsSize = namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // methodsignature
+  signature = "(";
+  // +1 for the return type
+  uint32_t typeSize = modifier::IsStatic(modifier) ? argsSize + 1 : argsSize;
+  MClass *cls = GetDeclaringClass();
+  for (uint32_t i = 0; i < typeSize; ++i) {
+    uint32_t typeIndex = namemangler::GetUnsignedLeb128Decode(&pLeb);
+    char *typeCName = LinkerAPI::Instance().GetCString(reinterpret_cast<jclass>(cls), typeIndex);
+    if (UNLIKELY(typeCName == nullptr)) {
+      LOG(FATAL) << "MethodMetaCompact::GetSignature: typeName must not be nullptr!" << maple::endl;
+    }
+    std::string typeName = typeCName;
+    __MRT_Profile_CString(typeName.c_str());
+    if (i == (typeSize - 1)) {
+      signature += ")";
+      signature += typeName;
+    } else {
+      signature += typeName;
+    }
+  }
+}
+
+uint32_t MethodMetaCompact::GetMod() const {
+  const uint8_t *pLeb = GetpCompact();
+  pLeb = IsAbstract() ? pLeb : (pLeb + sizeof(int32_t));
+  // ignore declaring class index
+  (void)namemangler::GetUnsignedLeb128Decode(&pLeb);
+  // modifier
+  uint32_t modifier = namemangler::GetUnsignedLeb128Decode(&pLeb);
+  return modifier;
+}
+
+MClass *MethodMetaCompact::GetDeclaringClass() const {
+  const uint8_t *pLeb = GetpCompact();
+  pLeb = IsAbstract() ?
pLeb : (pLeb + sizeof(int32_t)); + const uint8_t *prePleb = pLeb; + uint32_t declClassOffset = namemangler::GetUnsignedLeb128Decode(&pLeb); + const DataRefOffset32 *declClassAddr = reinterpret_cast(prePleb - declClassOffset); + MClass *declClass = declClassAddr->GetDataRef(); + return declClass; +} + +static MethodMetaCompact *Leb128DecodeOneByOne(MethodMetaCompact *leb) { + // start decode + const uint8_t *pLeb = leb->GetpCompact(); + pLeb = leb->IsAbstract() ? pLeb : (pLeb + sizeof(int32_t)); + // ingore declaring Class index + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + // modifier + uint32_t modifier = namemangler::GetUnsignedLeb128Decode(&pLeb); + // methodname + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + // args size + uint32_t argsSize = namemangler::GetUnsignedLeb128Decode(&pLeb); + // methodsignature + uint32_t typeSize = modifier::IsStatic(modifier) ? (argsSize + 1) : argsSize; + for (uint32_t j = 0; j < typeSize; ++j) { + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + } + // annotation + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + leb = reinterpret_cast(const_cast(pLeb)); + return leb; +} + +uintptr_t MethodMetaCompact::GetCompactFuncAddr(const MClass &cls, uint32_t index) { + uint32_t numOfMethod = cls.GetNumOfMethods(); + MethodMetaCompact *leb = cls.GetCompactMethods(); + for (uint32_t i = 0; i < numOfMethod; ++i) { + if (i == index) { + return leb->GetFuncAddress(); + } + leb = Leb128DecodeOneByOne(leb); + } + LOG(FATAL) << "MethodMetaCompact::GetCompactFuncAddr class: " << cls.GetName() << + ", index:" << index << maple::endl; + return 0; +} + +MethodMetaCompact *MethodMetaCompact::GetMethodMetaCompact(const MClass &cls, uint32_t index) { + uint32_t numOfMethod = cls.GetNumOfMethods(); + MethodMetaCompact *leb = cls.GetCompactMethods(); + for (uint32_t i = 0; i < numOfMethod; ++i) { + if (i == index) { + return leb; + } + leb = Leb128DecodeOneByOne(leb); + } + LOG(FATAL) << "MethodMetaCompact::GetMethodMetaCompact class: " << cls.GetName() << + ", index:" << index << maple::endl; + return 0; +} + +uint8_t *MethodMetaCompact::DecodeCompactMethodMeta(const MClass &cls, uintptr_t &funcAddress, uint32_t &modifier, + std::string &methodName, std::string &signatureName, + std::string &annotationValue, int32_t &methodInVtabIndex, + uint16_t &flags, uint16_t &argsSize) const { + methodInVtabIndex = GetVtabIndex(); + flags = GetFlag(); + funcAddress = reinterpret_cast(GetFuncAddress()); + // start decode + const uint8_t *pLeb = GetpCompact(); + pLeb = IsAbstract() ? pLeb : (pLeb + sizeof(int32_t)); + // ingore declaring Class index + (void)namemangler::GetUnsignedLeb128Decode(&pLeb); + // modifier + modifier = namemangler::GetUnsignedLeb128Decode(&pLeb); + // methodname + uint32_t methodNameIndex = namemangler::GetUnsignedLeb128Decode(&pLeb); + methodName = LinkerAPI::Instance().GetCString(cls.AsJclass(), methodNameIndex); + // args size + argsSize = static_cast(namemangler::GetUnsignedLeb128Decode(&pLeb)); + // methodsignature + std::string signature("("); + // retrun type +1 + uint32_t typeSize = modifier::IsStatic(modifier) ? 
(argsSize + 1) : argsSize; + uint32_t parameterSize = typeSize - 1; + for (uint32_t j = 0; j < typeSize; ++j) { + uint32_t typeIndex = namemangler::GetUnsignedLeb128Decode(&pLeb); + std::string typeName = LinkerAPI::Instance().GetCString(cls.AsJclass(), typeIndex); + __MRT_Profile_CString(typeName.c_str()); + if (j == parameterSize) { + signature += ")"; + signature += typeName; + } else { + signature += typeName; + } + } + signatureName = signature; + // annotation + uint32_t annotationIndex = namemangler::GetUnsignedLeb128Decode(&pLeb); + annotationValue = LinkerAPI::Instance().GetCString(cls.AsJclass(), annotationIndex); + return const_cast(pLeb); +} + +MethodMeta *MethodMetaCompact::DecodeCompactMethodMetas(MClass &cls) { + uint32_t numOfMethod = cls.GetNumOfMethods(); + if (numOfMethod == 0) { + return nullptr; + } + { + std::lock_guard lock(resolveMutex); + MethodMeta *methods = cls.GetRawMethodMetas(); + if (!cls.IsCompactMetaMethods()) { + return methods; + } + // Compact, need resolve + MethodMetaCompact *leb = cls.GetCompactMethods(); + + CHECK(numOfMethod < (std::numeric_limits::max() / sizeof(MethodMeta))) << + "method count too large. numOfMethod " << numOfMethod << maple::endl; + MethodMeta *methodMetas = reinterpret_cast( + MRT_AllocFromMeta(sizeof(MethodMeta) * numOfMethod, kMethodMetaData)); + MethodMeta *methodMeta = methodMetas; + for (uint32_t i = 0; i < numOfMethod; ++i) { + uintptr_t funcAddress = 0; + uint32_t modifier = 0; + std::string methodName; + std::string signatureName; + std::string annotationValue; + int32_t methodInVtabIndex = 0; + uint16_t flags = 0; + uint16_t argsSize = 0; + uint8_t *pLeb = leb->DecodeCompactMethodMeta(cls, funcAddress, modifier, methodName, signatureName, + annotationValue, methodInVtabIndex, flags, argsSize); + MethodSignature *mthSig = methodMeta->CreateMethodSignatureByName(signatureName.c_str()); + methodMeta->FillMethodMeta(true, methodName.c_str(), mthSig, annotationValue.c_str(), + methodInVtabIndex, cls, funcAddress, modifier, flags, argsSize); + methodMeta++; + leb = reinterpret_cast(pLeb); + } + cls.SetMethods(*methodMetas); + return methodMetas; + } +} + +MethodSignature *MethodMeta::CreateMethodSignatureByName(const char *signature) { + size_t sigStrLen = strlen(signature) + 1; + char *signatureBuffer = reinterpret_cast(MRT_AllocFromMeta(sigStrLen, kNativeStringData)); + errno_t tmpResult = memcpy_s(signatureBuffer, sigStrLen, signature, sigStrLen); + if (UNLIKELY(tmpResult != EOK)) { + LOG(FATAL) << "MethodMeta::CreateMethodSignatureByName : memcpy_s() failed" << maple::endl; + } + signature = signatureBuffer; + MethodSignature *pMethodSignature = reinterpret_cast(MRT_AllocFromMeta(sizeof(MethodSignature), + kMethodMetaData)); + pMethodSignature->SetSignature(signature); + return pMethodSignature; +} + +void MethodMeta::FillMethodMeta(bool copySignature, const char *name, + const MethodSignature *methodSignature, const char *annotation, int32_t inVtabIndex, + const MClass &methodDclClass, const uintptr_t funcAddr, + uint32_t modifier, uint16_t methodFlag, uint16_t argSize) { + DCHECK(name != nullptr) << "MethodMeta::FillMethodMeta: name is nullptr!" << maple::endl; + DCHECK(methodSignature != nullptr) << "MethodMeta::FillMethodMeta: methodSignature is nullptr!" << maple::endl; + DCHECK(annotation != nullptr) << "MethodMeta::FillMethodMeta: annotation is nullptr!" 
<< maple::endl; +#ifndef USE_32BIT_REF + size_t methodNameStrLen = strlen(name) + 1; + size_t annotationStrLen = strlen(annotation) + 1; + char *strBuffer = reinterpret_cast(MRT_AllocFromMeta(annotationStrLen + methodNameStrLen, kNativeStringData)); + char *methodNameBuffer = strBuffer; + char *annoBuffer = strBuffer + methodNameStrLen; + errno_t tmpResult1 = memcpy_s(methodNameBuffer, methodNameStrLen, name, methodNameStrLen); + errno_t tmpResult2 = memcpy_s(annoBuffer, annotationStrLen, annotation, annotationStrLen); + if (UNLIKELY(tmpResult1 != EOK || tmpResult2 != EOK)) { + LOG(FATAL) << "MethodMeta::FillMethodMeta : memcpy_s() failed" << maple::endl; + } + name = methodNameBuffer; + annotation = annoBuffer; + methodFlag &= ~modifier::kMethodParametarType; + if (!copySignature) { + char *signature = methodSignature->GetSignature(); + methodSignature = CreateMethodSignatureByName(signature); + } +#endif + (void)copySignature; + SetMod(modifier); + SetName(name); + SetFlag(methodFlag); + SetMethodSignature(methodSignature); + SetAnnotation(annotation); + SetAddress(funcAddr); + SetDeclaringClass(methodDclClass); + SetVtabIndex(inVtabIndex); + SetArgsSize(argSize); +} + +MClass *MethodMeta::GetReturnType() const { + __MRT_Profile_MethodParameterTypes(*this); + bool isEnableParametarType = IsEnableParametarType(); + if (isEnableParametarType) { + MetaRef *pTypes = GetMethodSignature()->GetTypes(); + uint32_t returnTypeIndex = GetParameterCount(); + if (pTypes[returnTypeIndex] != 0) { + return reinterpret_cast(pTypes[returnTypeIndex]); + } + } + + char *rtTypeName = GetReturnTypeName(); + MClass *declClass = GetDeclaringClass(); + MClass *returnType = MClass::GetClassFromDescriptor(declClass, rtTypeName); + if (isEnableParametarType) { + MetaRef *pTypes = GetMethodSignature()->GetTypes(); + uint32_t returnTypeIndex = GetParameterCount(); + pTypes[returnTypeIndex] = static_cast(returnType->AsUintptr()); + } + return returnType; +} + +static void GetDescriptorBySignature(char *descriptor, char *&methodSig, char &c) { + char *tmpDescriptor = descriptor; + if (c == '[') { + while (c == '[') { + *tmpDescriptor++ = c; + c = *methodSig++; + } + } + if (c == 'L') { + while (c != ';') { + *tmpDescriptor++ = c; + c = *methodSig++; + } + } + *tmpDescriptor++ = c; + c = *methodSig++; + *tmpDescriptor = '\0'; +} + +bool MethodMeta::GetParameterTypesUtil(MClass *parameterTypes[], uint32_t size) const { + uint32_t parameterCount = GetParameterCount(); + CHECK(parameterCount == size) << "size is wrong." 
<< maple::endl; + if (parameterCount == 0) { + return true; + } + uint32_t index = 0; + char *methodSig = GetSignature(); + __MRT_Profile_MethodParameterTypes(*this); + MClass *declClass = GetDeclaringClass(); + size_t len = strlen(methodSig) + 1; + // skip first '(' + methodSig++; + char descriptor[len]; + char c = *methodSig++; + while (c != ')') { + GetDescriptorBySignature(descriptor, methodSig, c); + MClass *ptype = MClass::GetClassFromDescriptor(declClass, descriptor); + if (UNLIKELY(ptype == nullptr)) { + return false; + } + parameterTypes[index++] = ptype; + } + return true; +} + +void MethodMeta::GetParameterTypes(std::vector ¶meterTypes) const { + uint32_t parameterCount = GetParameterCount(); + MClass *types[parameterCount]; + bool isSuccess = GetParameterTypes(types, parameterCount); + if (!isSuccess) { + return; + } + for (uint32_t i = 0; i < parameterCount; ++i) { + parameterTypes.push_back(types[i]); + } +} + +MArray *MethodMeta::GetParameterTypes() const { + std::vector parameterTypes; + GetParameterTypes(parameterTypes); + if (MRT_HasPendingException()) { + return nullptr; + } + uint32_t size = static_cast(parameterTypes.size()); + MArray *parameterTypesArray = MArray::NewObjectArray(size, *WellKnown::GetMClassAClass()); + uint32_t currentIndex = 0; + for (auto parameterType : parameterTypes) { + parameterTypesArray->SetObjectElementOffHeap(currentIndex++, parameterType); + } + return parameterTypesArray; +} + +void MethodMeta::GetParameterTypesDescriptor(std::vector &descriptors) const { + char *methodSig = GetSignature(); + size_t len = strlen(methodSig) + 1; + // skip first '(' + methodSig++; + char descriptor[len]; + char c = *methodSig++; + while (c != ')') { + GetDescriptorBySignature(descriptor, methodSig, c); + descriptors.push_back(descriptor); + } +} + +MObject *MethodMeta::GetSignatureAnnotation() const { + std::string methodAnnoStr = GetAnnotation(); + if (methodAnnoStr.empty()) { + return nullptr; + } + MObject *ret = AnnoParser::GetSignatureValue(methodAnnoStr, GetDeclaringClass()); + return ret; +} + +void MethodMeta::GetExceptionTypes(std::vector &types) const { + std::string annoStr = GetAnnotation(); + if (annoStr.empty()) { + return; + } + MClass *cl = GetDeclaringClass(); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), cl); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetThrowsClassStr()); + if (loc != kNPos) { + std::string exception = parser->ParseStr(kDefParseStrType); + int64_t annoType = parser->ParseNum(kValueInt); + if (exception == "value" && annoType == kValueArray) { + size_t start = annoStr.find(parser->GetAnnoArrayStartDelimiter(), parser->GetIdx()); + size_t end = annoStr.find(parser->GetAnnoArrayEndDelimiter(), parser->GetIdx()); + if ((start != std::string::npos) && (end != std::string::npos)) { + parser->SetIdx(static_cast(start - kLabelSize)); + int64_t exceptionNum = parser->ParseNum(kValueInt); + parser->SkipNameAndType(); + std::string exceptionStr; + for (int64_t i = 0; i < exceptionNum; ++i) { + exceptionStr.clear(); + exceptionStr = parser->ParseStr(kDefParseStrType); + MClass *exceptionClass = MClass::GetClassFromDescriptor(cl, exceptionStr.c_str(), false); + if (exceptionClass != nullptr) { + types.push_back(exceptionClass); + } else { + MRT_ThrowNewException("java/lang/TypeNotPresentException", nullptr); + break; + } + } + } + } + } + return; +} + +MArray *MethodMeta::GetExceptionTypes() const { + std::vector exceptionTypes; + GetExceptionTypes(exceptionTypes); + if 
(MRT_HasPendingException()) { + return nullptr; + } + uint32_t size = static_cast(exceptionTypes.size()); + MArray *excetpionArray = MArray::NewObjectArray(size, *WellKnown::GetMClassAClass()); + uint32_t index = 0; + for (auto type : exceptionTypes) { + excetpionArray->SetObjectElementOffHeap(index++, type); + } + return excetpionArray; +} + +void MethodMeta::GetPrettyName(bool needSignature, std::string &dstName) const { + char *name = GetName(); + MClass *declClass = GetDeclaringClass(); + std::string declaringClassName; + declClass->GetTypeName(declaringClassName); + if (needSignature) { + std::string retTypeStr; + MClass::ConvertDescriptorToTypeName(GetReturnTypeName(), retTypeStr); + dstName = retTypeStr + " "; + } + dstName += declaringClassName + "." + name; + if (needSignature) { + dstName += "("; + std::vector parameterTypes; + GetParameterTypesDescriptor(parameterTypes); + uint64_t numOfParam = static_cast(parameterTypes.size()); + auto it = parameterTypes.begin(); + uint64_t i = 0; + std::string paramStr; + for (; it != parameterTypes.end(); ++it, ++i) { + paramStr.clear(); + MClass::ConvertDescriptorToTypeName((*it).c_str(), paramStr); + dstName += paramStr; + if (i != (numOfParam - 1)) { + dstName += ", "; + } + } + dstName += ")"; + } +} + +// ([ZJSIDF[Ljava/lang/String;Ljava/lang/String;)V -> [JSIDF[L +void MethodMeta::GetShortySignatureUtil(const char srcSignature[], char shorty[], const uint32_t size) { + DCHECK(shorty != nullptr); + DCHECK(srcSignature != nullptr); + const char *name = srcSignature; + char *ret = shorty; + name++; // skip '(' + while ((*name != '\0') && (*name != ')')) { + if (*name == '[') { + while (*name == '[') { + name++; + } + if (*name == 'L') { + while (*name != ';') { + name++; + } + } + name++; + *ret++ = '['; + continue; + } + + if (*name == 'L') { + while (*name != ';') { + name++; + } + name++; + *ret++ = 'L'; + continue; + } + *ret++ = *name++; + } + CHECK((ret - shorty) <= static_cast(size)); +} + +void MethodMeta::SetMod(const uint32_t modifier) { + mod = modifier; +} + +void MethodMeta::SetName(const char *name) { + methodName.SetDataRef(name); +} + +void MethodMeta::SetSignature(const char *signature) { + GetMethodSignature()->SetSignature(signature); +} + +void MethodMeta::SetMethodSignature(const MethodSignature *methodSignature) { + if (IsEnableParametarType()) { + pMethodSignature.SetDataRef(methodSignature); + } else { + char *signature = methodSignature->GetSignature(); + SetSignature(signature); + } +} + +void MethodMeta::SetAddress(const uintptr_t address) { + MethodAddress *pMethodAddress = GetpMethodAddress(); + pMethodAddress->SetAddr(address); +} + +void MethodMeta::SetAnnotation(const char *annotation) { + annotationValue.SetDataRef(annotation); +} + +void MethodMeta::SetDeclaringClass(const MClass &dlClass) { + declaringClass.SetDataRef(&dlClass); +} + +void MethodMeta::SetFlag(const uint16_t methodFlag) { + flag = methodFlag; +} + +void MethodMeta::SetArgsSize(const uint16_t methodArgSize) { + argumentSize = methodArgSize; +} + +void MethodMeta::SetVtabIndex(const int32_t methodVtabIndex) { + vtabIndex = methodVtabIndex; +} + +void MethodMeta::GetJavaMethodFullName(std::string &name) const { + MClass *cls = GetDeclaringClass(); + if (cls != nullptr) { + cls->GetBinaryName(name); + } else { + name.append("figo.internal.class"); + } + char *mathodName = GetName(); + char *sigName = GetSignature(); + name.append(".").append(mathodName).append(sigName); +} + +void MethodMeta::GetJavaClassName(std::string &name) const { + 
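+  // Same convention as GetJavaMethodFullName above: use the declaring class's
+  // binary name, falling back to a fixed placeholder when it is unavailable.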
MClass *cls = GetDeclaringClass(); + if (cls != nullptr) { + cls->GetBinaryName(name); + } else { + name.append("fig.internal.class"); + } +} + +void MethodMeta::GetJavaMethodName(std::string &name) const { + name = GetName(); +} + +bool MethodMeta::IsMethodAccessible(const MClass &curClass) const { + if (IsPublic() || IsProtected()) { + return true; + } + if (IsPrivate()) { + return false; + } + // package + return reflection::IsInSamePackage(curClass, *GetDeclaringClass()); +} + +// overrideMethod can be overrided by this method. +bool MethodMeta::IsOverrideMethod(const MethodMeta &overrideMethod) const { + if (&overrideMethod == this) { + return true; + } + if (IsPrivate()) { + return false; + } + DCHECK(!overrideMethod.IsPrivate()); + if (overrideMethod.IsPublic() || overrideMethod.IsProtected()) { + return true; + } + + // package + return reflection::IsInSamePackage(*overrideMethod.GetDeclaringClass(), *GetDeclaringClass()); +} + +void MethodMeta::BuildJValuesArgsFromVaList(jvalue argsJvalue[], va_list args) const { + uint32_t parameterSize = GetParameterCount(); + char retTypeNames[parameterSize]; + GetShortySignature(retTypeNames, parameterSize); + char *result = retTypeNames; + DCHECK(argsJvalue != nullptr); + for (uint32_t i = 0; i < parameterSize; ++i) { + argsJvalue[i].j = 0; + switch (result[i]) { + case 'Z': + argsJvalue[i].z = static_cast(va_arg(args, int32_t)); + break; + case 'B': + argsJvalue[i].b = static_cast(va_arg(args, int32_t)); + break; + case 'C': + argsJvalue[i].c = static_cast(va_arg(args, int32_t)); + break; + case 'S': + argsJvalue[i].s = static_cast(va_arg(args, int32_t)); + break; + case 'I': + argsJvalue[i].i = va_arg(args, int32_t); + break; + case 'J': + argsJvalue[i].j = static_cast(va_arg(args, int64_t)); + break; + case 'F': { + jvalue tmp; + tmp.d = va_arg(args, double); + argsJvalue[i].f = tmp.f; + break; + } + case 'D': + argsJvalue[i].d = va_arg(args, double); + break; + default: + argsJvalue[i].l = va_arg(args, jobject); + } + } +} + +void MethodMeta::BuildMArrayArgsFromJValues(MArray &targetValue, jvalue args[]) const { + uint32_t parameterSize = GetParameterCount(); + char retTypeNames[parameterSize]; + GetShortySignature(retTypeNames, parameterSize); + char *result = retTypeNames; + MObject *obj = nullptr; + for (uint32_t i = 0; i < parameterSize; ++i) { + switch (result[i]) { + case 'B': + obj = primitiveutil::BoxPrimitiveJbyte(args[i].b); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'C': + obj = primitiveutil::BoxPrimitiveJchar(args[i].c); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'D': + obj = primitiveutil::BoxPrimitiveJdouble(args[i].d); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'F': + obj = primitiveutil::BoxPrimitiveJfloat(args[i].f); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'I': + obj = primitiveutil::BoxPrimitiveJint(args[i].i); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'J': + obj = primitiveutil::BoxPrimitiveJlong(args[i].j); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'S': + obj = primitiveutil::BoxPrimitiveJshort(args[i].s); + targetValue.SetObjectElementNoRc(i, obj); + break; + case 'Z': + obj = primitiveutil::BoxPrimitiveJboolean(args[i].z); + targetValue.SetObjectElementNoRc(i, obj); + break; + default: // default ref + obj = reinterpret_cast(args[i].l); + targetValue.SetObjectElement(i, obj); + } + } +} + +void MethodMeta::BuildJValuesArgsFromStackMemeryPrefixSigNature(DecodeStackArgs &args, std::string prefix) { + uint32_t 
parameterSize = GetParameterCount(); + size_t oldLen = prefix.length(); + prefix.resize(prefix.length() + parameterSize); + char *prefixCStr = const_cast(prefix.c_str()); + GetShortySignature(prefixCStr + oldLen, parameterSize); + BuildJValuesArgsFromStackMemery(args, prefix); +} + +void MethodMeta::BuildJValuesArgsFromStackMemery(DecodeStackArgs &args, std::string &shorty) { + size_t len = shorty.length(); + for (size_t i = 0; i < len; ++i) { + switch (shorty[i]) { + case 'F': + args.DecodeFloat(); + break; + case 'D': + args.DecodeDouble(); + break; + case 'J': + args.DecodeInt64(); + break; + case 'L': + case '[': + args.DecodeReference(); + break; + default: + args.DecodeInt32(); + } + } +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mfield.cpp b/src/mrt/maplert/src/mfield.cpp new file mode 100644 index 0000000000..ad54db708b --- /dev/null +++ b/src/mrt/maplert/src/mfield.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mfield.h" +#include "fieldmeta_inline.h" +#include "chelper.h" +#include "mrt_well_known.h" +#include "mclass_inline.h" +#ifndef UNIFIED_MACROS_DEF +#define UNIFIED_MACROS_DEF +#include "unified.macros.def" +#endif + +namespace maplert { +#ifndef __OPENJDK__ +uint32_t MField::accessFlagsOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, accessFlags); +uint32_t MField::declaringClassOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, declaringClass); +uint32_t MField::fieldMetaIndexOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, dexFieldIndex); +uint32_t MField::offsetOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, offset); +#else +uint32_t MField::declaringClassOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, clazz); +uint32_t MField::accessFlagsOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, modifiers); +uint32_t MField::fieldMetaIndexOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, slot); +uint32_t MField::nameOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, name); +#endif +uint32_t MField::typeOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FField_3B, type); +uint32_t MField::overrideOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FAccessibleObject_3B, override); + +MField *MField::NewMFieldObject(const FieldMeta &fieldMeta) { + MClass *declaringClass = fieldMeta.GetDeclaringclass(); + MClass *type = fieldMeta.GetType(); + if (UNLIKELY(type == nullptr)) { + return nullptr; + } + MClass *fieldClass = WellKnown::GetMClassField(); + MField *fieldObject = MObject::NewObject(*fieldClass)->AsMField(); + if (UNLIKELY(fieldObject == nullptr)) { + return nullptr; + } + uint32_t accessFlags = fieldMeta.GetMod(); + fieldObject->Store(accessFlagsOffset, static_cast(accessFlags), false); + fieldObject->StoreObjectOffHeap(declaringClassOffset, declaringClass); + uint16_t index = fieldMeta.GetIndex(); + fieldObject->Store(fieldMetaIndexOffset, index, false); + fieldObject->StoreObjectOffHeap(typeOffset, type); + 
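+  // The remaining reflective fields differ between runtimes: without __OPENJDK__
+  // the 'offset' slot below holds a numeric offset (for static fields, the distance
+  // from the class metadata to the static storage; for instance fields, the field's
+  // offset within the object); with __OPENJDK__ the interned field name is stored
+  // in the 'name' slot instead.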
+#ifndef __OPENJDK__ + // set offset + int32_t offset = + fieldMeta.IsStatic() ? static_cast((fieldMeta.GetStaticAddr() - declaringClass->AsUintptr())) + : static_cast(static_cast(fieldMeta.GetOffset())); + fieldObject->Store(offsetOffset, offset, false); + return fieldObject; +#else + ScopedHandles sHandles; + ObjHandle o(fieldObject); + char *fieldName = fieldMeta.GetName(); + MString *mStringName = MString::InternUtf(std::string(fieldName)); + fieldObject->StoreObjectNoRc(nameOffset, mStringName); + return o.ReturnObj()->AsMField(); +#endif +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mmethod.cpp b/src/mrt/maplert/src/mmethod.cpp new file mode 100644 index 0000000000..ba480795f1 --- /dev/null +++ b/src/mrt/maplert/src/mmethod.cpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mmethod.h" +#include "cpphelper.h" +#include "mclass_inline.h" +#include "methodmeta_inline.h" +#ifndef UNIFIED_MACROS_DEF +#define UNIFIED_MACROS_DEF +#include "unified.macros.def" +#endif + +namespace maplert { +#ifndef __OPENJDK__ +uint32_t MMethod::methodMetaOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FExecutable_3B, artMethod); +uint32_t MMethod::declaringClassOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FExecutable_3B, declaringClass); +#else +uint32_t MMethod::methodDeclaringClassOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, clazz); +uint32_t MMethod::methodSlotOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, slot); +uint32_t MMethod::methodNameOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, name); +uint32_t MMethod::methodReturnTypeOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, returnType); +uint32_t MMethod::methodParameterTypesOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, parameterTypes); +uint32_t MMethod::methodExceptionTypesOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, exceptionTypes); +uint32_t MMethod::methodModifiersOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FMethod_3B, modifiers); + + // constructor +uint32_t MMethod::constructorDeclaringClassOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FConstructor_3B, clazz); +uint32_t MMethod::constructorSlotOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FConstructor_3B, slot); +uint32_t MMethod::constructorParameterTypesOffset = + MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FConstructor_3B, parameterTypes); +uint32_t MMethod::constructorExceptionTypesOffset = + MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FConstructor_3B, exceptionTypes); +uint32_t MMethod::constructorModifiersOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FConstructor_3B, modifiers); +#endif +uint32_t MMethod::accessFlagsOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FExecutable_3B, accessFlags); +uint32_t MMethod::overrideOffset = MRT_FIELD_OFFSET(Ljava_2Flang_2Freflect_2FAccessibleObject_3B, override); + +#ifndef __OPENJDK__ +MMethod *MMethod::NewMMethodObject(const MethodMeta &methodMeta) { + bool isConsrtuct = 
methodMeta.IsConstructor(); + MClass *methodClass = isConsrtuct ? WellKnown::GetMClassConstructor() : WellKnown::GetMClassMethod(); + MMethod *methodObject = MObject::NewObject(*methodClass)->AsMMethod(); + uint32_t accessFlags = methodMeta.GetMod(); + MClass *declearingClass = methodMeta.GetDeclaringClass(); + methodObject->Store(accessFlagsOffset, accessFlags, false); + methodObject->StoreObjectOffHeap(declaringClassOffset, declearingClass); + methodObject->Store(methodMetaOffset, reinterpret_cast(&methodMeta), false); + return methodObject; +} +#else +MMethod *MMethod::NewMMethodObject(const MethodMeta &methodMeta) { + bool isConsrtuct = methodMeta.IsConstructor(); + MClass *methodClass = isConsrtuct ? WellKnown::GetMClassConstructor() : WellKnown::GetMClassMethod(); + MMethod *methodObject = MObject::NewObject(*methodClass)->AsMMethod(); + uint32_t accessFlags = methodMeta.GetMod(); + MClass *declearingClass = methodMeta.GetDeclaringClass(); + ScopedHandles sHandles; + ObjHandle o(methodObject); + if (!methodClass->InitClassIfNeeded()) { + LOG(FATAL) << "fail do clinit !!! " << "class: " << methodClass->GetName() << maple::endl; + } + uint32_t dclClzzOffset = isConsrtuct ? constructorDeclaringClassOffset : methodDeclaringClassOffset; + uint32_t slotOffset = isConsrtuct ? constructorSlotOffset : methodSlotOffset; + uint32_t nameOffset = methodNameOffset; + uint32_t returnTypeOffset = methodReturnTypeOffset; + uint32_t parameterTypesOffset = isConsrtuct ? constructorParameterTypesOffset : methodParameterTypesOffset; + uint32_t exceptionTypesOffset = isConsrtuct ? constructorExceptionTypesOffset : methodExceptionTypesOffset; + uint32_t modifiersOffset = isConsrtuct ? constructorModifiersOffset : methodModifiersOffset; + accessFlags = isConsrtuct ? accessFlags & (~modifier::kModifierConstructor) : accessFlags; + methodObject->StoreObjectOffHeap(dclClzzOffset, declearingClass); + uint32_t slot = declearingClass->GetMethodMetaIndex(methodMeta); + methodObject->Store(slotOffset, slot, false); + if (!isConsrtuct) { + MString *methodNameObj = MString::InternUtf(std::string(methodMeta.GetName())); + methodObject->StoreObject(nameOffset, methodNameObj); + MClass *retType = methodMeta.GetReturnType(); + if (retType == nullptr) { + return nullptr; + } + methodObject->StoreObjectOffHeap(returnTypeOffset, retType); + } + ObjHandle parameterTypesArray(methodMeta.GetParameterTypes()); + if (parameterTypesArray() == nullptr) { + return nullptr; + } + methodObject->StoreObject(parameterTypesOffset, parameterTypesArray.AsArray()); + ObjHandle excetpionArray(methodMeta.GetExceptionTypes()); + if (excetpionArray() == nullptr) { + return nullptr; + } + methodObject->StoreObject(exceptionTypesOffset, excetpionArray.AsArray()); + methodObject->Store(modifiersOffset, accessFlags & 0xffff, false); + methodObject->Store(accessFlagsOffset, accessFlags, false); + return o.ReturnObj()->AsMMethod(); +} +#endif +} // namespace maplert diff --git a/src/mrt/maplert/src/mobject.cpp b/src/mrt/maplert/src/mobject.cpp new file mode 100644 index 0000000000..2455ce3975 --- /dev/null +++ b/src/mrt/maplert/src/mobject.cpp @@ -0,0 +1,94 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fast_alloc_inline.h" +#include "sizes.h" +namespace maplert { +MObject *MObject::SetClass(const MClass &mClass) { + if (mClass.HasFinalizer()) { + ScopedHandles sHandles; + ObjHandle newObj(this); + MRT_SetJavaClass(newObj.AsRaw(), mClass.AsUintptr()); + return newObj.ReturnObj(); + } else { + MRT_SetJavaClass(AsUintptr(), mClass.AsUintptr()); + return this; + } +} + +inline MObject *MObject::NewObjectInternal(const MClass &klass, size_t objectSize, bool isJNI) { + address_t addr = (*theAllocator).NewObj(objectSize); + if (UNLIKELY(addr == 0)) { + (*theAllocator).OutOfMemory(isJNI); + } + MObject *newObj = MObject::Cast(addr); + if (UNLIKELY(newObj == nullptr)) { + CHECK(MRT_HasPendingException()) << "has no OOM exception when new obj is null" << maple::endl; + return nullptr; + } + newObj = newObj->SetClass(klass); + return newObj; +} + +MObject *MObject::NewObject(const MClass &klass, size_t objectSize, bool isJNI) { + MObject *objAddr = MObject::Cast(MRT_TryNewObject(klass.AsUintptr(), objectSize)); + if (LIKELY(objAddr != nullptr)) { + return objAddr; + } + return NewObjectInternal(klass, objectSize, isJNI); +} + +MObject *MObject::NewObject(const MClass &klass, bool isJNI) { + MObject *objAddr = MObject::Cast(MRT_TryNewObject(klass.AsUintptr())); + if (LIKELY(objAddr != nullptr)) { + return objAddr; + } + size_t objectSize = klass.GetObjectSize(); + CHECK(klass.IsArrayClass() == false) << "must not Array class." << maple::endl; + return NewObjectInternal(klass, objectSize, isJNI); +} + +MObject *MObject::NewObject(const MClass &klass, const MethodMeta *constructor, ...) { + va_list args; + va_start(args, constructor); + if (UNLIKELY(!klass.InitClassIfNeeded())) { + va_end(args); + return nullptr; + } + ScopedHandles sHandles; + ObjHandle obj(NewObject(klass)); + DCHECK(obj.AsJObj() != nullptr) << "new object fail." << maple::endl; + DCHECK(constructor != nullptr) << "MObject::NewObject::constructor is nullptr" << maple::endl; + (void)constructor->Invoke(obj.AsObject(), &args); + va_end(args); + if (UNLIKELY(MRT_HasPendingException())) { + return nullptr; + } + return obj.ReturnObj(); +} + +MObject *MObject::NewObject(const MClass &klass, const MethodMeta &constructor, const jvalue &args, bool isJNI) { + ScopedHandles sHandles; + ObjHandle obj(NewObject(klass, isJNI)); + if (UNLIKELY(obj() == 0)) { + CHECK(MRT_HasPendingException()) << "has no OOM exception when new obj is null" << maple::endl; + return nullptr; + } + (void)constructor.Invoke(obj.AsObject(), &args); + if (UNLIKELY(MRT_HasPendingException())) { + return nullptr; + } + return obj.ReturnObj(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/modifier.cpp b/src/mrt/maplert/src/modifier.cpp new file mode 100644 index 0000000000..460af4152b --- /dev/null +++ b/src/mrt/maplert/src/modifier.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "modifier.h"
+
+namespace maplert {
+void modifier::JavaAccessFlagsToString(uint32_t accessFlags, std::string &result) {
+  if ((accessFlags & kModifierPublic) != 0) {
+    result += "public ";
+  }
+  if ((accessFlags & kModifierProtected) != 0) {
+    result += "protected ";
+  }
+  if ((accessFlags & kModifierPrivate) != 0) {
+    result += "private ";
+  }
+  if ((accessFlags & kModifierFinal) != 0) {
+    result += "final ";
+  }
+  if ((accessFlags & kModifierStatic) != 0) {
+    result += "static ";
+  }
+  if ((accessFlags & kModifierAbstract) != 0) {
+    result += "abstract ";
+  }
+  if ((accessFlags & kModifierInterface) != 0) {
+    result += "interface ";
+  }
+  if ((accessFlags & kModifierTransient) != 0) {
+    result += "transient ";
+  }
+  if ((accessFlags & kModifierVolatile) != 0) {
+    result += "volatile ";
+  }
+  if ((accessFlags & kModifierSynchronized) != 0) {
+    result += "synchronized ";
+  }
+  if ((accessFlags & kModifierRCUnowned) != 0) {
+    result += "rcunowned ";
+  }
+  if ((accessFlags & kModifierRCWeak) != 0) {
+    result += "rcweak ";
+  }
+}
+} // namespace maplert
diff --git a/src/mrt/maplert/src/mrt_annotation.cpp b/src/mrt/maplert/src/mrt_annotation.cpp
new file mode 100644
index 0000000000..87a1dca840
--- /dev/null
+++ b/src/mrt/maplert/src/mrt_annotation.cpp
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "mrt_annotation.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "jni.h" +#include "chelper.h" +#include "mrt_classloader_api.h" +#include "exception/mrt_exception.h" +#include "fieldmeta_inline.h" +#ifndef UNIFIED_MACROS_DEF +#define UNIFIED_MACROS_DEF +#include "unified.macros.def" +#endif +namespace maplert { +using namespace annoconstant; +MObject *AnnotationUtil::GetDeclaredAnnotations(const string annoStr, MClass *classObj) { + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), classObj); + std::unique_ptr parser(&annoParser); + uint32_t annoNum = static_cast(parser->ParseNum(kValueInt)); + uint32_t annoMemberCntArray[annoNum]; + parser->InitAnnoMemberCntArray(annoMemberCntArray, annoNum); + ScopedHandles sHandles; + ObjHandle prepareAnnotations(MArray::NewObjectArray(annoNum, *WellKnown::GetMClassAAnnotation())); + uint16_t realCount = 0; + for (uint32_t j = 0; j < annoNum; ++j) { + string retArr(parser->ParseStr(kDefParseStrType)); + if (parser->ExceptAnnotationJudge(retArr) || parser->IsVerificationAnno(retArr)) { + // prevent unnecessary annotation objects from being created + parser->SkipAnnoMember(annoMemberCntArray[j]); + continue; + } + MClass *annoType = MClass::JniCast(MRT_GetClassByContextClass(*classObj, retArr)); + if (annoType == nullptr) { + LOG(ERROR) << "AnnotationType: " << retArr << " Not Found, Location:GetDeclaredAnnotations" << maple::endl; + parser->SkipAnnoMember(annoMemberCntArray[j]); + continue; + } +#ifdef __OPENJDK__ + ObjHandle hashMapInst(parser->GenerateMemberValueHashMap(classObj, annoType, annoMemberCntArray[j])); + ObjHandle proxyInstance( + parser->InvokeAnnotationParser(hashMapInst.AsObject(), annoType)); +#else + ObjHandle proxyInstance( + parser->GenerateAnnotationProxyInstance(classObj, annoType, annoMemberCntArray[j])); +#endif + prepareAnnotations->SetObjectElement(realCount, proxyInstance.AsObject()); + ++realCount; + } + if (realCount == 0) { + AnnotationUtil::UpdateCache(kHasNoDeclaredAnno, classObj, nullptr); + } + if (annoNum != realCount) { + MArray *retAnnotations = reinterpret_cast( + MRT_NewObjArray(realCount, *WellKnown::GetMClassAnnotation(), nullptr)); + for (uint32_t i = 0; i < realCount; ++i) { + MObject *obj = prepareAnnotations->GetObjectElementNoRc(i); + // obj will be recorded in retAnnotations, no need to use slv.Push + retAnnotations->SetObjectElement(i, obj); + } + return retAnnotations; + } + return prepareAnnotations.ReturnObj(); +} + +bool AnnotationUtil::HasDeclaredAnnotation(MClass *klass, AnnotationClass annoType) { + // simply get the klass's raw annotation, and checks for compressed annoType string. 
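+  // kInheritClass / kRepeatableClasss are the markers recorded in the annotation
+  // string for java.lang.annotation.Inherited and java.lang.annotation.Repeatable;
+  // Find() returns kNPos when the requested marker is absent.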
+ std::string annoStr = klass->GetAnnotation(); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), klass); + std::unique_ptr parser(&annoParser); + const char *annoTypeName = nullptr; + if (annoType == kAnnotationInherited) { + annoTypeName = kInheritClass; + } else if (annoType == kAnnotationRepeatable) { + annoTypeName = kRepeatableClasss; + } + + if (annoTypeName == nullptr) { + return false; + } + bool ret = parser->Find(annoTypeName) != kNPos; + return ret; +} + +jboolean AnnotationUtil::IsDeclaredAnnotationPresent(const string &annoStr, const string &annoTypeName, + MClass *currentCls) { + VLOG(reflect) << "Enter isDeclaredAnnotationPresent, annoStr: " << annoStr << " annotationTypeName: " << + annoTypeName << maple::endl; + if (!AnnoParser::HasAnnoMember(annoStr)) { + return JNI_FALSE; + } + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), currentCls); + std::unique_ptr parser(&annoParser); + uint32_t annoNum = static_cast(parser->ParseNum(kValueInt)); + uint32_t annoMemberCntArray[annoNum]; + parser->InitAnnoMemberCntArray(annoMemberCntArray, annoNum); + for (uint32_t j = 0; j < annoNum; ++j) { + string currentAnnoType = parser->ParseStr(kDefParseStrType); + if (currentAnnoType == annoTypeName) { + return JNI_TRUE; + } + parser->SkipAnnoMember(annoMemberCntArray[j]); + } + return JNI_FALSE; +} + +uint32_t AnnotationUtil::GetRealParaCntForConstructor(const MethodMeta &mthd, const char *kMthdAnnoStr) { + MClass *cls = mthd.GetDeclaringClass(); + if (!mthd.IsConstructor() || cls->IsAnonymousClass()) { + return mthd.GetParameterCount(); + } + string annoChar = cls->GetAnnotation(); + if (annoChar.empty()) { + return mthd.GetParameterCount(); + } + constexpr int paramsNum = 2; + if (cls->IsEnum()) { + if (kMthdAnnoStr != nullptr && kMthdAnnoStr[kLabelSize] == '0') { + return mthd.GetParameterCount(); + } + // no implicit arguments for the enclosing instance, so fix for the compiler wrong params num + return mthd.GetParameterCount() - paramsNum; + } + AnnoParser &annoParser = AnnoParser::ConstructParser(annoChar.c_str(), cls); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetEnclosingClassStr()); + if (loc != kNPos) { + return mthd.GetParameterCount(); + } + parser->SetIdx(0); + if (parser->Find(parser->GetEncsloingMethodClassStr()) != kNPos) { + uint32_t outClassFieldNum = 0; + FieldMeta *fields = cls->GetFieldMetas(); + uint32_t numOfFields = cls->GetNumOfFields(); + for (uint32_t i = 0; i < numOfFields; ++i) { + FieldMeta *fieldMeta = &fields[i]; + string fName = fieldMeta->GetName(); + if (fName.find("$") != string::npos) { + ++outClassFieldNum; + } + } + uint32_t paramNum = mthd.GetParameterCount(); + return paramNum - outClassFieldNum; + } + return mthd.GetParameterCount(); +} + +std::string AnnotationUtil::GetAnnotationUtil(const char *annotation) { + if (annotation == nullptr) { + return ""; + } + __MRT_Profile_CString(annotation); +#ifndef __OPENJDK__ + if (annotation[0] == '0') { + std::string retArray(annotation); + DeCompressHighFrequencyStr(retArray); + return retArray; + } +#endif + return annotation; +} + +CacheItem AnnotationUtil::cache[]; + +MObject *AnnotationUtil::GetEnclosingClass(MClass *classObj) noexcept { + if (classObj->IsProxy()) { + return nullptr; + } + MObject *declaringClass = GetDeclaringClassFromAnnotation(classObj); + if (declaringClass != nullptr) { + return declaringClass; + } + string annoChar = classObj->GetAnnotation(); + if (annoChar.empty()) { + return nullptr; + } + 
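+  // The EnclosingMethod record is encoded as "declaringClass|methodName|signature",
+  // so the enclosing class is the substring before the first '|'
+  // (GetEnclosingMethodValue below parses all three parts).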
VLOG(reflect) << "Enter __MRT_Reflect_Class_getEnclosingClass, annostr: " << annoChar << maple::endl; + AnnoParser &annoParser = AnnoParser::ConstructParser(annoChar.c_str(), classObj); + std::unique_ptr parser(&annoParser); + int32_t emloc = parser->Find(parser->GetEncsloingMethodClassStr()); + if (emloc == kNPos) { + return nullptr; + } + parser->SkipNameAndType(); + string methodName = parser->ParseStr(kDefParseStrType); + size_t loc = methodName.find("|"); + if (loc == string::npos) { + return nullptr; + } + return MObject::JniCast(MRT_GetClassByContextClass(*classObj, methodName.substr(0, loc))); +} + +CacheValueType AnnotationUtil::Get(CacheLabel label, MClass *classObj) noexcept { + CHECK_E_P(classObj == nullptr, "Get: classObj is nullptr"); + CacheValueType result = nullptr; + bool valid = AnnotationUtil::GetCache(label, classObj, result); + if (valid) { + return result; + } + switch (label) { + case kEnclosingClass: + result = GetEnclosingClass(classObj); + break; + case kDeclaringClass: + result = GetDeclaringClassFromAnnotation(classObj); + break; + case kDeclaredClasses: + result = new set(); + GetDeclaredClasses(classObj, *reinterpret_cast*>(result)); + break; + case kEnclosingMethod: + result = GetEnclosingMethod(classObj); + break; + default: { + LOG(ERROR) << "Wrong label in AnnotationUtil::Get" << maple::endl; + break; + } + } + UpdateCache(label, classObj, result); + return result; +} + +MethodMeta *AnnotationUtil::GetEnclosingMethodValue(MClass *classObj, const std::string &annSet) { + AnnoParser &annoParser = AnnoParser::ConstructParser(annSet.c_str(), classObj); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetEncsloingMethodClassStr()); + if (loc == kNPos) { + return nullptr; + } + parser->SkipNameAndType(); + std::string buffStr = parser->ParseStr(kDefParseStrType); + auto posClassName = buffStr.find("|"); + auto posMethodName = buffStr.rfind("|"); + if (posClassName == std::string::npos || posMethodName == std::string::npos) { + return nullptr; + } + std::string className = buffStr.substr(0, posClassName); + MClass *classData = MClass::JniCast(MRT_GetClassByContextClass(*classObj, className)); + CHECK(classData != nullptr) << "GetEnclosingMethodValue : classData is nullptr" << maple::endl; + std::string mthName = buffStr.substr(posClassName + 1, posMethodName - posClassName - 1); + std::string sigName = buffStr.substr(posMethodName + 1, buffStr.length() - posMethodName - 1); + MethodMeta *mth = classData->GetMethod(mthName.c_str(), sigName.c_str()); + if (mth == nullptr) { + return nullptr; + } + return mth; +} + +jboolean AnnotationUtil::GetIsAnnoPresent(const string &annoStr, const string &annotationTypeName, + CacheValueType meta, MClass *annoObj, CacheLabel label) noexcept { + DcAnnoPresentKeyType key(meta, reinterpret_cast(annoObj)); + jboolean result = JNI_FALSE; + CacheValueType resultP = &result; + if (AnnotationUtil::GetCache(label, &key, resultP)) { + result = *static_cast(resultP); + return result; + } + switch (label) { + case kClassAnnoPresent: + result = IsDeclaredAnnotationPresent(annoStr, annotationTypeName, reinterpret_cast(meta)); + break; + case kMethodAnnoPresent: + result = IsDeclaredAnnotationPresent(annoStr, annotationTypeName, + reinterpret_cast(meta)->GetDeclaringClass()); + break; + case kFieldAnnoPresent: + result = IsDeclaredAnnotationPresent(annoStr, annotationTypeName, + reinterpret_cast(meta)->GetDeclaringclass()); + break; + default: + LOG(ERROR) << "Wrong label in AnnotationUtil::GetIsAnnoPresent" << 
maple::endl; + } + + AnnotationUtil::UpdateCache(label, &key, &result); + return result; +} + +MethodMeta *AnnotationUtil::GetEnclosingMethod(MClass *argObj) noexcept { + string annotationSet = argObj->GetAnnotation(); + if (annotationSet.empty()) { + return nullptr; + } + VLOG(reflect) << "Enter GetEnclosingMethod, annostr: " << annotationSet << maple::endl; + MethodMeta *ret = GetEnclosingMethodValue(argObj, annotationSet); + return ret; +} + +bool AnnotationUtil::GetCache(CacheLabel label, CacheValueType target, CacheValueType &result) noexcept { + cache[label].lock.Lock(); + if (label == kHasNoDeclaredAnno) { + set *cacheSet = reinterpret_cast*>(cache[label].key); + if (cacheSet != nullptr && cacheSet->find(reinterpret_cast(target)) != cacheSet->end()) { + cache[label].lock.Unlock(); + return true; + } + cache[label].lock.Unlock(); + return false; + } + if (label == kClassAnnoPresent || label == kMethodAnnoPresent || label == kFieldAnnoPresent) { + map *cacheMap = reinterpret_cast< + map*>(cache[label].key); + + if (cacheMap != nullptr) { + auto findResult = cacheMap->find(*reinterpret_cast(target)); + if (findResult != cacheMap->end()) { + cache[label].lock.Unlock(); + if (result != nullptr) { + *static_cast(result) = findResult->second; + } + return true; + } + } + cache[label].lock.Unlock(); + return false; + } + if (target == cache[label].key) { + result = cache[label].value; + cache[label].lock.Unlock(); + return true; + } + cache[label].lock.Unlock(); + return false; +} + +void AnnotationUtil::UpdateCache(CacheLabel label, CacheValueType target, CacheValueType result) noexcept { + cache[label].lock.Lock(); + if (label == kHasNoDeclaredAnno) { + if (cache[label].key == nullptr) { + set *newCache = new set(); // cache, always living + cache[label].key = reinterpret_cast(newCache); + } + set *cacheSet = reinterpret_cast*>(cache[label].key); + if (cacheSet->size() > kRTCacheSize) { + cacheSet->clear(); + } + cacheSet->insert(reinterpret_cast(target)); + cache[label].lock.Unlock(); + return; + } + + if (label == kClassAnnoPresent || label == kMethodAnnoPresent || label == kFieldAnnoPresent) { + if (cache[label].key == nullptr) { + set *newCache = new set(); // cache, always living + cache[label].key = reinterpret_cast(newCache); + } + map *cacheMap = + reinterpret_cast*>(cache[label].key); + if (cacheMap->size() > kRTCacheSize) { + cacheMap->clear(); + } + DcAnnoPresentKeyType tmp = *reinterpret_cast(target); + (*cacheMap)[tmp] = *reinterpret_cast(result); + cache[label].lock.Unlock(); + return; + } + + cache[label].key = target; + if (label == kDeclaredClasses && cache[label].value != nullptr) { + delete static_cast*>(cache[label].value); + } + cache[label].value = result; + cache[label].lock.Unlock(); +} + +MObject *AnnotationUtil::GetDeclaringClassFromAnnotation(MClass *classObj) noexcept { + string annoStr = classObj->GetAnnotation(); + if (annoStr.empty()) { + return nullptr; + } + VLOG(reflect) << "Enter getDeclaringClassFromAnnotation, annostr: " << annoStr << maple::endl; + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), classObj); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetEnclosingClassStr()); + if (loc == kNPos) { + return nullptr; + } + parser->SkipNameAndType(); + string retStr = parser->ParseStr(kDefParseStrType); + return MObject::JniCast(MRT_GetClassByContextClass(*classObj, retStr)); +} + +void AnnotationUtil::GetDeclaredClasses(MClass *classObj, std::set &metaList) noexcept { + string annoStr = 
classObj->GetAnnotation(); + if (annoStr.empty()) { + return; + } + VLOG(reflect) << "Enter getDeclaredClasses, annoStr: " << annoStr << maple::endl; + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), classObj); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetMemberClassesStr()); + if (loc == kNPos) { + return; + } + int32_t lloc = parser->Find(parser->GetAnnoArrayStartDelimiter()); + if (lloc == kNPos) { + return; + } + + uint32_t annoNum = static_cast(parser->ParseNum(kValueInt)); + parser->SkipNameAndType(); + for (uint32_t j = 0; j < annoNum; ++j) { + string retArr = parser->ParseStr(kDefParseStrType); + metaList.insert(MClass::JniCast(MRT_GetClassByContextClass(*classObj, retArr))); + } +} + +MObject *MethodDefaultUtil::GetDefaultPrimValue(const std::unique_ptr &uniqueParser, uint32_t type) { + switch (type) { + case kValueInt: + return primitiveutil::BoxPrimitiveJint(static_cast(uniqueParser->ParseNum(kValueInt))); + case kValueShort: + return primitiveutil::BoxPrimitiveJshort(uniqueParser->ParseNum(kValueShort)); + case kValueChar: + return primitiveutil::BoxPrimitiveJchar(uniqueParser->ParseNum(kValueShort)); + case kValueByte: + return primitiveutil::BoxPrimitiveJbyte(uniqueParser->ParseNum(kValueShort)); + case kValueBoolean: + return primitiveutil::BoxPrimitiveJboolean(uniqueParser->ParseNum(kValueShort)); + case kValueLong: + return primitiveutil::BoxPrimitiveJlong(uniqueParser->ParseNum(kValueLong)); + case kValueFloat: + return primitiveutil::BoxPrimitiveJfloat(uniqueParser->ParseDoubleNum(kValueFloat)); + case kValueDouble: + return primitiveutil::BoxPrimitiveJdouble(uniqueParser->ParseDoubleNum(kValueDouble)); + default: + LOG(FATAL) << "Unexpected primitive type: " << type; + } + return nullptr; +} + +MObject *MethodDefaultUtil::GetDefaultEnumValue(const std::unique_ptr &uniqueParser) { + MClass *enumType = methodMeta.GetReturnType(); + if (enumType == nullptr) { + return nullptr; + } + string retArr = uniqueParser->ParseStr(kDefParseStrType); + FieldMeta *fieldMeta = enumType->GetDeclaredField(retArr.c_str()); + CHECK_E_P(fieldMeta == nullptr, "GetDefaultEnumValue() : fieldMeta is nullptr."); + return fieldMeta->GetObjectValue(nullptr); +} + +MObject *MethodDefaultUtil::GetDefaultAnnotationValue(const std::unique_ptr &uniqueParser) { + uint32_t memberNum = static_cast(uniqueParser->ParseNum(kValueInt)); + string retArr = uniqueParser->ParseStr(kDefParseStrType); + MClass *annoClass = MClass::JniCast(MRT_GetClassByContextClass(*declClass, retArr)); + CHECK(annoClass != nullptr) << "GetDefaultAnnotationValue : annoClass is nullptr" << maple::endl; +#ifdef __OPENJDK__ + ScopedHandles sHandles; + ObjHandle hashMapRef(uniqueParser->GenerateMemberValueHashMap(declClass, annoClass, memberNum)); + MObject *proxyInstance = uniqueParser->InvokeAnnotationParser(hashMapRef(), annoClass); +#else + MObject *proxyInstance = uniqueParser->GenerateAnnotationProxyInstance(declClass, annoClass, memberNum); +#endif + return proxyInstance; +} + +MObject *MethodDefaultUtil::GetDefaultValue(const std::unique_ptr &uniqueParser) { + uint32_t type = static_cast(uniqueParser->ParseNum(kValueInt)); + string retArr; + switch (type) { + case kValueInt: + case kValueShort: + case kValueChar: + case kValueByte: + case kValueBoolean: + case kValueLong: + case kValueFloat: + case kValueDouble: + return GetDefaultPrimValue(uniqueParser, type); + case kValueType: + retArr = uniqueParser->ParseStr(kDefParseStrType); + return 
          MObject::JniCast(MRT_GetClassByContextClass(*declClass, retArr));
+    case kValueString: {
+      if (uniqueParser->GetCurrentChar() == ']') { // valid for AnnoAsciiParser
+        uniqueParser->IncIdx(1); // skip ']'
+      }
+      retArr = uniqueParser->ParseStr(kDefParseStrType);
+      return NewStringFromUTF16(retArr);
+    }
+    case kValueArray: {
+#ifdef __OPENJDK__
+      MObject *ret = uniqueParser->CaseArray(declClass, declClass, methodMeta);
+#else
+      ArgValue val(0);
+      MObject *ret = uniqueParser->CaseArray(declClass, declClass, val, methodMeta);
+#endif
+      return ret;
+    }
+    case kValueAnnotation:
+      return GetDefaultAnnotationValue(uniqueParser);
+    case kValueEnum:
+      return GetDefaultEnumValue(uniqueParser);
+    default:
+      LOG(FATAL) << "ParserType Not Found." << maple::endl;
+  }
+  return nullptr;
+}
+
+bool MethodDefaultUtil::HasDefaultValue(const char *methodName, AnnoParser &parser) {
+  parser.SkipNameAndType();
+  int64_t defaultNum = parser.ParseNum(kValueInt);
+  parser.NextItem(kDefSkipItemNum);
+  string str;
+  int64_t idx;
+  for (idx = 0; idx < defaultNum; ++idx) {
+    str = parser.ParseStr(kDefParseStrType);
+    if (str == methodName) {
+      break;
+    }
+    parser.SkipNameAndType();
+  }
+  if (idx == defaultNum) {
+    return false;
+  }
+  return true;
+}
+
+#ifndef __OPENJDK__
+// use a char array rather than a map, which would have too much overhead here
+namespace {
+constexpr uint8_t kMapSize = 7;
+constexpr uint8_t kKeySize = 4;
+constexpr uint8_t kValueSize = 34;
+constexpr char kHighFrequencyKey[kMapSize][kKeySize] = { "`IH", "`RP", "`Cl", "`Oj", "`ST", "`AF", "`VL" };
+constexpr char kHighFrequencyValue[kMapSize][kValueSize] = {
+    "Ljava/lang/annotation/Inherited;",
+    "Ljava/lang/annotation/Repeatable;", "Ljava/lang/Class", "Ljava/lang/Object;",
+    "Ljava/lang/String;", "accessFlags", "value"
+};
+}
+
+void AnnotationUtil::DeCompressHighFrequencyStr(string &str) {
+  constexpr uint8_t constCompressSize = 3;
+  size_t highFrequencyValueSize[kMapSize];
+  for (int i = 0; i < kMapSize; ++i) {
+    // length of the expanded value; used to continue searching after the replaced text
+    highFrequencyValueSize[i] = strlen(kHighFrequencyValue[i]);
+    size_t loc = str.find(kHighFrequencyKey[i]);
+    while (loc != string::npos) {
+      str.replace(loc, constCompressSize, kHighFrequencyValue[i]);
+      loc = str.find(kHighFrequencyKey[i], loc + highFrequencyValueSize[i]);
+    }
+  }
+}
+#endif
+} // namespace maplert
diff --git a/src/mrt/maplert/src/mrt_annotation_parser.cpp b/src/mrt/maplert/src/mrt_annotation_parser.cpp
new file mode 100644
index 0000000000..3364421909
--- /dev/null
+++ b/src/mrt/maplert/src/mrt_annotation_parser.cpp
@@ -0,0 +1,1445 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "mrt_annotation_parser.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "jni.h" +#include "chelper.h" +#include "mmethod_inline.h" +#include "exception/mrt_exception.h" +#include "mrt_classloader_api.h" +#include "file_layout.h" + +namespace maplert { +using namespace annoconstant; +std::set AnnoIndexParser::exceptIndexSet = { + kEncosilngClassIndex, + kInnerClassIndex, + kMemberClassesIndex, + kThrowsClassIndex, + kSignatureClassIndex, + kEncsloingMethodClassIndex, + kRepeatableClasssIndex, + kInheritClassIndex +}; + +std::set AnnoAsciiParser::exceptAsciiSet = { + kEncosilngClass, + kInnerClass, + kMemberClasses, + kThrowsClass, + kSignatureClass, + kEncsloingMethodClass, + kRepeatableClasss, + kInheritClass +}; + +// Parameter::init need 4 params +enum ParameterIdx : int8_t { + kParaNamePos, + kParaModifiersPos, + kParaExecutablePos, + kParaIndexPos, + kParaSize +}; + +AnnoParser::~AnnoParser() { + annoStr = nullptr; + declaringClass = nullptr; + if (strTab != nullptr) { + delete strTab; + strTab = nullptr; + } +} + +std::string AnnoParser::RemoveParameterAnnoInfo(std::string &annoStr) { + size_t size = annoStr.size(); + if (size != 0) { + for (size_t i = size - 1; i > 0; --i) { + if (annoStr[i] == '|') { + return annoStr.substr(0, i); + } + } + } + return ""; +} + +MObject *AnnoParser::AllocAnnoObject(MClass *classObj, MClass *annoClass) { + if (GetStr() == nullptr || annoClass == nullptr) { + return nullptr; + } + uint32_t annoNum = static_cast(ParseNum(kValueInt)); + if (annoNum == 0) { + return nullptr; + } + bool findLabel = false; + uint32_t whichAnno = 0; + uint32_t annoMemberCntArray[annoNum]; + InitAnnoMemberCntArray(annoMemberCntArray, annoNum); + char *annotationName = annoClass->GetName(); + for (uint32_t i = 0; i < annoNum; ++i) { + string currentAnnoName = ParseStr(kDefParseStrType); + if (currentAnnoName.compare(annotationName) == 0 && !IsVerificationAnno(currentAnnoName)) { + whichAnno = i; + findLabel = true; + break; + } else { + SkipAnnoMember(annoMemberCntArray[i]); + } + } + if (!findLabel) { + return nullptr; + } +#ifdef __OPENJDK__ + ScopedHandles sHandles; + ObjHandle hashMapInst(GenerateMemberValueHashMap(classObj, annoClass, annoMemberCntArray[whichAnno])); + MObject *ret = InvokeAnnotationParser(hashMapInst(), annoClass); +#else + MObject *ret = GenerateAnnotationProxyInstance(classObj, annoClass, annoMemberCntArray[whichAnno]); +#endif + return ret; +} + +std::string AnnoParser::GetParameterAnnotationInfo(const std::string &entireStr) { + std::string annoStr; + if (!IsIndexParser(entireStr.c_str())) { + annoStr = AnnoAsciiParser::GetParameterAnnotationInfoAscii(entireStr); + *annoStr.begin() = annoconstant::kOldMetaLabel; + } else { + annoStr = AnnoIndexParser::GetParameterAnnotationInfoIndex(entireStr); + *annoStr.begin() = annoconstant::kNewMetaLabel; + } + return annoStr; +} + +MObject *AnnoParser::GetAnnotationNative(int32_t index, const MClass *annoClass) { + uint32_t annoNum = static_cast(ParseNum(kValueInt)); + uint32_t paramNumArray[annoNum]; + uint32_t annoMemberCntArray[annoNum]; + InitAnnoMemberCntArray(annoMemberCntArray, annoNum); + for (uint32_t i = 0; i < annoNum; ++i) { + paramNumArray[i] = static_cast(ParseNum(kValueInt)) + 1; + } + bool haveIndexFlag = false; + for (int32_t i = 0; i < (int32_t)annoNum; ++i) { + if (static_cast(paramNumArray[i]) - 1 == index) { + haveIndexFlag = true; + } + } + if (!haveIndexFlag) { + return nullptr; + } + CHECK_E_P(annoClass == 
nullptr, "AnnoParser::GetAnnotationNative : annoClass is nullptr"); + for (uint32_t i = 0; i < annoNum; ++i) { + if (paramNumArray[i] - 1 == static_cast(index)) { + string currentAnnoName = ParseStr(kDefParseStrType); + if (currentAnnoName != annoClass->GetName()) { + // skip an annotationMember Info + SkipAnnoMember(annoMemberCntArray[i]); + continue; + } + MClass *annoType = MClass::JniCast(MRT_GetClassByContextClass(*annoClass, currentAnnoName)); + if (annoType == nullptr) { + return nullptr; + } +#ifdef __OPENJDK__ + ScopedHandles sHandles; + ObjHandle hashMapInst(GenerateMemberValueHashMap(declaringClass, annoType, annoMemberCntArray[i])); + MObject *proxyInstance = InvokeAnnotationParser(hashMapInst.AsObject(), annoType); +#else + MObject *proxyInstance = GenerateAnnotationProxyInstance(declaringClass, annoType, annoMemberCntArray[i]); +#endif + return proxyInstance; + } + else { + constexpr int8_t annoCountFlagNum = 1; + constexpr int8_t annoMemberComponentNum = 3; + NextItem(annoCountFlagNum + annoMemberComponentNum * annoMemberCntArray[i]); // skip an annotationinfo + } + } + return nullptr; +} + +MObject *AnnoParser::GetParameterAnnotationsNative(const MethodMeta *methodMeta) { + string paramAnnoInfo = AnnoParser::GetParameterAnnotationInfo(methodMeta->GetAnnotation()); + uint32_t annoNum = static_cast(ParseNum(kValueInt)); + uint32_t paramNum = AnnotationUtil::GetRealParaCntForConstructor(*methodMeta, paramAnnoInfo.c_str()); + uint32_t paramNumArray[paramNum]; + std::fill(paramNumArray, paramNumArray + paramNum, 0); + uint32_t annoMemberCntArray[annoNum]; + InitAnnoMemberCntArray(annoMemberCntArray, annoNum); + for (uint32_t i = 0; i < annoNum; ++i) { + paramNumArray[ParseNum(kValueInt)]++; + } + uint32_t annoMemberCntArrayIdx = 0; + ScopedHandles sHandles; + ObjHandle twoDimAnnos(MArray::NewObjectArray(paramNum, *WellKnown::GetMClassAAAnnotation())); + for (uint32_t i = 0; i < paramNum; ++i) { + if (paramNumArray[i] > 0) { + uint32_t index = 0; + ObjHandle OneDimAnnos(MRT_NewObjArray(paramNumArray[i], *WellKnown::GetMClassAnnotation(), nullptr)); + for (uint32_t j = 0; j < paramNumArray[i]; ++j) { + string retArr = ParseStr(kDefParseStrType); + MClass *annotationInfo = MClass::JniCast(MRT_GetClassByContextClass(*declaringClass, retArr)); + if (annotationInfo == nullptr) { + return nullptr; + } +#ifdef __OPENJDK__ + ObjHandle hashMapInst(GenerateMemberValueHashMap(declaringClass, + annotationInfo, annoMemberCntArray[annoMemberCntArrayIdx])); + ObjHandle proxyInstance(InvokeAnnotationParser(hashMapInst.AsObject(), annotationInfo)); +#else + ObjHandle proxyInstance(GenerateAnnotationProxyInstance(declaringClass, + annotationInfo, annoMemberCntArray[annoMemberCntArrayIdx])); +#endif + ++annoMemberCntArrayIdx; + if (!proxyInstance()) { + return nullptr; + } + OneDimAnnos->SetObjectElement(index++, proxyInstance.AsObject()); + } + twoDimAnnos->SetObjectElement(i, OneDimAnnos.AsObject()); + } else { + ObjHandle OneDimAnnos(MArray::NewObjectArray(0, *WellKnown::GetMClassAAnnotation())); + twoDimAnnos->SetObjectElement(i, OneDimAnnos.AsObject()); + } + } + return twoDimAnnos.ReturnObj(); +} + +template +void SetPrimArrayContent(const MObject *retInArray, AnnoParser &parser, + uint32_t subArrayLength, uint8_t parseType, bool isFloat = false) { + CHECK_E_V(retInArray == nullptr, "SetPrimArrayContent: retInArray is nullptr"); + MObject **p = reinterpret_cast( + reinterpret_cast(const_cast(retInArray))->ConvertToCArray()); + parser.NextItem(kDefSkipItemNum); + PrimType *newP = 
reinterpret_cast(p); + if (isFloat) { + for (uint32_t i = 0; i < subArrayLength; ++i) { + newP[i] = static_cast(parser.ParseDoubleNum(parseType)); + } + } else { + for (uint32_t i = 0; i < subArrayLength; ++i) { + newP[i] = static_cast(parser.ParseNum(parseType)); + } + } +} + +#ifdef __OPENJDK__ +enum XregValIdx : uint8_t { + kAnnoInfoIdx = 0, + kHashMapInstIdx, + kXregValSize +}; +enum ArgIdx : uint8_t { + kMapObjIdx = 0, + kKeyIdx, + kValueIdx, + kArgSize +}; + +class AnnoHashMapFactory { + public: + AnnoHashMapFactory(MClass *annoCls, MClass *dCls, AnnoParser &p) + : annoType(annoCls), classInfo(dCls), parser(p) { + } + + ~AnnoHashMapFactory() = default; + + MethodMeta *GetDefineMethod(const string &annoMemberName) { + MethodMeta *methodMetas = annoType->GetMethodMetas(); + uint32_t numOfFields = annoType->GetNumOfMethods(); + for (uint32_t methodIndex = 0; methodIndex < numOfFields; ++methodIndex) { + MethodMeta *methodMeta = &(methodMetas[methodIndex]); + if (methodMeta->GetName() == annoMemberName) { + return methodMeta; + } + } + return nullptr; + } + + void GetPrimValue(jvalue &xregVal, uint32_t annoFlag) { + switch (annoFlag) { + case kValueChar: + (&xregVal)[kValueIdx].l = + reinterpret_cast(primitiveutil::BoxPrimitiveJchar(parser.ParseNum(kValueShort))); + break; + case kValueInt: + (&xregVal)[kValueIdx].l = reinterpret_cast( + primitiveutil::BoxPrimitiveJint(static_cast(parser.ParseNum(kValueInt)))); + break; + case kValueShort: + (&xregVal)[kValueIdx].l = + reinterpret_cast(primitiveutil::BoxPrimitiveJshort(parser.ParseNum(kValueShort))); + break; + case kValueByte: + (&xregVal)[kValueIdx].l = + reinterpret_cast(primitiveutil::BoxPrimitiveJbyte(parser.ParseNum(kValueShort))); + break; + case kValueLong: + (&xregVal)[kValueIdx].l = + reinterpret_cast(primitiveutil::BoxPrimitiveJlong(parser.ParseNum(kValueLong))); + break; + case kValueFloat: + (&xregVal)[kValueIdx].l = reinterpret_cast( + primitiveutil::BoxPrimitiveJfloat(parser.ParseDoubleNum(kValueFloat))); + break; + case kValueDouble: + (&xregVal)[kValueIdx].l = reinterpret_cast( + primitiveutil::BoxPrimitiveJdouble(parser.ParseDoubleNum(kValueDouble))); + break; + default: + LOG(FATAL) << "annoFlag Not Found." << maple::endl; + } + } + + void GetAnnoValue(jvalue &xregVal) { + uint32_t subAnnoMemberNum = static_cast(parser.ParseNum(kValueInt)); + string retArr = parser.ParseStr(kDefParseStrType); + ScopedHandles sHandles; + MClass *annoInfo = MClass::JniCast(MRT_GetClassByContextClass(*classInfo, retArr)); + CHECK(annoInfo) << "annoInfo is nullptr" << std::endl; + ObjHandle hashMapIns(parser.GenerateMemberValueHashMap(classInfo, annoInfo, subAnnoMemberNum)); + MObject *proxyInstance = parser.InvokeAnnotationParser(hashMapIns.AsObject(), annoInfo); + (&xregVal)[kValueIdx].l = reinterpret_cast(proxyInstance); + } + + void GetBooleanValue(jvalue &xregVal) { + jboolean value = parser.ParseNum(kValueInt) == 1 ? 
JNI_TRUE : JNI_FALSE; + (&xregVal)[kValueIdx].l = reinterpret_cast(primitiveutil::BoxPrimitiveJboolean(value)); + } + + void GetEnumValue(jvalue &xregVal, string &annoMemberName) { + string valStr = parser.ParseStr(kDefParseStrType); + MethodMeta *method = GetDefineMethod(annoMemberName); + if (method == nullptr) { + return; + } + MClass *retType = method->GetReturnType(); + CHECK(retType != nullptr) << "GetEnumValue : GetReturnType return nullptr" << maple::endl; + FieldMeta *fieldMeta = retType->GetDeclaredField(valStr.c_str()); + CHECK(fieldMeta != nullptr) << "GetEnumValue : GetDeclaredField return nullptr" << maple::endl; + (&xregVal)[kValueIdx].l = fieldMeta->GetObjectValue(nullptr)->AsJobject(); + } + + void GetValue(jvalue &xregVal, uint32_t annoFlag, string &annoMemberName) { + switch (annoFlag) { + case kValueString: { + string valStr = parser.ParseStr(kDefParseStrType); + (&xregVal)[kValueIdx].l = reinterpret_cast(NewStringFromUTF16(valStr.c_str())); + break; + } + case kValueChar: + case kValueInt: + case kValueShort: + case kValueByte: + case kValueLong: + case kValueFloat: + case kValueDouble: + GetPrimValue(xregVal, annoFlag); + break; + case kValueAnnotation: + GetAnnoValue(xregVal); + break; + case kValueArray: { + MethodMeta *defineMethod = GetDefineMethod(annoMemberName); + CHECK(defineMethod != nullptr) << "GetDefineMethod return nullptr" << maple::endl; + (&xregVal)[kValueIdx].l = reinterpret_cast(parser.CaseArray(classInfo, annoType, *defineMethod)); + break; + } + case kValueType: { + string valStr = parser.ParseStr(kDefParseStrType); + MClass *typeVal = MClass::JniCast(MRT_GetClassByContextClass(*classInfo, valStr)); + (&xregVal)[kValueIdx].l = reinterpret_cast(typeVal); + break; + } + case kValueEnum: + GetEnumValue(xregVal, annoMemberName); + break; + case kValueBoolean: + GetBooleanValue(xregVal); + break; + default: { + LOG(ERROR) << "GenerateMemberValueHashMap decode error: " << maple::endl; + } + } + } + private: + MClass *annoType; + MClass *classInfo; + AnnoParser &parser; +}; + +MObject *AnnoParser::GenerateMemberValueHashMap(MClass *classInfo, MClass *annotationInfo, uint32_t memberNum) { + ScopedHandles sHandles; + MClass *hashMapCls = WellKnown::GetMClassHashMap(); + MethodMeta *hashMapConstruct = hashMapCls->GetDeclaredConstructor("()V"); + ObjHandle hashMapInst(MObject::NewObject(*hashMapCls, hashMapConstruct)); + if (memberNum == 0) { + return reinterpret_cast(hashMapInst.Return()); + } + MethodMeta *insertMthd = hashMapCls->GetDeclaredMethod("put", + "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;"); + CHECK(insertMthd != nullptr) << "__MRT_Reflect_GetCharDeclaredMethod return nullptr" << maple::endl; + jvalue xregVal[kArgSize]; // map_obj, key , value + xregVal[kMapObjIdx].l = reinterpret_cast(hashMapInst()); + AnnoHashMapFactory factory(annotationInfo, classInfo, *this); + for (uint32_t i = 0; i < memberNum; ++i) { + string annoMemberName = ParseStr(kDefParseStrType); + ObjHandle memberNameJ(NewStringFromUTF16(annoMemberName.c_str())); + xregVal[kKeyIdx].l = memberNameJ.AsJObj(); + uint32_t annoType = static_cast(ParseNum(kValueInt)); + factory.GetValue(*xregVal, annoType, annoMemberName); + ObjHandle keepAlive(xregVal[kValueIdx].l); + constexpr int zeroConst = 0; + RuntimeStub::SlowCallCompiledMethod(insertMthd->GetFuncAddress(), xregVal, zeroConst, zeroConst); + } + return reinterpret_cast(hashMapInst.Return()); +} + +class AnnoArrayMemberFactory { + public: + AnnoArrayMemberFactory(MClass *cls, AnnoParser &par, MClass *annoCls, 
MethodMeta &mthd) + : classInfo(cls), parser(par), annotationInfo(annoCls), mthdObj(mthd) { + subArrayLength = static_cast(parser.ParseNum(kValueInt)); + } + + ~AnnoArrayMemberFactory() = default; + + MObject *GetZeroLenArray() noexcept { + MClass *returnType = mthdObj.GetReturnType(); + CHECK_E_P(returnType == nullptr, "GetZeroLenArray() : GetReturnType() return nullptr."); + MArray *realRetArray = MArray::NewObjectArray(0, *returnType); + return reinterpret_cast(realRetArray); + } + + MObject *GetAnnotationTypeValue() noexcept { + ScopedHandles sHandles; + ObjHandle realRetArray(reinterpret_cast( + parser.GenerateAnnotationTypeValue(classInfo, annotationInfo, subArrayLength))); + constexpr size_t skipStep = 2; + parser.IncIdx(skipStep); // skip ']!' + return reinterpret_cast(realRetArray.Return()); + } + + MObject *GetStringTypeArray() noexcept { + ScopedHandles sHandles; + ObjHandle retInArray(MRT_NewObjArray(subArrayLength, *WellKnown::GetMClassString(), nullptr)); + parser.NextItem(kDefSkipItemNum); + MArray *mArray = retInArray.AsArray(); + for (uint16_t i = 0; i < subArrayLength; ++i) { + string valStr = parser.ParseStr(kDefParseStrType); + MString *memberVal = NewStringFromUTF16(valStr.c_str()); + mArray->SetObjectElementNoRc(i, memberVal); + } + return reinterpret_cast(retInArray.Return()); + } + + MObject *GetTypeTypeArray() noexcept { + ScopedHandles sHandles; + MClass *type = mthdObj.GetReturnType(); + CHECK_E_P(type == nullptr, "GetTypeTypeArray() : GetReturnType() return nullptr."); + ObjHandle retInArray(MArray::NewObjectArray(subArrayLength, *type)); + parser.NextItem(kDefSkipItemNum); + for (uint16_t i = 0; i < subArrayLength; ++i) { + string valStr = parser.ParseStr(kDefParseStrType); + MObject *clType = reinterpret_cast(MRT_GetClassByContextClass(*annotationInfo, valStr)); + if (clType == nullptr) { + return nullptr; + } + retInArray->SetObjectElement(i, clType); + } + return retInArray.ReturnObj(); + } + + template + MObject *GetPrimiTypeArray(MClass *primType, uint8_t parseType) noexcept { + MObject *retInArray = reinterpret_cast(MRT_NewArray(subArrayLength, *primType, sizeof(jType))); + SetPrimArrayContent(retInArray, parser, subArrayLength, parseType); + return retInArray; + } + + template + MObject *GetFloatPrimiTypeArray(MClass *primType, uint8_t parseType) noexcept { + MObject *retInArray = reinterpret_cast(MRT_NewArray(subArrayLength, *primType, sizeof(jType))); + SetPrimArrayContent(retInArray, parser, subArrayLength, parseType, true); + return retInArray; + } + + MObject *GetEnumTypeArray() noexcept { + ScopedHandles sHandles; + parser.NextItem(kDefSkipItemNum); + MClass *returnType = mthdObj.GetReturnType(); + CHECK_E_P(returnType == nullptr, "GetEnumTypeArray() : GetReturnType() return nullptr."); + MClass *enumType = returnType->GetComponentClass(); + MArray *mArray = MArray::NewObjectArray(subArrayLength, *returnType); + ObjHandle retInArray(mArray); + for (int32_t j = 0; j < subArrayLength; ++j) { + string valStr = parser.ParseStr(kDefParseStrType); + FieldMeta *fieldMeta = enumType->GetDeclaredField(valStr.c_str()); + MObject *newObj = fieldMeta->GetObjectValue(nullptr); + mArray->SetObjectElementNoRc(j, newObj); + } + return reinterpret_cast(retInArray.Return()); + } + + MObject *GetBooleanTypeArray() noexcept { + MObject *retInArray = reinterpret_cast( + MRT_NewArray(subArrayLength, *WellKnown::GetMClassZ(), sizeof(jboolean))); + jboolean *p = reinterpret_cast(reinterpret_cast(retInArray)->ConvertToCArray()); + parser.NextItem(kDefSkipItemNum); + for 
(uint16_t i = 0; i < subArrayLength; ++i) { + p[i] = parser.ParseNum(kValueInt) == 1 ? JNI_TRUE : JNI_FALSE; + } + return retInArray; + } + + uint16_t GetArrayLength() const noexcept { + return subArrayLength; + } + + private: + MClass *classInfo; + AnnoParser &parser; + MClass *annotationInfo; + MethodMeta &mthdObj; + uint16_t subArrayLength; +}; + +MObject *AnnoParser::CaseArray(MClass *classInfo, MClass *annotationInfo, MethodMeta &mthdObj) { + AnnoArrayMemberFactory factory(classInfo, *this, annotationInfo, mthdObj); + if (factory.GetArrayLength() == 0) { + return factory.GetZeroLenArray(); + } + uint32_t typeInArray = static_cast(ParseNum(kValueInt)); // element type in array + switch (typeInArray) { + case kValueAnnotation: { + return factory.GetAnnotationTypeValue(); + } + case kValueString: + return factory.GetStringTypeArray(); + case kValueType: + return factory.GetTypeTypeArray(); + case kValueInt: + return factory.GetPrimiTypeArray(WellKnown::GetMClassI(), kValueInt); + case kValueShort: + return factory.GetPrimiTypeArray(WellKnown::GetMClassS(), kValueShort); + case kValueByte: + return factory.GetPrimiTypeArray(WellKnown::GetMClassB(), kValueShort); + case kValueLong: + return factory.GetPrimiTypeArray(WellKnown::GetMClassJ(), kValueLong); + case kValueFloat: + return factory.GetFloatPrimiTypeArray(WellKnown::GetMClassF(), kValueFloat); + case kValueDouble: + return factory.GetFloatPrimiTypeArray(WellKnown::GetMClassD(), kValueDouble); + case kValueEnum: + return factory.GetEnumTypeArray(); + case kValueBoolean: + return factory.GetBooleanTypeArray(); + case kValueChar: + return factory.GetPrimiTypeArray(WellKnown::GetMClassC(), kValueInt); + default: + LOG(FATAL) << "Unexpected primitive type: " << typeInArray; + } + return nullptr; +} + +MObject *AnnoParser::InvokeAnnotationParser(MObject *hashMapInst, MObject *annotationInfo) { + MClass *annotationParserCls = WellKnown::GetMClassAnnotationParser(); + MethodMeta *createMthd = annotationParserCls->GetDeclaredMethod("annotationForMap", + "(Ljava/lang/Class;Ljava/util/Map;)Ljava/lang/annotation/Annotation;"); + CHECK(createMthd != nullptr) << "__MRT_Reflect_GetCharDeclaredMethod return nullptr" << maple::endl; + jvalue xregVal[kXregValSize]; // annotationForMap method has two params + xregVal[kHashMapInstIdx].l = reinterpret_cast(hashMapInst); + xregVal[kAnnoInfoIdx].l = reinterpret_cast(annotationInfo); + MObject *ret = RuntimeStub::SlowCallCompiledMethod(createMthd->GetFuncAddress(), xregVal, 0, 0); + CHECK(ret != nullptr) << "InvokeAnnotationParser : proxyInstance null " << maple::endl; + return ret; +} + +#else + +class AnnoArrayMemberFactory { + public: + AnnoArrayMemberFactory(MClass *cls, AnnoParser &par, MClass *annoCls, ArgValue &val, MethodMeta &mthd) + : classInfo(cls), parser(par), annotationType(annoCls), argArr(val), mthdObj(mthd) { + subArrayLength = static_cast(parser.ParseNum(kValueInt)); + } + + ~AnnoArrayMemberFactory() = default; + + static void SetArray(ArgValue &xregVal, MObject *realRetArray, MClass *type) { + if (xregVal.GetGIdx() != 0) { + xregVal.AddReference(realRetArray); + xregVal.AddReference(type); + } + } + MObject *GetZeroLenArray() noexcept { + MClass *returnType = mthdObj.GetReturnType(); + CHECK_E_P(returnType == nullptr, "GetZeroLenArray() : GetReturnType() return nullptr."); + MClass *type = returnType->GetComponentClass(); // remove A + MArray *realRetArray = MArray::NewObjectArray(0, *returnType); + SetArray(argArr, reinterpret_cast(realRetArray), type); + if (argArr.GetGIdx() == 0) { + 
return realRetArray; + } + return nullptr; + } + + MObject *GetAnnotationTypeArray() noexcept { + MObject *realRetArray = parser.GenerateAnnotationTypeValue(classInfo, annotationType, subArrayLength); + MClass *retType = mthdObj.GetReturnType(); + CHECK_E_P(retType == nullptr, "GetAnnotationTypeArray() : GetReturnType() return nullptr."); + SetArray(argArr, realRetArray, retType); + constexpr size_t skipStep = 2; + parser.IncIdx(skipStep); // skip '}!' + return realRetArray; + } + + MObject *GetStringTypeArray() noexcept { + ScopedHandles sHandles; + MArray *mArray = reinterpret_cast(MRT_NewObjArray(subArrayLength, *WellKnown::GetMClassString(), nullptr)); + ObjHandle retInArray(mArray); + parser.NextItem(kDefSkipItemNum); + for (uint16_t i = 0; i < subArrayLength; ++i) { + string valStr; + if (i != subArrayLength - 1) { + valStr = parser.ParseStr(kDefParseStrType); + } else { + valStr = parser.ParseStrForLastStringArray(); + } + MString *memberVal = NewStringFromUTF16(valStr.c_str()); + mArray->SetObjectElementNoRc(i, memberVal); + } + SetArray(argArr, retInArray(), WellKnown::GetMClassAString()); + return reinterpret_cast(retInArray.Return()); + } + + MObject *GetTypeTypeArray() noexcept { + ScopedHandles sHandles; + MClass *type = mthdObj.GetReturnType(); + ObjHandle retInArray(MArray::NewObjectArray(subArrayLength, *type)); + parser.NextItem(kDefSkipItemNum); + for (uint32_t i = 0; i < subArrayLength; ++i) { + string valStr = parser.ParseStr(kDefParseStrType); + // clType is off heap + MObject *clType = reinterpret_cast(MRT_GetClassByContextClass(*annotationType, valStr)); + if (clType == nullptr) { + return nullptr; + } + retInArray->SetObjectElement(i, clType); + } + SetArray(argArr, retInArray(), WellKnown::GetMClassAClass()); + return retInArray.ReturnObj(); + } + + template + MObject *GetPrimiTypeArray(MClass *boxedType, MClass *primType, uint8_t parseType) noexcept { + MObject *retInArray = reinterpret_cast(MRT_NewArray(subArrayLength, *primType, sizeof(jType))); + SetPrimArrayContent(retInArray, parser, subArrayLength, parseType); + SetArray(argArr, retInArray, boxedType); + return retInArray; + } + + template + MObject *GetFloatPrimiTypeArray(MClass *boxedType, MClass *primType, uint8_t parseType) noexcept { + MObject *retInArray = reinterpret_cast(MRT_NewArray(subArrayLength, *primType, sizeof(jType))); + SetPrimArrayContent(retInArray, parser, subArrayLength, parseType, true); + SetArray(argArr, retInArray, boxedType); + return retInArray; + } + + MObject *GetEnumTypeArray() noexcept { + ScopedHandles sHandles; + parser.NextItem(kDefSkipItemNum); + MClass *returnType = mthdObj.GetReturnType(); + CHECK_E_P(returnType == nullptr, "GetEnumTypeArray() : GetReturnType() return nullptr."); + MClass *enumType = returnType->GetComponentClass(); + // array store exception might happen + MArray *mArray = MArray::NewObjectArray(subArrayLength, *returnType); + ObjHandle retInArray(mArray); + for (uint32_t j = 0; j < subArrayLength; ++j) { + string valStr = parser.ParseStr(kDefParseStrType); + FieldMeta *fieldMeta = enumType->GetDeclaredField(valStr.c_str()); + CHECK_E_P(fieldMeta == nullptr, "GetEnumTypeArray() : fieldMeta is nullptr."); + MObject *newObj = fieldMeta->GetObjectValue(nullptr); // skip slr + mArray->SetObjectElementNoRc(j, newObj); + } + SetArray(argArr, retInArray(), enumType); + return reinterpret_cast(retInArray.Return()); + } + + MObject *GetbooleanTypeArray() noexcept { + // parse and init boolean will not trigger GC + MArray *retInArray = reinterpret_cast( + 
MRT_NewArray(subArrayLength, *WellKnown::GetMClassZ(), sizeof(jboolean))); + jboolean *p = reinterpret_cast(retInArray->ConvertToCArray()); + parser.NextItem(kDefSkipItemNum); + for (uint32_t i = 0; i < subArrayLength; ++i) { + p[i] = parser.ParseNum(kValueInt) == 1 ? JNI_TRUE : JNI_FALSE; + } + SetArray(argArr, retInArray, WellKnown::GetMClassABoolean()); + return retInArray; + } + + uint32_t GetArrayLength() const noexcept{ + return subArrayLength; + } + + private: + MClass *classInfo; + AnnoParser &parser; + MClass *annotationType; + ArgValue &argArr; + MethodMeta &mthdObj; + uint32_t subArrayLength; +}; + +class AnnoMemberFactory { + public: + AnnoMemberFactory(MClass *annoCls, MClass *dCls, AnnoParser &p) + : annotationType(annoCls), declaringCls(dCls), parser(p) { + } + + ~AnnoMemberFactory() = default; + + MethodMeta *GetDefineMethod(const string &annoMemberName) { + MethodMeta *methodMetas = annotationType->GetMethodMetas(); + uint32_t numOfMethod = annotationType->GetNumOfMethods(); + for (uint32_t methodIndex = 0; methodIndex < numOfMethod; ++methodIndex) { + MethodMeta *methodMeta = methodMetas + methodIndex; + if (methodMeta->GetName() == annoMemberName) { + return methodMeta; + } + } + return nullptr; + } + void GetStringValAndType(ArgValue &xregVal) { + string valStr = parser.ParseStr(kDefParseStrType); + MString *memberVal = nullptr; + if (namemangler::NeedConvertUTF16(valStr)) { + memberVal = NewStringFromUTF16(valStr.c_str()); + } else { + memberVal = NewStringUTF(valStr.c_str(), valStr.size()); + } + xregVal.AddReference(memberVal); + xregVal.AddReference(WellKnown::GetMClassString()); + } + + void GetPrimValAndType(ArgValue &xregVal, uint32_t annoType) { + switch (annoType) { + case kValueChar: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJchar(parser.ParseNum(kValueShort))); + xregVal.AddReference(WellKnown::GetMClassC()); + break; + } + case kValueInt: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJint(static_cast(parser.ParseNum(kValueInt)))); + xregVal.AddReference(WellKnown::GetMClassI()); + break; + } + case kValueShort: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJshort(parser.ParseNum(kValueShort))); + xregVal.AddReference(WellKnown::GetMClassS()); + break; + } + case kValueByte: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJbyte(parser.ParseNum(kValueShort))); + xregVal.AddReference(WellKnown::GetMClassB()); + break; + } + case kValueLong: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJlong(parser.ParseNum(kValueLong))); + xregVal.AddReference(WellKnown::GetMClassJ()); + break; + } + case kValueFloat: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJfloat(parser.ParseDoubleNum(kValueFloat))); + xregVal.AddReference(WellKnown::GetMClassF()); + break; + } + case kValueDouble: { + xregVal.AddReference(primitiveutil::BoxPrimitiveJdouble(parser.ParseDoubleNum(kValueDouble))); + xregVal.AddReference(WellKnown::GetMClassD()); + break; + } + default: {} + } + } + + void GetEnumValAndType(ArgValue &xregVal, const string &annoMemberName) { + string valStr = parser.ParseStr(kDefParseStrType); + MethodMeta *method = GetDefineMethod(annoMemberName); + CHECK_E_V(method == nullptr, "GetEnumValAndType() : method is nullptr."); + MClass *retType = method->GetReturnType(); + CHECK_E_V(retType == nullptr, "GetEnumValAndType() : retType is nullptr."); + FieldMeta *fieldMeta = retType->GetDeclaredField(valStr.c_str()); + CHECK_E_V(fieldMeta == nullptr, "GetEnumValAndType() : fieldMeta is nullptr."); + 
xregVal.AddReference(fieldMeta->GetObjectValue(nullptr)); + xregVal.AddReference(retType); + } + + void GetAnnoValAndType(ArgValue &xregVal) { + uint32_t subAnnoMemberNum = static_cast(parser.ParseNum(kValueInt)); + string valStr = parser.ParseStr(kDefParseStrType); + MClass *annoInfo = MClass::JniCast(MRT_GetClassByContextClass(*declaringCls, valStr)); + CHECK(annoInfo != nullptr) << "annoInfo is nullptr" << maple::endl; + xregVal.AddReference(parser.GenerateAnnotationProxyInstance(declaringCls, annoInfo, subAnnoMemberNum)); + xregVal.AddReference(WellKnown::GetMClassAnnotation()); + } + + void GetValAndType(ArgValue &xregVal, uint32_t annoType, const string &annoMemberName) { + switch (annoType) { + case kValueString: { + GetStringValAndType(xregVal); + break; + } + case kValueChar: + case kValueInt: + case kValueShort: + case kValueByte: + case kValueLong: + case kValueFloat: + case kValueDouble: + GetPrimValAndType(xregVal, annoType); + break; + case kValueAnnotation: + GetAnnoValAndType(xregVal); + break; + case kValueArray: { + MethodMeta *defMtd = GetDefineMethod(annoMemberName); + CHECK_E_V(defMtd == nullptr, "GetValAndType() : defMtd is nullptr."); + if (parser.CaseArray(declaringCls, annotationType, xregVal, *defMtd) == nullptr) { + LOG(ERROR) << "caseArray in GenerateAnnotationMemberArray() fail" << maple::endl; + } + break; + } + case kValueType: { + string valStr = parser.ParseStr(kDefParseStrType); + xregVal.AddReference(MObject::JniCast(MRT_GetClassByContextClass(*declaringCls, valStr))); + xregVal.AddReference(MObject::JniCast(MRT_GetClassByContextClass(*declaringCls, kMrtTypeClass))); + break; + } + case kValueEnum: + GetEnumValAndType(xregVal, annoMemberName); + break; + case kValueBoolean: { + jboolean value = parser.ParseNum(kValueInt) == 1 ? 
JNI_TRUE : JNI_FALSE; + xregVal.AddReference(primitiveutil::BoxPrimitiveJboolean(value)); + xregVal.AddReference(WellKnown::GetMClassZ()); + break; + } + default: { + LOG(ERROR) << "GenerateAnnotationMemberArray decode error: " << maple::endl; + } + } + } + + MMethod *GetMethodObject(const string &annoMemberName, ArgValue &xregVal, const MethodMeta &definingMthd) { + MMethod *definingMthdObj = nullptr; + if (!MRT_IsNaiveRCCollector()) { + definingMthdObj = MMethod::NewMMethodObject(definingMthd); + } else { + CHECK_E_P(annotationType == nullptr, "AnnoMemberFactory::GetMethodObject : annotationType is nullptr"); + string annoStr = annotationType->GetAnnotation(); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), annotationType); + std::unique_ptr defParser(&annoParser); + int32_t loc = defParser->Find(parser.GetAnnoDefaultStr()); + if (loc != kNPos) { + if (!MethodDefaultUtil::HasDefaultValue(annoMemberName.c_str(), *defParser)) { + definingMthdObj = MMethod::NewMMethodObject(definingMthd); + } + } else { + definingMthdObj = MMethod::NewMMethodObject(definingMthd); + } + } + xregVal.AddReference(definingMthdObj); + return definingMthdObj; + } + + private: + MClass *annotationType; + MClass *declaringCls; + AnnoParser &parser; +}; + +MObject *AnnoParser::GenerateAnnotationMemberArray(MClass *classInfo, MClass *annotationInfo, uint32_t memberNum) { + if (memberNum == 0) { + return nullptr; + } + ScopedHandles sHandles; + ObjHandle memberArr(MRT_NewObjArray(memberNum, *WellKnown::GetMClassObject(), nullptr)); + AnnoMemberFactory factory(annotationInfo, classInfo, *this); + uint8_t constexpr memberValIdx = 2; + for (uint32_t i = 0; i < memberNum; ++i) { + string annoMemberName = ParseStr(kDefParseStrType); + MethodMeta *definingMthd = factory.GetDefineMethod(annoMemberName); + if (definingMthd == nullptr) { + return nullptr; + } + ArgValue xregVal(0); // AnnotationMember Constructor argument array + ObjHandle obj(MObject::NewObject(*WellKnown::GetMClassAnnotationMember())); + MString *memberNameJ = nullptr; + if (namemangler::NeedConvertUTF16(annoMemberName)) { + memberNameJ = NewStringFromUTF16(annoMemberName.c_str()); + } else { + memberNameJ = NewStringUTF(annoMemberName.c_str(), annoMemberName.size()); + } + ObjHandle memberNameJRef(memberNameJ); + xregVal.AddReference(obj.AsObject()); + xregVal.AddReference(memberNameJRef.AsObject()); + + factory.GetValAndType(xregVal, static_cast(ParseNum(kValueInt)), annoMemberName); + + ObjHandle memberVall(xregVal.GetReferenceFromGidx(memberValIdx)); + // Keep return method Obj in LocalRefs, as it recorded in xregVal without strong reference + ObjHandle retRef(factory.GetMethodObject(annoMemberName, xregVal, *definingMthd)); + constexpr int zeroCnst = 0; + RuntimeStub::SlowCallCompiledMethod(WellKnown::GetMMethodAnnotationMemberInitAddr(), xregVal.GetData(), + zeroCnst, zeroCnst); + memberArr->SetObjectElement(i, obj()); + } + return memberArr.ReturnObj(); +} + +MObject *AnnoParser::GenerateAnnotationProxyInstance(MClass *classInfo, MClass *annotationInfo, uint32_t memberNum) { + ScopedHandles sHandles; + ObjHandle memberArr(GenerateAnnotationMemberArray(classInfo, annotationInfo, memberNum)); + uintptr_t createFactory = WellKnown::GetMMethodAnnotationFactoryCreateAnnotationAddr(); + ArgValue xregVal(0); + xregVal.AddReference(annotationInfo); + xregVal.AddReference(memberArr()); + constexpr int zeroConst = 0; + MObject *proxyInstance = RuntimeStub::SlowCallCompiledMethod(createFactory, xregVal.GetData(), + zeroConst, zeroConst); 
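+  // proxyInstance is produced by the compiled annotation-factory create entry fetched above; a null result is treated as fatal by the CHECK below.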
+ CHECK(proxyInstance != nullptr) << "GenerateAnnotationProxyInstance : proxyInstance null " << maple::endl; + return proxyInstance; +} + + +MObject *AnnoParser::CaseArray(MClass *classInfo, MClass *annotationInfo, ArgValue &argArr, MethodMeta &mthdObj) { + AnnoArrayMemberFactory factory(classInfo, *this, annotationInfo, argArr, mthdObj); + if (factory.GetArrayLength() == 0) { + return factory.GetZeroLenArray(); + } + uint32_t typeInArray = static_cast(ParseNum(kValueInt)); // element type in array + switch (typeInArray) { + case kValueAnnotation: + return factory.GetAnnotationTypeArray(); + case kValueString: + return factory.GetStringTypeArray(); + case kValueType: + return factory.GetTypeTypeArray(); + case kValueInt: + return factory.GetPrimiTypeArray(WellKnown::GetMClassAInteger(), WellKnown::GetMClassI(), kValueInt); + case kValueShort: + return factory.GetPrimiTypeArray(WellKnown::GetMClassAShort(), WellKnown::GetMClassS(), kValueShort); + case kValueByte: + return factory.GetPrimiTypeArray(WellKnown::GetMClassAByte(), WellKnown::GetMClassB(), kValueShort); + case kValueLong: + return factory.GetPrimiTypeArray(WellKnown::GetMClassALong(), WellKnown::GetMClassJ(), kValueLong); + case kValueFloat: + return factory.GetFloatPrimiTypeArray(WellKnown::GetMClassAFloat(), + WellKnown::GetMClassF(), kValueFloat); + case kValueDouble: + return factory.GetFloatPrimiTypeArray(WellKnown::GetMClassADouble(), + WellKnown::GetMClassD(), kValueDouble); + case kValueEnum: + return factory.GetEnumTypeArray(); + case kValueBoolean: + return factory.GetbooleanTypeArray(); + case kValueChar: + return factory.GetPrimiTypeArray(WellKnown::GetMClassACharacter(), WellKnown::GetMClassC(), kValueInt); + default: + LOG(FATAL) << "Unexpected primitive type: " << typeInArray; + } + return nullptr; +} +#endif // __OPENJDK__ + +MObject *AnnoParser::GenerateAnnotationTypeValue(MClass *classInfo, const MClass *annotationInfo, + uint32_t subArrayLength) { + if (!subArrayLength) { + MObject *zeroLenArray = reinterpret_cast(MRT_NewObjArray(0, *WellKnown::GetMClassAnnotation(), nullptr)); + return zeroLenArray; + } + ScopedHandles sHandles; + string classnameInArray = ParseStrNotMove(); + MClass *annoInArray = MClass::JniCast(MRT_GetClassByContextClass(*annotationInfo, classnameInArray)); + CHECK(annoInArray != nullptr) << "annoInArray is nullptr" << maple::endl; + MArray *mArray = reinterpret_cast(MRT_NewObjArray(subArrayLength, *annoInArray, nullptr)); + ObjHandle realRetArray(mArray); + for (uint32_t i = 0; i < subArrayLength; ++i) { + NextItem(kDefSkipItemNum); + uint32_t memberNum = static_cast(ParseNum(kValueInt)); +#ifdef __OPENJDK__ + ObjHandle hashMapInst(GenerateMemberValueHashMap(classInfo, annoInArray, memberNum)); + MObject *proxyInstance = InvokeAnnotationParser(hashMapInst.AsObject(), annoInArray); +#else + MObject *proxyInstance = GenerateAnnotationProxyInstance(classInfo, annoInArray, memberNum); +#endif + mArray->SetObjectElementNoRc(i, proxyInstance); + } + return realRetArray.ReturnObj(); +} + +static void SetNewPrameter(const std::string argsBuff, uint32_t argsAccessFlag, + uint32_t i, const MArray ¶Array, MObject *obj) { + ScopedHandles sHandles; + ObjHandle js(NewStringUTF(argsBuff.c_str(), argsBuff.length())); + jvalue paraIns[kParaSize]; + paraIns[kParaNamePos].l = js.AsJObj(); + paraIns[kParaModifiersPos].j = argsAccessFlag; + paraIns[kParaExecutablePos].l = reinterpret_cast(obj); + paraIns[kParaIndexPos].j = i; + MClass *parameterClass = WellKnown::GetMClassParameter(); + MethodMeta *initMethod 
= + parameterClass->GetDeclaredConstructor("(Ljava/lang/String;ILjava/lang/reflect/Executable;I)V"); + CHECK_E_V(initMethod == nullptr, "initMethod is nullptr"); + MObject *paraObj = MObject::NewObject(*parameterClass, *initMethod, *paraIns); + paraArray.SetObjectElementNoRc(i, paraObj); +} + +MObject *AnnoParser::GetParameters0(MMethod *method) { + MethodMeta *methodMeta = method->GetMethodMeta(); + uint32_t cnt = methodMeta->GetParameterCount(); + ScopedHandles sHandles; + ObjHandle paraArray(MArray::NewObjectArray(cnt, *WellKnown::GetMClassAParameter())); + + int32_t loc = Find(GetMethodParametersStr()); + if (loc == kNPos) { + return nullptr; + } + (void)Find(kAnnoAccessFlags); + NextItem(kDefSkipItemNum); + uint32_t numParamsAccessFlags = static_cast(ParseNum(kValueInt)); + uint32_t accessFlagArray[numParamsAccessFlags]; + SkipNameAndType(); + for (uint32_t i = 0; i < numParamsAccessFlags; ++i) { + accessFlagArray[i] = static_cast(ParseNum(kValueInt)); + } + SetIdx(loc); + (void)Find("names"); + NextItem(kDefSkipItemNum); + uint32_t numParamsNames = static_cast(ParseNum(kValueInt)); + uint32_t ParamsNamesType = static_cast(ParseNum(kValueInt)); + NextItem(kDefSkipItemNum); + string namesArray[numParamsNames]; + for (uint32_t i = 0; i < numParamsNames; ++i) { + namesArray[i] = ParseStr(kDefParseStrType); + } + if (numParamsAccessFlags == 0 || numParamsNames == 0) { + return nullptr; + } + // check array sizes match each other + if (numParamsAccessFlags != numParamsNames || numParamsAccessFlags != cnt || numParamsNames != cnt) { + MRT_ThrowNewException("java/lang/IllegalArgumentException", "Inconsistent parameter metadata"); + return nullptr; + } + // Parameters information get from annotations + for (uint32_t i = 0; i < cnt; ++i) { + uint32_t argsAccessFlag = accessFlagArray[i]; + string argsName = ParamsNamesType == kValueNull ? 
"arg" + std::to_string(i) : namesArray[i]; + if (argsName.empty()) { + MRT_ThrowNewException("java/lang/IllegalArgumentException", "Inconsistent parameter metadata"); + return nullptr; + } + SetNewPrameter(argsName, argsAccessFlag, i, *paraArray.AsArray(), method); + } + return paraArray.ReturnObj(); +} + +MObject *AnnoParser::GetSignatureValue(const std::string &annSet, MClass *cls) { + AnnoParser &annoParser = AnnoParser::ConstructParser(annSet.c_str(), cls); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetSignatureClassStr()); + if (loc == kNPos) { + return nullptr; + } + parser->SkipNameAndType(); + uint32_t arrLen = static_cast(parser->ParseNum(kValueInt)); + if (arrLen == 0) { + return nullptr; + } + parser->SkipNameAndType(); + ScopedHandles sHandles; + MArray *mArray = reinterpret_cast(MRT_NewObjArray(arrLen, *WellKnown::GetMClassString(), nullptr)); + ObjHandle strArr(mArray); + for (uint32_t i = 0; i < arrLen; ++i) { + string buffStr = parser->ParseStr(true); + MString *stringObj = NewStringFromUTF16(buffStr.c_str()); + mArray->SetObjectElementNoRc(i, stringObj); + } + return strArr.ReturnObj(); +} + +void AnnoParser::NextItem(int iter) { + for (int j = 0; j < iter; ++j) { + if (annoStr[annoStrIndex] == kAnnoArrayEndDelimiter) { + ++annoStrIndex; + if (annoStr[annoStrIndex] == kAnnoDelimiter) { + ++annoStrIndex; + } + } + + for (size_t i = annoStrIndex; i < annoSize; ++i) { + if (annoStr[i] == kAnnoDelimiter) { + annoStrIndex = i; + break; + } + int leftBrackets = 0; + if (annoStr[i] == kAnnoArrayStartDelimiter && annoStr[i - 1] == kAnnoDelimiter) { + ++leftBrackets; + ++i; + while (i < annoSize) { + if (annoStr[i] == kAnnoArrayStartDelimiter && annoStr[i - 1] == kAnnoDelimiter) { + ++leftBrackets; + } else if (annoStr[i] == kAnnoArrayEndDelimiter) { + --leftBrackets; + } + if (leftBrackets != 0) { + break; + } + ++i; + } + annoStrIndex = i + 1; + break; + } + } + ++annoStrIndex; // skip delimiter '!' 
+ } +} + +void AnnoParser::SkipNameAndType(int iter) { + constexpr int skipItemNums = 2; + for (int j = 0; j < iter; ++j) { + NextItem(skipItemNums); // skip name & type + } +} + +void AnnoParser::SkipAnnoMember(uint32_t iter) { + constexpr int skipItemNums = 3; + for (uint32_t j = 0; j < iter; ++j) { + NextItem(skipItemNums); // member has 3 items + } +} + +void AnnoParser::InitAnnoMemberCntArray(uint32_t *annoMemberCntArray, uint32_t annoNum) { + for (uint32_t i = 0; i < annoNum; ++i) { + annoMemberCntArray[i] = static_cast(ParseNum(kValueInt)); + } +} + +bool AnnoParser::IsVerificationAnno(const std::string &annotName) const { + auto *mClass = reinterpret_cast(declaringClass); + if (mClass->IsVerified()) { + // if Verification is not enabled, no verification annotations will present + return false; + } + static const std::vector verificationAnnos = { + kThrowVerifyError, + kDeferredOverrideFinalCheck, + kDeferredExtendFinalCheck, + kAssignableChecksContainer, + kDeferredAssignableCheck + }; + auto cmpStr = [&annotName](const char *element) { + return annotName.compare(element) == 0; + }; + return std::any_of(verificationAnnos.begin(), verificationAnnos.end(), cmpStr); +} + +template +T AnnoParser::ParseNumImpl(int type) { + // digit , '-' , "inf", "nan" is valid + while (!isdigit(annoStr[annoStrIndex]) && annoStr[annoStrIndex] != '-' && + annoStr[annoStrIndex] != 'i' && annoStr[annoStrIndex] != 'n' && annoStrIndex < annoSize) { + ++annoStrIndex; + } + size_t numStartIdx = annoStrIndex; + for (size_t i = annoStrIndex; i < annoSize; ++i) { + if (annoStr[i] == kAnnoDelimiter) { + annoStrIndex = i; + ++annoStrIndex; // skip delimiter '!' + break; + } + } + switch (type) { + case kValueFloat: + case kValueDouble: + return static_cast(atof(annoStr + numStartIdx)); + case kValueInt: + case kValueShort: + return static_cast(atoi(annoStr + numStartIdx)); + case kValueLong: + return static_cast(atoll(annoStr + numStartIdx)); + default: + LOG(ERROR) << " ParseNum type error" << maple::endl; + return 0; + } +} + +int32_t AnnoParser::Find(char target) { + while (annoStrIndex < annoSize) { + if (annoStr[annoStrIndex] == target) { + ++annoStrIndex; + return static_cast(annoStrIndex); + } + ++annoStrIndex; + } + return kNPos; +} + +char *AnnoParser::GetCStringFromStrTab(uint32_t srcIndex) const { + char *cStrStart = nullptr; + constexpr int32_t realIndexStart = 2; + uint32_t index = srcIndex & 0xFFFFFFFF; + // 0x03 is 0011, index & 0x03 is to check isHotReflectStr. 
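+  // The low two bits choose between the hot string tables and the cold table; the remaining bits (index >> 2) are the offset into the selected table.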
+ bool isHotReflectStr = (index & 0x03) != 0; + uint32_t cStrIndex = index >> realIndexStart; + if (isHotReflectStr) { + uint32_t tag = (index & 0x03) - maple::kCStringShift; + if (tag == static_cast(maple::kLayoutBootHot)) { + cStrStart = strTab->startHotStrTab; + } else if (tag == static_cast(maple::kLayoutBothHot)) { + cStrStart = strTab->bothHotStrTab; + } else { + cStrStart = strTab->runHotStrTab; + } + } else { + cStrStart = strTab->coldStrTab; + } + if (cStrStart == nullptr) { + return nullptr; + } + return cStrStart + cStrIndex; +} + +string AnnoIndexParser::ParseStr(bool isSN __attribute__((unused))) { + while (annoStr[annoStrIndex] != kAnnoDelimiterPrefix) { + ++annoStrIndex; + } + return ParseStrImpl(); +} + +string AnnoIndexParser::GetParameterAnnotationInfoIndex(const std::string &entireStr) { + size_t size = entireStr.size(); + if (size != 0) { + for (size_t i = size - 1; i > 0; --i) { + if (entireStr[i] == '|') { + return entireStr.substr(i - 1); + } + } + } + return ""; +} + +void AnnoIndexParser::NextItem(int iter) { + for (int j = 0; j < iter; ++j) { + if (annoStr[annoStrIndex] == kAnnoArrayEndDelimiterIndex) { + ++annoStrIndex; + if (annoStr[annoStrIndex] == kAnnoDelimiter) { + ++annoStrIndex; + } + } + for (size_t i = annoStrIndex; i < annoSize; ++i) { + if (annoStr[i] == kAnnoDelimiter) { + annoStrIndex = i; + break; + } + int leftBrackets = 0; + if (annoStr[i] == kAnnoArrayStartDelimiterIndex) { + ++leftBrackets; + ++i; + while (i < annoSize) { + if (annoStr[i] == kAnnoArrayStartDelimiterIndex) { + ++leftBrackets; + } else if (annoStr[i] == kAnnoArrayEndDelimiterIndex) { + --leftBrackets; + } + if (!leftBrackets) { + break; + } + ++i; + } + annoStrIndex = i + 1; + break; + } + } + ++annoStrIndex; // skip delimiter '!' + } +} + +uint32_t AnnoIndexParser::ParseIndex() { + constexpr uint32_t base = 10; + int tmp = 0; + uint32_t index = 0; + for (size_t i = annoStrIndex; i < annoSize; ++i) { + if (annoStr[i] != kAnnoDelimiter && isdigit(annoStr[i])) { + tmp = annoStr[i] - '0'; + index *= base; + index += static_cast(tmp); + } else { + annoStrIndex = i; + while (annoStr[annoStrIndex] != kAnnoDelimiter) { + ++annoStrIndex; + } + ++annoStrIndex; // skip delimiter '!' + break; + } + } + return index; +} + +string AnnoIndexParser::ParseStrImpl() { + if (annoStr[annoStrIndex] == kAnnoDelimiterPrefix) { + ++annoStrIndex; + uint32_t strIdx = ParseIndex(); + --annoStrIndex; + if (declaringClass != nullptr && declaringClass->IsProxy()) { + if (declaringClass->GetNumOfSuperClasses() >= 1) { + declaringClass = declaringClass->GetSuperClassArray()[1]; + } else { + ++annoStrIndex; // skip delimiter '!' + return ""; + } + } + char *cPtr = nullptr; + if (declaringClass != nullptr) { + cPtr = GetCStringFromStrTab(strIdx); + } + ++annoStrIndex; // skip delimiter '!' + return cPtr == nullptr ? interpreter::GetStringFromInterpreterStrTable(static_cast(strIdx)) : cPtr; + } + ++annoStrIndex; // skip delimiter '!' 
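+  // Reaching this point means the token did not begin with the expected index prefix; report it and return an empty string.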
+ LOG(ERROR) << "Annotation ParseStrImpl Exception" << annoStr << maple::endl; + return ""; +} + +int32_t AnnoIndexParser::Find(const string &target) { + while (annoStrIndex < annoSize) { + if (annoStr[annoStrIndex] != kAnnoDelimiterPrefix) { + ++annoStrIndex; + continue; + } + if (ParseStr(kDefParseStrType) == target) { + return static_cast(annoStrIndex); + } + } + return kNPos; +} + +int32_t AnnoAsciiParser::Find(const string &target) { + string str = annoStr; + size_t loc = str.find(target, annoStrIndex); + if (loc == string::npos) { + return kNPos; + } + annoStrIndex = static_cast(loc + target.size()); + return static_cast(++annoStrIndex); +} + +string AnnoAsciiParser::ParseStrImpl() { + uint8_t endLable = 0; + string retArr = ""; + if (annoStr[annoStrIndex] == kAnnoDelimiterPrefix && annoStrIndex + 1 < annoSize && + isdigit(annoStr[annoStrIndex + 1])) { + ++annoStrIndex; + uint32_t strIdx = static_cast(ParseNum(kValueInt)); + --annoStrIndex; + if (declaringClass->IsProxy()) { + if (declaringClass->GetNumOfSuperClasses() >= 1) { + declaringClass = declaringClass->GetSuperClassArray()[1]; + } else { + ++annoStrIndex; // skip delimiter '!' + return retArr; + } + } + retArr = LinkerAPI::Instance().GetCString(*declaringClass, strIdx); + ++annoStrIndex; // skip delimiter '!' + return retArr; + } + for (size_t i = annoStrIndex; i < annoSize; ++i) { + if (annoStr[i] == kAnnoDelimiterPrefix && i + 1 < annoSize && (annoStr[i + 1] == kAnnoDelimiter || + annoStr[i + 1] == '|' || annoStr[i + 1] == kAnnoDelimiterPrefix)) { + retArr += annoStr[i + 1]; + ++i; + continue; + } + if (annoStr[i] != kAnnoDelimiter) { + retArr += annoStr[i]; + } else { + annoStrIndex = i; + endLable = 1; + break; + } + } + if (endLable == 0) { + annoStrIndex = annoSize - 1; + } + while (*retArr.rbegin() == kAnnoArrayEndDelimiter) { + retArr.pop_back(); + } + ++annoStrIndex; // skip delimiter '!' + return retArr; +} + +std::string AnnoAsciiParser::ParseStr(bool isSN) { + if (!isSN) { + if (annoStr[annoStrIndex] == kAnnoArrayStartDelimiter || + annoStr[annoStrIndex] == kAnnoArrayEndDelimiter) { + ++annoStrIndex; + if (annoStr[annoStrIndex] == kAnnoDelimiter) { + ++annoStrIndex; + } + } + } + return ParseStrImpl(); +} + +string AnnoAsciiParser::GetParameterAnnotationInfoAscii(const std::string &entireStr) { + size_t size = entireStr.size(); + if (size != 0) { + for (size_t i = size - 1; i > 0; --i) { + if (entireStr[i] == '|' && (i - 1 < size) && entireStr[i - 1] != kAnnoDelimiterPrefix) { + return entireStr.substr(i - 1); + } + } + } + return ""; +} +bool AnnoIndexParser::ExceptAnnotationJudge(const std::string &annoName) const { + if (exceptIndexSet.find(annoName) != exceptIndexSet.end()) { + return true; + } + return false; +} + +bool AnnoAsciiParser::ExceptAnnotationJudge(const std::string &kAnnoName) const { + if (exceptAsciiSet.find(kAnnoName) != exceptAsciiSet.end()) { + return true; + } + return false; +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_array.cpp b/src/mrt/maplert/src/mrt_array.cpp new file mode 100644 index 0000000000..3932eeb7fa --- /dev/null +++ b/src/mrt/maplert/src/mrt_array.cpp @@ -0,0 +1,330 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_array.h" +#include "mrt_primitive_util.h" +#include "mclass_inline.h" + +namespace maplert { +// new primitive array +jobject MRT_NewArray(jint length, jclass elementClass, jint componentSize __attribute__((unused))) { + MClass *elementCls = MClass::JniCastNonNull(elementClass); + MArray *arrayObject = MArray::NewPrimitiveArrayComponentClass(length, *elementCls); + return arrayObject->AsJobject(); +} + +// new primitive array, pType is element Type +jobject MRT_NewPrimitiveArray(jint length, maple::Primitive::Type pType, + jint componentSize __attribute__((unused)), jboolean isJNI) { + MClass *arrayJClass = WellKnown::GetPrimitiveArrayClass(pType); + DCHECK(arrayJClass != nullptr) << "arrayJClass is nullptr." << maple::endl; + MArray *arrayObject = MArray::NewPrimitiveArray(length, *arrayJClass, isJNI); + return arrayObject->AsJobject(); +} + +// new object array +jobject MRT_NewObjArray(const jint length, const jclass elementClass, const jobject initialElement) { + ScopedHandles sHandles; + const MClass *elementCls = MClass::JniCastNonNull(elementClass); + MArray *arrayObject = MArray::NewObjectArrayComponentClass(length, *elementCls); + ObjHandle arrayObjectRef(arrayObject); + if (UNLIKELY(initialElement != nullptr)) { + // might possible throw ArrayStoreException, need push slr + arrayObject->InitialObjectArray(MObject::JniCast(initialElement)); + } + return arrayObjectRef.ReturnObj()->AsJobject(); +} + +void *MRT_JavaArrayToCArray(jarray javaArray) { + MArray *arrayObject = MArray::JniCastNonNull(javaArray); + return arrayObject->ConvertToCArray(); +} + +jboolean MRT_IsArray(jobject javaArray) { + MObject *o = MObject::JniCastNonNull(javaArray); + return o->IsArray(); +} + +jboolean MRT_IsObjectArray(jobject javaArray) { + MObject *o = MObject::JniCastNonNull(javaArray); + return o->IsObjectArray(); +} + +jboolean MRT_IsPrimitveArray(jobject javaArray) { + MObject *o = MObject::JniCastNonNull(javaArray); + return o->IsPrimitiveArray(); +} + +jboolean MRT_IsMultiDimArray(jobject javaArray) { + MObject *o = MObject::JniCastNonNull(javaArray); + MClass *arrayClass = o->GetClass(); + MClass *arrayCompClass = arrayClass->GetComponentClass(); + if (UNLIKELY(arrayCompClass == nullptr)) { + return false; + } + return arrayCompClass->IsArrayClass(); +} + +// ArrayIndexOutOfBoundsException: if index does not specify a valid index in the array. +jobject MRT_GetObjectArrayElement(jobjectArray javaArray, jsize index, jboolean maintainRC) { + MArray *arrayObject = MArray::JniCastNonNull(javaArray); + MObject *elementObject = maintainRC ? 
+ arrayObject->GetObjectElement(index) : arrayObject->GetObjectElementNoRc(index); + return elementObject->AsJobject(); +} + +jint MRT_GetArrayContentOffset(void) { + return MArray::GetArrayContentOffset(); +} + +static void SetJValueByArrayPrimitiveElement(MArray &arrayObject, const jsize index, jvalue &desVal, const char type) { + switch (type) { + case 'Z': + desVal.z = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'C': + desVal.c = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'F': + desVal.f = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'D': + desVal.d = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'B': + desVal.b = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'S': + desVal.s = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'I': + desVal.i = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + case 'J': + desVal.j = arrayObject.GetPrimitiveElement(static_cast(index)); + break; + default: + BUILTIN_UNREACHABLE(); + } +} + +void MRT_SetObjectArrayElement( + jobjectArray javaArray, jsize index, jobject javaValue, jboolean maintainRC __attribute__((unused))) { + MArray *arrayObject = MArray::JniCastNonNull(javaArray); + MObject *o = MObject::JniCast(javaValue); + arrayObject->SetObjectElement(index, o); +} + +jobject MRT_GetArrayElement(jobjectArray javaArray, jsize index, jboolean maintain) { + MArray *arrayObject = MArray::JniCast(javaArray); + if (UNLIKELY(arrayObject == nullptr)) { + return nullptr; + } + int length = arrayObject->GetLength(); + if (UNLIKELY(index < 0 || index >= length)) { + return nullptr; + } + + MObject *res = nullptr; + if (arrayObject->IsObjectArray()) { + res = maintain ? 
arrayObject->GetObjectElement(index) : arrayObject->GetObjectElementNoRc(index); + } else { + jvalue desVal; + desVal.l = 0; + MClass *javaArrayClass = arrayObject->GetClass(); + MClass *arrayCompClass = javaArrayClass->GetComponentClass(); + char type = primitiveutil::GetPrimitiveType(*arrayCompClass); + SetJValueByArrayPrimitiveElement(*arrayObject, index, desVal, type); + res = primitiveutil::BoxPrimitive(type, desVal); + } + return res->AsJobject(); +} + +void MRT_SetArrayElement(jobjectArray javaArray, jsize index, jobject value) { + MArray *arrayObject = MArray::JniCast(javaArray); + MObject *javaValue = MArray::JniCast(value); + if (UNLIKELY(arrayObject == nullptr)) { + return; + } + uint32_t length = arrayObject->GetLength(); + if (UNLIKELY(index < 0 || index >= static_cast(length))) { + return; + } + if (arrayObject->IsObjectArray()) { + arrayObject->SetObjectElement(index, javaValue); + } else { + jvalue primitiveValue; + __MRT_ASSERT(javaValue != nullptr, "MRT_SetArrayElement: javaValue is a null ptr!"); + bool unBoxRet = primitiveutil::UnBoxPrimitive(*javaValue, primitiveValue); + if (unBoxRet == true) { + MClass *arrClass = arrayObject->GetClass(); + MClass *arrCompClass = arrClass->GetComponentClass(); + char arrType = primitiveutil::GetPrimitiveType(*arrCompClass); + MRT_SetPrimitiveArrayElement(javaArray, static_cast(index), primitiveValue, arrType); + } + } + return; +} + +jint MRT_GetArrayElementCount(jarray ja) { + MArray *arrayObject = MArray::JniCastNonNull(ja); + return arrayObject->GetLength(); +} + +// openjdk for primitiveArrayElement +jvalue MRT_GetPrimitiveArrayElement(jarray arr, jint index, char arrType) { + jvalue desVal; + MArray *arrayObject = MArray::JniCastNonNull(arr); + bool isPrimitiveArray = arrayObject->IsPrimitiveArray(); + if (UNLIKELY(isPrimitiveArray == false)) { + desVal.l = nullptr; + return desVal; + } + int length = arrayObject->GetLength(); + if (UNLIKELY(index < 0 || index >= length)) { + desVal.l = nullptr; + return desVal; + } + SetJValueByArrayPrimitiveElement(*arrayObject, index, desVal, arrType); + return desVal; +} + +jboolean MRT_TypeWidenConvertCheckObject(jobject val) { + MObject *mVal = MObject::JniCastNonNull(val); + MClass *valueClass = mVal->GetClass(); + char valueType = primitiveutil::GetPrimitiveTypeFromBoxType(*valueClass); + return valueType == 'N' ? 
true : false; +} + +jboolean MRT_TypeWidenConvertCheck(char currentType, char wideType, const jvalue& srcVal, jvalue &dstVal) { + bool checkRet = primitiveutil::ConvertNarrowToWide(currentType, wideType, srcVal, dstVal); + return checkRet; +} + +char MRT_GetPrimitiveType(jclass clazz) { + MClass *klass = MClass::JniCastNonNull(clazz); + return primitiveutil::GetPrimitiveType(*klass); +} + +char MRT_GetPrimitiveTypeFromBoxType(jclass clazz) { + MClass *klass = MClass::JniCastNonNull(clazz); + char dstType = primitiveutil::GetPrimitiveTypeFromBoxType(*klass); + return dstType; +} + +void MRT_SetPrimitiveArrayElement(jarray arr, jint index, jvalue value, char arrType) { + MArray *arrayObject = MArray::JniCastNonNull(arr); + bool isPrimitiveArray = arrayObject->IsPrimitiveArray(); + if (UNLIKELY(isPrimitiveArray == false)) { + return; + } + uint32_t length = arrayObject->GetLength(); + if (UNLIKELY(index < 0 || index >= static_cast(length))) { + return; + } + switch (arrType) { + case 'Z': + arrayObject->SetPrimitiveElement(index, value.z); + break; + case 'C': + arrayObject->SetPrimitiveElement(index, value.c); + break; + case 'F': + arrayObject->SetPrimitiveElement(index, value.f); + break; + case 'D': + arrayObject->SetPrimitiveElement(index, value.d); + break; + case 'B': + arrayObject->SetPrimitiveElement(index, value.b); + break; + case 'S': + arrayObject->SetPrimitiveElement(index, value.s); + break; + case 'I': + arrayObject->SetPrimitiveElement(index, value.i); + break; + case 'J': + arrayObject->SetPrimitiveElement(index, value.j); + break; + default: + LOG(ERROR) << "MRT_SetPrimitiveArrayElement array type not known" << maple::endl; + } + return; +} + +static MObject *RecursiveCreateMultiArray(const MClass &arrayClass, const int currentDimension, + const int dimensions, uint32_t *dimArray) { + DCHECK(dimArray != nullptr) << "RecursiveCreateMultiArray: dimArray is nullptr!" << maple::endl; + MClass *componentClass = arrayClass.GetComponentClass(); + DCHECK(componentClass != nullptr) << "RecursiveCreateMultiArray: componentClass is nullptr!" << maple::endl; + if (currentDimension == dimensions - 1) { + return componentClass->IsPrimitiveClass() ? 
MArray::NewPrimitiveArray(dimArray[currentDimension], arrayClass) + : MArray::NewObjectArray(dimArray[currentDimension], arrayClass); + } + ScopedHandles sHandles; + MArray *array = MArray::NewObjectArray(dimArray[currentDimension], arrayClass); + if (array == nullptr) { + return nullptr; + } + ObjHandle res(array); + for (uint32_t j = 0; j < dimArray[currentDimension]; j++) { + MObject *element = RecursiveCreateMultiArray(*componentClass, currentDimension + 1, dimensions, dimArray); + if (element == nullptr) { + return nullptr; + } + res->SetObjectElementNoRc(j, element); + } + return res.ReturnObj(); +} + +jobject MRT_RecursiveCreateMultiArray(const jclass arrayClass, const jint currentDimension, + const jint dimensions, jint* dimArray) { + const MClass *mArrayClass = MClass::JniCastNonNull(arrayClass); + MObject *res = RecursiveCreateMultiArray(*mArrayClass, currentDimension, + dimensions, reinterpret_cast(dimArray)); + return res->AsJobject(); +} + +void MRT_ObjectArrayCopy(address_t javaSrc, address_t javaDst, jint srcPos, jint dstPos, jint length, bool check) { + Collector::Instance().ObjectArrayCopy(javaSrc, javaDst, srcPos, dstPos, length, check); +} + +void ThrowArrayStoreException(const MObject &srcComponent, int index, const MClass &dstComponentType) { + std::ostringstream msg; + std::string srcBinaryName; + std::string dstBinaryName; + srcComponent.GetClass()->GetBinaryName(srcBinaryName); + dstComponentType.GetBinaryName(dstBinaryName); + msg << "source[" << std::to_string(index) << "] of type " << srcBinaryName << + " cannot be stored in destination array of type " << dstBinaryName << "[]"; + MRT_ThrowNewExceptionUnw("java/lang/ArrayStoreException", msg.str().c_str()); +} + +bool AssignableCheckingObjectCopy(const MClass &dstComponentType, MClass *&lastAssignableComponentType, + const MObject *srcComponent) { + if (srcComponent != nullptr) { + MClass *srcComponentType = srcComponent->GetClass(); + if (LIKELY(srcComponentType == lastAssignableComponentType)) { + } else if (LIKELY(dstComponentType.IsAssignableFrom(*srcComponentType))) { + lastAssignableComponentType = srcComponentType; + } else { + return false; + } + return true; + } else { + return true; + } +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_class_init.cpp b/src/mrt/maplert/src/mrt_class_init.cpp new file mode 100644 index 0000000000..0495c43458 --- /dev/null +++ b/src/mrt/maplert/src/mrt_class_init.cpp @@ -0,0 +1,489 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_class_init.h" +#include +#include +#include +#include +#include +#include + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include "libs.h" +#include "object_base.h" +#include "exception/mrt_exception.h" +#include "allocator/mem_map.h" +#include "verifier.h" + +namespace maplert { +class ClinitLog { + static std::mutex logMtx; + + public: + static std::ostringstream logStream; + + ClinitLog() { + logMtx.lock(); + } + + ~ClinitLog() { + logMtx.unlock(); + } + + std::ostream &Stream() { + return logStream; + } +}; + +std::ostringstream ClinitLog::logStream; +std::mutex ClinitLog::logMtx; + +#ifdef __cplusplus +extern "C" { +#endif + +__attribute__((aligned(4096), visibility("default"))) +uint8_t classInitProtectRegion[kPageSize]; + +void MRT_InitProtectedMemoryForClinit() { + int err = mprotect(&classInitProtectRegion, kPageSize, PROT_NONE); + if (err != 0) { + EHLOG(FATAL) << "failed to protect classInitProtectRegion" << maple::endl; + } + + MemMap::Option opt = MemMap::kDefaultOptions; + opt.tag = "maple_clinited_state"; + opt.lowestAddr = kClInitStateAddrBase; + opt.highestAddr = kClInitStateAddrBase + kPageSize; + opt.prot = PROT_READ; + MemMap *memMap = MemMap::CreateMemMapAtExactAddress(reinterpret_cast(opt.lowestAddr), kPageSize, opt); + if (memMap == nullptr || reinterpret_cast(memMap->GetBaseAddr()) != opt.lowestAddr) { + EHLOG(FATAL) << "failed to initialize memory page for ClassInitState::kClassInitialized" << maple::endl; + } +} + +static ClassInitState TryInitClass(const MClass &classObj, bool recursive); +static void RecordKlassInitFailed(const MClass &klass); +static void RecordKlassInitializing(const MClass &klass); +static void RecordKlassInitialized(const MClass &klass); +static void CallClinit(uintptr_t clinitFuncAddr, MClass &klass); +static void RecordNoClinitFuncState(MClass &klass); +bool MRT_ClassInitialized(const jclass klass) { + const MClass *classInfo = MClass::JniCast(klass); + return classInfo->IsInitialized(); +} + +static atomic classCountEnable; +struct ClassCountEnableInit { + public: + ClassCountEnableInit() { + classCountEnable.store(VLOG_IS_ON(classinit), std::memory_order_relaxed); + } +}; + +ClassCountEnableInit classCountEnableInit; + +extern "C" void MRT_ClinitEnableCount(bool enable) { + if (enable) { + classCountEnable.store(1, std::memory_order_relaxed); + } else { + classCountEnable.store(0, std::memory_order_relaxed); + } +} + +inline int MrtGetClassCountEnable() { + return classCountEnable.load(std::memory_order_relaxed); +} + +// only initialize its super class and interfaces, excluding itself. +static void InitSuperClasses(MClass &mrtClass, bool recursive) { + MClass *klass = &mrtClass; + uint32_t numOfSuperClass = klass->GetNumOfSuperClasses(); + MClass **superArray = klass->GetSuperClassArray(); + if (superArray == nullptr || !recursive) { + return; + } + + for (uint32_t i = 0; i < numOfSuperClass; ++i) { + MClass *superKlass = superArray[i]; + if (UNLIKELY(superKlass == nullptr)) { + MRT_ThrowNoClassDefFoundError("Failed to init super class, maybe it is not defined!"); + return; + } + if (superKlass->IsInterface()) { + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t2. 
" << klass->GetName() << ": run for super interface " << + superKlass->GetName() << maple::endl; + } + + // initialize the interface that declares a non-abstract, non-static method + std::vector methodsVector; + superKlass->GetDeclaredMethods(methodsVector, false); + bool needclinit = std::any_of(methodsVector.cbegin(), methodsVector.cend(), [](const MethodMeta *elementObj) { + return elementObj->IsDefault(); + }); + if (needclinit) { + (void)TryInitClass(*superKlass, true); + } else { + InitSuperClasses(*superKlass, true); + } + } else { + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t2. " << klass->GetName() << ": run for super class " << + superKlass->GetName() << maple::endl; + } + (void)TryInitClass(*superKlass, true); + } + } +} + +static ClassInitState InitClassImpl(MClass &mrtclass, bool recursive) { + MClass *klass = &mrtclass; +#ifdef LINKER_LAZY_BINDING + // We link the class forwardly when clinit. + if (klass->IsLazyBinding() && LinkerAPI::Instance().GetLinkerMFileInfoByAddress(klass, true) != nullptr) { + VLOG(lazybinding) << "InitClassImpl(), link lazily for " << klass->GetName() << + ", lazy=" << klass->IsLazyBinding() << ", cold=" << klass->IsColdClass() << maple::endl; + (void)LinkerAPI::Instance().LinkClassLazily(reinterpret_cast(klass)); + } +#endif // LINKER_LAZY_BINDING + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t0. " << klass->GetName() << ": try " << maple::endl; + } + // class info won't free, so don't add ScopedObjectAccess + if (maple::ObjectBase::MonitorEnter(reinterpret_cast(klass)) == JNI_ERR) { + EHLOG(ERROR) << "maple::ObjectBase::MonitorEnter in InitClassImpl() fail" << maple::endl; + } + + // after we obtain the lock, we should double-check init state + ClassInitState state = klass->GetInitState(); + // when state is failure, we should unlock + if (state == kClassInitFailed) { + RecordKlassInitFailed(*klass); + return state; + } + if (state == kClassInitializing) { + RecordKlassInitializing(*klass); + return state; + } + if (state == kClassInitialized) { + RecordKlassInitialized(*klass); + return state; + } + + klass->SetInitState(kClassInitializing); + + InitSuperClasses(*klass, recursive); + if (UNLIKELY(MRT_HasPendingException())) { + klass->SetInitState(kClassInitFailed); + // pending exception indicates what the problem is + EHLOG(ERROR) << klass->GetName() << " failed to be initialized due to pending exception" << maple::endl; + } else { + // find and prepare to call if existed + uintptr_t clinitFuncAddr = klass->GetClinitFuncAddr(); + if (clinitFuncAddr != 0) { + CallClinit(clinitFuncAddr, *klass); + } else { + RecordNoClinitFuncState(*klass); + } + } + + if (maple::ObjectBase::MonitorExit(reinterpret_cast(klass)) == JNI_ERR) { + EHLOG(ERROR) << "maple::ObjectBase::MonitorExit in InitClassImpl() fail" << maple::endl; + } + + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t4. " << klass->GetName() << ": run successfully" << maple::endl; + } + return klass->GetInitState(); +} + + +// Record the state of klass init failed and throw exception +static void RecordKlassInitFailed(const MClass &klass) { + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t1. 
" << klass.GetName() << ": running failure" << maple::endl; + } + if (maple::ObjectBase::MonitorExit(reinterpret_cast(const_cast(&klass))) == JNI_ERR) { + LOG(ERROR) << "maple::ObjectBase::MonitorExit in InitClassImpl() return false" << maple::endl; + } + std::string msg; + klass.GetTypeName(msg); + msg.insert(0, "Could not initialize class "); + MRT_ThrowNoClassDefFoundError(msg); +} + +// Record the state of klass initializing +static void RecordKlassInitializing(const MClass &klass) { + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t1. " << klass.GetName() << ": running recursively" << maple::endl; + } + if (maple::ObjectBase::MonitorExit(reinterpret_cast(const_cast(&klass))) == JNI_ERR) { + LOG(ERROR) << "maple::ObjectBase::MonitorExit in InitClassImpl() return false" << maple::endl; + } +} + +// Record the state of klass initialized +static void RecordKlassInitialized(const MClass &klass) { + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t1. " << klass.GetName() << ": succeeded in other thread" << maple::endl; + } + if (maple::ObjectBase::MonitorExit(reinterpret_cast(const_cast(&klass))) == JNI_ERR) { + LOG(ERROR) << "maple::ObjectBase::MonitorExit in InitClassImpl() return false" << maple::endl; + } +} + +// Call clinit function when exists +static void CallClinit(uintptr_t clinitFuncAddr, MClass &klass) { + uint64_t clinitStartTime = 0; + uint64_t clinitEndTime = 0; + if (VLOG_IS_ON(classinit) || MrtGetClassCountEnable()) { + clinitStartTime = timeutils::ThreadCpuTimeNs(); + } + // Interp + if (clinitFuncAddr & 0x01) { + MethodMeta *clinitMethodMeta = reinterpret_cast(&klass)->GetClinitMethodMeta(); + if (clinitMethodMeta != nullptr) { + // clinit method return void + (void)clinitMethodMeta->InvokeJavaMethodFast(reinterpret_cast(&klass)); + } + } else { + // clinit is a java method, so we use maplert::RuntimeStub<>::FastCallCompiledMethod. + maplert::RuntimeStub::FastCallCompiledMethod(clinitFuncAddr); + } + if (VLOG_IS_ON(classinit) || MrtGetClassCountEnable()) { + clinitEndTime = timeutils::ThreadCpuTimeNs(); + } + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t3. " << klass.GetName() << ": run cost time (ns) " << + (clinitEndTime - clinitStartTime) << maple::endl; + } + if (UNLIKELY(MRT_HasPendingException())) { + klass.SetInitState(kClassInitFailed); + // pending exception indicates what the problem is + LOG(ERROR) << klass.GetName() << " failed to be initialized due to pending exception" << maple::endl; + } else { + // tag this class is initialized already if no pending exception. + // any readable address is ok for now. + ClassInitState initState = klass.GetInitState(); + if (initState == kClassInitializing) { + klass.SetInitStateRawValue(reinterpret_cast(&klass)); + } else { + LOG(FATAL) << "class init state has been modified from kClassInitializing to " << initState << maple::endl; + } + } +} + +// Record no clinit func state +static void RecordNoClinitFuncState(MClass &klass) { + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t\t1. 
" << klass.GetName() << ": no " << maple::endl; + } + ClassInitState initState = klass.GetInitState(); + if (initState == kClassInitializing) { + klass.SetInitStateRawValue(reinterpret_cast(&klass)); + } else { + LOG(FATAL) << "class init state has been modified from kClassInitializing to " << initState << maple::endl; + } +} + +__thread std::vector *classTnitStartTimeStack = nullptr; +__thread uint64_t classInitCurStartTime = 0; +static int64_t classInitTotalTime = 0; +static int64_t classInitTotalCount = 0; +static int64_t classInitTotalTryCount = 0; + +static int64_t clinitCheckTotalCount = 0; + +static inline int64_t IncClassInitTotalTime(int64_t delta) { + return __atomic_add_fetch(&classInitTotalTime, delta, __ATOMIC_ACQ_REL); +} + +static inline int64_t IncClassInitTotalCount(int64_t count) { + return __atomic_add_fetch(&classInitTotalCount, count, __ATOMIC_ACQ_REL); +} + +static inline int64_t IncClassInitTotalTryCount(int64_t count) { + return __atomic_add_fetch(&classInitTotalTryCount, count, __ATOMIC_ACQ_REL); +} + +static inline int64_t IncClinitCheckTotalCount(int64_t count) { + return __atomic_add_fetch(&clinitCheckTotalCount, count, __ATOMIC_ACQ_REL); +} + +extern "C" int64_t MRT_ClinitGetTotalTime() { + return classInitTotalTime; +} + +extern "C" int64_t MRT_ClinitGetTotalCount() { + return classInitTotalCount; +} + +extern "C" void MRT_ClinitResetStats() { + classInitTotalTime = 0; + classInitTotalCount = 0; +} + + +extern "C" void MCC_PreClinitCheck(ClassMetadata &classInfo __attribute__((unused))) { + int64_t totalCount = IncClinitCheckTotalCount(1); + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "-- clinit-check total count " << totalCount << maple::endl; + } +} + +extern "C" void MCC_PostClinitCheck(ClassMetadata &classInfo __attribute__((unused))) {} + +static void PreInitClass(const MClass &classInfo) { + if (!(VLOG_IS_ON(classinit) || MrtGetClassCountEnable())) { + return; + } + + int64_t totalCount = IncClassInitTotalCount(1); + + if (classTnitStartTimeStack == nullptr) { + classTnitStartTimeStack = new std::vector(); + } + classTnitStartTimeStack->push_back(classInitCurStartTime); + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t- init-class " << classInfo.GetName() << + " start, recursive depth " << classTnitStartTimeStack->size() << + ", total init count " << totalCount << maple::endl; + } + classInitCurStartTime = timeutils::ThreadCpuTimeNs(); +} + +static void PostInitClass(const MClass &classInfo) { + if (!(VLOG_IS_ON(classinit) || MrtGetClassCountEnable())) { + return; + } + + uint64_t curEndTime = timeutils::ThreadCpuTimeNs(); + int64_t cpuTime = curEndTime - classInitCurStartTime; + if (VLOG_IS_ON(classinit)) { + ClinitLog().Stream() << "\t- init-class " << classInfo.GetName() << + " end, recursive depth " << classTnitStartTimeStack->size() << ", cost time (ns) " << cpuTime << maple::endl; + } + if (!classTnitStartTimeStack->empty()) { + classInitCurStartTime = classTnitStartTimeStack->back(); + classTnitStartTimeStack->pop_back(); + } + if (classTnitStartTimeStack->size() == 0) { + int64_t totalTime = IncClassInitTotalTime (cpuTime); + ClinitLog().Stream() << "\t- init-class total time (ns) " << totalTime << maple::endl; + } +} + +static ClassInitState TryInitClass(const MClass &classObj, bool recursive) { + ClassInitState state = classObj.GetInitState(); + switch (state) { + case kClassUninitialized: + case kClassInitializing: { + maplert::ScopedObjectAccess soa; + PreInitClass(classObj); + ClassInitState newState = 
InitClassImpl(const_cast(classObj), recursive); + PostInitClass(classObj); + return newState; + } + case kClassInitFailed: { + std::string msg; + classObj.GetTypeName(msg); + msg.insert(0, "Could not initialize class "); + // when a class initialization has failed (usually an exception is raised in earlier) + MRT_ThrowNoClassDefFoundError(msg); + return state; + } + default: // default to "already initialized" + return state; + } +} + +ClassInitState MRT_TryInitClass(const MClass &classInfo, bool recursive) { + if (VLOG_IS_ON(classinit) || MrtGetClassCountEnable()) { + int64_t totalCount = IncClassInitTotalTryCount(1); + ClinitLog().Stream() << "\ttry-init-class " << classInfo.GetName() << + ", total try-init count " << totalCount << maple::endl; + } + if (UNLIKELY(!classInfo.IsVerified())) { + VLOG(bytecodeverify) << "Verify class " << classInfo.GetName() << " before .\n"; + VerifyClass(const_cast(classInfo), true); + } + + ClassInitState state = TryInitClass(classInfo, !classInfo.IsInterface() && recursive); + MrtClass ex = MRT_PendingException(); + // if we come to a pending exception due to , this is an Initializer Error + if (ex != nullptr) { + MRT_ClearPendingException(); + if (reinterpret_cast(ex)->IsInstanceOf(*WellKnown::GetMClassError())) { + MRT_ThrowExceptionSafe(reinterpret_cast(ex)); + RC_LOCAL_DEC_REF(ex); + } else { + MRT_ThrowExceptionInInitializerError(ex); + } + } + return state; +} + +ClassInitState MRT_TryInitClassOnDemand(const MClass &classInfo) { + if (VLOG_IS_ON(classinit) || MrtGetClassCountEnable()) { + int64_t totalCount = IncClassInitTotalTryCount(1); + ClinitLog().Stream() << "\ttry-init-class-on-demand " << classInfo.GetName() << + ", total try-init count " << totalCount << maple::endl; + } + if (UNLIKELY(!classInfo.IsVerified())) { + VLOG(bytecodeverify) << "Verify class " << classInfo.GetName() << " before on demand.\n"; + VerifyClass(const_cast(classInfo), false); + } + // clinit is triggered from signal + ClassInitState state = TryInitClass(classInfo, !classInfo.IsInterface()); + MrtClass ex = MRT_PendingException(); + if (ex != nullptr) { + MRT_ClearPendingException(); + if (reinterpret_cast(ex)->IsInstanceOf(*WellKnown::GetMClassError())) { + ThrowExceptionUnw(ex); + } else { + MRT_ThrowExceptionInInitializerErrorUnw(ex); + } + } + return state; +} + +bool MRT_InitClassIfNeeded(const MClass &classInfo) { + if (UNLIKELY(!MRT_ReflectIsInit(reinterpret_cast(const_cast(&classInfo))))) { + if (UNLIKELY(MRT_TryInitClass(classInfo) == ClassInitState::kClassInitFailed)) { + EHLOG(ERROR) << "MRT_TryInitClass return fail" << maple::endl; + } + MrtClass ex = MRT_PendingException(); + if (ex != nullptr) { + MRT_DumpExceptionForLog(reinterpret_cast(ex)); + RC_LOCAL_DEC_REF(ex); + return false; + } + } + return true; +} + +void MRT_DumpClassClinit(std::ostream &os) { + os << ClinitLog::logStream.str(); + ClinitLog::logStream.str(""); +} + +#ifdef __cplusplus +} // extern "C" +#endif +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_classloader.cpp b/src/mrt/maplert/src/mrt_classloader.cpp new file mode 100644 index 0000000000..5e7343c063 --- /dev/null +++ b/src/mrt/maplert/src/mrt_classloader.cpp @@ -0,0 +1,916 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "mrt_classloader.h" + +#include +#include +#include + +#include "cpphelper.h" +#include "mrt_well_known.h" +#include "exception/mrt_exception.h" +#include "loader/object_locator.h" +#include "yieldpoint.h" +#include "interp_support.h" +#include "mrt_primitive_class.def" +#include "base/systrace.h" +#include "chosen.h" +namespace maplert { +LoaderAPI *LoaderAPI::pInstance = nullptr; +LoaderAPI &LoaderAPI::Instance() { + if (pInstance == nullptr) { + pInstance = new (std::nothrow) ClassLoaderImpl(); + if (pInstance == nullptr) { + CL_LOG(FATAL) << "new ClassLoaderImpl failed" << maple::endl; + } + } + return *pInstance; +} +ClassLoaderImpl::ClassLoaderImpl() { + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Flang_2FObject_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Flang_2FString_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Flang_2FClass_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Futil_2FFormatter_24Flags_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Futil_2FHashMap_24Node_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Futil_2FFormatter_24FormatString_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Flang_2FCharSequence_3B))); + primitiveClasses.push_back(reinterpret_cast( + &MRT_CLASSINFO(ALjava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B))); +#ifdef __OPENJDK__ + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Futil_2FHashtable_24Entry_3B))); +#else // libcore + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALjava_2Futil_2FHashtable_24HashtableEntry_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALlibcore_2Freflect_2FAnnotationMember_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALsun_2Fsecurity_2Futil_2FDerValue_3B))); + primitiveClasses.push_back(reinterpret_cast(&MRT_CLASSINFO(ALsun_2Fsecurity_2Fx509_2FAVA_3B))); +#endif // __OPENJDK__ +} +ClassLoaderImpl::~ClassLoaderImpl() { + mMappedClassLoader.clear(); +} + +void ClassLoaderImpl::UnInit() { + std::set reqMplClsLoaders; + mMplFilesOther.GetObjLoaders(reqMplClsLoaders); + mMplFilesBoot.GetObjLoaders(reqMplClsLoaders); + for (auto it = reqMplClsLoaders.begin(); it != reqMplClsLoaders.end(); ++it) { + UnloadClasses(reinterpret_cast(*it)); + } + + std::vector regMplFiles; + mMplFilesOther.GetObjFiles(regMplFiles); + mMplFilesBoot.GetObjFiles(regMplFiles); + + for (auto it = regMplFiles.begin(); it != regMplFiles.end(); ++it) { + delete *it; + *it = nullptr; + } + ObjectLoader::UnInit(); +} + +void ClassLoaderImpl::ResetCLCache() { + CLCache::instance.ResetCache(); +} + +jclass ClassLoaderImpl::GetCache(const jclass contextClass, const std::string &className, + uint32_t &index, bool &lockFail) { + return CLCache::instance.GetCache(contextClass, className, index, lockFail); +} + +void ClassLoaderImpl::WriteCache(const jclass klass, const jclass contextClass, uint32_t index) { + CLCache::instance.WriteCache(klass, contextClass, index); +} + +// API Interfaces Begin +void 
ClassLoaderImpl::RegisterMplFile(const ObjFile &objFile) { + const ObjFile *pmf = mMplFilesOther.Get(objFile.GetName()); + + if (pmf == nullptr) { + mMplFilesOther.Put(objFile.GetName(), objFile); + } +} + +bool ClassLoaderImpl::UnRegisterMplFile(const ObjFile &objFile) { + if (GetMplFileRegistered(objFile.GetName()) == nullptr) { + RemoveMappedClassLoader(objFile.GetName()); + delete &objFile; + return true; + } + return false; +} + +bool ClassLoaderImpl::RegisterJniClass(IEnv env, jclass javaClass, const std::string &mFileName, + const std::string &jniClassName, INativeMethod methods, int32_t methodCount, bool fake) { + if (mFileName.empty()) { + return mMplFilesOther.Register(env, javaClass, jniClassName, methods, methodCount, fake); + } + const ObjFile *mf = mMplFilesBoot.Get(mFileName); + if (mf == nullptr) { + mf = mMplFilesOther.Get(mFileName); + } + ObjFile *objFile = const_cast(mf); + if (mf != nullptr && objFile->CanRegisterNativeMethods(jniClassName) && (objFile->RegisterNativeMethods(env, + javaClass, jniClassName, methods, methodCount, fake) == true)) { + return true; + } + return false; +} + +size_t ClassLoaderImpl::GetListSize(AdapterFileList type) { + if (type == kMplFileBootList) { + return mMplFilesBoot.GetRegisterSize(); + } else { + return mMplFilesOther.GetRegisterSize(); + } +} + +const ObjFile *ClassLoaderImpl::GetMplFileRegistered(const std::string &name) { + const ObjFile *pmf = mMplFilesBoot.Get(name); + + if (pmf != nullptr) { + return pmf; + } + + return mMplFilesOther.Get(name); +} + +const ObjFile *ClassLoaderImpl::GetAppMplFileRegistered(const std::string &packageApk) { + std::vector files; + mMplFilesOther.GetObjFiles(files); + std::string package; + std::string apkName; + std::string::size_type index = packageApk.find("/"); + if (index == std::string::npos) { + CL_LOG(ERROR) << "packageApk must $package_name/xx.apk " << packageApk.c_str() << maple::endl; + return nullptr; + } else { + package = packageApk.substr(0, index); + apkName = packageApk.substr(index + 1); + } + for (auto it = files.begin(); it != files.end(); ++it) { + std::string soName = (*it)->GetName(); + // different installation, different so name + // adb push: /system/priv-app/Calendar/Calendar.apk!/maple/arm64/mapleclasses.so + // adb install: /data/app/com.android.calendar-C5VubFGVWn3V4prwdHIuHQ==/base.apk!/maple/arm64/mapleclasses.so + if ((soName.find(kAppSoPostfix) != std::string::npos || soName.find(kAppPartialSoPostfix) != std::string::npos) && + (soName.find(package) != std::string::npos || soName.find(apkName) != std::string::npos)) { + return *it; + } + } + return nullptr; +} + +bool ClassLoaderImpl::LoadMplFileInBootClassPath(const std::string &pathString) { + maple::ScopedTrace trace("LoadMplFileInBootClassPath, %p", pathString.c_str()); + if (pathString.length() < 1) { + return false; + } + // split class_pathString to arraylist; + std::vector classPaths = stringutils::Split(pathString, ':'); + // store the boot class path + jobject classLoader = nullptr; + // loop to process boot classpath + std::vector objList; + pLinker->SetLoadState(kLoadStateBoot); + for (std::string &path : classPaths) { + std::string jarName = path; + size_t pos = path.rfind("/"); + if (pos != std::string::npos && pos < path.length()) { + jarName = path.substr(pos + 1); + } + if (kIgnoreJarList.find(jarName) != std::string::npos) { + continue; + } + + FileAdapter adapter(path, pAdapterEx); + ObjFile *objFile = adapter.OpenObjectFile(classLoader); + + if (objFile == nullptr) { + std::string message = 
"Failed to dlopen " + path; + (void)(pAdapterEx->EnableMygote(false, message)); + return false; + } + + if (objFile->GetFileType() == FileType::kMFile) { + if (!pLinker->Add(*objFile, classLoader)) { + CL_LOG(ERROR) << "invalid maple file" << path << ", lazy=" << objFile->IsLazyBinding() << objFile->GetName() << + maple::endl; + delete objFile; + continue; + } + + mMplFilesBoot.Put(objFile->GetName(), *objFile); + objFile->Load(); + objList.push_back(objFile); + } else { + CL_LOG(ERROR) << "open Non maple file: " << path << ", " << objFile->GetName() << maple::endl; + delete objFile; + objFile = nullptr; + } + } + if (!LoadClasses(reinterpret_cast(classLoader), objList)) { + (void)pAdapterEx->EnableMygote(false, "Failed to load classes."); + return false; + } + // For boot class loader, we just link when load so. + (void)(pLinker->Link()); + SetLinked(classLoader, false); + return true; +} + +bool ClassLoaderImpl::LoadMplFileInAppClassPath(jobject clLoader, FileAdapter &adapter) { + maple::ScopedTrace trace("LoadMplFileInAppClassPath, %p", adapter.GetConvertPath().c_str()); + MObject *classLoader = reinterpret_cast(clLoader); + bool isFallBack = false; + do { + std::vector pathList; + adapter.GetObjFileList(pathList, isFallBack); + // Load classes in all MplFiles and add LinkerMFileInfo + std::vector mplInfoList; + for (auto path : pathList) { + // 1. check mpl file if registered + const ObjFile *pmfCookie = GetMplFileRegistered(path); + if (LIKELY(pmfCookie == nullptr)) { + // 2. open mpl file + ObjFile *pmf = adapter.OpenObjectFile(clLoader, isFallBack, path); + if (pmf == nullptr) { + CL_LOG(ERROR) << "failed to open maple file " << path << ", classloader:" << classLoader << maple::endl; + if (isFallBack) { + MRT_ThrowNewException("java/io/IOException", path.c_str()); + } + continue; + } + if (!pLinker->IsFrontPatchMode(path) && classLoader != nullptr) { + // 3. load mpl file's classes to classloader + if (!LoadClassesFromMplFile(classLoader, *pmf, mplInfoList, adapter.HasSiblings())) { + delete pmf; + CL_LOG(ERROR) << "failed to load maple file " << path << ", classloader:" << classLoader << maple::endl; + MRT_ThrowNewException("java/io/IOException", path.c_str()); + continue; + } + } + RegisterMplFile(*pmf); + pmfCookie = pmf; + } else if (pmfCookie->GetClassLoader() != clLoader) { + std::string message = "Attempt to load the same maple file " + path + " in with multiple class loaders"; + CL_LOG(ERROR) << message << maple::endl; + if (!SetMappedClassLoader(path, classLoader, reinterpret_cast(pmfCookie->GetClassLoader()))) { + MRT_ThrowNewException("java/lang/InternalError", message.c_str()); + continue; + } else if (VLOG_IS_ON(classloader)) { + adapter.DumpMethodName(); + } + } + if (pmfCookie != nullptr) { + adapter.Put(adapter.GetOriginPath(), *pmfCookie); + } + } + // A workaround for class's muid failed to resolve issue when multi-so loading + if (!isFallBack && adapter.HasSiblings()) { + LinkStartUpAndMultiSo(mplInfoList, adapter.HasStartUp()); + } + isFallBack = !isFallBack && (adapter.GetSize() == 0 || adapter.IsPartialAot()); + } while (isFallBack); + return (adapter.GetSize() > 0) ? true : false; +} + +#ifndef __ANDROID__ +// Only for QEMU mplsh test. +bool ClassLoaderImpl::LoadMplFileInUserClassPath(const std::string &paths) { + maple::ScopedTrace trace("LoadMplFileInUserClassPath, %p", paths.c_str()); + CL_VLOG(classloader) << paths << maple::endl; + + if (paths.length() < 1) { + CL_LOG(ERROR) << "class path is null for SystemClassLoader!!" 
<< maple::endl; + return true; + } + + // split class path string (paths) to arraylist; + std::vector clsPaths = stringutils::Split(paths, ':'); + + MObject *classLoader = mSystemClassLoader; + std::vector objList; + for (auto path : clsPaths) { + FileAdapter adapter(path, pAdapterEx); + ObjFile *objFile = adapter.OpenObjectFile(reinterpret_cast(classLoader)); // check for MFile first + if (objFile != nullptr && objFile->GetFileType() == FileType::kMFile) { + // MFile open OK. continue the linking process + if (!pLinker->Add(*objFile, reinterpret_cast(classLoader))) { + CL_LOG(ERROR) << "invalid maple file or multiple loading:" << path << ", " << objFile->GetName() << maple::endl; + delete objFile; + continue; + } + mMplFilesOther.Put(path, *objFile); + objFile->Load(); + objList.push_back(objFile); + } else { + objFile = adapter.OpenObjectFile(reinterpret_cast(classLoader), true); + // try MFile failed, then check for DFile + if (objFile == nullptr) { // try DFile also failed, stop loading. + CL_LOG(ERROR) << "open " << path << "failed!" << maple::endl; + return false; + } + // DFile open OK. continue the linking process + // Note: objFile should be of type FileType::kDFile + std::vector infoList; + if (!LoadClassesFromMplFile(classLoader, *objFile, infoList)) { + // loading the classes list in this dexFile failed + CL_LOG(ERROR) << "open " << path << "failed! size=" << infoList.size() << maple::endl; + delete objFile; + continue; + } + mMplFilesOther.Put(path, *objFile); + } + } + (void)(pLinker->Link()); + SetLinked(reinterpret_cast(classLoader), false); + if (!LoadClasses(classLoader, objList)) { + CL_LOG(ERROR) << "load class failed!!" << maple::endl; + return false; + } + return true; +} +#endif + +jclass ClassLoaderImpl::FindClass(const std::string &className, const SearchFilter &constFilter) { + std::string javaDescriptor; // Like Ljava/lang/Object; + MClass *klass = nullptr; + jobject systemClassLoader = reinterpret_cast(mSystemClassLoader); + SearchFilter &filter = const_cast(constFilter); + // Like Ljava/lang/Object; + javaDescriptor = filter.isInternalName ? GetClassNametoDescriptor(className) : className; + if (UNLIKELY(javaDescriptor.empty())) { + CL_LOG(ERROR) << "javaDescriptor is nullptr" << maple::endl; + } + filter.contextCL = IsBootClassLoader(filter.specificCL) ? nullptr : filter.specificCL; + if (filter.isLowerDelegate && filter.contextCL != nullptr) { + // For delegate last class loader, the search order is as below: + // . boot class loader + // . current class loader + // . parent class loader + // Otherwise, the order is as below: + // . parent class loader (Includes boot class loader) + // . current class loader + if (filter.isDelegateLast) { + klass = LocateInCurrentClassLoader(javaDescriptor, filter.Reset()); + } else { + klass = LocateInParentClassLoader(javaDescriptor, filter.Reset()); + } + if (klass == nullptr && !filter.IsNullOrSystem(systemClassLoader)) { + filter.currentCL = systemClassLoader; + klass = reinterpret_cast(LocateClass(javaDescriptor, filter.ResetFile())); + } + if (klass == nullptr && !filter.IsBootOrSystem(systemClassLoader)) { + klass = reinterpret_cast( + LinkerAPI::Instance().InvokeClassLoaderLoadClass(filter.contextCL, javaDescriptor)); + } + } else { + klass = FindClassInSingleClassLoader(javaDescriptor, filter.Reset()); + } + // It could be normal here. We are searching class by parent-delegation-model, so we may not find class in parent + // classLoader temporarily. 
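+  // A null result here simply falls back to the caller; otherwise, lazily bound boot classes get the
+  // context classloader recorded below.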
+ if (UNLIKELY(klass == nullptr)) { + CL_DLOG(classloader) << "failed, classloader=" << filter.contextCL << ", cl name=" << + (filter.contextCL == nullptr ? "BootCL(null)" : reinterpret_cast( + filter.contextCL)->GetClass()->GetName()) << ", class name=" << className << maple::endl; + return nullptr; + } + MObject *pendingClassLoader = GetClassCL(klass); + if (pendingClassLoader == nullptr && klass->IsLazyBinding()) { + CL_VLOG(classloader) << className << ", from boot, and lazy" << maple::endl; + SetClassCL(reinterpret_cast(klass), filter.contextCL); + } + return reinterpret_cast(klass); +} + +void ClassLoaderImpl::DumpUnregisterNativeFunc(std::ostream &os) { + if (VLOG_IS_ON(binding)) { + CL_VLOG(classloader) << "boot class:" << maple::endl; + mMplFilesBoot.DumpUnregisterNativeFunc(os); + CL_VLOG(classloader) << "application class:" << maple::endl; + mMplFilesOther.DumpUnregisterNativeFunc(os); + } +} + +void ClassLoaderImpl::VisitClasses(maple::rootObjectFunc &func) { + std::vector files; + std::vector files2; + mMplFilesOther.GetObjFiles(files); + mMplFilesBoot.GetObjFiles(files2); + std::unordered_set loaders; + + for (auto it = files.begin(); it != files.end(); ++it) { + (void)(loaders.insert(reinterpret_cast((*it)->GetClassLoader()))); // de-duplicate + } + + for (auto it = files2.begin(); it != files2.end(); ++it) { + (void)(loaders.insert(reinterpret_cast((*it)->GetClassLoader()))); // de-duplicate + } + + (void)(loaders.insert(nullptr)); // bootstrap class loader + + for (auto it = loaders.begin(); it != loaders.end(); ++it) { + VisitClassesByLoader(*it, func); + } +} + +// Get all mapped {classLoader, mpl_file_handle} by latter classloader. +bool ClassLoaderImpl::GetMappedClassLoaders(const jobject classLoader, + std::vector> &mappedPairs) { + bool ret = false; + + for (auto it = mMappedClassLoader.begin(); it != mMappedClassLoader.end(); ++it) { + std::string fileName = it->first; + jobject latterClassLoader = reinterpret_cast(it->second.first); + jobject mappedClassLoader = reinterpret_cast(it->second.second); + + if (classLoader == latterClassLoader) { + const ObjFile *pmfCookies = GetMplFileRegistered(fileName); + mappedPairs.push_back(std::make_pair(mappedClassLoader, pmfCookies)); + ret = true; + CL_LOG(INFO) << classLoader << "->{" << mappedClassLoader << "," << fileName << "}" << maple::endl; + } + } + return ret; +} + +bool ClassLoaderImpl::GetMappedClassLoader(const std::string &fileName, jobject classLoader, jobject &realClassLoader) { + auto range = mMappedClassLoader.equal_range(fileName); + + for (auto val = range.first; val != range.second; ++val) { + if (classLoader == reinterpret_cast(val->second.first)) { + realClassLoader = reinterpret_cast(val->second.second); + CL_LOG(INFO) << "get mapped classLoader from " << classLoader << ":" << realClassLoader << " for " << fileName << + maple::endl; + return true; + } + } + + CL_LOG(ERROR) << "failed, get mapped classLoader from " << classLoader << " for " << fileName << maple::endl; + return false; +} + +bool ClassLoaderImpl::RegisterNativeMethods(ObjFile &objFile, + jclass klass, INativeMethod methods, int32_t methodCount) { + return MClassLocatorManagerInterpEx::RegisterNativeMethods(*this, objFile, + klass, methods.As(), methodCount); +} +// -----------------------MRT_EXPORT Split-------------------------------------- +jclass ClassLoaderImpl::LocateClass(const std::string &className, const SearchFilter &constFilter) { + SearchFilter &filter = const_cast(constFilter); + ClassLocator *classLocator = 
GetCLClassTable(filter.currentCL).As(); + if (classLocator == nullptr) { + return nullptr; + } + MClass *klass = classLocator->InquireClass(className, filter); + if (klass == nullptr) { + return nullptr; + } + + if (klass->GetClIndex() == kClIndexUnInit) { // For lazy binding. + SetClassCL(reinterpret_cast(klass), filter.currentCL); + } + + return reinterpret_cast(klass); +} +// API Interfaces End +bool ClassLoaderImpl::SetMappedClassLoader(const std::string &fileName, + MObject *classLoader, MObject *realClassLoader) { + auto range = mMappedClassLoader.equal_range(fileName); + + for (auto val = range.first; val != range.second; ++val) { + if (classLoader == val->second.first) { + CL_LOG(ERROR) << "failed, shouldn't double map " << classLoader << " to " << realClassLoader << " for " << + fileName << maple::endl; + return true; + } + } + (void)(mMappedClassLoader.emplace(fileName, std::make_pair(classLoader, realClassLoader))); + CL_LOG(INFO) << "mapped " << classLoader << " to " << realClassLoader << " for " << fileName << maple::endl; + return true; +} + +void ClassLoaderImpl::RemoveMappedClassLoader(const std::string &fileName) { + (void)(mMappedClassLoader.erase(fileName)); +} + +// For delegate last class loader, the search order is as below: +// . boot class loader +// . current class loader +// . parent class loader +MClass *ClassLoaderImpl::LocateInCurrentClassLoader(const std::string &className, SearchFilter &filter) { + MClass *klass = nullptr; + if (IsBootClassLoader(filter.contextCL)) { // Boot class loder + klass = reinterpret_cast(LocateClass(className, filter.Clear())); + return klass; + } + + // Current class loader + klass = reinterpret_cast(LocateClass(className, filter.ResetClear())); + if (klass != nullptr) { + return klass; + } + // To traverse in parents. + filter.currentCL = GetCLParent(filter.contextCL); + filter.ignoreBootSystem = true; + klass = LocateInParentClassLoader(className, filter.ResetFile()); + return klass; +} + +MClass *ClassLoaderImpl::LocateInParentClassLoader(const std::string &className, SearchFilter &filter) { + MClass *klass = nullptr; + if (IsBootClassLoader(filter.currentCL)) { // Boot class loder + if (filter.ignoreBootSystem) { // Not to search in BootClassLoader and SystemClassLoader + return nullptr; + } + klass = reinterpret_cast(LocateClass(className, filter.ClearFile())); + return klass; + } + + jobject classLoader = filter.currentCL; + filter.currentCL = GetCLParent(classLoader); + if ((klass = LocateInParentClassLoader(className, filter)) == nullptr) { + filter.currentCL = classLoader; + klass = reinterpret_cast(LocateClass(className, filter.ResetFile())); + } + return klass; +} + +// Reduce cyclomatic complexity of FindClass(). +// Before find the class in current classloader, check the boot firstly. +MClass *ClassLoaderImpl::FindClassInSingleClassLoader(const std::string &javaDescriptor, SearchFilter &filter) { + MClass *klass = nullptr; + jobject systemClassLoader = reinterpret_cast(mSystemClassLoader); + // If current is boot classloader, or not boot but its parent is null, we check the boot classLoader firsly. + if (filter.contextCL == nullptr || GetCLParent(filter.contextCL) == nullptr) { + klass = reinterpret_cast(LocateClass(javaDescriptor, filter.Clear())); + } + // check the current classloader, if it's not boot or system classloader + if (klass == nullptr && !filter.IsBootOrSystem(systemClassLoader)) { + // Find in current classloader. 
+ klass = reinterpret_cast(LocateClass(javaDescriptor, filter.Reset())); + } + + // at last for the shared libraries, we check system classloader + if (klass == nullptr && filter.contextCL != nullptr && systemClassLoader != nullptr) { + // Find in systemclassloader. + filter.currentCL = systemClassLoader; + klass = reinterpret_cast(LocateClass(javaDescriptor, filter.ClearFile())); + } + return klass; +} + +void ClassLoaderImpl::LinkStartUpAndMultiSo(std::vector &mplInfoList, bool hasStartup) { + if (mplInfoList.size() > 0) { // We always load the first one, no matter is startup or not. + (void)(pLinker->Link(*(mplInfoList[0]), false)); + } + for (size_t i = 1; i < mplInfoList.size(); ++i) { + auto mplInfo = mplInfoList[i]; + void *param[] = { reinterpret_cast(pLinker), reinterpret_cast(mplInfo) }; + if (hasStartup) { + pAdapterEx->CreateThreadAndLoadFollowingClasses([](void *data)->void* { + void **p = reinterpret_cast(data); + LinkerAPI *linker = reinterpret_cast(p[0]); + LinkerMFileInfo *info = reinterpret_cast(p[1]); + (void)(linker->Link(*info, false)); + return nullptr; + }, param); + } else { + (void)(pLinker->Link(*mplInfo, false)); + } + } +#ifdef LINKER_DECOUPLE + (void)pLinker->HandleDecouple(mplInfoList); +#endif +} + +MClass *ClassLoaderImpl::GetPrimitiveClass(const std::string &mplClassName) { + MClass *classInfo = nullptr; + // check the dimension of the type + size_t dim = 0; + while (mplClassName[dim] == '[') { + ++dim; + } + // predefined primitive types has a dimension <= 3 + if (dim > 3) { + return nullptr; + } + + char typeChar = mplClassName[0]; + if (dim > 0) { + typeChar = mplClassName[dim]; + } + switch (typeChar) { + case 'Z': + classInfo = reinterpret_cast(__mrt_pclasses_Z[dim]); // boolean + break; + case 'B': + classInfo = reinterpret_cast(__mrt_pclasses_B[dim]); // byte + break; + case 'S': + classInfo = reinterpret_cast(__mrt_pclasses_S[dim]); // short + break; + case 'C': + classInfo = reinterpret_cast(__mrt_pclasses_C[dim]); // char + break; + case 'I': + classInfo = reinterpret_cast(__mrt_pclasses_I[dim]); // int + break; + case 'F': + classInfo = reinterpret_cast(__mrt_pclasses_F[dim]); // float + break; + case 'D': + classInfo = reinterpret_cast(__mrt_pclasses_D[dim]); // double + break; + case 'J': + classInfo = reinterpret_cast(__mrt_pclasses_J[dim]); // long + break; + case 'V': + classInfo = reinterpret_cast(__mrt_pclasses_V[dim]); // void + break; + default: + break; + } + if (classInfo != nullptr) { + JSAN_ADD_CLASS_METADATA(classInfo); // Need move this func to init func. + classInfo->SetClIndex(static_cast(kClIndexFlag | 0)); // Initialize each class cl index as boot cl. 
+ } + return classInfo; +} + +void ClassLoaderImpl::VisitPrimitiveClass(const maple::rootObjectFunc &func) { + // primitive and primitive array classes + func((maple::address_t)__mrt_pclasses_V[0]); // void + // predefined primitive types has a dimension <= 3 + for (int dim = 0; dim <= 3; ++dim) { + func((maple::address_t)__mrt_pclasses_Z[dim]); // boolean + func((maple::address_t)__mrt_pclasses_B[dim]); // byte + func((maple::address_t)__mrt_pclasses_S[dim]); // short + func((maple::address_t)__mrt_pclasses_C[dim]); // char + func((maple::address_t)__mrt_pclasses_I[dim]); // int + func((maple::address_t)__mrt_pclasses_F[dim]); // float + func((maple::address_t)__mrt_pclasses_D[dim]); // double + func((maple::address_t)__mrt_pclasses_J[dim]); // long + } +} + +MClass *ClassLoaderImpl::DoCreateArrayClass(MClass &klass, MClass &componentClass, const std::string &name) { + MRTSetMetadataShadow(reinterpret_cast(&klass), WellKnown::GetMClassClass()); + klass.SetMonitor(0); + klass.SetClIndex(componentClass.GetClIndex()); + klass.SetObjectSize(sizeof(reffield_t)); // here should all be object classes + +#ifdef USE_32BIT_REF + klass.SetFlag(FLAG_CLASS_ARRAY); + klass.SetNumOfSuperClasses(0); +#endif // USE_32BIT_REF + + ClassMetadataRO *classMetadataRo = reinterpret_cast( + reinterpret_cast(&klass) + sizeof(ClassMetadata)); + klass.SetClassMetaRoData(reinterpret_cast(classMetadataRo)); + klass.SetItable(0); + klass.SetVtable(VTAB_OBJECT); + klass.SetGctib(reinterpret_cast(GCTIB_OBJECT_ARRAY)); + classMetadataRo->className.SetRef(name.c_str()); + classMetadataRo->fields.SetDataRef(nullptr); + classMetadataRo->methods.SetDataRef(nullptr); + classMetadataRo->componentClass.SetDataRef(&componentClass); + uint32_t modifiers = componentClass.GetArrayModifiers(); + classMetadataRo->numOfFields = 0; + classMetadataRo->numOfMethods = 0; +#ifndef USE_32BIT_REF + classMetadataRo->flag = FLAG_CLASS_ARRAY; + classMetadataRo->numOfSuperclasses = 0; + classMetadataRo->padding = 0; +#endif // !USE_32BIT_REF + classMetadataRo->mod = modifiers; + classMetadataRo->annotation.SetDataRef(nullptr); + classMetadataRo->clinitAddr.SetDataRef(nullptr); + // set this class as initialized with a readable address *klass* + klass.SetInitStateRawValue(reinterpret_cast(&klass)); + return &klass; +} + +// Only create this array class, don't recursively create missing ones +MClass *ClassLoaderImpl::CreateArrayClass(const std::string &mplClassName, MClass &componentClass) { + if (mplClassName.empty()) { + CL_LOG(ERROR) << "failed, mplClassName is null." 
<< maple::endl; + return nullptr; + } + MClass *klass = reinterpret_cast(MRT_AllocFromMeta(sizeof(ClassMetadata) + sizeof(ClassMetadataRO), + kClassMetaData)); + const std::string *allocName; + // must alloc head obj, otherwise mplClassName will be free in advance + allocName = new (std::nothrow) std::string(mplClassName); + if (allocName == nullptr) { + LOG(FATAL) << "ClassLoaderImpl::CreateArrayClass: new string failed" << maple::endl; + } + return DoCreateArrayClass(*klass, componentClass, *allocName); +} + +#ifdef __cplusplus +extern "C" { +#endif +// MRT API Interfaces Begin +bool MRT_IsClassInitialized(jclass klass) { + return reinterpret_cast(klass)->GetClIndex() != static_cast(-1); +} + +jobject MRT_GetNativeContexClassLoader() { + jclass contextCls = MRT_GetNativeContexClass(); + if (contextCls != nullptr) { + return MRT_GetClassLoader(contextCls); + } + return nullptr; +} + +jclass MRT_GetNativeContexClass() { + UnwindContext context; + UnwindContext &lastContext = maplert::TLMutator().GetLastJavaContext(); + (void)MapleStack::GetLastJavaContext(context, lastContext, 0); + jclass clazz = nullptr; + if (!TryGetNativeContexClassLoaderForInterp(context, clazz)) { + if (context.IsCompiledContext()) { + clazz = context.frame.GetDeclaringClass(); + } + } + return clazz; +} + +jobject MRT_GetClassLoader(jclass klass) { + if (klass == nullptr) { + CL_LOG(ERROR) << "failed, class object is null!" << maple::endl; + return nullptr; + } + MObject *classLoader = LoaderAPI::As().GetClassCL(reinterpret_cast(klass)); + if (classLoader == nullptr) { + CL_DLOG(classloader) << "failed, classLoader returns null" << maple::endl; + } + return reinterpret_cast(classLoader); +} + +jobject MRT_ReflectGetClassLoader(jobject jobj) +__attribute__ ((alias ("MCC_GetCurrentClassLoader"))); + +jobject MCC_GetCurrentClassLoader(jobject caller) { + MClass *callerObj = reinterpret_cast(caller); + MClass *callerClass = callerObj->GetClass(); + // when the caller function is a static method, the caller itself is the classInfo + if (callerClass == WellKnown::GetMClassClass()) { + callerClass = callerObj; + } + jobject classLoader = MRT_GetClassLoader(reinterpret_cast(callerClass)); + if (classLoader != nullptr) { + RC_LOCAL_INC_REF(classLoader); + } + return classLoader; +} + +jobject MRT_GetBootClassLoader() { + // nullptr represents BootClassLoader in lower implementation. + // For upper layer, always return BootClassLoader instance, but not nullptr. + return LoaderAPI::As().GetBootClassLoaderInstance(); +} + +// Get class in specific classloader. +// If classLoader is null, it means finding class in bootclassloader. +jclass MRT_GetClassByClassLoader(jobject classLoader, const std::string className) { + bool isDelegateLast = false; + MObject *mapleCl = reinterpret_cast(classLoader); + if (classLoader != nullptr) { + MClass *classLoaderClass = mapleCl->GetClass(); + isDelegateLast = classLoaderClass == WellKnown::GetMClassDelegateLastClassLoader(); + if (isDelegateLast) { + CL_VLOG(classloader) << "name:" << className << ", clname:" << classLoaderClass->GetName() << maple::endl; + } + } + jclass klass = LoaderAPI::Instance().FindClass(className, SearchFilter(classLoader, false, true, isDelegateLast)); + if (klass != nullptr) { + (void)LinkerAPI::Instance().LinkClassLazily(klass); + } + return klass; +} + +// Get class by reference to class in context. +// If context class is null, it means finding class in bootclassloader. 
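+// The lookup is cache-first: a hit in CLCache returns immediately, and a trailing ';' in the name marks a
+// full descriptor rather than an internal name.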
+jclass MRT_GetClassByContextClass(jclass contextClass, const std::string className) { + bool isInternalName = true; + size_t len = className.size(); + if (len == 0) { + return nullptr; + } + + // try cache first + uint32_t index = 0; + bool lockFail(false); + jclass cacheResult = CLCache::instance.GetCache(contextClass, className, index, lockFail); + if (cacheResult) { + return cacheResult; + } + + if (className[len - 1] == ';') { + isInternalName = false; + } + + jclass klass = nullptr; + if (contextClass == nullptr) { + klass = LoaderAPI::Instance().FindClass(className, SearchFilter(nullptr, isInternalName, true, false)); + } else { + klass = LoaderAPI::Instance().FindClass(className, + SearchFilter(MRT_GetClassLoader(contextClass), isInternalName, true, false)); + } + if (klass != nullptr) { + (void)LinkerAPI::Instance().LinkClassLazily(klass); + } + if (!lockFail) { + CLCache::instance.WriteCache(klass, contextClass, index); + } + return klass; +} + +// Get class by reference to class in context. +// If context object is null, it means finding class in bootclassloader. +jclass MRT_GetClassByContextObject(jobject obj, const std::string className) { + if (obj == nullptr) { + return MRT_GetClassByContextClass(nullptr, className);; + } + MObject *clsObj = reinterpret_cast(obj); + return MRT_GetClassByContextClass(reinterpret_cast(clsObj->GetClass()), className); +} +CLCache CLCache::instance; + +jclass MRT_GetClass(jclass caller, const std::string className) { + uint32_t index = 0; + bool lockFail(false); + jclass cacheResult = CLCache::instance.GetCache(caller, className, index, lockFail); + if (cacheResult != nullptr) { + return cacheResult; + } + + MClass *callerCls = reinterpret_cast(caller); + MClass *callerClass = callerCls->GetClass(); + // When the caller function is a static method, the caller itself is the classInfo + if (callerClass == WellKnown::GetMClassClass()) { + callerClass = callerCls; + } + jobject classLoader = MRT_GetClassLoader(reinterpret_cast(callerClass)); + jclass klass = LoaderAPI::Instance().FindClass(className, SearchFilter(classLoader, false, true, false)); + if (klass != nullptr) { + (void)LinkerAPI::Instance().LinkClassLazily(klass); + } else { + CL_LOG(ERROR) << "callerClass=" << callerClass->GetName() << ", lazy=" << callerClass->IsLazyBinding() << + ", classLoader=" << classLoader << ", className=" << className << maple::endl; + } + if (!lockFail) { + CLCache::instance.WriteCache(klass, caller, index); + } + return klass; +} + +// Notice: It's invoked by compiler generating routine. 
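+// When the class cannot be found, it throws NoClassDefFoundError with a message naming both the missing
+// class and its caller.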
+jclass MCC_GetClass(jclass caller, const char *className) { + constexpr int eightBit = 256; + jclass klass = MRT_GetClass(caller, className); + if (UNLIKELY(klass == nullptr)) { + char msg[eightBit] = { 0 }; + + MClass *callerClass = reinterpret_cast(caller)->GetClass(); + // When the caller function is a static method, the caller itself is the classInfo + if (callerClass == WellKnown::GetMClassClass()) { + callerClass = reinterpret_cast(caller); + } + if (sprintf_s(msg, sizeof(msg), "No class found for %s, by %s", className, callerClass->GetName()) < 0) { + CL_LOG(ERROR) << "sprintf_s failed" << maple::endl; + } + MRT_ThrowNoClassDefFoundErrorUnw(msg); + return nullptr; + } + return klass; +} + +void MRT_RegisterDynamicClass(jobject classLoader, jclass klass) { + LoaderAPI::As().RegisterDynamicClass( + reinterpret_cast(classLoader), reinterpret_cast(klass)); +} + +void MRT_UnregisterDynamicClass(jobject classLoader, jclass klass) { + LoaderAPI::As().UnregisterDynamicClass( + reinterpret_cast(classLoader), reinterpret_cast(klass)); +} +#ifdef __cplusplus +} +#endif +} diff --git a/src/mrt/maplert/src/mrt_handlecommon.cpp b/src/mrt/maplert/src/mrt_handlecommon.cpp new file mode 100644 index 0000000000..36775e056e --- /dev/null +++ b/src/mrt/maplert/src/mrt_handlecommon.cpp @@ -0,0 +1,259 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_handlecommon.h" +namespace maplert{ +MClass *GetDcClasingFromFrame() { + JavaFrame frame; + (void)MapleStack::GetLastJavaFrame(frame); + frame.ResolveMethodMetadata(); + MethodMetaBase *md = reinterpret_cast(const_cast(frame.GetMetadata())); + return md->GetDeclaringClass(); +} + +void ParseSignatrueType(char *descriptor, const char *&methodSig) { + char *tmpDescriptor = descriptor; + DCHECK(tmpDescriptor != nullptr) << "ParseSignatrueType::tmpDescriptor is nullptr" << maple::endl; + if (*methodSig != 'L' && *methodSig != '[') { + *descriptor = *methodSig; + } else { + if (*methodSig == '[') { + while (*methodSig == '[') { + *tmpDescriptor++ = *methodSig++; + } + } + if (*methodSig != 'L') { + *tmpDescriptor = *methodSig; + } else { + while (*methodSig != ';') { + *tmpDescriptor++ = *methodSig++; + } + *tmpDescriptor = ';'; + } + } +} + +string GeneIllegalArgumentExceptionString(const MClass *from, const MClass *to) { + string fromClassName, toClassName; + string str = "Expected receiver of type "; + if (to != nullptr) { + to->GetTypeName(toClassName); + str += toClassName; + } else { + str += "void"; + } + str += ", but got "; + if (from != nullptr) { + from->GetTypeName(fromClassName); + str += fromClassName; + } else { + str += "void"; + } + return str; +} + +string GeneClassCastExceptionString(const MClass *from, const MClass *to) { + string str, fromClassName, toClassName; + if (from != nullptr) { + from->GetTypeName(fromClassName); + str = fromClassName; + } else { + str = "void"; + } + str += " cannot be cast to "; + if (to != nullptr) { + to->GetTypeName(toClassName); + str += toClassName; + } else { + str += "void"; + } + return str; +} + +void FillArgsInfoNoCheck(const ArgsWrapper &args, const char *typesMark, uint32_t arrSize, + BaseArgValue ¶mArray, uint32_t begin) { + uint32_t paramNum = arrSize - 1; + for (uint32_t i = begin; i < paramNum; i++) { + switch (typesMark[i]) { + case 'C': + paramArray.AddInt32(static_cast(args.GetJint())); + break; + case 'S': + paramArray.AddInt32(static_cast(args.GetJint())); + break; + case 'B': + paramArray.AddInt32(static_cast(args.GetJint())); + break; + case 'Z': + paramArray.AddInt32(static_cast(args.GetJint())); + break; + case 'I': + paramArray.AddInt32(args.GetJint()); + break; + case 'D': + paramArray.AddDouble(args.GetJdouble()); + break; + case 'F': + paramArray.AddFloat(args.GetJfloat()); + break; + case 'J': + paramArray.AddInt64(args.GetJlong()); + break; + default: + paramArray.AddReference(reinterpret_cast(reinterpret_cast(args.GetObject()))); + break; + } + } +} + +void DoInvoke(const MethodMeta &method, jvalue &result, ArgValue ¶mArray) { + MClass *retType = method.GetReturnType(); + CHECK_E_V(retType == nullptr, "DoInvoke: reType is nullptr"); + uintptr_t addr = method.GetFuncAddress(); + char *mark = retType->GetName(); + __MRT_ASSERT(mark != nullptr, "DoInvoke: mark is null"); + uint32_t stackSize = paramArray.GetStackSize(); + uint32_t dregSize = paramArray.GetFRegSize(); + switch (*mark) { + case 'V': + RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'I': + result.i = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'B': + result.b = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'C': + result.c = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'S': + result.s = 
RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'Z': + result.z = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'D': + result.d = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'F': + result.f = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + case 'J': + result.j = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + default: // L [ + result.l = RuntimeStub::SlowCallCompiledMethod(addr, paramArray.GetData(), stackSize, dregSize); + return; + } +} + +bool GetPrimShortType(const MClass *klass, char &type) { + if (klass == nullptr) { + LOG(FATAL) << "klass is nullptr." << maple::endl; + } + const char *descriptor = klass->GetName(); + if (!strcmp(descriptor, "Ljava/lang/Boolean;")) { + type = 'Z'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Byte;")) { + type = 'B'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Character;")) { + type = 'C'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Short;")) { + type = 'S'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Integer;")) { + type = 'I'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Long;")) { + type = 'J'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Float;")) { + type = 'F'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Double;")) { + type = 'D'; + return true; + } else if (!strcmp(descriptor, "Ljava/lang/Void;")) { + type = 'V'; + return true; + } + return false; +} + +bool CheckPrimitiveCanBoxed(char shortType) { + switch (shortType) { // full through + case 'Z': + case 'B': + case 'C': + case 'S': + case 'I': + case 'F': + case 'D': + case 'J': + return true; + default: // void or not primitive + return false; + } +} + +bool GetPrimShortTypeAndValue(const MObject *o, char &type, jvalue &value, const MClass *fromType) { + const MClass *klass = (o == nullptr) ? 
fromType : o->GetClass(); + if (fromType->IsAbstract() && o == nullptr) { + return true; + } + FieldMeta *valuefield = klass->GetDeclaredField("value"); + if (valuefield == nullptr && klass != WellKnown::GetMClassObject()) { + return false; + } + if (o == nullptr) { + return true; + } + if (valuefield == nullptr) { + return false; + } + if (klass == WellKnown::GetMClassBoolean()) { + type = 'Z'; + value.z = MRT_LOAD_JBOOLEAN(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassByte()) { + type = 'B'; + value.b = MRT_LOAD_JBYTE(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassCharacter()) { + type = 'C'; + value.c = MRT_LOAD_JCHAR(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassFloat()) { + type = 'F'; + value.f = MRT_LOAD_JFLOAT(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassDouble()) { + type = 'D'; + value.d = MRT_LOAD_JDOUBLE(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassInteger()) { + type = 'I'; + value.i = MRT_LOAD_JINT(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassLong()) { + type = 'J'; + value.j = MRT_LOAD_JLONG(o, valuefield->GetOffset()); + } else if (klass == WellKnown::GetMClassShort()) { + type = 'S'; + value.s = MRT_LOAD_JSHORT(o, valuefield->GetOffset()); + } + if (type != '0') { + return true; + } + return false; +} +} diff --git a/src/mrt/maplert/src/mrt_handleutil.cpp b/src/mrt/maplert/src/mrt_handleutil.cpp new file mode 100644 index 0000000000..ba471b50c9 --- /dev/null +++ b/src/mrt/maplert/src/mrt_handleutil.cpp @@ -0,0 +1,490 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_handleutil.h" +namespace maplert { +class Converter{ + public: + Converter(ConvertJStruct ¶ms, const MethodHandle &mh, const MClass *fromCls, const MClass *toCls, bool decFlag) + : from(fromCls), to(toCls), param(params), handle(mh), needDec(decFlag) { + toShortType = to ? *to->GetName() : 'V'; + fromShortType = *from->GetName(); + fromIsPrim = (from != nullptr) ? from->IsPrimitiveClass() : true; + toIsPrim = (to != nullptr) ? 
to->IsPrimitiveClass() : true; + } + ~Converter() = default; + + bool Convert() noexcept { + if (!toIsPrim && (from == WellKnown::GetMClassVoid() || from == WellKnown::GetMClassV())) { + return true; + } + jvalue kSrcValue(*(param.value)); + (*(param.value)).j = 0; + if (fromIsPrim && toIsPrim) { + if (UNLIKELY(!primitiveutil::ConvertNarrowToWide(fromShortType, toShortType, kSrcValue, *(param.value)))) { + ThrowWMT(); + return false; + } + return true; + } else if (!fromIsPrim && !toIsPrim) { + if (kSrcValue.l) { + from = reinterpret_cast(kSrcValue.l)->GetClass(); + } + param.value->l = kSrcValue.l; + if (UNLIKELY(!MRT_ReflectClassIsAssignableFrom(*to, *from))) { + MRT_ThrowNewException("java/lang/ClassCastException", GeneClassCastExceptionString(from, to).c_str()); + return false; + } + return true; + } else if (!toIsPrim) { + return ConvertPrimToObj(kSrcValue); + } else { + return ConvertObjToPrim(kSrcValue); + } + } + private: + bool ConvertObjToPrim(const jvalue &kSrcValue) noexcept { + char unboxedType = '0'; + jvalue unboxedVal; + unboxedVal.j = 0; + if (UNLIKELY(!GetPrimShortTypeAndValue(reinterpret_cast(kSrcValue.l), unboxedType, unboxedVal, from))) { + ThrowWMT(); + return false; + } + + if (UNLIKELY(kSrcValue.l == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", ""); + return false; + } + if (needDec) { + RC_LOCAL_DEC_REF(kSrcValue.l); + } + + if (UNLIKELY(!primitiveutil::ConvertNarrowToWide(unboxedType, toShortType, unboxedVal, *(param.value)))) { + if (from == WellKnown::GetMClassNumber()) { // boxed to type must be assignablefrom number + MRT_ThrowNewException("java/lang/ClassCastException", GeneClassCastExceptionString(from, to).c_str()); + } else { + ThrowWMT(); + } + return false; + } + return true; + } + + bool ConvertPrimToObj(const jvalue &kSrcValue) noexcept { + char type; + if (!GetPrimShortType(to, type)) { + // to_type unboxed failed, so if from_type can be boxed(is prim type), + // it could be converted if to_type is Number or Object + if (CheckPrimitiveCanBoxed(fromShortType) && (to == WellKnown::GetMClassNumber() || + to == WellKnown::GetMClassObject())) { + type = fromShortType; + } else { + ThrowWMT(); + return false; + } + } else if (UNLIKELY(fromShortType != type)) { + ThrowWMT(); + return false; + } + + if (UNLIKELY(!primitiveutil::ConvertNarrowToWide(fromShortType, type, kSrcValue, *(param.value)))) { + ThrowWMT(); + return false; + } + jobject boxed = reinterpret_cast(primitiveutil::BoxPrimitive(type, kSrcValue)); + param.value->l = boxed; + return true; + } + + void ThrowWMT() const noexcept { + string exceptionStr = GeneExceptionString(handle, param.parameterTypes, param.arraySize); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } + + const MClass *from; + const MClass *to; + ConvertJStruct ¶m; + const MethodHandle &handle; + bool needDec; + char toShortType; + char fromShortType; + bool fromIsPrim; + bool toIsPrim; +}; + +bool ConvertJvalue(ConvertJStruct ¶m, const MethodHandle &handle, + const MClass *from, const MClass *to, bool needDec) { + Converter converter(param, handle, from, to, needDec); + return converter.Convert(); +} + +bool IllegalArgumentCheck(const ArgsWrapper &args, MClass *from, MClass *to, jvalue &value) { + jobject obj = *args.GetObject(); + value.l = obj; + if (from != to) { + if (obj != nullptr) { + from = reinterpret_cast(reinterpret_cast(obj))->GetClass(); + } + if (UNLIKELY(!MRT_ReflectClassIsAssignableFrom(*to, *from))) { + 
MRT_ThrowNewException("java/lang/IllegalArgumentException", GeneIllegalArgumentExceptionString(from, to).c_str()); + return false; + } + } + return true; +} + +bool DirectCallParamsConvert(jvalue &val, CallParam ¶m, const MethodHandle &mh, + const MClass *from, const MClass *to) { + ConvertJStruct convertJStruct = { param.paramTypes, param.arraySize, &val }; + if (to != from && !ConvertJvalue(convertJStruct, mh, from, to)) { + return false; + } + return true; +} + +#define VALUECONVERT(statement) \ + STATEMENTCHECK(statement) \ + if (to == WellKnown::GetMClassD()) { \ + paramArray.AddDouble(curValue.d); \ + } else if (to == WellKnown::GetMClassF()) { \ + paramArray.AddFloat(curValue.f); \ + } else if (to == WellKnown::GetMClassJ()) { \ + paramArray.AddInt64(curValue.j); \ + } else if (to == WellKnown::GetMClassI() || \ + to == WellKnown::GetMClassZ() || \ + to == WellKnown::GetMClassB() || \ + to == WellKnown::GetMClassS() || \ + to == WellKnown::GetMClassC()) { \ + paramArray.AddInt32(curValue.i); \ + } else { \ + paramArray.AddReference(reinterpret_cast(reinterpret_cast(curValue.l)));\ + } \ + break; + +// we put converted jobject to SLR, and will be destructed in caller +bool ConvertParams(CallParam ¶m, const MethodHandle &mh, + const ArgsWrapper &args, BaseArgValue ¶mArray, ScopedHandles&) { + MethodMeta *method = mh.GetMethodMeta(); + vector ptypesVal = mh.GetMethodTypeMplObj()->GetParamsType(); + uint32_t size = param.arraySize - 1; + for (uint32_t i = param.beginIndex; i < size; ++i) { + MClass *to = ptypesVal[i]; + MClass *from = param.paramTypes[i]; + jvalue curValue; + curValue.j = 0; + switch (param.typesMark[i]) { + case 'B': + curValue.b = static_cast(args.GetJint()); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'C': + curValue.c = static_cast(args.GetJint()); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'S': + curValue.s = static_cast(args.GetJint()); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'Z': + curValue.z = static_cast(args.GetJint()); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'I': + curValue.i = args.GetJint(); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'D': + curValue.d = args.GetJdouble(); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'F': + curValue.f = args.GetJfloat(); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + case 'J': + curValue.j = args.GetJlong(); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + default: { + // maple throw IllegalArgumentException when this obj convert fail + if (i == 0 && !method->IsStatic() && from != to) { + VALUECONVERT(!IllegalArgumentCheck(args, from, to, curValue)) + } else { + curValue.l = *args.GetObject(); + VALUECONVERT(!DirectCallParamsConvert(curValue, param, mh, from, to)) + } + } + } + if (from->IsPrimitiveClass() && !to->IsPrimitiveClass()) { + ObjHandle keepAlive(curValue.l); + } + } + return true; +} + +string GeneExceptionString(const MethodHandle &mh, MClass **parameterTypes, uint32_t arraySize) { + string str = "Expected "; + string className; + str += mh.ValidTypeToString(); + str += " but was ("; + uint32_t paraNum = arraySize - 1; // Remove the return value. 
+ for (uint32_t i = 0; i < paraNum; ++i) { + if (parameterTypes[i] != nullptr) { + className.clear(); + parameterTypes[i]->GetTypeName(className); + str += className; + } else { + str += "void"; + } + if (i != paraNum - 1) { + str += ", "; + } + } + str += ")"; + if (parameterTypes[arraySize - 1] != nullptr) { + className.clear(); + parameterTypes[arraySize - 1]->GetTypeName(className); + str += className; + } else { + str += "void"; + } + return str; +} + +bool ConvertReturnValue(const MethodHandle &mh, MClass **parameterTypes, uint32_t arraySize, jvalue &value) { + const MClass *from = mh.GetMethodTypeMplObj()->GetReTType(); + const MClass *to = parameterTypes[arraySize - 1]; + if (from == to) { + return true; + } + if (to == WellKnown::GetMClassV()) { + if (!from->IsPrimitiveClass()) { + RC_LOCAL_DEC_REF(value.l); + } + return true; + } + if (from == WellKnown::GetMClassVoid() || from == WellKnown::GetMClassV()) { + value.j = 0UL; + return true; + } + ConvertJStruct convertJStruct = { parameterTypes, arraySize, &value }; + bool convertResult = ConvertJvalue(convertJStruct, mh, from, to, true); + if (!convertResult) { + RC_LOCAL_DEC_REF(value.l); + } + return convertResult; +} + +void IsConvertibleOrThrow(uint32_t arraySize, const MethodHandle &mh, MClass **parameterTypes, uint32_t paramNum) { + if (!IsConvertible(mh, arraySize)) { + if (paramNum == 1 && mh.GetHandleKind() != kStaticCall) { + MClass *from = parameterTypes[0]; + const vector ¶mVec = mh.GetMethodTypeMplObj()->GetParamsType(); + MClass *to = paramVec.size() > 0 ? paramVec[0] : nullptr; + string msg = GeneIllegalArgumentExceptionString(to, from); + MRT_ThrowNewExceptionUnw("java/lang/IllegalArgumentException", msg.c_str()); + } else { + string exceptionStr = GeneExceptionString(mh, parameterTypes, arraySize); + MRT_ThrowNewExceptionUnw("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } + } +} + +bool ClinitCheck(const MClass *decCls) { + if (UNLIKELY(!MRT_ReflectIsInit(*decCls))) { + if (UNLIKELY(MRT_TryInitClass(*decCls) == ClassInitState::kClassInitFailed)) { + LOG(ERROR) << "MRT_TryInitClass return fail" << maple::endl; + } + if (UNLIKELY(MRT_HasPendingException())) { + return false; + } + } + return true; +} + +static MClass *GetClassFromDescriptorAndCalSize(const char *descriptor, uint32_t &refNum, uint32_t &byteArrSize, + char *typesMark, const MClass *callerCls) { + DCHECK(typesMark != nullptr) << "typesMark is nullptr" << maple::endl; + DCHECK(descriptor != nullptr) << "GetClassFromDescriptorAndCalSize::descriptor is nullptr" << maple::endl; + switch (*descriptor) { + case 'I': + byteArrSize += sizeof(int); + *typesMark = 'I'; + return WellKnown::GetMClassI(); + case 'B': + byteArrSize += sizeof(int); + *typesMark = 'B'; + return WellKnown::GetMClassB(); + case 'F': + byteArrSize += sizeof(int); + *typesMark = 'F'; + return WellKnown::GetMClassF(); + case 'C': + byteArrSize += sizeof(int); + *typesMark = 'C'; + return WellKnown::GetMClassC(); + case 'S': + byteArrSize += sizeof(int); + *typesMark = 'S'; + return WellKnown::GetMClassS(); + case 'J': + byteArrSize += sizeof(double); + *typesMark = 'J'; + return WellKnown::GetMClassJ(); + case 'D': + byteArrSize += sizeof(double); + *typesMark = 'D'; + return WellKnown::GetMClassD(); + case 'Z': + byteArrSize += sizeof(int); + *typesMark = 'Z'; + return WellKnown::GetMClassZ(); + case 'V': + byteArrSize += sizeof(int); + *typesMark = 'V'; + return WellKnown::GetMClassV(); + default: + *typesMark = 'L'; + ++refNum; + return 
MClass::JniCast(MRT_GetClassByContextClass(*callerCls, descriptor)); + } +} + +// the last element is return type +void GetParameterAndRtType(MClass **types, char *typesMark, const MString *protoStr, + SizeInfo &sz, const MClass *declareClass) { + DCHECK(protoStr != nullptr); + const char *methodSig = reinterpret_cast(protoStr->GetContentsPtr()); + uint32_t count = protoStr->GetLength(); + + int idx = 0; + DCHECK(methodSig != nullptr) << "GetParameterAndRtTypeA::methodSig is nullptr" << maple::endl; + ++methodSig; + size_t len = count + 1; + char *descriptor = reinterpret_cast(calloc(len, 1)); + if (UNLIKELY(descriptor == nullptr)) { + return; + } + DCHECK(types != nullptr) << "rtTypeParam.types is nullptr" << maple::endl; + while (*methodSig != ')') { + if (memset_s(descriptor, len, 0, len) != EOK) { + LOG(FATAL) << "memset_s fail." << maple::endl; + } + ParseSignatrueType(descriptor, methodSig); + MClass *ptype = GetClassFromDescriptorAndCalSize(descriptor, sz.refNum, sz.byteArrSize, + typesMark + idx, declareClass); + *(types + idx) = ptype; + ++idx; + ++methodSig; + } + ++methodSig; + if (memset_s(descriptor, len, 0, len) != EOK) { + LOG(FATAL) << "memset_s fail." << maple::endl; + } + ParseSignatrueType(descriptor, methodSig); + MClass *rtType = GetClassFromDescriptorAndCalSize(descriptor, sz.refNum, sz.byteArrSize, + typesMark + idx, declareClass); + *(types + idx) = rtType; + free(descriptor); +} + +static bool TransformEnter(MethodHandle &mh, SizeInfo &sz, const TypeInfo &typeInfo, jvalue &result) { + vector handleTypes = mh.GetMethodTypeMplObj()->GetParamsType(); + Kind handleKind = mh.GetHandleKind(); + // invoke transform , marshal EmStackFrame and invoke transform method + if (handleKind == kTransformCall || handleKind == kCallSiteTransformCall) { + if (handleKind != kCallSiteTransformCall) { + sz.byteArrSize = 0; + sz.refNum = 0; + CalcFrameSize(mh.GetMethodTypeMplObj()->GetReTType(), handleTypes, sz.byteArrSize, sz.refNum); + } + if (!mh.InvokeTransform(sz, typeInfo.typesMark, typeInfo.types, result)) { + MRT_CheckThrowPendingExceptionUnw(); + } + return true; + } + return false; +} + +MClass *GetContextCls(Arg &arg, MString *&calleeName) { + MClass *callerClass = nullptr; + constexpr int kNewABIFlag = 1; + if (reinterpret_cast(calleeName) & kNewABIFlag) { + calleeName = reinterpret_cast(reinterpret_cast(calleeName) - kNewABIFlag); + MClass *callerCls = reinterpret_cast(arg.GetObject()); + if (callerCls != nullptr) { + callerClass = callerCls->GetClass(); + } + if (callerClass == WellKnown::GetMClassClass()) { + callerClass = callerCls; + } + } + return callerClass; +} + +#if defined(__arm__) +int64_t PolymorphicCallEnter32(int32_t *args) { +#if defined(__ARM_PCS_VFP) + Arm32Arg argsHandle(args); +#elif defined(__ARM_PCS) + Arm32SoftFPArg argsHandle(args); +#endif + MString *calleeStr = reinterpret_cast(argsHandle.GetObject()); + MString *protoString = reinterpret_cast(argsHandle.GetObject()); + uint32_t paramNum = static_cast(argsHandle.GetJint()); + MObject *mhObj = argsHandle.GetObject(); + MClass *callerClass = GetContextCls(argsHandle, calleeStr); + jvalue result = PolymorphicCallEnter(calleeStr, protoString, paramNum, mhObj, argsHandle, callerClass); + return result.j; +} +#endif + +jvalue PolymorphicCallEnter(const MString *calleeStr, const MString *protoString, + uint32_t paramNum, MObject *mhObj, Arg &args, const MClass *declareClass) { + if (mhObj == nullptr) { + MRT_ThrowNewExceptionUnw("java/lang/NullPointerException", ""); + } + SizeInfo sz(paramNum); + char 
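+  // Stack buffers: one type-mark character and one resolved MClass* per parameter,
+  // with the final slot reserved for the return type.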
typesMark[sz.arraySize]; + MClass *paramTypes[sz.arraySize]; + + GetParameterAndRtType(paramTypes, typesMark, protoString, sz, declareClass); + jvalue result; + result.j = 0; + MethodHandle mh = MethodHandle(mhObj, args); + MethodMeta &mplFieldOrMethodMeta = *mh.GetMethodMeta(); + bool isStaticMethod = mplFieldOrMethodMeta.IsStatic(); + bool isExactMatched = mh.ExactInvokeCheck(calleeStr, paramTypes, sz.arraySize); + if (IsCallerTransformerJStr(paramNum, protoString)) { // handle EmStackFrame invoke + if (!mh.InvokeWithEmStackFrame(args.GetObject(), result)) { + MRT_CheckThrowPendingExceptionUnw(); + } + return result; + } + if (IsFieldAccess(mh.GetHandleKind())) { + if (!mh.FieldAccess(paramTypes, !isExactMatched, sz.arraySize, result)) { + MRT_CheckThrowPendingExceptionUnw(); + } + return result; + } + TypeInfo typeInfo = { paramTypes, typesMark }; + if (TransformEnter(mh, sz, typeInfo, result) || !ClinitCheck(mplFieldOrMethodMeta.GetDeclaringClass())) { + return result; + } + if (!isExactMatched) { // isExactmatch check, if not we continue check IsConvertible + IsConvertibleOrThrow(sz.arraySize, mh, paramTypes, paramNum); + } + CallParam dCP = { paramNum, isExactMatched, paramTypes, typesMark, isStaticMethod, 0, sz.arraySize, 0 }; + if (!mh.NoParamFastCall(dCP, result)) { + mh.DirectCall(dCP, result); + } + if (UNLIKELY(MRT_HasPendingException() || !ConvertReturnValue(mh, paramTypes, sz.arraySize, result))) { + MRT_CheckThrowPendingExceptionUnw(); + } + return result; +} +} // namespace maple diff --git a/src/mrt/maplert/src/mrt_linker.cpp b/src/mrt/maplert/src/mrt_linker.cpp new file mode 100644 index 0000000000..305d71a001 --- /dev/null +++ b/src/mrt/maplert/src/mrt_linker.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_linker.h" +#include "mrt_linker_api.h" + +namespace maplert { +LinkerAPI *LinkerAPI::pInstance = nullptr; +LinkerAPI &LinkerAPI::Instance() { + if (pInstance == nullptr) { + pInstance = new (std::nothrow) MplLinkerImpl(); + if (pInstance == nullptr) { + LINKER_LOG(FATAL) << "new MplLinkerImpl failed" << maple::endl; + } + } + return *pInstance; +} +MplLinkerImpl::MplLinkerImpl() + : linkerFeature(*this), + hotfixFeature(*this), + methodBuilderFeature(*this), +#ifdef LINKER_DECOUPLE + decoupleFeature(*this, methodBuilderFeature), +#endif + lazyBindingFeature(*this, methodBuilderFeature), +#ifdef LINKER_RT_CACHE + linkerCacheFeature(*this), +#endif // LINKER_RT_CACHE + debugFeature(*this) { + features[kFLinker] = &linkerFeature; + features[kFHotfix] = &hotfixFeature; + features[kFDebug] = &debugFeature; + features[kFMethodBuilder] = &methodBuilderFeature; +#ifdef LINKER_DECOUPLE + features[kFDecouple] = &decoupleFeature; +#endif + features[kFLazyBinding] = &lazyBindingFeature; +#ifdef LINKER_RT_CACHE + features[kFLinkerCache] = &linkerCacheFeature; +#endif // LINKER_RT_CACHE +} +#ifdef __cplusplus +extern "C" { +#endif +bool MRT_LinkerIsJavaText(const void *addr) { + return LinkerAPI::Instance().IsJavaText(addr); +} +void *MRT_LinkerGetSymbolAddr(void *handle, const char *symbol, bool isFunction) { + return LinkerAPI::Instance().GetSymbolAddr(handle, symbol, isFunction); +} +void MRT_LinkerSetCachePath(const char *path) { +#ifdef LINKER_RT_CACHE + LinkerAPI::Instance().SetCachePath(path); +#else + (void)path; +#endif // LINKER_RT_CACHE +} +#ifdef __cplusplus +} +#endif +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_methodhandle.cpp b/src/mrt/maplert/src/mrt_methodhandle.cpp new file mode 100644 index 0000000000..70778c252c --- /dev/null +++ b/src/mrt/maplert/src/mrt_methodhandle.cpp @@ -0,0 +1,950 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_methodhandle.h" +#include "mrt_handleutil.h" +#include "mstring_inline.h" + +namespace maplert { +constexpr char MethodHandle::kInvokeMtd[]; +constexpr char MethodHandle::kInvokeExactMtd[]; + +MethodMeta *RefineTargetMethod(Kind handleKind, MethodMeta *method, const MClass *referClass) { + MClass *decClass = method->GetDeclaringClass(); + if (handleKind == kSuperCall) { + if (decClass == referClass) { + return method; + } else { + if (!MRT_ReflectClassIsInterface(*decClass)) { + CHECK_E_P(referClass == nullptr, "RefineTargetMethod: referClass is nullptr."); + MClass *superClass = referClass->GetSuperClass(); + if (superClass != nullptr) { + return superClass->GetDeclaredMethod(method->GetName(), method->GetSignature()); + } + } + } + } else if (handleKind == kVirtualCall || handleKind == kInterfaceCall) { + if (decClass != referClass && referClass != nullptr) { + MethodMeta *realMthd = referClass->GetMethod(method->GetName(), method->GetSignature()); + if (realMthd == nullptr) { + string msg = GeneIllegalArgumentExceptionString(referClass, decClass); + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + return method; + } + return realMthd; + } + } else if (handleKind == kDirectCall && method->IsConstructor() && decClass == WellKnown::GetMClassString()) { + if (StringToStringFactoryMap.size() == 0) { + MClass *strFactory = + MClass::JniCast(MRT_GetClassByContextClass(NULL, "Ljava_2Flang_2FStringFactory_3B")); + CHECK_E_P(strFactory == nullptr, "MRT_GetClassByContextClass return nullptr."); + std::vector methodsVec; + reinterpret_cast(strFactory)->GetDeclaredMethods(methodsVec, false); + std::lock_guard guard(mtx); + std::for_each(methodsVec.begin(), methodsVec.end(), [](MethodMeta *mthd) { + if (stringInitMethodSet.find(mthd->GetName()) != stringInitMethodSet.end()) { + StringToStringFactoryMap[mthd->GetSignature()] = mthd; + } + }); + } + string originSig = method->GetSignature(); + originSig.replace(originSig.size() - 1, 1, "Ljava/lang/String;"); + MethodMeta *realMthd = StringToStringFactoryMap[originSig]; + return realMthd; + } + return method; +} + +std::string MethodType::ToString() const noexcept { + string s = "("; + string className; + size_t size = paramsType.size(); + for (size_t i = 0; i < size; ++i) { + if (paramsType[i] != nullptr) { + className.clear(); + reinterpret_cast(paramsType[i])->GetTypeName(className); + s += className; + } else { + s += "void"; + } + if (i != size - 1) { + s += ", "; + } + } + s += ")"; + if (returnType != nullptr) { + className.clear(); + reinterpret_cast(returnType)->GetTypeName(className); + s += className; + } else { + s += "void"; + } + return s; +} + +std::string MethodHandle::ValidTypeToString() const noexcept { + return validMethodTypeMplObjCache->ToString(); +} + +MethodMeta *MethodHandle::GetRealMethod(MObject *obj) const noexcept { + MethodMeta *realMethod = nullptr; + auto getRealMethod = std::bind(RefineTargetMethod, handleKind, + reinterpret_cast(mplFieldOrMethod), placeholders::_1); + if (handleKind == kSuperCall) { + realMethod = getRealMethod(GetMethodTypeMplObj()->GetParamsType()[0]); + } else { + bool isLiteralString = false; + if (obj != nullptr && obj->GetClass() == WellKnown::GetMClassString()) { + MString *jstr = reinterpret_cast(obj); + isLiteralString = jstr->IsLiteral(); + } + if (IS_HEAP_ADDR(reinterpret_cast(obj)) || isLiteralString) { + realMethod = getRealMethod(obj->GetClass()); + } else { + realMethod = getRealMethod(nullptr); + } + } + if (realMethod == nullptr) { + 
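+    // Resolution failed; report it and let the caller handle the nullptr result.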
LOG(ERROR) << "GetRealMethod return null" << maple::endl; + } + return realMethod; +} + +char MethodHandle::GetReturnTypeMark(char markInProto) const noexcept { + char mark; + const MClass *retType = methodTypeMplObjCache->GetReTType(); + if (handleKind != kCallSiteTransformCall) { + mark = *(retType->GetName()); + } else { + mark = markInProto; + } + return mark; +} + +MObject *MethodHandle::CreateEmStackFrame(const MArray *bArray, const MObject *refArray, + MClass **types, uint32_t arraySize) const { + ScopedHandles sHandles; + ObjHandle callerType(MObject::NewObject(*WellKnown::GetMClassMethodType())); + if (callerType == 0) { + LOG(FATAL) << "CreateEmStackFrame: callerType is nullptr" << maple::endl; + } + callerType->StoreObjectNoRc(WellKnown::GetMFieldMethodHandlepRTypeOffset(), types[arraySize - 1]); + ObjHandle paramArray(MRT_NewObjArray(static_cast(arraySize) - 1, *WellKnown::GetMClassClass(), nullptr)); + for (uint32_t i = 0; i < arraySize - 1; ++i) { + paramArray->SetObjectElement(i, types[i]); + } + callerType()->StoreObject(WellKnown::GetMFieldMethodHandlePTypesOffset(), paramArray()); + + ObjHandle EmStackFrameObj(MObject::NewObject(*WellKnown::GetMClassEmulatedStackFrame())); + EmStackFrameObj->StoreObject(WellKnown::GetMFieldEmStackFrameCallsiteOffset(), callerType()); + + if (handleKind == kCallSiteTransformCall) { + EmStackFrameObj->StoreObject(WellKnown::GetMFieldEmStackFrameTypeOffset(), callerType()); + } else { + EmStackFrameObj->StoreObject(WellKnown::GetMFieldEmStackFrameTypeOffset(), GetMethodTypeJavaObj()); + } + EmStackFrameObj->StoreObject(WellKnown::GetMFieldEmStackFrameStackFrameOffset(), bArray); + EmStackFrameObj->StoreObject(WellKnown::GetMFieldEmStackFrameReferencesOffset(), refArray); + return EmStackFrameObj.ReturnObj(); +} + +bool MethodHandle::FieldSGet(MClass **parameterTypes, bool doConvert, uint32_t arraySize, jvalue &result) { + char shortFieldType = *(GetFieldMeta()->GetTypeName()); + jfieldID field = reinterpret_cast(GetFieldMeta()); + switch (shortFieldType) { + case 'Z': + result.z = MRT_ReflectGetFieldjboolean(field, nullptr); + break; + case 'B': + result.b = MRT_ReflectGetFieldjbyte(field, nullptr); + break; + case 'C': + result.c = MRT_ReflectGetFieldjchar(field, nullptr); + break; + case 'S': + result.s = MRT_ReflectGetFieldjshort(field, nullptr); + break; + case 'I': + result.i = MRT_ReflectGetFieldjint(field, nullptr); + break; + case 'J': + result.j = MRT_ReflectGetFieldjlong(field, nullptr); + break; + case 'F': + result.f = MRT_ReflectGetFieldjfloat(field, nullptr); + break; + case 'D': + result.d = MRT_ReflectGetFieldjdouble(field, nullptr); + break; + case 'V': + LOG(FATAL) << "Unreachable: " << shortFieldType; + break; + default: { // reference + result.l = MRT_ReflectGetFieldjobject(field, nullptr); + break; + } + } + if (doConvert && !ConvertReturnValue(*this, parameterTypes, arraySize, result)) { + return false; + } + return true; +} + +bool MethodHandle::FieldSPut(MClass **parameterTypes, bool doConvert, uint32_t arraySize) { + jvalue setVal; + setVal.j = 0; + constexpr size_t paramTypeIndex = 0; + GetValFromVargs(setVal, *parameterTypes[paramTypeIndex]->GetName()); + vector ptypesVal = MethodType(GetMethodTypeJavaObj()).GetParamsType(); + if (doConvert) { + MClass *from = parameterTypes[paramTypeIndex]; + MClass *to = ptypesVal[0]; + if (from != to) { + ConvertJStruct convertJStruct = { parameterTypes, arraySize, &setVal }; + STATEMENTCHECK(!ConvertJvalue(convertJStruct, *this, from, to)) + } + } + jfieldID field = 
reinterpret_cast(GetFieldMeta()); + switch (*(GetFieldMeta()->GetTypeName())) { + case 'Z': + MRT_ReflectSetFieldjboolean(field, nullptr, setVal.z); + break; + case 'B': + MRT_ReflectSetFieldjbyte(field, nullptr, setVal.b); + break; + case 'C': + MRT_ReflectSetFieldjchar(field, nullptr, setVal.c); + break; + case 'S': + MRT_ReflectSetFieldjshort(field, nullptr, setVal.s); + break; + case 'I': + MRT_ReflectSetFieldjint(field, nullptr, setVal.i); + break; + case 'J': + MRT_ReflectSetFieldjlong(field, nullptr, setVal.j); + break; + case 'F': + MRT_ReflectSetFieldjfloat(field, nullptr, setVal.f); + break; + case 'D': + MRT_ReflectSetFieldjdouble(field, nullptr, setVal.d); + break; + case 'V': + LOG(FATAL) << "Unreachable: " << *(GetFieldMeta()->GetTypeName()); + break; + default: { // reference + MRT_ReflectSetFieldjobject(field, nullptr, setVal.l); + break; + } + } + return true; +} + +bool MethodHandle::FieldPut(MClass **parameterTypes, bool doConvert, uint32_t arraySize) { + jobject obj = *args.GetObject(); + jvalue setVal; + setVal.j = 0; + constexpr size_t paramTypeIndex = 1; + GetValFromVargs(setVal, *parameterTypes[paramTypeIndex]->GetName()); + vector ptypesVal = MethodType(GetMethodTypeJavaObj()).GetParamsType(); + if (doConvert) { + MClass *from = parameterTypes[paramTypeIndex]; + MClass *to = ptypesVal[1]; + ConvertJStruct convertJStruct = { parameterTypes, arraySize, &setVal }; + STATEMENTCHECK(from != to && !ConvertJvalue(convertJStruct, *this, from, to)) + } + char shortFieldType = *(GetFieldMeta()->GetTypeName()); + jfieldID field = reinterpret_cast(GetFieldMeta()); + switch (shortFieldType) { + case 'Z': + MRT_ReflectSetFieldjboolean(field, obj, setVal.z); + break; + case 'B': + MRT_ReflectSetFieldjbyte(field, obj, setVal.b); + break; + case 'C': + MRT_ReflectSetFieldjchar(field, obj, setVal.c); + break; + case 'S': + MRT_ReflectSetFieldjshort(field, obj, setVal.s); + break; + case 'I': + MRT_ReflectSetFieldjint(field, obj, setVal.i); + break; + case 'J': + MRT_ReflectSetFieldjlong(field, obj, setVal.j); + break; + case 'F': + MRT_ReflectSetFieldjfloat(field, obj, setVal.f); + break; + case 'D': + MRT_ReflectSetFieldjdouble(field, obj, setVal.d); + break; + case 'V': + LOG(FATAL) << "Unreachable: " << shortFieldType; + break; + default: { // reference + MRT_ReflectSetFieldjobject(field, obj, setVal.l); + break; + } + } + return true; +} + +bool MethodHandle::FieldGet(MClass **parameterTypes, bool doConvert, uint32_t arraySize, jvalue &result) { + jobject obj = *args.GetObject(); + char shortFieldType = *(GetFieldMeta()->GetTypeName()); + jfieldID field = reinterpret_cast(GetFieldMeta()); + switch (shortFieldType) { + case 'Z': + result.z = MRT_ReflectGetFieldjbooleanUnsafe(field, obj); + break; + case 'B': + result.b = MRT_ReflectGetFieldjbyteUnsafe(field, obj); + break; + case 'C': + result.c = MRT_ReflectGetFieldjcharUnsafe(field, obj); + break; + case 'S': + result.s = MRT_ReflectGetFieldjshortUnsafe(field, obj); + break; + case 'I': + result.i = MRT_ReflectGetFieldjintUnsafe(field, obj); + break; + case 'J': + result.j = MRT_ReflectGetFieldjlongUnsafe(field, obj); + break; + case 'F': + result.f = MRT_ReflectGetFieldjfloatUnsafe(field, obj); + break; + case 'D': + result.d = MRT_ReflectGetFieldjdoubleUnsafe(field, obj); + break; + case 'V': + LOG(FATAL) << "Unreachable: " << shortFieldType; + break; + default: { // reference + result.l = MRT_ReflectGetFieldjobject(field, obj); + break; + } + } + STATEMENTCHECK(doConvert && !ConvertReturnValue(*this, parameterTypes, 
arraySize, result)) + return true; +} + +void MethodHandle::GetValFromVargs(jvalue &value, char shortFieldType) { + switch (shortFieldType) { + case 'Z': + value.z = static_cast(args.GetJint()); + break; + case 'B': + value.b = static_cast(args.GetJint()); + break; + case 'C': + value.c = static_cast(args.GetJint()); + break; + case 'S': + value.s = static_cast(args.GetJint()); + break; + case 'I': + value.i = args.GetJint(); + break; + case 'J': + value.j = args.GetJlong(); + break; + case 'F': + value.f = args.GetJfloat(); + break; + case 'D': + value.d = args.GetJdouble(); + break; + case 'V': + LOG(FATAL) << "Unreachable: " << shortFieldType; + break; + default: { // reference + value.l = *args.GetObject(); + break; + } + } +} + +bool MethodHandle::FieldAccess(MClass **paramTypes, bool doConvert, uint32_t arraySize, jvalue &result) { + switch (handleKind) { + case Kind::kInstanceGet: { + return FieldGet(paramTypes, doConvert, arraySize, result); + } + case Kind::kStaticGet: { + STATEMENTCHECK(!ClinitCheck(GetFieldMeta()->GetDeclaringclass())) + return FieldSGet(paramTypes, doConvert, arraySize, result); + } + case Kind::kInstancePut: { + return FieldPut(paramTypes, doConvert, arraySize); + } + case Kind::kStaticPut: { + STATEMENTCHECK(!ClinitCheck(GetFieldMeta()->GetDeclaringclass())) + return FieldSPut(paramTypes, doConvert, arraySize); + } + default: + LOG(FATAL) << "Unreachable: " << static_cast(handleKind); + BUILTIN_UNREACHABLE(); + } +} + +bool MethodHandle::IsExactMatch() const noexcept { // check for type and nominalType + vector ptypes = methodTypeMplObjCache->GetParamsType(); + vector ptypesNominal = validMethodTypeMplObjCache->GetParamsType(); + STATEMENTCHECK(ptypes.size() != ptypesNominal.size()) + STATEMENTCHECK(!std::equal(ptypes.begin(), ptypes.end(), ptypesNominal.begin())) + return true; +} + +bool MethodHandle::IsExactMatch(MClass **types, uint32_t arraySize, bool isNominal) const noexcept { + const MethodType *methodTypeMplObj = nullptr; + if (isNominal) { + methodTypeMplObj = GetValidMethodTypeMplObj(); + } else { + methodTypeMplObj = GetMethodTypeMplObj(); + } + vector ptypesVal = methodTypeMplObj->GetParamsType(); + size_t calleePtypeSize = arraySize - 1; // remove return type + STATEMENTCHECK(calleePtypeSize != ptypesVal.size()) + STATEMENTCHECK(!std::equal(ptypesVal.begin(), ptypesVal.end(), types)) + return methodTypeMplObj->GetReTType() == types[calleePtypeSize]; +} + +bool MethodHandle::ExactInvokeCheck(const MString *calleeName, MClass **parameterTypes, uint32_t arraySize) const { + DCHECK(calleeName != nullptr); + char *calleeNameStr = reinterpret_cast(calleeName->GetContentsPtr()); + if (!strcmp(calleeNameStr, kInvokeExactMtd)) { + constexpr bool isNominal = true; + if (GetNominalTypeJavaObj() != nullptr) { + if (!IsExactMatch(parameterTypes, arraySize, isNominal)) { + string exceptionStr = GeneExceptionString(*this, parameterTypes, arraySize); + MRT_ThrowNewExceptionUnw("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } + if (IsExactMatch() && IsExactMatch(parameterTypes, arraySize)) { + return true; + } + } else { + if (!IsExactMatch(parameterTypes, arraySize)) { + string exceptionStr = GeneExceptionString(*this, parameterTypes, arraySize); + MRT_ThrowNewExceptionUnw("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } + return true; + } + } + return false; +} + +bool MethodHandle::FillInvokeArgs(const ArgsWrapper &argsWrapper, CallParam ¶mStruct, + BaseArgValue ¶mArray, ScopedHandles &sHandles) const { + if 
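+  // Exact signature match: marshal the arguments as-is; otherwise run the
+  // per-parameter conversion path.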
(paramStruct.isExactMatched) { + FillArgsInfoNoCheck(argsWrapper, paramStruct.typesMark, paramStruct.arraySize, paramArray, paramStruct.beginIndex); + return true; + } + return ConvertParams(paramStruct, *this, argsWrapper, paramArray, sHandles); +} + +void MethodHandle::DirectCall(CallParam paramStruct, jvalue &result) { + ArgValue paramArray(0); + ArgValueInterp paramArrayInterp(0); + MethodMeta *method = GetMethodMeta(); + ArgsWrapper argsWrapper(args); + ScopedHandles sHandles; + + if (!paramStruct.isStaticMethod) { + // get first arg when arraySize >= 2 + if (paramStruct.arraySize >= 2) { + uint32_t argArraySize = paramStruct.arraySize; + paramStruct.arraySize = 2; // Only get receiver + if (!FillInvokeArgs(argsWrapper, paramStruct, paramArray, sHandles)) { + return; + } + paramArrayInterp.AddReference(paramArray.GetReceiver()); + paramStruct.arraySize = argArraySize; + paramStruct.beginIndex = 1; // Skip receiver + } + // GetReceiver will return nullptr if arraySize < 2 + method = GetRealMethod(paramArray.GetReceiver()); + } + CHECK_E_V(method == nullptr, "method is nullptr"); + + BaseArgValue *param = method->NeedsInterp() ? reinterpret_cast(¶mArrayInterp) : + reinterpret_cast(¶mArray); + // get remaining args. + if (!FillInvokeArgs(argsWrapper, paramStruct, *param, sHandles)) { + return; + } + + if (method->NeedsInterp()) { + if (method->IsStatic()) { + result = maplert::interpreter::InterpJavaMethod(method, + nullptr, reinterpret_cast(paramArrayInterp.GetData())); + } else { + result = maplert::interpreter::InterpJavaMethod(method, + paramArrayInterp.GetReceiver(), reinterpret_cast(paramArrayInterp.GetData() + 1)); + } + } else { + DoInvoke(*method, result, paramArray); + } +} + +bool MethodHandle::NoParamFastCall(CallParam ¶mStruct, jvalue &result) { + vector handleTypes = GetMethodTypeMplObj()->GetParamsType(); + MethodMeta *mplFieldOrMethodMeta = GetMethodMeta(); + uint32_t arraySize = paramStruct.paramNum + 1; + if (paramStruct.paramNum == 1 && !paramStruct.isStaticMethod) { + jvalue thisObj; + thisObj.l = *args.GetObject(); + // NPE can not be thrown without this check when isExactMatched is true + if (paramStruct.isExactMatched == true && thisObj.l == nullptr) { + MRT_ThrowNewException("java/lang/NullPointerException", ""); + } + MethodMeta *realMethod = nullptr; + ConvertJStruct convertJStruct = { paramStruct.paramTypes, arraySize, &thisObj }; + if (!paramStruct.isExactMatched && !handleTypes.empty() && + !ConvertJvalue(convertJStruct, *this, paramStruct.paramTypes[0], handleTypes[0])) { + MRT_CheckThrowPendingExceptionUnw(); + } + realMethod = reinterpret_cast(GetRealMethod(reinterpret_cast(thisObj.l))); + result = InvokeMethodNoParameter(reinterpret_cast(thisObj.l), *realMethod); + return true; + } else if (paramStruct.paramNum == 0 && paramStruct.isStaticMethod) { + result = InvokeMethodNoParameter(nullptr, *mplFieldOrMethodMeta); + return true; + } + return false; +} + +void EMSFWriter::WriteByMark(char mark, int8_t *cArray, uint32_t &byteIdx, const jvalue &val, bool isDec) { + switch (mark) { // full through + case 'Z': + case 'B': + case 'C': + case 'S': + case 'I': + WriteToEmStackFrame(cArray, byteIdx, val.i); + break; + case 'F': + WriteToEmStackFrame(cArray, byteIdx, val.f); + break; + case 'J': + WriteToEmStackFrame(cArray, byteIdx, val.j); + break; + case 'D': + WriteToEmStackFrame(cArray, byteIdx, val.d); + break; + default: + if (isDec) { + RC_LOCAL_DEC_REF(val.l); + } + LOG(ERROR) << "WriteByMark() don't match mark: " << mark; + } +} + +void DoCalculate(char 
name, uint32_t &byteArrSize, uint32_t &refNum) { + switch (name) { // full through + case 'I': + case 'B': + case 'F': + case 'C': + case 'S': + case 'Z': + case 'V': + byteArrSize += sizeof(int); + break; + case 'J': + case 'D': + byteArrSize += sizeof(double); + break; + default: + ++refNum; + break; + } +} + +jvalue MethodHandle::InvokeMethodNoParameter(MObject *obj, MethodMeta &mthd) { + if (mthd.NeedsInterp()) { + if ((obj == nullptr) && (!mthd.IsStatic())) { + LOG(ERROR) << "obj can not be nullptr!" << maple::endl; + if (MRT_HasPendingException()) { + MRT_CheckThrowPendingExceptionUnw(); + } + MRT_ThrowNullPointerExceptionUnw(); + } + return interpreter::InterpJavaMethod( + &mthd, obj, reinterpret_cast(nullptr)); + } else { + MClass *methodReturnType = mthd.GetReturnType(); + if (methodReturnType == WellKnown::GetMClassD() || methodReturnType == WellKnown::GetMClassF()) { + jvalue v; + v.d = RuntimeStub::FastCallCompiledMethod(mthd.GetFuncAddress(), obj); + return v; + } else { + return RuntimeStub::FastCallCompiledMethodJ(mthd.GetFuncAddress(), obj); + } + } +} + +bool MethodHandle::FillEmStackFrameArray(const char *typesMark, MClass **parameterTypes, + int8_t *cArray, const MArray *refArray) { + uint32_t refIdx = 0; + const vector &ptypesVal = methodTypeMplObjCache->GetParamsType(); + uint32_t returnTypeIdx = realParamNum; + for (uint32_t i = 0; i < returnTypeIdx; ++i) { + MClass *from = parameterTypes[i]; + jvalue val; + val.j = 0; + GetValFromVargs(val, *from->GetName()); + if (handleKind == kCallSiteTransformCall) { // write according by from type + if (typesMark[i] != 'L') { + EMSFWriter::WriteByMark(*parameterTypes[i]->GetName(), cArray, emByteIdx, val); + } else { + refArray->SetObjectElement(refIdx++, reinterpret_cast(val.l)); + } + continue; + } + + MClass *to = ptypesVal[i]; + ConvertJStruct convertJStruct = { parameterTypes, returnTypeIdx + 1, &val }; + if (from != to) { + STATEMENTCHECK(!ConvertJvalue(convertJStruct, *this, from, to)) + } + + if (MRT_ReflectClassIsPrimitive(*to)) { + EMSFWriter::WriteByMark(*to->GetName(), cArray, emByteIdx, val); + } else { + refArray->SetObjectElement(refIdx++, reinterpret_cast(val.l)); + } + + if (typesMark[i] != 'L' && !MRT_ReflectClassIsPrimitive(*to)) { + RC_LOCAL_DEC_REF(val.l); + } + } + return true; +} + +jvalue EMSFReader::GetRetValFromEmStackFrame(const MArray *refArray, uint32_t refNum, uint32_t idx, + const int8_t *cArray, char mark) { + jvalue retVal; + retVal.j = 0UL; + switch (mark) { + case 'L': + case '[': + retVal.l = reinterpret_cast(refArray->GetObjectElement(refNum - 1)); + break; + case 'J': + retVal.j = GetLongFromEmStackFrame(cArray, idx); + break; + case 'D': { + DoubleLongConvert u; + u.j = GetLongFromEmStackFrame(cArray, idx); + retVal.d = u.d; + break; + } + case 'F': { + FloatIntConvert u; + u.i = GetIntFromEmStackFrame(cArray, idx); + retVal.f = u.f; + break; + } + default: { // I B C S Z + retVal.i = GetIntFromEmStackFrame(cArray, idx); + break; + } + } + return retVal; +} + +void MethodHandle::InvokeTransformVirtualMethod(MObject *emStFrameObj, jvalue &retVal) const { + MethodMeta *transformMethod = GetHandleJClassType()->GetMethod("transform", "(Ldalvik/system/EmulatedStackFrame;)V"); + CHECK_E_V(transformMethod == nullptr, "Class.GetMethod return nullptr"); + ArgValue xregValue(0); + xregValue.AddReference(const_cast(GetHandleJavaObj())); + xregValue.AddReference(emStFrameObj); + if (transformMethod->NeedsInterp()) { + jvalue temp; + temp.l = reinterpret_cast(emStFrameObj); + retVal = 
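+    // Interpreter path: the emulated stack frame is the single argument handed to
+    // the handle's transform() method.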
interpreter::InterpJavaMethod(transformMethod, + const_cast(GetHandleJavaObj()), reinterpret_cast(&temp)); + } else { + RuntimeStub::SlowCallCompiledMethod(transformMethod->GetFuncAddress(), xregValue.GetData(), 0, 0); + } +} + +// transform invoke, marshal EmStackFrame and invoke transform +bool MethodHandle::InvokeTransform(const SizeInfo &sz, const char *typesMark, // 120 + MClass **parameterTypes, jvalue &retVal) { + if (handleKind != kCallSiteTransformCall && sz.arraySize - 1 != methodTypeMplObjCache->GetParamsType().size()) { + if (!MRT_HasPendingException()) { + string exceptionStr = GeneExceptionString(*this, parameterTypes, sz.arraySize); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } + return false; + } + ScopedHandles sHandles; + ObjHandle byteArray( + MRT_NewPrimitiveArray(static_cast(sz.byteArrSize), maple::Primitive::kByte, sizeof(int8_t))); + CHECK_E_B(byteArray() == 0, "MRT_NewPrimitiveArray return nullptr"); + ObjHandle referenceArray( + MRT_NewObjArray(static_cast(sz.refNum), *WellKnown::GetMClassObject(), nullptr)); + CHECK_E_B(referenceArray() == 0, "MRT_NewObjArray return nullptr"); + int8_t *cArray = reinterpret_cast(byteArray.AsArray()->ConvertToCArray()); + realParamNum = sz.arraySize - 1; + if (!FillEmStackFrameArray(typesMark, parameterTypes, cArray, referenceArray.AsArray())) { + return false; + } + ObjHandle emStFrameObj( + CreateEmStackFrame(byteArray.AsArray(), referenceArray.AsObject(), parameterTypes, sz.arraySize)); + CHECK_E_B(emStFrameObj() == 0, "CreateEmStackFrame return nullptr"); + InvokeTransformVirtualMethod(emStFrameObj.AsObject(), retVal); + if (UNLIKELY(MRT_HasPendingException())) { + return false; + } + MClass *returnType = parameterTypes[sz.arraySize - 1]; + if (MRT_ReflectClassIsPrimitive(*returnType)) { + emByteIdx = (returnType == WellKnown::GetMClassD() || returnType == WellKnown::GetMClassJ()) ? 
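+        // Primitive results sit at the tail of the byte array: step back sizeof(jlong)
+        // bytes for long/double, sizeof(int) bytes for the other primitives.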
+ sz.byteArrSize - sizeof(jlong) : sz.byteArrSize - sizeof(int); + } + retVal = EMSFReader::GetRetValFromEmStackFrame( + referenceArray.AsArray(), sz.refNum, emByteIdx, cArray, GetReturnTypeMark(typesMark[sz.arraySize - 1])); + if (handleKind != kCallSiteTransformCall) { + return ConvertReturnValue(*this, parameterTypes, sz.arraySize, retVal); + } else { + return !MRT_HasPendingException(); + } +} + +bool MethodHandle::ParamsConvert(const MClass *from, const MClass *to, MClass **parameterTypes, + uint32_t arraySize, jvalue &internVal) const { + ConvertJStruct convertJStruct = { parameterTypes, arraySize, &internVal }; + STATEMENTCHECK(to != from && !ConvertJvalue(convertJStruct, *this, from, to)) + return true; +} + +static void GetReturnValue(MObject *emStFrameObj, jvalue &result) { + EmStackFrame emStackFrameMplObj = EmStackFrame(emStFrameObj); + MObject *callsiteObj = emStFrameObj->LoadObjectNoRc(WellKnown::GetMFieldEmStackFrameCallsiteOffset()); + MClass *retFromType = + reinterpret_cast(callsiteObj->LoadObjectNoRc(WellKnown::GetMFieldMethodHandlepRTypeOffset())); + int8_t *cArray = emStackFrameMplObj.GetStackFrameNativeBytes(); + uint32_t cArrayLen = emStackFrameMplObj.GetStackFrameNativeBytesCount(); + + int8_t constexpr doubleOrLongLength = 8; + int8_t constexpr intOrFloatLength = 4; + char mark = *(retFromType->GetName()); + uint32_t idx; + switch (mark) { + case 'L': + case '[': { + result.l = reinterpret_cast(emStackFrameMplObj.GetReferencesReturnObj()); + break; + } + case 'D': { + DoubleLongConvert u; + idx = cArrayLen - doubleOrLongLength; + u.j = EMSFReader::GetLongFromEmStackFrame(cArray, idx); + result.d = u.d; + break; + } + case 'F': { + FloatIntConvert u; + idx = cArrayLen - intOrFloatLength; + u.i = EMSFReader::GetIntFromEmStackFrame(cArray, idx); + result.f = u.f; + break; + } + case 'J': { + idx = cArrayLen - doubleOrLongLength; + result.j = EMSFReader::GetLongFromEmStackFrame(cArray, idx); + break; + } + case 'V': { + break; + } + default: { + idx = cArrayLen - intOrFloatLength; + result.i = EMSFReader::GetIntFromEmStackFrame(cArray, idx); + break; + } + } +} + +static bool RecursiveInvokePoly(MObject *emStFrameObj, const MethodHandle &methodHandleMplObj, jvalue &result) { + Kind handleKind = methodHandleMplObj.GetHandleKind(); + if (handleKind == kTransformCall || handleKind == kCallSiteTransformCall) { + ArgValue xregValue(0); + xregValue.AddReference(const_cast(methodHandleMplObj.GetHandleJavaObj())); + xregValue.AddReference(emStFrameObj); + MethodMeta *transformMethod = methodHandleMplObj.GetHandleJClassType()->GetMethod( + "transform", "(Ldalvik/system/EmulatedStackFrame;)V"); + __MRT_ASSERT(transformMethod != nullptr, "RecursiveInvokePoly: transformMethod is null"); + if (transformMethod->NeedsInterp()) { + result = interpreter::InterpJavaMethod(transformMethod, + reinterpret_cast(const_cast(methodHandleMplObj.GetHandleJavaObj())), + reinterpret_cast(emStFrameObj)); + } else { + RuntimeStub::SlowCallCompiledMethod(transformMethod->GetFuncAddress(), xregValue.GetData(), 0, 0); + } + MRT_CheckThrowPendingExceptionUnw(); + GetReturnValue(emStFrameObj, result); + return true; + } + return false; +} + +bool EmStackFrameInvoker::FillParamsForEmStackFrame(BaseArgValue ¶mArray) { + int8_t *cArray = emStackFrameMplObj->GetStackFrameNativeBytes(); // object might moving in ParamsConvert + for (uint32_t i = 0; i < paramLength; ++i) { + MClass *from = typesArr[i]; + MClass *to = (*ptypesOfHandle)[i]; + switch (*to->GetName()) { + case 'I': + case 'B': + case 'C': + case 
'S': + case 'Z': { + paramArray.AddInt32(EMSFReader::GetIntFromEmStackFrame(cArray, byteIdx)); + break; + } + case 'D': { + DoubleLongConvert u; + u.j = EMSFReader::GetLongFromEmStackFrame(cArray, byteIdx); + paramArray.AddDouble(u.d); + break; + } + case 'F': { + FloatIntConvert u; + u.i = EMSFReader::GetIntFromEmStackFrame(cArray, byteIdx); + paramArray.AddFloat(u.f); + break; + } + case 'J': { + paramArray.AddInt64(EMSFReader::GetLongFromEmStackFrame(cArray, byteIdx)); + break; + } + default: { + jvalue value; + value.l = reinterpret_cast(emStackFrameMplObj->GetReferencesObj(refIdx++)); + STATEMENTCHECK(!methodHandleMplObj.ParamsConvert(from, to, typesArr, paramLength, value)); + paramArray.AddReference(reinterpret_cast(value.l)); + break; + } + } + } + return true; +} + +bool EmStackFrameInvoker::InvokeInterpMethod(jvalue &result, MethodMeta &realMethod) { + ArgValueInterp paramArray(0); + STATEMENTCHECK(!FillParamsForEmStackFrame(paramArray)); + if (MRT_HasPendingException()) { + return false; + } + if (realMethod.IsStatic()) { + result = maplert::interpreter::InterpJavaMethod(&realMethod, + nullptr, reinterpret_cast(paramArray.GetData())); + } else { + result = maplert::interpreter::InterpJavaMethod(&realMethod, + paramArray.GetReceiver(), reinterpret_cast(paramArray.GetData() + 1)); + } + return true; +} + +bool EmStackFrameInvoker::InvokeStaticCmpileMethod(jvalue &result, const MethodMeta &realMethod) { + ArgValue paramArray(0); + STATEMENTCHECK(!FillParamsForEmStackFrame(paramArray)); + if (MRT_HasPendingException()) { + return false; + } + DoInvoke(realMethod, result, paramArray); + return true; +} + +bool EmStackFrameInvoker::Invoke(jvalue &result) { + if (RecursiveInvokePoly(emStFrameObj, methodHandleMplObj, result)) { + return true; + } + // check pramater length + if (paramLength != ptypesOfHandle->size()) { + string exceptionStr = GeneExceptionString(methodHandleMplObj, typesArr, paramLength); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + return false; + } // check end + MObject *param0 = emStackFrameMplObj->GetReferencesObj(0); + if (!mplFieldOrMethodMeta->IsStatic() && param0 == nullptr) { + MRT_ThrowNewExceptionUnw("java/lang/NullPointerException", "null receiver"); + } + MethodMeta *realMethod = methodHandleMplObj.GetRealMethod(param0); + STATEMENTCHECK(UNLIKELY(MRT_HasPendingException())) + STATEMENTCHECK(!ClinitCheck(realMethod->GetDeclaringClass())) + // interpreter path + if (realMethod->NeedsInterp()) { + // Interp + STATEMENTCHECK(!InvokeInterpMethod(result, *realMethod)); + } else { + // O2 + STATEMENTCHECK(!InvokeStaticCmpileMethod(result, *realMethod)); + } + STATEMENTCHECK(UNLIKELY(MRT_HasPendingException())) + MClass *retType = mplFieldOrMethodMeta->GetReturnType(); + if (retType == WellKnown::GetMClassV()) { + return true; + } + MObject *callsiteObj = emStFrameObj->LoadObjectNoRc(WellKnown::GetMFieldEmStackFrameCallsiteOffset()); + MClass *retFromType = + reinterpret_cast(callsiteObj->LoadObjectNoRc(WellKnown::GetMFieldMethodHandlepRTypeOffset())); + STATEMENTCHECK(retFromType != WellKnown::GetMClassV() && !ConvertReturnValue(methodHandleMplObj, + typesArr, paramLength + 1, result)) + + // set return value to EMStackFrame + if (!MRT_ReflectClassIsPrimitive(*retFromType)) { + emStackFrameMplObj->PutReferencesObj(reinterpret_cast(result.l), refIdx); + RC_LOCAL_DEC_REF(result.l); + } else { + char typeMark = *(retFromType->GetName()); + int8_t *cArray = emStackFrameMplObj->GetStackFrameNativeBytes(); + 
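+    // Primitive return value: write it back into the frame's byte array via WriteByMark.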
EMSFWriter::WriteByMark(typeMark, cArray, byteIdx, result, !mplFieldOrMethodMeta->IsConstructor()); + } + return true; +} + +bool MethodHandle::InvokeWithEmStackFrame(MObject *emStFrameObj, jvalue &result) { + CHECK_E_B(emStFrameObj == nullptr, "InvokeWithEmStackFrame : emStFrameObj is nullptr"); + EmStackFrameInvoker invoker(emStFrameObj, *this); + return invoker.Invoke(result); +} + +MObject *MethodHandle::GetMemberInternal(const MObject *methodHandle) { + DCHECK(methodHandle != nullptr); + Kind tkind = static_cast(methodHandle->Load(WellKnown::GetMFieldMethodHandleHandleKindOffset(), false)); + long fieldOrMethodMeta = methodHandle->Load(WellKnown::GetMFieldMethodHandleArtFieldOrMethodOffset(), false); + if (tkind >= Kind::kInstanceGet) { + FieldMeta *fieldMeta = reinterpret_cast(fieldOrMethodMeta); + MObject *mField = MField::NewMFieldObject(*fieldMeta); + return mField; + } else { + MethodMeta *methodMeta = reinterpret_cast(fieldOrMethodMeta); + MObject *mMethod = MMethod::NewMMethodObject(*methodMeta); + return mMethod; + } +} + +extern "C" jobject MRT_MethodHandleImplGetMemberInternal(const jobject methodHandle) { + MObject *internal = MethodHandle::GetMemberInternal(MObject::JniCast(methodHandle)); + DCHECK(internal != nullptr); + return internal->AsJobject(); +} +} // namespace maple diff --git a/src/mrt/maplert/src/mrt_methodhandle_mpl.cpp b/src/mrt/maplert/src/mrt_methodhandle_mpl.cpp new file mode 100644 index 0000000000..3845bca388 --- /dev/null +++ b/src/mrt/maplert/src/mrt_methodhandle_mpl.cpp @@ -0,0 +1,766 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_methodhandle_mpl.h" +namespace maplert { +static MClass *GetClassFromDescriptorAndCalSize(const char *desc, vector &typesMark, const MClass *callerCls) { + DCHECK(desc != nullptr) << "GetClassFromDescriptorAndCalSize::descriptor is nullptr" << maple::endl; + switch (*desc) { + case 'I': + typesMark.push_back('I'); + return WellKnown::GetMClassI(); + case 'B': + typesMark.push_back('B'); + return WellKnown::GetMClassB(); + case 'F': + typesMark.push_back('F'); + return WellKnown::GetMClassF(); + case 'C': + typesMark.push_back('C'); + return WellKnown::GetMClassC(); + case 'S': + typesMark.push_back('S'); + return WellKnown::GetMClassS(); + case 'J': + typesMark.push_back('J'); + return WellKnown::GetMClassJ(); + case 'D': + typesMark.push_back('D'); + return WellKnown::GetMClassD(); + case 'Z': + typesMark.push_back('Z'); + return WellKnown::GetMClassZ(); + case 'V': + typesMark.push_back('V'); + return WellKnown::GetMClassV(); + default: + typesMark.push_back('L'); + string str(desc); + return reinterpret_cast(MRT_GetClassByContextClass(*callerCls, str)); + } +} + +// the last element is return type +void GetParameterAndRtType(vector &types, vector &typesMark, + MObject *protoStr, const MClass *declareClass) { + const char *methodSig = MRT_GetStringContentsPtrRaw(reinterpret_cast(protoStr)); + int count = MRT_StringGetStringLength(reinterpret_cast(protoStr)); + + DCHECK(methodSig != nullptr) << "GetParameterAndRtType::methodSig is nullptr" << maple::endl; + ++methodSig; + size_t len = static_cast(count) + 1; + char descriptor[len]; + + while (*methodSig != ')') { + if (memset_s(descriptor, len, 0, len) != EOK) { + LOG(FATAL) << "memset_s fail." << maple::endl; + } + ParseSignatrueType(descriptor, methodSig); + MClass *ptype = GetClassFromDescriptorAndCalSize(descriptor, typesMark, declareClass); + types.push_back(ptype); + ++methodSig; + } + ++methodSig; + if (memset_s(descriptor, len, 0, len) != EOK) { + LOG(FATAL) << "memset_s fail." 
<< maple::endl; + } + ParseSignatrueType(descriptor, methodSig); + MClass *rtType = GetClassFromDescriptorAndCalSize(descriptor, typesMark, declareClass); + types.push_back(rtType); +} + +void VargToJvalueArray(vector ¶mArray, VArg &varg, uint32_t paramNum, vector &typesMark) { + for (uint32_t i = 0; i < paramNum; ++i) { + jvalue val; + switch (typesMark[i]) { + case 'C': + val.c = static_cast(varg.GetJint()); + break; + case 'B': + val.b = static_cast(varg.GetJint()); + break; + case 'S': + val.s = static_cast(varg.GetJint()); + break; + case 'Z': + val.z = static_cast(varg.GetJint()); + break; + case 'I': + val.i = varg.GetJint(); + break; + case 'D': + val.d = varg.GetJdouble(); + break; + case 'F': + val.f = varg.GetJfloat(); + break; + case 'J': + val.j = varg.GetJlong(); + break; + default: + val.l = *varg.GetObject(); + break; + } + paramArray.push_back(val); + } +} + +jvalue MethodHandleCallEnter(MString *calleeName, MString *protoString, uint32_t paramNum, + MObject *methodHandle, VArg &args, const MClass *declareClass) { + if (methodHandle == nullptr) { + maplert::MRT_ThrowNewExceptionUnw("java/lang/NullPointerException", ""); + } + vector typesMark; + vector paramTypes; + vector paramValueArray; + + GetParameterAndRtType(paramTypes, typesMark, protoString, declareClass); + VargToJvalueArray(paramValueArray, args, paramNum, typesMark); + MethodHandleMpl methodHandleMplObj(methodHandle, MethodHandleMpl::IsExactInvoke(calleeName)); + methodHandleMplObj.CheckReturnType(); + jvalue val = methodHandleMplObj.invoke(paramValueArray, paramNum, typesMark, paramTypes); + MRT_CheckThrowPendingExceptionUnw(); + return val; +} + +void DropArguments(vector ¶mArray, uint32_t ¶mNum, const MObject *data, + vector &typesMark, vector &cSTypes) { + size_t numDroppedOffset = WellKnown::GetMFieldDropArgumentsDataNumDroppedOffset(); + size_t startPosOffset = WellKnown::GetMFieldDropArgumentsDataStartPosOffset(); + int32_t numDropped = MRT_LOAD_JINT(data, numDroppedOffset); + int32_t startPos = MRT_LOAD_JINT(data, startPosOffset); + uint32_t newSize = paramNum - static_cast(numDropped); + typesMark.erase(typesMark.begin() + startPos, typesMark.begin() + numDropped + startPos); + cSTypes.erase(cSTypes.begin() + startPos, cSTypes.begin() + numDropped + startPos); + paramArray.erase(paramArray.begin() + startPos, paramArray.begin() + numDropped + startPos); + paramNum = newSize; +} + +void BindTo(vector ¶mArray, const MObject *data, uint32_t ¶mNum, vector &typesMark, + vector &cSTypes) { + size_t receiverOffset = WellKnown::GetMFieldBindToDataReceiverOffset(); + MObject *receiver = reinterpret_cast(data->LoadObjectNoRc(receiverOffset)); + if (receiver == nullptr) { + MRT_ThrowNewExceptionUnw("java/lang/NullPointerException", ""); + return; + } + MClass *receiverCls = receiver->GetClass(); + jvalue val; + val.l = reinterpret_cast(receiver); + paramArray.insert(paramArray.begin(), val); + typesMark.insert(typesMark.begin(), 'L'); + cSTypes.insert(cSTypes.begin(), receiverCls); + ++paramNum; +} + +jvalue MethodHandleMpl::FilterReturnValue(vector ¶mArray, const uint32_t paramNum, + const MObject *data, vector &typesMark, vector &cSTypes) { + size_t targetOffset = WellKnown::GetMFieldFilterReturnValueDataTargetOffset(); + size_t filterOffset = WellKnown::GetMFieldFilterReturnValueDataFilterOffset(); + MObject *target = reinterpret_cast(data->LoadObjectNoRc(targetOffset)); + MObject *filter = reinterpret_cast(data->LoadObjectNoRc(filterOffset)); + MethodHandleMpl targetHandle(target, isInvokeExact); + 
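+  // filterReturnValue: invoke the target handle first, then feed its (non-void)
+  // result to the filter handle as that handle's single argument.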
MethodHandleMpl filterHandle(filter, isInvokeExact); + + jvalue val = targetHandle.invoke(paramArray, paramNum, typesMark, cSTypes, false); + vector filterParams; + filterParams.push_back(val); // no needed if returnytpe=V + MObject *finalType = targetHandle.typeArray->GetObjectElementNoRc(0); + MethodType mt(finalType); + MClass *returnType = const_cast(mt.GetReTType()); + vector retTypeMark; + retTypeMark.push_back(returnType->GetName()[0]); + if (returnType != WellKnown::GetMClassV() && returnType != WellKnown::GetMClassVoid()) { + MClass *csRetType = cSTypes.back(); + cSTypes.clear(); + cSTypes.push_back(returnType); + cSTypes.push_back(csRetType); + jvalue ret = filterHandle.invoke(filterParams, 1, retTypeMark, cSTypes); + if (!returnType->IsPrimitiveClass()) { + RC_LOCAL_DEC_REF(val.l); + } + return ret; + // filterReturnValue Transform just have one param + } else { + MClass *csRetType = cSTypes.back(); + cSTypes.clear(); + cSTypes.push_back(csRetType); + return filterHandle.invoke(filterParams, 0, retTypeMark, cSTypes); + } +} + +void PermuteArguments(vector ¶mArray, const uint32_t paramNum, const MObject *data, vector &typesMark, + vector &cSTypes) { + size_t targetOffset = WellKnown::GetMFieldPermuteArgumentsDataTargetOffset(); + size_t reorderOffset = WellKnown::GetMFieldPermuteArgumentsDataReorderOffset(); + jobject target = reinterpret_cast(data->LoadObjectNoRc(targetOffset)); + if (target == nullptr) { + MRT_ThrowNewExceptionUnw("java/lang/NullPointerException", ""); + return; + } + int *reorder = reinterpret_cast( + reinterpret_cast(data->LoadObjectNoRc(reorderOffset))->ConvertToCArray()); + vector paramArrayS = paramArray; + vector typesMarkS = typesMark; + vector cSTypeS = cSTypes; + for (uint32_t i = 0; i < paramNum; ++i) { + uint32_t idx = static_cast(reorder[i]); + paramArray[i] = paramArrayS[idx]; + typesMark[i] = typesMarkS[idx]; + cSTypes[i] = cSTypeS[idx]; + } +} + +static MethodMeta *RefineTargetMethodMpl(MethodMeta *method, const MObject *ref) { + MClass *decClass = method->GetDeclaringClass(); + MClass *referClass = ref->GetClass(); + if (decClass != referClass && referClass != nullptr) { + MethodMeta *realMthd = referClass->GetMethod(method->GetName(), method->GetSignature()); + if (realMthd == nullptr) { + string msg = GeneIllegalArgumentExceptionString(referClass, decClass); + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + return method; + } + return realMthd; + } + return method; +} + +jvalue FinalNode(uint32_t paramNum, vector ¶mPtr, vector &typesMarkPtr, MObject *meta) { + ArgValue paramArray(0); + JValueArg valueArg(paramPtr.data()); + ArgsWrapper argsWrapper(valueArg); + FillArgsInfoNoCheck(argsWrapper, typesMarkPtr.data(), paramNum + 1, paramArray); + jvalue result; + result.l = 0UL; + MethodMeta *method = reinterpret_cast(meta); + if (!method->IsStatic()) { + method = RefineTargetMethodMpl(reinterpret_cast(meta), paramArray.GetReceiver()); + } + DoInvoke(*method, result, paramArray); + return result; +} + +string GeneExpectedNotMatchExceptionString(const MethodType &mt, vector cSTypes) { + string str = "expected ("; + string className; + vector types = mt.GetParamsType(); + size_t typeSize = types.size(); + for (size_t i = 0; i < typeSize; ++i) { + className.clear(); + types[i]->GetTypeName(className); + size_t idx = className.rfind("."); + str += className.substr(idx + 1); + if (i != typeSize - 1) { + str += ","; + } + } + str += ")"; + className.clear(); + const MClass *retType = mt.GetReTType(); // return type + 
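+  // Append the expected return type, then render the call-site signature the same
+  // way after " but found ".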
retType->GetTypeName(className); + size_t index = className.rfind("."); + str += className.substr(index + 1); + + str += " but found ("; + size_t paramNum = cSTypes.size() - 1; // Remove the return value. + for (size_t i = 0; i < paramNum; ++i) { + className.clear(); + cSTypes[i]->GetTypeName(className); + size_t idx = className.rfind("."); + str += className.substr(idx + 1); + if (i != paramNum - 1) { + str += ","; + } + } + str += ")"; + className.clear(); + cSTypes[paramNum]->GetTypeName(className); // return value + str += className; + return str; +} + +class ConverterMPL { + public: + ConverterMPL(const MethodType mt, const vector types, jvalue *val, const MClass *from, const MClass *to) + : from(from), to(to), value(val), methodType(mt), cSTypes(types) { + toShortType = to ? *to->GetName() : 'V'; + fromShortType = *from->GetName(); + } + + ~ConverterMPL() { + from = nullptr; + to = nullptr; + value = nullptr; + } + + bool Convert() noexcept { + if (from == to) { + return true; + } + bool toIsPrim = (to != nullptr) ? to->IsPrimitiveClass() : true; + if (!toIsPrim && (from == WellKnown::GetMClassVoid() || from == WellKnown::GetMClassV())) { + return true; + } + jvalue kSrcValue(*(value)); + (*(value)).j = 0; + bool fromIsPrim = (from != nullptr) ? from->IsPrimitiveClass() : true; + if (fromIsPrim && toIsPrim) { + if (UNLIKELY(!primitiveutil::ConvertNarrowToWide(fromShortType, toShortType, kSrcValue, *(value)))) { + ThrowWMT(); + return false; + } + return true; + } else if (!fromIsPrim && !toIsPrim) { + if (kSrcValue.l) { + from = reinterpret_cast(kSrcValue.l)->GetClass(); + } + value->l = kSrcValue.l; + if (UNLIKELY(!MRT_ReflectClassIsAssignableFrom(*to, *from))) { + MRT_ThrowNewException("java/lang/ClassCastException", GeneClassCastExceptionString(from, to).c_str()); + return false; + } + return true; + } else if (!toIsPrim) { + return ConvertPrimToObj(kSrcValue); + } else { + bool ret = ConvertObjToPrim(kSrcValue); + if (ret == false) { + value->j = kSrcValue.j; + } + return ret; + } + } + + void SetDec() { + needDec = true; + } + + private: + bool ConvertObjToPrim(const jvalue &kSrcValue) noexcept { + char unboxedType = '0'; + jvalue unboxedVal; + unboxedVal.j = 0; + if (UNLIKELY(!GetPrimShortTypeAndValue(reinterpret_cast(kSrcValue.l), unboxedType, unboxedVal, from))) { + ThrowWMT(); + return false; + } + + if (UNLIKELY(kSrcValue.l == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", ""); + return false; + } + if (needDec) { + RC_LOCAL_DEC_REF(kSrcValue.l); + } + + if (UNLIKELY(!primitiveutil::ConvertNarrowToWide(unboxedType, toShortType, unboxedVal, *(value)))) { + if (from == WellKnown::GetMClassNumber()) { // boxed to type must be assignablefrom number + MRT_ThrowNewException("java/lang/ClassCastException", GeneClassCastExceptionString(from, to).c_str()); + } else { + ThrowWMT(); + } + return false; + } + return true; + } + + bool ConvertPrimToObj(const jvalue &kSrcValue) noexcept { + char type; + if (!GetPrimShortType(to, type)) { + if (CheckPrimitiveCanBoxed(fromShortType) && (to == WellKnown::GetMClassNumber() || + to == WellKnown::GetMClassObject())) { + type = fromShortType; + } else { + ThrowWMT(); + return false; + } + } else if (UNLIKELY(fromShortType != type)) { + ThrowWMT(); + return false; + } + + if (UNLIKELY(!primitiveutil::ConvertNarrowToWide(fromShortType, type, kSrcValue, *(value)))) { + ThrowWMT(); + return false; + } + jobject boxed = reinterpret_cast(primitiveutil::BoxPrimitive(type, kSrcValue)); + value->l = boxed; + return true; + } + + 
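+  // Builds the WrongMethodTypeException text of the form
+  // "cannot convert MethodHandle(<handle types>)<ret> to (<call-site types>)<ret>",
+  // using package-stripped type names.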
string GeneConvertFailExceptionString(const MethodType mt, vector callsiteTypes) const { + string str = "cannot convert MethodHandle("; + string className; + vector types = mt.GetParamsType(); + size_t paramNum = callsiteTypes.size() - 1; // Remove the return value. + uint32_t typesIdx = 0; + for (size_t i = 0; i < paramNum; ++i) { + className.clear(); + if (types[typesIdx]->IsArrayClass()) { + types[typesIdx]->GetTypeName(className); + size_t idxL = className.rfind("."); + size_t idxR = className.rfind("["); + str += className.substr(idxL + 1, idxR - idxL - 1); + } else { + types[i]->GetTypeName(className); + size_t idx = className.rfind("."); + str += className.substr(idx + 1); + ++typesIdx; + } + if (i != paramNum - 1) { + str += ","; + } + } + str += ")"; + className.clear(); + const MClass *retType = mt.GetReTType(); // return type + retType->GetTypeName(className); + size_t index = className.rfind("."); + str += className.substr(index + 1); + str += " to ("; + for (size_t i = 0; i < paramNum; ++i) { + className.clear(); + callsiteTypes[i]->GetTypeName(className); + size_t idx = className.rfind("."); + str += className.substr(idx + 1); + if (i != paramNum - 1) { + str += ","; + } + } + str += ")"; + className.clear(); + callsiteTypes[paramNum]->GetTypeName(className); // return value + index = className.rfind("."); + str += className.substr(index + 1); + return str; + } + + void ThrowWMT() const noexcept { + string exceptionStr = GeneConvertFailExceptionString(methodType, cSTypes); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } + + const MClass *from; + const MClass *to; + jvalue *value; + bool needDec = false; + char toShortType; + char fromShortType; + MethodType methodType; + vector cSTypes; +}; + +bool ConvertJvalueMPL(const MethodType mt, const vector cSTypes, + jvalue *val, const MClass *from, const MClass *to) { + ConverterMPL converter(mt, cSTypes, val, from, to); + return converter.Convert(); +} + +bool ConvertJvalueWithDecMPL(const MethodType mt, const vector cSTypes, jvalue *val, + const MClass *from, const MClass *to) { + ConverterMPL converter(mt, cSTypes, val, from, to); + converter.SetDec(); + return converter.Convert(); +} + +bool MethodHandleMpl::CheckParamsType(vector &cSTypes, uint32_t csTypesNum, vector ¶mPtr, + uint32_t idx, bool checkRet) { + MObject *type = typeArray->GetObjectElementNoRc(idx); + MethodType mt(type); + const vector ¶msVec = mt.GetParamsType(); + if (paramsVec.size() != csTypesNum) { + string exceptionStr = GeneExpectedNotMatchExceptionString(mt, cSTypes); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + return false; + } + if (isInvokeExact) { // todo, consider nominal type + if (std::equal(paramsVec.begin(), paramsVec.end(), cSTypes.begin()) && + (!checkRet || mt.GetReTType() == cSTypes[csTypesNum])) { + return true; + } + string exceptionStr = GeneExpectedNotMatchExceptionString(mt, cSTypes); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + return false; + } + for (uint32_t i = 0; i < csTypesNum; ++i) { + if (!ConvertJvalueMPL(mt, cSTypes, ¶mPtr[i], cSTypes[i], paramsVec[i])) { + string exceptionStr = GeneExpectedNotMatchExceptionString(mt, cSTypes); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + return false; + } + bool fromIsPrim = (cSTypes[i] != nullptr) ? cSTypes[i]->IsPrimitiveClass() : true; + bool toIsPrim = (paramsVec[i] != nullptr) ? 
paramsVec[i]->IsPrimitiveClass() : true; + if (fromIsPrim && !toIsPrim) { + ObjHandle convertedVal(reinterpret_cast(paramPtr[i].j)); + } + } + if (checkRet && mt.GetReTType() != cSTypes[csTypesNum]) { + string exceptionStr = GeneExpectedNotMatchExceptionString(mt, cSTypes); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + return false; + } + return true; +} + +bool ConvertReturnValueMPL(const MethodType mt, const vector cSTypes, + const MClass *from, const MClass *to, jvalue *val) { + if (from == to) { + return true; + } + if (to == WellKnown::GetMClassV()) { + if (!from->IsPrimitiveClass()) { + RC_LOCAL_DEC_REF(val->l); + } + return true; + } + if (from == WellKnown::GetMClassVoid() || from == WellKnown::GetMClassV()) { + val->l = 0L; + return true; + } + bool convertResult = ConvertJvalueWithDecMPL(mt, cSTypes, val, from, to); + if (!convertResult) { + RC_LOCAL_DEC_REF(val->l); + } + return convertResult; +} +static MArray *GeneratePrimMArray(const MethodType mt, uint32_t arrayLength, vector &cSTypes, + vector ¶mPtr, const MClass *elemType) { + ObjHandle arrayHandle(MArray::NewPrimitiveArrayComponentClass(arrayLength, *elemType)); + for (uint32_t i = 0; i < arrayLength; ++i) { + uint32_t idx = static_cast(cSTypes.size()) - arrayLength + i - 1; + jvalue val = paramPtr[idx]; + if (!ConvertJvalueMPL(mt, cSTypes, &val, cSTypes[idx], elemType)) { + return nullptr; + } + switch (*elemType->GetName()) { + case 'I': + arrayHandle()->SetPrimitiveElement(i, val.i); + break; + case 'B': + arrayHandle()->SetPrimitiveElement(i, val.b); + break; + case 'S': + arrayHandle()->SetPrimitiveElement(i, val.s); + break; + case 'C': + arrayHandle()->SetPrimitiveElement(i, val.c); + break; + case 'Z': + arrayHandle()->SetPrimitiveElement(i, val.z); + break; + case 'D': + arrayHandle()->SetPrimitiveElement(i, val.d); + break; + case 'F': + arrayHandle()->SetPrimitiveElement(i, val.f); + break; + case 'J': + arrayHandle()->SetPrimitiveElement(i, val.j); + break; + default: + arrayHandle()->SetPrimitiveElement(i, val.l); + break; + } + } + return reinterpret_cast(arrayHandle.Return()); +} + +static MArray *GenerateObjMArray(const MethodType mt, uint32_t arrayLength, vector &cSTypes, + vector ¶mPtr, const MClass *elemType) { + ObjHandle arrayHandle(MArray::NewObjectArrayComponentClass(arrayLength, *WellKnown::GetMClassObject())); + for (uint32_t i = 0; i < arrayLength; ++i) { + uint32_t idx = static_cast(cSTypes.size()) - arrayLength + i - 1; + jvalue val = paramPtr[idx]; + if (!ConvertJvalueMPL(mt, cSTypes, &val, cSTypes[idx], elemType)) { + return nullptr; + } + if (cSTypes[idx]->IsPrimitiveClass()) { + ObjHandle convertedVal(reinterpret_cast(val.j)); + } + arrayHandle()->SetObjectElement(i, reinterpret_cast(val.l)); + } + return reinterpret_cast(arrayHandle.Return()); +} + +static MArray *GenerateMArray(const MethodType mt, uint32_t arrayLength, vector &cSTypes, + vector ¶mPtr, const MClass *elemType) { + if (elemType->IsPrimitiveClass()) { + return GeneratePrimMArray(mt, arrayLength, cSTypes, paramPtr, elemType); + } + return GenerateObjMArray(mt, arrayLength, cSTypes, paramPtr, elemType); +} + +class ReturnInfoClosure { + public: + ReturnInfoClosure(vector &mark, vector &cls) : typesMark(mark), cSTypes(cls) { + returnMark = typesMark.back(); + typesMark.pop_back(); + returnType = cSTypes.back(); + cSTypes.pop_back(); + } + ~ReturnInfoClosure() noexcept { + typesMark.push_back(returnMark); + cSTypes.push_back(returnType); + returnType = nullptr; + } + + private: + 
vector &typesMark; + vector &cSTypes; + char returnMark; + MClass *returnType; +}; + +void MethodHandleMpl::PrepareVarg(vector ¶m, vector &mark, + vector &types, uint32_t &csTypesNum) { + // do not prepare for invokeExact + if (isInvokeExact) { + return; + } + MethodMeta *method = GetMeta(transformNum - 1); + if (method != nullptr && modifier::IsVarargs(method->GetMod())) { + MethodType mt(typeArray->GetObjectElementNoRc(transformNum - 1)); + constexpr int8_t nParamNum = 2; + if (1 + mt.GetParamsType().size() == types.size() && + (types.size() > 1 && types[types.size() - nParamNum]->IsArrayClass())) { + // do nothing ? or check last param type isAssignableFrom + } else if (types.size() < mt.GetParamsType().size() - 1) { + string exceptionStr = GeneExpectedNotMatchExceptionString(mt, types); + MRT_ThrowNewException("java/lang/invoke/WrongMethodTypeException", exceptionStr.c_str()); + } else { // convert callsite param to a java array + size_t arrayLength; // calculate convert num + if (mt.GetParamsType().size() == 1) { // one param, just an object array, convert all callsite params + arrayLength = method->IsStatic() ? types.size() - 1 : types.size(); + } else { + arrayLength = types.size() - mt.GetParamsType().size(); + } + ObjHandle arrayHandle(GenerateMArray( + mt, static_cast(arrayLength), types, param, mt.GetParamsType().back()->GetComponentClass())); + if (arrayHandle() == nullptr) { + return; + } + { + size_t iter = arrayLength; + ReturnInfoClosure info(mark, types); + while (iter-- != 0) { + param.pop_back(); + mark.pop_back(); + types.pop_back(); + } + jvalue val; + val.l = reinterpret_cast(arrayHandle()); + param.push_back(val); + mark.push_back('L'); + types.push_back(mt.GetParamsType().back()); + } + } + csTypesNum = static_cast(param.size()); + } +} + +string MethodHandleMpl::GeneNoSuchMethodExceptionString(const MethodType &methodType, const MethodMeta &method) { + string str = "no such method: "; + string className; + MClass *cls = method.GetDeclaringClass(); + cls->GetTypeName(className); + str += className; + str += "."; + str += method.GetName(); + vector types = methodType.GetParamsType(); + size_t typesSize = types.size(); + str += "("; + for (size_t i = 0; i < typesSize; ++i) { + className.clear(); + types[i]->GetTypeName(className); + size_t idx = className.rfind("."); + str += className.substr(idx + 1); + if (i != typesSize - 1) { + str += ","; + } + } + str += ")"; + const MClass *retType = methodType.GetReTType(); + className.clear(); + retType->GetTypeName(className); + str += className; + // add invokeVirtual/invokeStatic + return str; +} + +void MethodHandleMpl::CheckReturnType() { + MethodMeta *method = GetMeta(transformNum - 1); + MObject *type = typeArray->GetObjectElementNoRc(0); + if (type != nullptr && method != nullptr) { + MethodType mt(type); + if (mt.GetReTType() != method->GetReturnType()) { + string exceptionStr = GeneNoSuchMethodExceptionString(mt, *method); + MRT_ThrowNewException("java/lang/NoSuchMethodException", exceptionStr.c_str()); + } + } +} + +jvalue MethodHandleMpl::invoke(vector ¶mPtr, uint32_t csTypesNum, vector &typesMark, + vector &cSTypes, bool convertRetVal) { + jvalue result; + result.l = 0; + ScopedHandles sHandles; + PrepareVarg(paramPtr, typesMark, cSTypes, csTypesNum); + if (MRT_HasPendingException()) { + return result; + } + int32_t *opCArray = reinterpret_cast(MRT_JavaArrayToCArray(opArray->AsJarray())); + for (int64_t i = transformNum - 1; i >= 0; --i) { + uint32_t idx = static_cast(i); + MObject *data = 
dataArray->GetObjectElementNoRc(idx); + switch ((OptionFlag)opCArray[idx]) { + case OptionFlag::kDropArguments: { + if (!CheckParamsType(cSTypes, csTypesNum, paramPtr, idx)) { + return result; + } + DropArguments(paramPtr, csTypesNum, data, typesMark, cSTypes); + break; + } + case OptionFlag::kFinal: { + if (!CheckParamsType(cSTypes, csTypesNum, paramPtr, idx, false)) { + return result; + } + result = FinalNode(csTypesNum, paramPtr, typesMark, metaArray->GetObjectElementNoRc(idx)); + break; + } + case OptionFlag::kFilterReturnValue: + // filterReturnValueChecks in java, so unnecessary check + result = FilterReturnValue(paramPtr, csTypesNum, data, typesMark, cSTypes); + break; + case OptionFlag::kBindto: { + BindTo(paramPtr, data, csTypesNum, typesMark, cSTypes); + if (MRT_HasPendingException()) { + return result; + } + break; + } + case OptionFlag::kPermuteArguments: + // permuteArgumentChecks in java, so unnecessary check + PermuteArguments(paramPtr, csTypesNum, data, typesMark, cSTypes); + break; + } + } + MObject *finalType = typeArray->GetObjectElementNoRc(0); + if (convertRetVal && finalType != nullptr) { // filterReturnValueChecks in java, so unnecessary check + MethodType finalMT(finalType); + ConvertReturnValueMPL(finalMT, cSTypes, finalMT.GetReTType(), cSTypes[csTypesNum], &result); + } + return result; +} +} diff --git a/src/mrt/maplert/src/mrt_module_init.c__ b/src/mrt/maplert/src/mrt_module_init.c__ new file mode 100644 index 0000000000..e9b65d2359 --- /dev/null +++ b/src/mrt/maplert/src/mrt_module_init.c__ @@ -0,0 +1,204 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ + +// This file shouldn't be included in libmaplert, instead, +// it should be linked statically into each linking target (be it an executable or a shared library) +extern "C" { + +#define MRT_EXPORT __attribute__((visibility("default"))) + +extern char *__reflection_strtab_start__; +extern char *__reflection_strtab_end__; +extern char *__reflection_start_hot_strtab_start__; +extern char *__reflection_start_hot_strtab_end__; +extern char *__reflection_both_hot_strtab_start__; +extern char *__reflection_both_hot_strtab_end__; +extern char *__reflection_run_hot_strtab_start__; +extern char *__reflection_run_hot_strtab_end__; + +extern char *__muid_range_tab_begin__; +extern char *__muid_range_tab_end__; + +extern char *__compilerVersionNumTab_begin__; +extern char *__compilerVersionNumTab_end__; + +extern char *__compiler_mfile_status_begin__; +extern char *__compiler_mfile_status_end__; + +extern char *__decouple_start__; +extern char *__decouple_end__; + +extern char *__muid_tab_start; +extern char *__muid_tab_end; +extern char *__eh_frame_start; +extern char *__eh_frame_end; + +extern void *mfile_rometadata_method_start; +extern void *mfile_rometadata_method_end; +extern void *mfile_rometadata_field_start; +extern void *mfile_rometadata_field_end; +extern void *mfile_romuidtab_start; +extern void *mfile_romuidtab_end; + +extern void *__maple_start__; +extern void *__maple_end__; + +extern char *__bb_profile_strtab_begin; +extern char *__bb_profile_strtab_end; +extern char *__bb_profile_tab_begin; +extern char *__bb_profile_tab_end; + +MRT_EXPORT char *MRT_GetMapleStart() { + return (char*)&__maple_start__; +} + +MRT_EXPORT char *MRT_GetMapleEnd() { + return (char*)&__maple_end__; +} + +// version query interface; +MRT_EXPORT char *MRT_GetVersionTabBegin() { + return (char*)&__compilerVersionNumTab_begin__; +} + +MRT_EXPORT char *MRT_GetVersionTabEnd() { + return (char*)&__compilerVersionNumTab_end__; +} + +MRT_EXPORT char *MRT_GetMFileStatusBegin() { + return (char*)&__compiler_mfile_status_begin__; +} + +MRT_EXPORT char *MRT_GetMFileStatusEnd() { + return (char*)&__compiler_mfile_status_end__; +} + +// reflection strtab query interface; +MRT_EXPORT char *MRT_GetColdStrTabBegin() { + return (char*)&__reflection_strtab_start__; +} + +MRT_EXPORT char *MRT_GetColdStrTabEnd() { + return (char*)&__reflection_strtab_end__; +} + +MRT_EXPORT char *MRT_GetStartHotStrTabBegin() { + return (char*)&__reflection_start_hot_strtab_start__; +} + +MRT_EXPORT char *MRT_GetStartHotStrTabEnd() { + return (char*)&__reflection_start_hot_strtab_end__; +} + +MRT_EXPORT char *MRT_GetBothHotStrTabBegin() { + return (char*)&__reflection_both_hot_strtab_start__; +} + +MRT_EXPORT char *MRT_GetBothHotStrTabEnd() { + return (char*)&__reflection_both_hot_strtab_end__; +} + +MRT_EXPORT char *MRT_GetRunHotStrTabBegin() { + return (char*)&__reflection_run_hot_strtab_start__; +} + +MRT_EXPORT char *MRT_GetRunHotStrTabEnd() { + return (char*)&__reflection_run_hot_strtab_end__; +} + +MRT_EXPORT void *MRT_GetRangeTableBegin() { + return reinterpret_cast(&__muid_range_tab_begin__); +} + +MRT_EXPORT void *MRT_GetRangeTableEnd() { + return reinterpret_cast(&__muid_range_tab_end__); +} + +MRT_EXPORT void *MRT_GetMuidTabBegin() { + return reinterpret_cast(&__muid_tab_start); +} + +MRT_EXPORT void *MRT_GetMuidTabEnd() { + return reinterpret_cast(&__muid_tab_end); +} + +MRT_EXPORT void *MRT_GetDecoupleTabBegin() { + return reinterpret_cast(&__decouple_start__); +} + +MRT_EXPORT void *MRT_GetDecoupleTabEnd() { + return 
reinterpret_cast(&__decouple_end__); +} + +MRT_EXPORT void *MRT_GetEhframeStart() { + return reinterpret_cast(&__eh_frame_start); +} + +MRT_EXPORT void *MRT_GetEhframeEnd() { + return reinterpret_cast(&__eh_frame_end); +} + +MRT_EXPORT void *MRT_GetMFileROMetadataMethodStart() { + return reinterpret_cast(&mfile_rometadata_method_start); +} + +MRT_EXPORT void *MRT_GetMFileROMetadataMethodEnd() { + return reinterpret_cast(&mfile_rometadata_method_end); +} + +MRT_EXPORT void *MRT_GetMFileROMetadataFieldStart() { + return reinterpret_cast(&mfile_rometadata_field_start); +} + +MRT_EXPORT void *MRT_GetMFileROMetadataFieldEnd() { + return reinterpret_cast(&mfile_rometadata_field_end); +} + +MRT_EXPORT void *MRT_GetMFileROMuidtabStart() { + return reinterpret_cast(&mfile_romuidtab_start); +} + +MRT_EXPORT void *MRT_GetMFileROMuidtabEnd() { + return reinterpret_cast(&mfile_romuidtab_end); +} + +MRT_EXPORT void *MRT_GetBBProfileTabBegin() { + return reinterpret_cast(&__bb_profile_tab_begin); +} + +MRT_EXPORT void *MRT_GetBBProfileTabEnd() { + return reinterpret_cast(&__bb_profile_tab_end); +} + +MRT_EXPORT void *MRT_GetBBProfileStrTabBegin() { + return reinterpret_cast(&__bb_profile_strtab_begin); +} + +MRT_EXPORT void *MRT_GetBBProfileStrTabEnd() { + return reinterpret_cast(&__bb_profile_strtab_end); +} + +__attribute__((weak)) +MRT_EXPORT void MRT_PreinitModuleClasses() { + return ; +} + +#ifdef __MUSL__ // using musl as the C library +// gcc/llvm generated code may need symbol __dso_handle +// which is not provided by __MUSL__. Add it here to satisfy the link requirement. +void *__dso_handle = nullptr; + +#endif // __MUSL__ +} // extern "C" diff --git a/src/mrt/maplert/src/mrt_object.cpp b/src/mrt/maplert/src/mrt_object.cpp new file mode 100644 index 0000000000..5177b34bb5 --- /dev/null +++ b/src/mrt/maplert/src/mrt_object.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include +#include +#include +#include "jni.h" +#include "mrt_monitor_api.h" +#include "sizes.h" +#include "mstring_inline.h" + +using namespace std; +namespace maplert { +uint32_t GetObjectDWordSize(const MObject &obj) { + uint32_t objSize = obj.GetSize(); + return (objSize + sizeof(void*) - 1) / sizeof(void*); +} + +// Make and return shallow copy of object. +jobject MRT_CloneJavaObject(jobject jObj) { + ScopedHandles sHandles; + ObjHandle mObj(MObject::JniCastNonNull(jObj)); + size_t objSz = static_cast(mObj->GetSize()); + ObjHandle newObj(MObject::NewObject(*(mObj->GetClass()), objSz)); + if (newObj() != 0) { + errno_t tmpResult = memcpy_s(reinterpret_cast(newObj()), objSz, + reinterpret_cast(mObj()), objSz); + if (UNLIKELY(tmpResult != EOK)) { + LOG(FATAL) << "memcpy_s() in MRT_CloneJavaObject() return " << tmpResult << " rather than 0." 
<< maple::endl; + } + uint32_t *paddr = reinterpret_cast(newObj.AsRaw() + sizeof(MetaRef)); + *paddr = 0; + Collector::Instance().PostObjectClone(mObj.AsRaw(), newObj.AsRaw()); + } + return newObj.ReturnJObj(); +} + +size_t MRT_SizeOfObject(jobject obj) { + return kHeaderSize + MObject::JniCastNonNull(obj)->GetSize(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_poisonstack.cpp b/src/mrt/maplert/src/mrt_poisonstack.cpp new file mode 100644 index 0000000000..8da122dbed --- /dev/null +++ b/src/mrt/maplert/src/mrt_poisonstack.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_poisonstack.h" +#include +#include +#include +#include "securec.h" +#include "base/logging.h" + +namespace maplert { +extern "C" { +#if CONFIG_JSAN +static bool GetThreadStack(pthread_attr_t *attr, uintptr_t *spBegin, uintptr_t *spEnd) { + size_t v = 0; + void *stkAddr = nullptr; + if (pthread_attr_getstack(attr, &stkAddr, &v)) { + return false; + } + + v &= ~(0x1000 - 1); // workaround for guard page + + *spEnd = reinterpret_cast(stkAddr); + *spBegin = *spBegin + v; + + if (*spBegin > *spEnd) { + return true; + } else { + return false; + } +} + +static bool FetchLocalThreadStack(uintptr_t *spBegin, uintptr_t *spEnd) { + pthread_attr_t myAttr; + if (pthread_getattr_np(pthread_self(), &myAttr)) { + return false; + } + return GetThreadStack(&myAttr, spBegin, spEnd); +} + +static void poison_stack(uintptr_t framePtr) { + if (framePtr) { + return; + } + + uintptr_t spBegin = 0; + uintptr_t spEnd = 0; + if (FetchLocalThreadStack(&spBegin, &spEnd)) { + if (memset_s(reinterpret_cast(framePtr), sizeof(uintptr_t) * (spEnd - static_cast(framePtr)), + 0xba, sizeof(uintptr_t) * (spEnd - static_cast(framePtr))) != EOK) { + LOG(ERROR) << "memset_s fail" << maple::endl; + } + } +} +#endif + +void MRT_InitPoisonStack(uintptr_t framePtr) { +#if CONFIG_JSAN + void *fa = framePtr ? reinterpret_cast(framePtr) : __builtin_frame_address(1); + poison_stack(reinterpret_cast(fa)); +#else + (void)framePtr; +#endif +} +} // extern "C" +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_preload_class.cpp b/src/mrt/maplert/src/mrt_preload_class.cpp new file mode 100644 index 0000000000..5f8e0dec39 --- /dev/null +++ b/src/mrt/maplert/src/mrt_preload_class.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "chelper.h" +#include "mrt_class_init.h" + +#define CLASS_PREFIX(className) extern void *MRT_CLASSINFO(className); +#define MRT_CLASSINFO(className) __MRT_MAGIC_PASTE(CLASSINFO_PREFIX, className) +#include "white_list.def" +#undef MRT_CLASSINFO +#undef CLASS_PREFIX + +namespace maplert { +extern "C" { + +void MRT_BootstrapClinit(void) { +#define CLASS_PREFIX(className) (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(className))); +#define MRT_CLASSINFO(className) __MRT_MAGIC_PASTE(CLASSINFO_PREFIX, className) +#ifdef __ANDROID__ + +#ifndef OPS_ANDROID +#include "white_list.def" +#else // OPS_ANDROID + CLASS_PREFIX(Ljava_2Flang_2FClassLoader_24SystemClassLoader_3B) + (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FString_3B))); + (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FByte_24ByteCache_3B))); + (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FByte_3B))); +#endif // OPS_ANDROID + +#else + (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FString_3B))); + (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FByte_24ByteCache_3B))); + (void)MRT_TryInitClass(*reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FByte_3B))); +#endif +#undef MRT_CLASSINFO +#undef CLASS_PREFIX +} + +} // extern "C" +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_primitive_class.def b/src/mrt/maplert/src/mrt_primitive_class.def new file mode 100644 index 0000000000..72234f83fe --- /dev/null +++ b/src/mrt/maplert/src/mrt_primitive_class.def @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + +// predefined common classes for primitive types. should be generated. 
+using namespace maplert; +extern "C" { + +extern void *MRT_CLASSINFO(Ljava_2Flang_2FObject_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2Freflect_2FField_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2FCharSequence_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B); +#ifdef __OPENJDK__ +extern void *MRT_CLASSINFO(Ljava_2Futil_2FHashtable_24Entry_3B); +#else // libcore +extern void *MRT_CLASSINFO(Ljava_2Futil_2FHashtable_24HashtableEntry_3B); +extern void *MRT_CLASSINFO(Llibcore_2Freflect_2FAnnotationMember_3B); +#endif // __OPENJDK__ +extern void *MRT_CLASSINFO(Ljava_2Futil_2FFormatter_24Flags_3B); +extern void *MRT_CLASSINFO(Ljava_2Futil_2FHashMap_24Node_3B); +extern void *MRT_CLASSINFO(Ljava_2Futil_2FFormatter_24FormatString_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2FString_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2FClass_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2Freflect_2FMethod_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2Fannotation_2FAnnotation_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2Freflect_2FConstructor_3B); +extern void *MRT_CLASSINFO(Ljava_2Flang_2Freflect_2FParameter_3B); +extern void *MRT_CLASSINFO(Lsun_2Fsecurity_2Futil_2FDerValue_3B); +extern void *MRT_CLASSINFO(Lsun_2Fsecurity_2Fx509_2FAVA_3B); + +// These definition are from maple_ir/include/reflection_analysis.h +#define FLAG_CLASS_PRIMITIVE 0x0001 +#define FLAG_CLASS_ARRAY 0x0002 +#define MODIFIER_PUBLIC 0x00000001 +#define MODIFIER_PRIM 0x00000411 //Abstract Final Public +#define MODIFIER_ARRAY 0x00000411 //Abstract Final Public + +#define NAME_STRING_(name) #name +#define NAME_STRING(name) ((uintptr_t)NAME_STRING_(name)) +#define NAME_CONCAT(prefix, name) prefix##name +#define NAME_LENGTH(name) (sizeof(NAME_STRING_(name))-1) // only for constant string + +extern void (*MRT_ITABLE(Ljava_2Flang_2FObject_3B))(); +extern void (*MRT_VTABLE(Ljava_2Flang_2FObject_3B))(); +extern void *MRT_FIELDS(Ljava_2Flang_2FObject_3B); +extern void *MRT_METHODS(Ljava_2Flang_2FObject_3B); +extern void *MRT_GCTIB(Ljava_2Flang_2FObject_3B); + +#ifdef USE_32BIT_REF +#define FIELD_CLASS \ + (static_cast(reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2Freflect_2FField_3B)))) +#define CLASS_CLASS (static_cast(reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FClass_3B)))) +#define GET_OFFSET_REF(base, target) ((int32_t)((char*)(base) - (char*)(target))) +#else +#define FIELD_CLASS ((uintptr_t)(&MRT_CLASSINFO(Ljava_2Flang_2Freflect_2FField_3B))) +#define CLASS_CLASS ((uintptr_t)(&MRT_CLASSINFO(Ljava_2Flang_2FClass_3B))) +#define GET_OFFSET_REF(base, target) ((intptr_t)((char*)(base) - (char*)(target))) +#endif //USE_32BIT_REF + +#define GET_OFFSET(base, target) ((uintptr_t)((char*)(base) - (char*)(target))) +#define VTAB_OBJECT ((uintptr_t)(&MRT_VTABLE(Ljava_2Flang_2FObject_3B))) +#define GCTIB_OBJECT ((void*)(&MRT_GCTIB(Ljava_2Flang_2FObject_3B))) +#define GCTIB_OBJECT_ARRAY ((void*)(&MRT_GCTIB(_ArrayOfObject))) +#define GCTIB_PRIMITIVE_ARRAY ((void*)(&MRT_GCTIB(_ArrayOfPrimitive))) +#define FIELDS_OBJECT ((FieldMeta*)(&MRT_FIELDS(Ljava_2Flang_2FObject_3B))) +#define FIELDS_SIZE_OBJECT 2 +#define METHODS_OBJECT ((MethodMetadata*)(&MRT_METHODS(Ljava_2Flang_2FObject_3B))) +#define METHODS_SIZE_OBJECT 11 +// compiler will complaints about using ClassMetadata * +#define CLASS_PRIMITIVE(name) ((uintptr_t)(&MRT_PRIMITIVECLASSINFO(name))) +#define CLASS(name) ((uintptr_t)(&MRT_CLASSINFO(name))) + +#ifdef USE_32BIT_REF +// classinfo for primitive-type classinfo itself +#define 
DEFINE_PRIMITIVE_CLASSINFO_RO(name) \ +static ClassMetadataRO __primitiveclassinforo__##name = { { NAME_STRING(name) }, \ + { 0 }, { 0 },\ + { { 0 } },\ + 0, 0,\ + MODIFIER_PRIM, { 0 }, { 0 } }; + +#define DEFINE_PRIMITIVE_CLASSINFO(name, size) \ +ClassMetadata MRT_EXPORT MRT_PRIMITIVECLASSINFO(name) = { CLASS_CLASS, 0, 0, { size }, \ + FLAG_CLASS_PRIMITIVE, 0, { 0 }, \ + { VTAB_OBJECT }, \ + { GET_OFFSET(GCTIB_OBJECT, &(__pinf_##name.gctib)) }, \ + { (uint32_t)(uintptr_t)&__primitiveclassinforo__##name }, { 0 }, { (uintptr_t)&MRT_PRIMITIVECLASSINFO(name) } }; + +// classinfo for array-of-primitive-type classinfo +#define DEFINE_ARRAY_PRIMCLASSINFO_RO(name, reflectname, elemname) \ +static ClassMetadataRO __primitiveclassinforo__##name = { { NAME_STRING(reflectname) }, \ + { 0 }, { 0 },\ + { { CLASS_PRIMITIVE(elemname) } }, \ + 0, 0,\ + MODIFIER_ARRAY, { 0 }, { 0 } }; + +#define DEFINE_ARRAY_PRIMCLASSINFO(name, elemname, gcTIB, componentSize) \ +ClassMetadata MRT_EXPORT MRT_PRIMITIVECLASSINFO(name) = { CLASS_CLASS, 0, 0, { componentSize }, \ + FLAG_CLASS_ARRAY, 0, { 0 }, \ + { VTAB_OBJECT }, \ + { GET_OFFSET(gcTIB, &(__pinf_##name.gctib)) },\ + { (uint32_t)(uintptr_t)&__primitiveclassinforo__##name }, { 0 }, { (uintptr_t)&MRT_PRIMITIVECLASSINFO(name) } }; + +#define REF_SIZE 4 // 4B for a single reference + +#else //!USE_32BIT_REF + +// classinfo for primitive-type classinfo itself +#define DEFINE_PRIMITIVE_CLASSINFO_RO(name) \ +static ClassMetadataRO __primitiveclassinforo__##name = { { NAME_STRING(name) }, \ + { 0 }, { 0 },\ + { { 0 } },\ + 0, 0, FLAG_CLASS_PRIMITIVE, 0, 0, \ + MODIFIER_PRIM, { 0 }, { 0 } }; + +#define DEFINE_PRIMITIVE_CLASSINFO(name, size) \ +ClassMetadata MRT_EXPORT MRT_PRIMITIVECLASSINFO(name) = { CLASS_CLASS, 0, 0, { size }, { 0 }, \ + { VTAB_OBJECT }, \ + { GET_OFFSET(GCTIB_OBJECT, &(__pinf_##name.gctib)) }, \ + { (uintptr_t)&__primitiveclassinforo__##name }, { (uintptr_t)&MRT_PRIMITIVECLASSINFO(name) } }; + +// classinfo for array-of-primitive-type classinfo +#define DEFINE_ARRAY_PRIMCLASSINFO_RO(name, reflectname, elemname) \ +static ClassMetadataRO __primitiveclassinforo__##name = { { NAME_STRING(reflectname) }, \ + { 0 }, { 0 },\ + { { CLASS_PRIMITIVE(elemname) } }, \ + 0, 0, FLAG_CLASS_ARRAY, 0, 0, \ + MODIFIER_ARRAY, { 0 }, { 0 } }; + +#define DEFINE_ARRAY_PRIMCLASSINFO(name, elemname, gcTIB, componentSize) \ +ClassMetadata MRT_EXPORT MRT_PRIMITIVECLASSINFO(name) = { CLASS_CLASS, 0, 0, { componentSize }, { 0 }, \ + { VTAB_OBJECT }, \ + { GET_OFFSET(gcTIB, &(__pinf_##name.gctib)) },\ + { (uintptr_t)&__primitiveclassinforo__##name }, { (uintptr_t)&MRT_PRIMITIVECLASSINFO(name) } }; + +#define REF_SIZE 8 // 8B for a single reference + +#endif //USE_32BIT_REF + +// up to 3-dim array. higher dimensional array and array of object will be +// generated at runtime. 
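Side note (not part of the patch): the size convention the macro expansions below rely on can be summarized in a small standalone sketch. A one-dimensional primitive array stores the primitive itself (1 byte for Z/B, 2 for S/C, 4 for I/F, 8 for J/D, 0 for V), while two- and three-dimensional arrays store references and therefore use REF_SIZE; the sketch assumes the 64-bit layout (REF_SIZE == 8), and the function names are illustrative only.

#include <cstdint>
#include <iostream>
#include <string>

constexpr uint32_t kRefSize = 8;  // assumption: !USE_32BIT_REF, so REF_SIZE is 8

// Element size of the primitive type itself, matching the sizes passed to
// DEFINE_PRIMITIVE_CLASSINFOS for Z, B, S, C, I, F, D, J, V.
uint32_t PrimitiveSize(char desc) {
  switch (desc) {
    case 'Z': case 'B': return 1;
    case 'S': case 'C': return 2;
    case 'I': case 'F': return 4;
    case 'J': case 'D': return 8;
    default:            return 0;  // 'V'
  }
}

// Component size used for a dim-dimensional array of the given primitive:
// dim 1 stores the primitive, dim >= 2 stores references.
uint32_t ArrayComponentSize(char primitive, uint32_t dim) {
  return (dim <= 1) ? PrimitiveSize(primitive) : kRefSize;
}

int main() {
  for (uint32_t dim = 1; dim <= 3; ++dim) {
    std::cout << std::string(dim, '[') << "I componentSize="
              << ArrayComponentSize('I', dim) << std::endl;
  }
  return 0;  // prints 4, 8, 8 for [I, [[I, [[[I
}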
+#define DEFINE_PRIMITIVE_CLASSINFOS(ptype, type1d, type1dname, type2d, type2dname, type3d, type3dname, size) \ +DEFINE_PRIMITIVE_CLASSINFO_RO(ptype) \ +DEFINE_PRIMITIVE_CLASSINFO(ptype, size) \ +DEFINE_ARRAY_PRIMCLASSINFO_RO(type1d, type1dname, ptype) \ +DEFINE_ARRAY_PRIMCLASSINFO(type1d, ptype, GCTIB_PRIMITIVE_ARRAY, size) \ +DEFINE_ARRAY_PRIMCLASSINFO_RO(type2d, type2dname, type1d) \ +DEFINE_ARRAY_PRIMCLASSINFO(type2d, type1d, GCTIB_OBJECT_ARRAY, REF_SIZE) \ +DEFINE_ARRAY_PRIMCLASSINFO_RO(type3d, type3dname, type2d) \ +DEFINE_ARRAY_PRIMCLASSINFO(type3d, type2d, GCTIB_OBJECT_ARRAY, REF_SIZE) \ +static ClassMetadata *__mrt_pclasses_##ptype[4] = { &MRT_PRIMITIVECLASSINFO(ptype), &MRT_PRIMITIVECLASSINFO(type1d),\ + &MRT_PRIMITIVECLASSINFO(type2d), &MRT_PRIMITIVECLASSINFO(type3d) }; + +DEFINE_PRIMITIVE_CLASSINFOS(Z, AZ, [Z, AAZ, [[Z, AAAZ, [[[Z, 1) +DEFINE_PRIMITIVE_CLASSINFOS(B, AB, [B, AAB, [[B, AAAB, [[[B, 1) +DEFINE_PRIMITIVE_CLASSINFOS(S, AS, [S, AAS, [[S, AAAS, [[[S, 2) +DEFINE_PRIMITIVE_CLASSINFOS(C, AC, [C, AAC, [[C, AAAC, [[[C, 2) +DEFINE_PRIMITIVE_CLASSINFOS(I, AI, [I, AAI, [[I, AAAI, [[[I, 4) +DEFINE_PRIMITIVE_CLASSINFOS(F, AF, [F, AAF, [[F, AAAF, [[[F, 4) +DEFINE_PRIMITIVE_CLASSINFOS(D, AD, [D, AAD, [[D, AAAD, [[[D, 8) +DEFINE_PRIMITIVE_CLASSINFOS(J, AJ, [J, AAJ, [[J, AAAJ, [[[J, 8) +DEFINE_PRIMITIVE_CLASSINFOS(V, AV, [V, AAV, [[V, AAAV, [[[V, 0) + +// classinfo for array-of-hotclass + +#ifdef USE_32BIT_REF +#define DEFINE_ARRAY_CLASSINFO_RO(name, reflectname, elemname) \ +ClassMetadataRO __cinfro__##name = { { NAME_STRING(reflectname) }, \ + { 0 }, { 0 }, \ + { { CLASS(elemname) } }, \ + 0, 0, \ + MODIFIER_ARRAY, { 0 }, { 0 } }; + +// Will be registered to boot-class-loader when initialized +#define DEFINE_ARRAY_CLASSINFO(name, componentSize) \ +ClassMetadata MRT_EXPORT MRT_CLASSINFO(name) = { CLASS_CLASS, 0, 0, { componentSize }, \ + FLAG_CLASS_ARRAY, 0, { 0 },\ + { VTAB_OBJECT }, \ + { GET_OFFSET(GCTIB_OBJECT_ARRAY, &(__cinf_##name.gctib)) },\ + { (uint32_t)(uintptr_t)&__cinfro__##name }, { 0 }, { (uintptr_t)&MRT_CLASSINFO(name) } }; + +#else //!USE_32BIT_REF + +#define DEFINE_ARRAY_CLASSINFO_RO(name, reflectname, elemname) \ +ClassMetadataRO __cinfro__##name = { { NAME_STRING(reflectname) }, \ + { 0 }, { 0 }, \ + { { CLASS(elemname) } }, \ + 0, 0, FLAG_CLASS_ARRAY, 0, 0, \ + MODIFIER_ARRAY, { 0 }, { 0 } }; + +// Will be registered to boot-class-loader when initialized +#define DEFINE_ARRAY_CLASSINFO(name, componentSize) \ +ClassMetadata MRT_EXPORT MRT_CLASSINFO(name) = { CLASS_CLASS, 0, 0, { componentSize }, { 0 },\ + { VTAB_OBJECT }, \ + { GET_OFFSET(GCTIB_OBJECT_ARRAY, &(__cinf_##name.gctib)) },\ + { (uintptr_t)&__cinfro__##name }, { (uintptr_t)&MRT_CLASSINFO(name) } }; + +#endif //USE_32BIT_REF + +#define DEFINE_CLASSINFOS(name, reflectname, elemname) \ + DEFINE_ARRAY_CLASSINFO_RO(name, reflectname, elemname) \ + DEFINE_ARRAY_CLASSINFO(name, REF_SIZE) + +DEFINE_CLASSINFOS(ALjava_2Flang_2FObject_3B, [Ljava/lang/Object;, Ljava_2Flang_2FObject_3B) +DEFINE_CLASSINFOS(ALjava_2Flang_2FClass_3B, [Ljava/lang/Class;, Ljava_2Flang_2FClass_3B) +DEFINE_CLASSINFOS(ALjava_2Flang_2FString_3B, [Ljava/lang/String;, Ljava_2Flang_2FString_3B) +DEFINE_CLASSINFOS(ALjava_2Futil_2FFormatter_24Flags_3B, [Ljava/util/Formatter$Flags;, Ljava_2Futil_2FFormatter_24Flags_3B) +DEFINE_CLASSINFOS(ALjava_2Futil_2FHashMap_24Node_3B, [Ljava/util/HashMap$Node;, Ljava_2Futil_2FHashMap_24Node_3B) +DEFINE_CLASSINFOS(ALjava_2Futil_2FFormatter_24FormatString_3B, [Ljava/util/Formatter$FormatString;, 
Ljava_2Futil_2FFormatter_24FormatString_3B) +DEFINE_CLASSINFOS(ALjava_2Flang_2FCharSequence_3B, [Ljava/lang/CharSequence;, Ljava_2Flang_2FCharSequence_3B) +DEFINE_CLASSINFOS(ALjava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B, [Ljava/lang/ThreadLocal$ThreadLocalMap$Entry;, Ljava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B) +DEFINE_CLASSINFOS(ALsun_2Fsecurity_2Futil_2FDerValue_3B, [Lsun/security/util/DerValue;, Lsun_2Fsecurity_2Futil_2FDerValue_3B) +DEFINE_CLASSINFOS(ALsun_2Fsecurity_2Fx509_2FAVA_3B, [Lsun/security/x509/AVA;, Lsun_2Fsecurity_2Fx509_2FAVA_3B) +#ifdef __OPENJDK__ +DEFINE_CLASSINFOS(ALjava_2Futil_2FHashtable_24Entry_3B, [Ljava/util/Hashtable$Entry;, Ljava_2Futil_2FHashtable_24Entry_3B) +#else // libcore +DEFINE_CLASSINFOS(ALjava_2Futil_2FHashtable_24HashtableEntry_3B, [Ljava/util/Hashtable$HashtableEntry;, Ljava_2Futil_2FHashtable_24HashtableEntry_3B) +DEFINE_CLASSINFOS(ALlibcore_2Freflect_2FAnnotationMember_3B, [Llibcore/reflect/AnnotationMember;, + Llibcore_2Freflect_2FAnnotationMember_3B); +#endif // __OPENJDK__ +} diff --git a/src/mrt/maplert/src/mrt_primitive_util.cpp b/src/mrt/maplert/src/mrt_primitive_util.cpp new file mode 100644 index 0000000000..16eff8e864 --- /dev/null +++ b/src/mrt/maplert/src/mrt_primitive_util.cpp @@ -0,0 +1,317 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_primitive_util.h" +#include "mclass_inline.h" +namespace maplert { +MObject *primitiveutil::BoxPrimitiveJint(int32_t value) { + if (!WellKnown::GetMClassIntegerCache()->InitClassIfNeeded()) { + LOG(ERROR) << "fail do clinit, " << "class: " << WellKnown::GetMClassIntegerCache()->GetName() << maple::endl; + } + int32_t low = *reinterpret_cast(WellKnown::GetFieldMetaIntegerCacheLow()->GetStaticAddr()); + int32_t high = *reinterpret_cast(WellKnown::GetFieldMetaIntegerCacheHigh()->GetStaticAddr()); + if (value >= low && value <= high) { + return GetCacheMember(*WellKnown::GetMClassIntegerCache(), *WellKnown::GetFieldMetaIntegerCache(), + static_cast(value + (-low)), false); + } + MObject *ret = MObject::NewObject(*WellKnown::GetMClassInteger()); + ret->Store(WellKnown::GetMFieldIntegerValueOffset(), value, false); + return ret; +} + +char primitiveutil::GetPrimitiveTypeFromBoxType(const MClass &type) { + if (&type == WellKnown::GetMClassInteger()) { + return 'I'; + } else if (&type == WellKnown::GetMClassBoolean()) { + return 'Z'; + } else if (&type == WellKnown::GetMClassByte()) { + return 'B'; + } else if (&type == WellKnown::GetMClassShort()) { + return 'S'; + } else if (&type == WellKnown::GetMClassCharacter()) { + return 'C'; + } else if (&type == WellKnown::GetMClassLong()) { + return 'J'; + } else if (&type == WellKnown::GetMClassFloat()) { + return 'F'; + } else if (&type == WellKnown::GetMClassDouble()) { + return 'D'; + } + return 'N'; +} + +bool primitiveutil::IsBoxObject(const MObject &o, char srcType) { + MClass *klass = o.GetClass(); + switch (srcType) { + case 'Z': + return klass == WellKnown::GetMClassBoolean(); + case 'B': + return klass == WellKnown::GetMClassByte(); + case 'S': + return klass == WellKnown::GetMClassShort(); + case 'C': + return klass == WellKnown::GetMClassCharacter(); + case 'I': + return klass == WellKnown::GetMClassInteger(); + case 'J': + return klass == WellKnown::GetMClassLong(); + case 'F': + return klass == WellKnown::GetMClassFloat(); + case 'D': + return klass == WellKnown::GetMClassDouble(); + default: + return false; + } +} + +MObject *primitiveutil::BoxPrimitive(char srcType, const jvalue &value) { + MObject *retObj = nullptr; + switch (srcType) { + case 'Z': + retObj = BoxPrimitiveJboolean(value.z); + break; + case 'B': + retObj = BoxPrimitiveJbyte(value.b); + break; + case 'C': + retObj = BoxPrimitiveJchar(value.c); + break; + case 'D': + retObj = BoxPrimitiveJdouble(value.d); + break; + case 'F': + retObj = BoxPrimitiveJfloat(value.f); + break; + case 'I': + retObj = BoxPrimitiveJint(value.i); + break; + case 'J': + retObj = BoxPrimitiveJlong(value.j); + break; + case 'S': + retObj = BoxPrimitiveJshort(value.s); + break; + default: ; + } + return retObj; +} + +bool primitiveutil::UnBoxPrimitive(const MObject &elementObj, jvalue &boxedValue) { + boxedValue.j = 0; + MClass *elementObjClass = elementObj.GetClass(); + char type = GetPrimitiveTypeFromBoxType(*elementObjClass); + size_t offset = 0; + switch (type) { + case 'Z': + offset = WellKnown::GetMFieldBooleanValueOffset(); + boxedValue.z = elementObj.Load(offset); + break; + case 'B': + offset = WellKnown::GetMFieldByteValueOffset(); + boxedValue.b = elementObj.Load(offset); + break; + case 'C': + offset = WellKnown::GetMFieldCharacterValueOffset(); + boxedValue.c = elementObj.Load(offset); + break; + case 'D': + offset = WellKnown::GetMFieldDoubleValueOffset(); + boxedValue.d = elementObj.Load(offset); + break; + case 'F': + offset = 
WellKnown::GetMFieldFloatValueOffset(); + boxedValue.f = elementObj.Load(offset); + break; + case 'I': + offset = WellKnown::GetMFieldIntegerValueOffset(); + boxedValue.i = elementObj.Load(offset); + break; + case 'J': + offset = WellKnown::GetMFieldLongValueOffset(); + boxedValue.j = elementObj.Load(offset); + break; + case 'S': + offset = WellKnown::GetMFieldShortValueOffset(); + boxedValue.s = elementObj.Load(offset); + break; + default: + return false; + } + return true; +} + +bool primitiveutil::ConvertToInt(char srcType, const jvalue &src, jvalue &dst) { + switch (srcType) { + case 'B': + dst.i = src.b; + return true; + case 'C': + dst.i = src.c; + return true; + case 'S': + dst.i = src.s; + return true; + case 'I': + dst.i = src.i; + return true; + default: ; + } + return false; +} + +bool primitiveutil::ConvertToLong(char srcType, const jvalue &src, jvalue &dst) { + switch (srcType) { + case 'B': + dst.j = src.b; + return true; + case 'C': + dst.j = src.c; + return true; + case 'S': + dst.j = src.s; + return true; + case 'I': + dst.j = src.i; + return true; + case 'J': + dst.j = src.j; + return true; + default: ; + } + return false; +} + +bool primitiveutil::ConvertToFloat(char srcType, const jvalue &src, jvalue &dst) { + switch (srcType) { + case 'B': + dst.f = src.b; + return true; + case 'C': + dst.f = src.c; + return true; + case 'S': + dst.f = src.s; + return true; + case 'I': + dst.f = src.i; + return true; + case 'J': + dst.f = src.j; + return true; + case 'F': + dst.f = src.f; + return true; + default: ; + } + return false; +} + +bool primitiveutil::ConvertToDouble(char srcType, const jvalue &src, jvalue &dst) { + switch (srcType) { + case 'B': + dst.d = src.b; + return true; + case 'C': + dst.d = src.c; + return true; + case 'S': + dst.d = src.s; + return true; + case 'I': + dst.d = src.i; + return true; + case 'J': + dst.d = src.j; + return true; + case 'F': + dst.d = src.f; + return true; + case 'D': + dst.d = src.d; + return true; + default: ; + } + return false; +} + +bool primitiveutil::ConvertNarrowToWide(char srcType, char dstType, const jvalue &src, jvalue &dst) { + dst.j = 0; + if (LIKELY(srcType == dstType)) { + dst = src; + return true; + } + switch (dstType) { + case 'Z': + case 'C': + case 'B': + break; + case 'S': + if (srcType == 'B') { + dst.s = src.b; + return true; + } + break; + case 'I': + return ConvertToInt(srcType, src, dst); + case 'J': + return ConvertToLong(srcType, src, dst); + case 'F': + return ConvertToFloat(srcType, src, dst); + case 'D': + return ConvertToDouble(srcType, src, dst); + default: + break; + } + return false; +} + +bool primitiveutil::CanConvertNarrowToWide(char srcType, char dstType) { + if (LIKELY(srcType == dstType)) { + return true; + } + switch (dstType) { + case 'Z': + case 'C': + case 'B': + break; + case 'S': + if (srcType == 'B') { + return true; + } + break; + case 'I': + if (srcType == 'B' || srcType == 'C' || srcType == 'S') { + return true; + } + break; + case 'J': + if (srcType == 'B' || srcType == 'C' || srcType == 'S' || srcType == 'I') { + return true; + } + break; + case 'F': + if (srcType == 'B' || srcType == 'C' || srcType == 'S' || srcType == 'I' || srcType == 'J') { + return true; + } + break; + case 'D': + if (srcType == 'B' || srcType == 'C' || srcType == 'S' || srcType == 'I' || srcType == 'J' || srcType == 'F') { + return true; + } + break; + default:; + } + return false; +} +} // namespace maplert \ No newline at end of file diff --git a/src/mrt/maplert/src/mrt_profile.cpp 
b/src/mrt/maplert/src/mrt_profile.cpp new file mode 100644 index 0000000000..442bb740c3 --- /dev/null +++ b/src/mrt/maplert/src/mrt_profile.cpp @@ -0,0 +1,596 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_profile.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "mclass_inline.h" +#include "fieldmeta_inline.h" +#include "methodmeta_inline.h" +#include "profile_type.h" +#include "file_layout.h" +#include "linker_api.h" +#include "utils/time_utils.h" +#include "mrt_string.h" +#include "gc_log.h" +using namespace maple; + +namespace maplert { +namespace { +std::unordered_set hotClassMetaSet; +std::unordered_set hotMethodMetaSet; +std::unordered_set hotFieldMetaSet; +std::unordered_set hotMethodSignatureSet; +std::unordered_map hotRefectionStrInfo; +std::vector strTab; +std::unordered_map str2idx; +const char *kMapleSoPrefix = "libmaple"; +std::atomic runPhase(false); +std::mutex reflectStrMtx; +std::mutex classMetaMtx; +std::mutex fieldMetaMtx; +std::mutex methodMetaMtx; +std::mutex methodSignatureMtx; +std::atomic profileEnable(true); +} + +template using MapleProfile = std::unordered_map>; +using MapleIRProf = std::unordered_map; + +static uint32_t GetorInsertStrInTab(const std::string &str) { + static uint32_t idx = 0; + auto item = str2idx.find(str); + if (item == str2idx.end()) { + uint32_t oldIdx = idx; + str2idx.insert(std::make_pair(str, oldIdx)); + strTab.push_back(str); + idx++; + return oldIdx; + } else { + return item->second; + } +} + +template +static void Split(const std::string &s, char delim, Out result) { + std::stringstream ss; + ss.str(s); + std::string item; + while (std::getline(ss, item, delim)) { + *(result++) = item; + } +} +// system/lib64/libmaplecore-all.so --> core-all +static std::string GetBaseName(const std::string &str) { + size_t pos = str.find(kMapleSoPrefix); + std::string baseName(str); + if (pos != std::string::npos) { + size_t posEnd = str.find_last_of('.'); + baseName = str.substr(pos + strlen(kMapleSoPrefix), posEnd - pos - strlen(kMapleSoPrefix)); + } + if (!LOG_NDEBUG) { + LOG(INFO) << "Base name of " << str << " is " << baseName << maple::endl; + } + return baseName; +} +extern "C" void MRT_EnableMetaProfile() { + profileEnable = true; +} + +extern "C" void MRT_DisableMetaProfile() { + profileEnable = false; +} + +extern "C" void MRT_ClearMetaProfile() { + runPhase = true; +} + +void InsertReflectionString(const char *kHotString) { + if (kHotString == nullptr) { + return; + } + if (profileEnable) { + std::lock_guard lock(reflectStrMtx); + if (runPhase) { + auto item = hotRefectionStrInfo.find(kHotString); + if (item != hotRefectionStrInfo.end()) { + if (item->second == kLayoutBootHot) { + item->second = kLayoutBothHot; + } else { + return ; + } + } else { + hotRefectionStrInfo.insert(std::make_pair(kHotString, kLayoutRunHot)); + } + } else { + hotRefectionStrInfo.insert(std::make_pair(kHotString, kLayoutBootHot)); + } + } +} + +void InsertClassMetadata(const MClass 
&klass) { + std::lock_guard lock(classMetaMtx); + (void)hotClassMetaSet.insert(&klass); +} + +void InsertMethodMetadata(const MethodMetaBase *kMethod) { + if (kMethod == nullptr) { + return; + } + if (profileEnable) { + MClass *declaringClass = kMethod->GetDeclaringClass(); + std::lock_guard lock(methodMetaMtx); + hotMethodMetaSet.insert(declaringClass); + } +} + +void InsertMethodMetadata(const MClass &cls) { + std::lock_guard lock(methodMetaMtx); + (void)hotMethodMetaSet.insert(&cls); +} + +void InsertFieldMetadata(FieldMeta *fieldMeta) { + if (fieldMeta == nullptr) { + return; + } + MClass *declearingclass = fieldMeta->GetDeclaringclass(); + std::lock_guard lock(fieldMetaMtx); + hotFieldMetaSet.insert(declearingclass); +} + +void InsertFieldMetadata(const MClass &cls) { + std::lock_guard lock(fieldMetaMtx); + (void)hotFieldMetaSet.insert(&cls); +} + +void InsertMethodSignature(const MethodMeta &method) { + std::lock_guard lock(methodSignatureMtx); + (void)hotMethodSignatureSet.insert(&method); +} + +void GenStrData(std::vector &strData) { + for (auto str : strTab) { + strData.insert(strData.end(), str.c_str(), str.c_str() + str.size() + 1); + } +} + +class ProfileWriter { + public: + ProfileWriter(const std::string &path, bool isSystemServer) : path(path), isSystemServer(isSystemServer) {} + ~ProfileWriter() = default; + void WriteToFile(); + private: + std::string path; + bool isSystemServer = false; + uint8_t profileDataNum = 0; + uint32_t profileDataSize = 0; + static constexpr uint64_t nsPerUs = 1000UL; + std::vector profileData; + std::vector profileHeaders; + void RemoveProfile(); + void GenReflectStrMetaData(MapleProfile &metaProfile, + std::unordered_map &data); + void GenMetaData(MapleProfile &metaProfile, std::unordered_set &data); + void GenFuncData(MapleProfile &funcProfile, + std::unordered_map> &data); + void GenMethodSignatureData(MapleProfile &metaProfile, + std::unordered_set &data); + template + void WriteProfileData(MapleProfile &data, ProfileType profileType); + void WriteIRProfData(MapleIRProf &data); + void ProcessMetaData(); + void ProcessFuncProf(); + void ProcessRawProf(); + void InitFileHeader(Header &header); + void Init(); + void ProcessProfData(); // used to transfer the raw profdata to file format + void WriteFormatDataToFile(); +}; + +void ProfileWriter::GenReflectStrMetaData(MapleProfile &metaProfile, + std::unordered_map &data) { + for (auto item : data) { + void *pc = static_cast(const_cast(item.first)); + LinkerMFileInfo *soInfo = LinkerAPI::Instance().GetLinkerMFileInfoByAddress(pc, false); + if (soInfo == nullptr) { + LOG(ERROR) << "find reflect str so failed " << std::hex << pc << std::dec << maple::endl; + continue; + } + // skip system so when save app profile + if (!isSystemServer && !soInfo->BelongsToApp()) { + continue; + } + std::string belongSo = soInfo->name; + std::string baseName = GetBaseName(belongSo); + std::string name = item.first; + uint32_t soIdx = GetorInsertStrInTab(baseName); + uint32_t strIdx = GetorInsertStrInTab(name); + auto metaData = metaProfile.find(soIdx); + if (metaData == metaProfile.end()) { + std::vector metaList { ReflectionStrItem(strIdx, static_cast(item.second)) }; + metaProfile.insert(std::make_pair(soIdx, metaList)); + } else { + (metaData->second).emplace_back(strIdx, item.second); + } + } +} + +// gen meta data from the raw data +void ProfileWriter::GenMetaData(MapleProfile &metaProfile, std::unordered_set &data) { + for (auto item : data) { + LinkerMFileInfo *soInfo = 
LinkerAPI::Instance().GetLinkerMFileInfoByAddress(item, false); + if (soInfo == nullptr) { + LOG(ERROR) << "find Meta str so failed " << std::hex << item << std::dec << maple::endl; + continue; + } + if (!isSystemServer && !soInfo->BelongsToApp()) { + continue; + } + std::string belongSo = soInfo->name; + std::string baseName = GetBaseName(belongSo); + char *className = item->GetName(); + std::string name = className; + uint32_t soIdx = GetorInsertStrInTab(baseName); + uint32_t strIdx = GetorInsertStrInTab(name); + auto metaData = metaProfile.find(soIdx); + if (metaData == metaProfile.end()) { + std::vector metaList { MetaItem(strIdx) }; + metaProfile.insert(std::make_pair(soIdx, metaList)); + } else { + (metaData->second).emplace_back(strIdx); + } + } +} + +void ProfileWriter::GenFuncData(MapleProfile &funcProfile, + std::unordered_map> &data) { + for (auto item : data) { + auto &rawFunclist = item.second; + std::string baseName = GetBaseName(item.first); + uint32_t soIdx = GetorInsertStrInTab(baseName); + auto &list = funcProfile[soIdx]; + for (auto &funcProfileData : rawFunclist) { + std::vector soNames; + Split(funcProfileData.funcName, '|', std::back_inserter(soNames)); + // funcname ==> className|funcName|signaturename + // so className idx in vector is 0, funcName is 1, signatureName is 2 + uint32_t classIdx = GetorInsertStrInTab(soNames[0]); + uint32_t funcIdx = GetorInsertStrInTab(soNames[1]); + uint32_t sigIdx = GetorInsertStrInTab(soNames[2]); + list.emplace_back(classIdx, funcIdx, sigIdx, funcProfileData.callTimes, funcProfileData.layoutType); + } + } +} + +void ProfileWriter::GenMethodSignatureData(MapleProfile &metaProfile, + std::unordered_set &data) { + for (auto item : data) { + LinkerMFileInfo *metghodSoInfo = LinkerAPI::Instance().GetLinkerMFileInfoByAddress(item, false); + LinkerMFileInfo *sigSoInfo = LinkerAPI::Instance().GetLinkerMFileInfoByAddress(item->GetSignature(), false); + LinkerMFileInfo *soInfo; + if (sigSoInfo == nullptr && metghodSoInfo == nullptr) { + LOG(ERROR) << "find Meta str so failed " << std::hex << item << std::dec << maple::endl; + continue; + } else if (metghodSoInfo == nullptr) { + soInfo = sigSoInfo; + } else { + soInfo = metghodSoInfo; + } + if (!isSystemServer && !soInfo->BelongsToApp()) { + continue; + } + std::string belongSo = soInfo->name; + std::string baseName = GetBaseName(belongSo); + char *methodName = item->GetName(); + char *signatureName = item->GetSignature(); + uint32_t soIdx = GetorInsertStrInTab(baseName); + uint32_t methodIdx = GetorInsertStrInTab(methodName); + uint32_t sigIdx = GetorInsertStrInTab(signatureName); + auto metaData = metaProfile.find(soIdx); + if (metaData == metaProfile.end()) { + std::vector metaList { MethodSignatureItem(methodIdx, sigIdx) }; + metaProfile.insert(std::make_pair(soIdx, metaList)); + } else { + (metaData->second).emplace_back(methodIdx, sigIdx); + } + } +} + +// profiledata are arranged by [header][dataArray][header][dataArray] and so on +template +void ProfileWriter::WriteProfileData(MapleProfile &data, ProfileType profileType) { + uint32_t realNum = 0; + uint32_t lastProfileDataSize = profileDataSize; + for (auto &item : data) { + auto typeTProfdataArray = item.second; + uint32_t num = static_cast(typeTProfdataArray.size()); + // if size overflow, or num is 0 skip this data + if ((num > (UINT32_MAX / sizeof(T))) || num == 0) { + continue; + } + realNum++; + char *str = nullptr; + uint32_t soIdx = item.first; + str = reinterpret_cast(&soIdx); + profileData.insert(profileData.end(), str, str 
+ sizeof(soIdx)); + uint32_t size = num * sizeof(T); + + str = reinterpret_cast(&num); + profileData.insert(profileData.end(), str, str + sizeof(num)); + + str = reinterpret_cast(&size); + profileData.insert(profileData.end(), str, str + sizeof(size)); + + str = reinterpret_cast(typeTProfdataArray.data()); + profileData.insert(profileData.end(), str, str + size); + // update profileDataSize + profileDataSize = profileDataSize + sizeof(num) + sizeof(size) + sizeof(soIdx) + size;; + } + if (realNum != 0) { + profileHeaders.emplace_back(lastProfileDataSize, static_cast(profileType), realNum); + profileDataNum++; + } else { + return ; + } +} + +void ProfileWriter::WriteIRProfData(MapleIRProf &data) { + MapleProfile funcDescTab; + MapleProfile funcCounterTab; + for (auto item : data) { + auto &rawFuncList = item.second; + std::string baseName = GetBaseName(item.first); + uint32_t soIdx = GetorInsertStrInTab(baseName); + auto &descTab = rawFuncList.descTab; + auto &counterTab = rawFuncList.counterTab; + // gen file counter tab + auto &counterList = funcCounterTab[soIdx]; + for (auto &counter : counterTab) { + counterList.emplace_back(counter); + } + + // gen file func desc tab + auto &list = funcDescTab[soIdx]; + for (auto &funcDesc : descTab) { + std::vector soNames; + Split(funcDesc.funcName, '|', std::back_inserter(soNames)); + uint32_t classIdx = GetorInsertStrInTab(soNames[0]); + uint32_t funcIdx = GetorInsertStrInTab(soNames[1]); + uint32_t sigIdx = GetorInsertStrInTab(soNames[2]); + list.emplace_back(funcDesc.hash, classIdx, funcIdx, sigIdx, funcDesc.start, funcDesc.end); + } + VLOG(profiler) << baseName << " counterTab size " << counterList.size() << " descTab size " << + list.size() << "\n"; + } + // because profile desc tab depends on profile counter Tab, so must write counterTab first + WriteProfileData(funcCounterTab, kIRCounter); + WriteProfileData(funcDescTab, kBBInfo); +} + +// process classmeta/fieldmeta/methodmeta/reflectionstr profile data; +void ProfileWriter::ProcessMetaData() { + MapleProfile classMeta; + { + std::lock_guard lock(classMetaMtx); + GenMetaData(classMeta, hotClassMetaSet); + } + WriteProfileData(classMeta, kClassMeta); + + MapleProfile methodMeta; + { + std::lock_guard lock(methodMetaMtx); + GenMetaData(methodMeta, hotMethodMetaSet); + } + WriteProfileData(methodMeta, kMethodMeta); + + MapleProfile fieldMeta; + { + std::lock_guard lock(fieldMetaMtx); + GenMetaData(fieldMeta, hotFieldMetaSet); + } + WriteProfileData(fieldMeta, kFieldMeta); + + // process reflection str profile data; + MapleProfile reflectStrMeta; + { + std::lock_guard lock(reflectStrMtx); + GenReflectStrMetaData(reflectStrMeta, hotRefectionStrInfo); + } + WriteProfileData(reflectStrMeta, kReflectionStr); + + MapleProfile methodSignature; + { + std::lock_guard lock(methodSignatureMtx); + GenMethodSignatureData(methodSignature, hotMethodSignatureSet); + } + WriteProfileData(methodSignature, kMethodSig); +} + +void ProfileWriter::ProcessFuncProf() { + // when get the profile data of metdata and function will case some reflection str recored + // so first disble meta profile + MRT_DisableMetaProfile(); + std::unordered_map> funcProfileRaw; + MapleIRProf funcIRProfRaw; + MapleProfile funcProfile; + LinkerAPI::Instance().DumpAllMplFuncProfile(funcProfileRaw); + LinkerAPI::Instance().DumpAllMplFuncIRProfile(funcIRProfRaw); + MRT_EnableMetaProfile(); + GenFuncData(funcProfile, funcProfileRaw); + WriteProfileData(funcProfile, kFunction); + // write func ir prof + WriteIRProfData(funcIRProfRaw); +} +// 
some kind prof data save the raw data in profile directly like literal and BB profile +// because raw prof data is readable,and compact +void ProfileWriter::ProcessRawProf() { + // process literal string + std::stringstream literalProfile; + std::string literalContent; + DumpConstStringPool(literalProfile, true); + literalContent = literalProfile.str(); + uint32_t literalContentSize = static_cast(literalContent.size()); + if (literalContentSize != 0) { + profileHeaders.emplace_back(profileDataSize, kLiteral, 0); + profileDataNum++; + profileDataSize += literalContentSize; + (void)profileData.insert(profileData.end(), literalContent.c_str(), literalContent.c_str() + literalContentSize); + } + // process BB profile + std::ostringstream bbProfile; + LinkerAPI::Instance().DumpBBProfileInfo(bbProfile); + std::string bbProfileContent = bbProfile.str(); + uint32_t bbProfileContentSize = static_cast(bbProfileContent.size()); + if (bbProfileContentSize != 0) { + profileHeaders.emplace_back(profileDataSize, kBBInfo, 0); + profileDataNum++; + profileDataSize += bbProfileContentSize; + (void)profileData.insert(profileData.end(), bbProfileContent.c_str(), + bbProfileContent.c_str() + bbProfileContentSize); + } +} + +void ProfileWriter::RemoveProfile() { + if (std::remove(path.c_str())) { + if (errno != ENOENT) { + LOG(ERROR) << "RemoveProfile failed to remove " << path << ", " << strerror(errno); + } + return; + } + LOG(INFO) << "RemoveProfile remove " << path << " successfully"; + return; +} + +void ProfileWriter::Init() { + if (!isSystemServer) { + char *str = nullptr; + std::string packageName = LinkerAPI::Instance().GetAppPackageName(); + uint32_t packageNameIdx = GetorInsertStrInTab(packageName); + profileHeaders.emplace_back(profileDataSize, kFileDesc, 1); + profileDataNum++; + str = reinterpret_cast(&packageNameIdx); + profileData.insert(profileData.end(), str, str + sizeof(packageNameIdx)); + profileDataSize = profileDataSize + sizeof(packageNameIdx); + } +} + +void ProfileWriter::ProcessProfData() { + uint64_t processProfileStart = timeutils::NanoSeconds(); + ProcessMetaData(); + ProcessFuncProf(); + ProcessRawProf(); + uint64_t processProfileEnd = timeutils::NanoSeconds(); + uint64_t processProfileCost = processProfileEnd - processProfileStart; + LOG(INFO) << "Total process Profile time: " << Pretty(processProfileCost / nsPerUs) << "us" << maple::endl; +} + +void ProfileWriter::InitFileHeader(Header &header) { + std::copy_n(kProfileMagic, sizeof(kProfileMagic), header.magic); + std::copy_n(kVer, sizeof(kVer), header.ver); + header.profileNum = profileDataNum; + if (!isSystemServer) { + header.profileFileType = kApp; + } else { + header.profileFileType = kSystemServer; + } + uint32_t headerSize = sizeof(Header) + (profileDataNum - 1) * sizeof(ProfileDataInfo); + // adjust the offset + header.stringTabOff = headerSize + profileDataSize; + header.stringCount = static_cast(strTab.size()); + for (auto &item : profileHeaders) { + item.profileDataOff = item.profileDataOff + headerSize; + } +} + +void ProfileWriter::WriteFormatDataToFile() { + uint64_t processProfileEnd = timeutils::NanoSeconds(); + Header header; + InitFileHeader(header); + bool res = true; + bool removeIfFailed = true; + std::vector strData; + std::ofstream out(path, std::ios::binary | std::ios::trunc); + if (!out) { + if (errno != EACCES) { + LOG(ERROR) << "SaveProfile failed to open " << path << ", " << strerror(errno); + } else { + removeIfFailed = false; + } + res = false; + goto END; + } + // write header + if 
+
+void ProfileWriter::WriteFormatDataToFile() {
+  uint64_t processProfileEnd = timeutils::NanoSeconds();
+  Header header;
+  InitFileHeader(header);
+  bool res = true;
+  bool removeIfFailed = true;
+  std::vector strData;
+  std::ofstream out(path, std::ios::binary | std::ios::trunc);
+  if (!out) {
+    if (errno != EACCES) {
+      LOG(ERROR) << "SaveProfile failed to open " << path << ", " << strerror(errno);
+    } else {
+      removeIfFailed = false;
+    }
+    res = false;
+    goto END;
+  }
+  // write header
+  if (!out.write(reinterpret_cast(&header), sizeof(Header) - sizeof(ProfileDataInfo))) {
+    LOG(ERROR) << "SaveProfile failed to write header for " << path << ", " << strerror(errno);
+    res = false;
+    goto END;
+  }
+  // write profile data info
+  if (!out.write(reinterpret_cast(profileHeaders.data()), profileHeaders.size() * sizeof(ProfileDataInfo))) {
+    LOG(ERROR) << "SaveProfile failed to write ProfileDataInfo for " << path << ", " << strerror(errno);
+    res = false;
+    goto END;
+  }
+  // write profile data
+  if (!out.write(reinterpret_cast(profileData.data()), profileData.size())) {
+    LOG(ERROR) << "SaveProfile failed to write ProfileData for " << path << ", " << strerror(errno);
+    res = false;
+    goto END;
+  }
+  GenStrData(strData);
+  // write strTab
+  if (!out.write(reinterpret_cast(strData.data()), strData.size())) {
+    LOG(ERROR) << "SaveProfile failed to write strData for " << path << ", " << strerror(errno);
+    res = false;
+    goto END;
+  }
+
+END:
+  LOG(INFO) << "SaveProfile res=" << res;
+  if (!res && removeIfFailed) {
+    LOG(INFO) << "SaveProfile to remove ";
+    RemoveProfile();  // remove the partial file so the profile can be re-saved later
+  }
+  uint64_t saveProfileEnd = timeutils::NanoSeconds();
+  uint64_t saveProfileCost = saveProfileEnd - processProfileEnd;
+  LOG(INFO) << "Total save Profile time: " << Pretty(saveProfileCost / nsPerUs) << "us" << maple::endl;
+}
+
+void ProfileWriter::WriteToFile() {
+  Init();
+  ProcessProfData();
+  if (!profileDataNum) {
+    LOG(INFO) << "dump profile no data for save" << maple::endl;
+    return;
+  }
+  WriteFormatDataToFile();
+}
+
+extern "C" void MRT_SaveProfile(const std::string &path, bool isSystemServer) {
+  ProfileWriter profileWriter(path, isSystemServer);
+  profileWriter.WriteToFile();
+}
+
+void MCC_SaveProfile() {
+  LOG(INFO) << "SaveProfile starting...";
+  std::string saveName = "/data/anr/maple_all.prof";
+  MRT_SaveProfile(saveName, false);
+}
+} // namespace maplert
diff --git a/src/mrt/maplert/src/mrt_reflection_class.cpp b/src/mrt/maplert/src/mrt_reflection_class.cpp
new file mode 100644
index 0000000000..dba6bc5eb7
--- /dev/null
+++ b/src/mrt/maplert/src/mrt_reflection_class.cpp
@@ -0,0 +1,1593 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "mrt_reflection_class.h" +#include +#include +#include "libs.h" +#include "collector/cp_generator.h" +#include "itab_util.h" +#include "utils/name_utils.h" +#include "mrt_class_api.h" +#include "mrt_reflection_api.h" +#include "mclass_inline.h" +#include "mstring_inline.h" +#include "fieldmeta_inline.h" + +using namespace maple; + +namespace maplert { +using namespace annoconstant; + +// This method is only used in cycle pattern load and all patterns are predefined +// Or saved before reboot +bool MRT_ClassSetGctib(jclass cls, char *newBuffer, jint offset) { + MClass *klass = MClass::JniCastNonNull(cls); + GCTibGCInfo *gctibInfo = reinterpret_cast(klass->GetGctib()); + DCHECK(gctibInfo != nullptr) << "Cyclepattern: gctib is null" << maple::endl; + DCHECK(newBuffer != nullptr) << "Cyclepattern: newBuffer is null" << maple::endl; + size_t gctibSize = sizeof(GCTibGCInfo) + (gctibInfo->nBitmapWords * kDWordBytes); + int32_t tempSize = static_cast(gctibSize) + offset; + constexpr int32_t kMaxSize = 64 * 1024 * 1024; // 64M + if (tempSize <= 0 || tempSize > kMaxSize) { + return false; + } + char *newAddress = static_cast(malloc(tempSize)); + if (newAddress == nullptr) { + LOG(ERROR) << "MRT_ClassSetGctib malloc error" << maple::endl; + return false; + } + // set header and cyclepattern + errno_t returnValueOfMemcpyS1 = memcpy_s(newAddress, gctibSize, gctibInfo, gctibSize); + errno_t returnValueOfMemcpyS2 = memcpy_s(newAddress + gctibSize, offset, newBuffer, offset); + if (returnValueOfMemcpyS1 != EOK || returnValueOfMemcpyS2 != EOK) { + free(newAddress); + return false; + } + uint32_t maxRC = 0; + uint32_t minRC = 0; + bool valid = ClassCycleManager::GetRCThreshold(maxRC, minRC, newBuffer); + if (valid == false) { + LOG(ERROR) << "Cyclepattern: invalid min/max rc threshold " << maple::endl; + free(newAddress); + return false; + } + auto newGctib = reinterpret_cast(newAddress); + newGctib->headerProto = SetCycleMaxRC(gctibInfo->headerProto, maxRC) | SetCycleMinRC(gctibInfo->headerProto, minRC) | + maplert::kCyclePatternBit; + LOG2FILE(kLogtypeCycle) << klass->GetName() << " max min: " << maxRC << " " << minRC << std::hex << + " " << newGctib->headerProto << std::dec << std::endl; + if (!ClassCycleManager::CheckValidPattern(klass, newAddress)) { + LOG2FILE(kLogtypeCycle) << "cycle_check: verify fail " << klass->GetName() << std::endl; + free(newAddress); + return false; + } + klass->SetGctib(reinterpret_cast(newAddress)); + if (ClassCycleManager::HasDynamicLoadPattern(klass)) { + free(gctibInfo); + } else { + ClassCycleManager::AddDynamicLoadPattern(klass, true); + } + return true; +} + +// interface to access frequently used class-es +jclass MRT_ReflectGetObjectClass(jobject mObj) { + MObject *obj = MObject::JniCastNonNull(mObj); + return obj->GetClass()->AsJclass(); +} + +jclass MRT_ReflectClassForCharName(const char *className, bool init, jobject classLoader, bool internalName) { + DCHECK(className != nullptr); + std::string descriptor = internalName ? 
string(className) : nameutils::DotNameToSlash(className); + MClass *klass = MClass::JniCastNonNull(MRT_GetClassByClassLoader(classLoader, descriptor)); + if (init && (klass != nullptr)) { + bool ret = klass->InitClassIfNeeded(); + if (ret) { + LOG(ERROR) << "MRT_TryInitClass return fail" << maple::endl; + } + } + if (klass != nullptr) { + return klass->AsJclass(); + } + return nullptr; +} + +bool MRT_ClassIsSuperClassValid(jclass clazz) { + MClass *cls = MClass::JniCast(clazz); + if (UNLIKELY(cls == nullptr)) { + return false; + } + if (UNLIKELY(cls->IsInterface())) { + return true; + } + if (UNLIKELY(cls->IsPrimitiveClass())) { + return true; + } + if (UNLIKELY(cls->IsArrayClass())) { + return true; + } + + while (cls != WellKnown::GetMClassObject()) { + uint32_t numOfSuperClass = cls->GetNumOfSuperClasses(); + if (UNLIKELY(numOfSuperClass == 0)) { + return true; + } + MClass **superArray = cls->GetSuperClassArray(); + if (UNLIKELY(superArray == nullptr)) { + LOG(WARNING) << "\"" << cls->GetName() << "\"'s superclass array should not be null." << maple::endl; + return false; + } + for (uint32_t i = 0; i < numOfSuperClass; ++i) { + ClassMetadata *tempSuper = reinterpret_cast(superArray[i]); + MClass *super = reinterpret_cast(LinkerAPI::Instance().GetSuperClass(&tempSuper)); + if (UNLIKELY(super == nullptr)) { + LOG(WARNING) << "\"" << cls->GetName() << "\"'s superclass[" << i << "] should not be null." << + maple::endl; + return false; + } + } + cls = superArray[0]; + } + return true; +} + +bool ReflectClassIsDeclaredAnnotationPresent(const MClass &classObj, const MObject *annoObj) { + string annoStr = classObj.GetAnnotation(); + CHECK(annoObj != nullptr); + char *annotationTypeName = annoObj->AsMClass()->GetName(); + if (annoStr.empty() || annotationTypeName == nullptr) { + return false; + } + return AnnotationUtil::GetIsAnnoPresent(annoStr, annotationTypeName, + reinterpret_cast(const_cast(&classObj)), + annoObj->AsMClass(), kClassAnnoPresent); +} + +MObject *ReflectClassGetDeclaredAnnotation(const MClass &classObj, const MClass *annoClass) { + if (annoClass == nullptr) { + MRT_ThrowNewException("java/lang/NullPointerException", nullptr); + return nullptr; + } + if (classObj.IsProxy()) { + return nullptr; + } + string annoStr = classObj.GetAnnotation(); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), const_cast(&classObj)); + std::unique_ptr parser(&annoParser); + MObject *ret = parser->AllocAnnoObject(&const_cast(classObj), const_cast(annoClass)); + return ret; +} + +jobject MRT_ReflectClassGetDeclaredAnnotations(jclass cls) { + MClass *classObj = MClass::JniCastNonNull(cls); + string annoStr = classObj->GetAnnotation(); + CacheValueType placeHolder = nullptr; + if (annoStr.empty() || !AnnoParser::HasAnnoMember(annoStr) || + AnnotationUtil::GetCache(kHasNoDeclaredAnno, classObj, placeHolder)) { + MArray *nullArray = MArray::NewObjectArray(0, *WellKnown::GetMClassAAnnotation()); + return nullArray->AsJobject(); + } + VLOG(reflect) << "Enter MRT_ReflectClassGetDeclaredAnnotations, annoStr: " << annoStr << maple::endl; + return AnnotationUtil::GetDeclaredAnnotations(annoStr, classObj)->AsJobject(); +} + +MObject *ReflectClassGetAnnotation(const MClass &klass, const MClass *annotationType) { + DCHECK(annotationType != nullptr) << "ReflectClassGetAnnotation: annotationType nullptr !" 
<< maple::endl; + // NEED: check annotationType for null + MObject *anno = ReflectClassGetDeclaredAnnotation(klass, annotationType); + if (anno != nullptr) { + return anno; + } + + if (AnnotationUtil::HasDeclaredAnnotation(const_cast(annotationType), kAnnotationInherited)) { + // then annotations from super class-chain + MClass *superKlass = klass.GetSuperClass(); + for (; superKlass != nullptr; superKlass = superKlass->GetSuperClass()) { + anno = ReflectClassGetDeclaredAnnotation(*superKlass, annotationType); + if (anno != nullptr) { + return anno; + } + } + } + + return nullptr; +} + +MObject *ReflectClassGetClasses(const MClass &classObj) { + set metaList; + AnnotationUtil::GetDeclaredClasses(const_cast(&classObj), metaList); + MClass *superCl = classObj.GetSuperClass(); + while (superCl != nullptr) { + AnnotationUtil::GetDeclaredClasses(superCl, metaList); + superCl = superCl->GetSuperClass(); + } + auto it = metaList.begin(); + while (it != metaList.end()) { + if (!(*it)->IsPublic()) { + it = metaList.erase(it); + } else { + ++it; + } + } + uint32_t size = static_cast(metaList.size()); + MArray *jarray = MArray::NewObjectArray(size, *WellKnown::GetMClassAClass()); + it = metaList.begin(); + for (uint32_t i = 0; i < size; ++i, ++it) { + jarray->SetObjectElementNoRc(i, *it); + } + return jarray; +} + +jobjectArray MRT_ReflectClassGetDeclaredClasses(jclass cls) { + MClass *classObj = MClass::JniCastNonNull(cls); + set *metalist = reinterpret_cast*>(AnnotationUtil::Get(kDeclaredClasses, classObj)); + + uint32_t arrSize = static_cast(metalist->size()); + MArray *arrayObj = MArray::NewObjectArray(arrSize, *WellKnown::GetMClassAClass()); + auto it = metalist->begin(); + for (uint32_t i = 0; i < arrSize; ++i, ++it) { + arrayObj->SetObjectElementOffHeap(i, *it); + } + return arrayObj->AsJobjectArray(); +} + +MObject *ReflectClassGetInnerClassName(const MClass &classObj) { + string annoStr = classObj.GetAnnotation(); + if (annoStr.empty()) { + return nullptr; + } + VLOG(reflect) << "Enter __MRT_Reflect_Class_getInnerClassName, annoStr: " << annoStr << maple::endl; + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), const_cast(&classObj)); + std::unique_ptr parser(&annoParser); + int32_t loc = parser->Find(parser->GetInnerClassStr()); + if (loc == kNPos) { + return nullptr; + } + + constexpr int kSteps = 4; // jump to InnerClass value + parser->NextItem(kSteps); + std::string retArr; + if (parser->ParseNum(kValueInt) == kValueNull) { + retArr = "NULL"; + } else { + retArr = parser->ParseStr(kDefParseStrType); + } + MString *res = MString::InternUtf(retArr); + return res; +} + +// the following API get java/lang/reflect/Field Object, Java heap Object +// implement API for java/lang/Class Native +static void ThrowNoSuchFieldException(const MClass &classObj, const MString &fieldName) { + std::ostringstream msg; + std::string temp; + classObj.GetDescriptor(temp); + std::string fieldCharName = fieldName.GetChars(); + msg << "No field " << fieldCharName << " in class " << temp; + MRT_ThrowNewException("java/lang/NoSuchFieldException", msg.str().c_str()); +} + +MObject *ReflectClassGetField(const MClass &classObj, const MString *fieldName) { + if (UNLIKELY(fieldName == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + + FieldMeta *field = classObj.GetField(fieldName, true); + MField *mField = nullptr; + if (field != nullptr) { + mField = MField::NewMFieldObject(*field); + } else { + ThrowNoSuchFieldException(classObj, 
*fieldName); + } + return mField; +} + +MObject *ReflectClassGetPublicFieldRecursive(const MClass &classObj, const MString *fieldName) { + if (UNLIKELY(fieldName == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + + FieldMeta *field = classObj.GetField(fieldName, true); + MField *mField = nullptr; + if (field != nullptr) { + mField = MField::NewMFieldObject(*field); + } + return mField; +} + +MObject *ReflectClassGetDeclaredFields(const MClass &classObj) { + ScopedHandles sHandles; + uint32_t numOfField = classObj.GetNumOfFields(); + FieldMeta *fields = classObj.GetFieldMetas(); + ObjHandle fieldArray(MArray::NewObjectArray(numOfField, *WellKnown::GetMClassAField())); + for (uint32_t i = 0; i < numOfField; ++i) { + FieldMeta *field = fields + i; + MField *mField = MField::NewMFieldObject(*field); + if (UNLIKELY(mField == nullptr)) { + return nullptr; + } + fieldArray->SetObjectElementNoRc(i, mField); + } + return fieldArray.ReturnObj(); +} + +MObject *ReflectClassGetDeclaredField(const MClass &classObj, const MString *fieldName) { + if (UNLIKELY((fieldName) == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + if (&classObj == WellKnown::GetMClassString() && fieldName->Cmp("value")) { + // We log the error for this specific case, as the user might just swallow the exception. + // This helps diagnose crashes when applications rely on the String#value field being + // there. + // Also print on the error stream to test it through run-test. + std::string message("The String#value field is not present on Android versions >= 6.0"); + LOG(ERROR) << message << maple::endl; + std::cerr << message << std::endl; + } + + FieldMeta *fieldMeta = classObj.GetDeclaredField(fieldName); + if ((fieldMeta != nullptr) && (&classObj == WellKnown::GetMClassClass()) && !fieldMeta->IsStatic()) { + LOG(ERROR) << "The Class instance field is not present on maple" << maple::endl; + fieldMeta = nullptr; + } + if (fieldMeta == nullptr) { + ThrowNoSuchFieldException(classObj, *fieldName); + return nullptr; + } + + MField *mField = MField::NewMFieldObject(*fieldMeta); + return mField; +} + +static void GetDeclaredFields(const MClass &classObj, std::vector &fieldsVector, bool publicOnly) { + uint32_t numOfField = classObj.GetNumOfFields(); + FieldMeta *fields = classObj.GetFieldMetas(); + for (uint32_t i = 0; i < numOfField; ++i) { + FieldMeta *field = fields + i; + if (!publicOnly || field->IsPublic()) { + fieldsVector.push_back(field); + } + } +} + +static MObject *GetFieldsObjectArray(std::vector fieldsVector) { + ScopedHandles sHandles; + uint32_t numOfFields = static_cast(fieldsVector.size()); + uint32_t currentIndex = 0; + ObjHandle fieldArray(MArray::NewObjectArray(numOfFields, *WellKnown::GetMClassAField())); + for (auto fieldMeta : fieldsVector) { + MField *mField = MField::NewMFieldObject(*fieldMeta); + if (UNLIKELY(mField == nullptr)) { + return nullptr; + } + fieldArray->SetObjectElementNoRc(currentIndex++, mField); + } + return fieldArray.ReturnObj(); +} + +MObject *ReflectClassGetDeclaredFieldsUnchecked(const MClass &classObj, bool publicOnly) { + std::vector fieldsVector; + GetDeclaredFields(classObj, fieldsVector, publicOnly); + return GetFieldsObjectArray(fieldsVector); +} + +static void GetPublicFieldsRecursive(const MClass &classObj, std::vector &fieldsVector) { + for (const MClass *super = &classObj; super != nullptr; super = super->GetSuperClass()) { + GetDeclaredFields(*super, 
fieldsVector, true); + } + // search iftable which has a flattened and uniqued list of interfaces + std::vector interfaceList; + classObj.GetInterfaces(interfaceList); + for (auto itfInfo : interfaceList) { + GetDeclaredFields(*itfInfo, fieldsVector, true); + } +} + +MObject *ReflectClassGetFields(const MClass &classObj) { + std::vector fieldsVector; + GetPublicFieldsRecursive(classObj, fieldsVector); + return GetFieldsObjectArray(fieldsVector); +} + +void ReflectClassGetPublicFieldsRecursive(const MClass &classObj, MObject *listObject) { + std::vector fieldsVector; + GetPublicFieldsRecursive(classObj, fieldsVector); + MClass *collectionsClass = MClass::GetClassFromDescriptor(nullptr, "Ljava/util/ArrayList;"); + MethodMeta *addMethod = collectionsClass->GetMethod("add", "(Ljava/lang/Object;)Z"); + if (UNLIKELY(addMethod == nullptr)) { + return; + } + + ScopedHandles sHandles; + for (auto field : fieldsVector) { + ObjHandle fieldObject(MField::NewMFieldObject(*field)); + if (UNLIKELY(fieldObject() == nullptr)) { + return; + } + + jvalue arg[1]; + arg[0].l = fieldObject.AsJObj(); + bool isSuccess = addMethod->Invoke(listObject, arg); + if (isSuccess == false) { + return; + } + } +} + +jobject MRT_ReflectClassGetDeclaredFields(jclass classObj, jboolean publicOnly) { + MClass *klass = MClass::JniCastNonNull(classObj); + MObject *ret = ReflectClassGetDeclaredFieldsUnchecked(*klass, publicOnly == JNI_TRUE); + return ret->AsJobject(); +} + +MObject *ReflectClassGetPublicDeclaredFields(const MClass &classObj) { + return ReflectClassGetDeclaredFieldsUnchecked(classObj, true); +} + +// the following API get FieldMeta +uint32_t MRT_ReflectClassGetNumofFields(jclass cls) { + MClass *mClassObj = MClass::JniCastNonNull(cls); + return mClassObj->GetNumOfFields(); +} + +jfieldID MRT_ReflectClassGetFieldsPtr(jclass classObj) { + MClass *mClassObj = MClass::JniCastNonNull(classObj); + FieldMeta *fields = mClassObj->GetFieldMetas(); + return fields->AsJfieldID(); +} + +jfieldID MRT_ReflectClassGetIndexField(jfieldID head, int i) { + FieldMeta *fieldMeta = FieldMeta::JniCast(head) + i; + return fieldMeta->AsJfieldID(); +} + +// find fieldMeta recursively in classObj +jfieldID MRT_ReflectGetCharField(jclass classObj, const char *fieldName, const char *fieldType) { + MClass *mClassObj = MClass::JniCastNonNull(classObj); + FieldMeta *retObj = mClassObj->GetField(fieldName, fieldType, false); + return retObj->AsJfieldID(); +} + +jfieldID MRT_ReflectGetStaticCharField(jclass classObj, const char *fieldName) { + MClass *mClassObj = MClass::JniCastNonNull(classObj); + FieldMeta *field = mClassObj->GetField(fieldName, nullptr, false); + if (field != nullptr && field->IsStatic()) { + return field->AsJfieldID(); + } + return nullptr; +} + +// find fieldMeta declared *just* in classObj +MObject *ReflectClassGetDeclaredMethodInternal(const MClass &classObj, const MString *methodName, + const MArray *arrayClass) { + if (UNLIKELY((methodName) == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + MethodMeta *methodMeta = classObj.GetDeclaredMethod(methodName, arrayClass); + if (methodMeta == nullptr) { + return nullptr; + } + MMethod *methodObject = MMethod::NewMMethodObject(*methodMeta); + return methodObject; +} + +static MethodMeta *FindInterfaceMethod(const MClass &classObj, const MString *methodName, const MArray *arrayClass) { + std::vector interfaceVector; + MethodMeta *resultMethod = nullptr; + classObj.GetInterfaces(interfaceVector); + for (auto interface 
: interfaceVector) { + MethodMeta *method = interface->GetDeclaredMethod(methodName, arrayClass); + if ((method != nullptr) && (method->IsPublic())) { + if (resultMethod == nullptr) { + resultMethod = method; + } else if (resultMethod->GetDeclaringClass()->IsAssignableFrom(*method->GetDeclaringClass())) { + resultMethod = method; + } + } + } + return resultMethod; +} + +MObject *ReflectClassFindInterfaceMethod(const MClass &classObj, const MString *methodName, const MArray *arrayClass) { + if (UNLIKELY((methodName) == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + MethodMeta *methodMeta = FindInterfaceMethod(classObj, methodName, arrayClass); + if (methodMeta == nullptr) { + return nullptr; + } + MMethod *methodObject = MMethod::NewMMethodObject(*methodMeta); + return methodObject; +} + +static MethodMeta *GetPublicMethodRecursive(const MClass &classObj, const MString *methodName, + const MArray *arrayClass) { + MethodMeta *method = nullptr; + for (const MClass *superClass = &classObj; superClass != nullptr; superClass = superClass->GetSuperClass()) { + method = superClass->GetDeclaredMethod(methodName, arrayClass); + if ((method != nullptr) && (method->IsPublic())) { + return method; + } + } + method = FindInterfaceMethod(classObj, methodName, arrayClass); + return method; +} + +static void ThrowNoSuchMethodException(const MClass &classObj, const MString *methodName, + const MArray *arrayClass, bool isInit = false) { + std::ostringstream msg; + std::string className; + classObj.GetBinaryName(className); + if (isInit) { + msg << className << "." << "" << " ["; + } else if (methodName != nullptr) { + std::string methodCharName = methodName->GetChars(); + msg << className << "." << methodCharName << " ["; + } + if (arrayClass != nullptr) { + uint32_t len = arrayClass->GetLength(); + std::string name; + for (uint32_t i = 0; i < len; ++i) { + MClass *elementObj = arrayClass->GetObjectElementNoRc(i)->AsMClass(); + name.clear(); + elementObj->GetBinaryName(name); + if (elementObj->IsInterface()) { + msg << "interface "; + } else if (!elementObj->IsPrimitiveClass()) { + msg << "class "; + } + msg << name; + if (i != (len - 1)) { + msg << ", "; + } + } + } + msg << "]"; + MRT_ThrowNewException("java/lang/NoSuchMethodException", msg.str().c_str()); +} + +static MethodMeta *GetMethod(const MClass &classObj, const MString *methodName, + const MArray *arrayClass, bool recursive) { + if (UNLIKELY((methodName) == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + if (UNLIKELY((arrayClass != nullptr) && arrayClass->HasNullElement())) { + MRT_ThrowNewException("java/lang/NoSuchMethodException", "parameter type is null"); + return nullptr; + } + + MethodMeta *method = recursive ? 
GetPublicMethodRecursive(classObj, methodName, arrayClass) : + classObj.GetDeclaredMethod(methodName, arrayClass); + if (method != nullptr) { + return method; + } + ThrowNoSuchMethodException(classObj, methodName, arrayClass); + return nullptr; +} + +MObject *ReflectClassGetMethod(const MClass &classObj, const MString *methodName, const MArray *arrayClass) { + MethodMeta *method = GetMethod(classObj, methodName, arrayClass, true); + if (method == nullptr) { + return nullptr; + } + MMethod *methodObject = MMethod::NewMMethodObject(*method); + return methodObject; +} + +MObject *ReflectClassGetDeclaredMethod(const MClass &classObj, const MString *methodName, const MArray *arrayClass) { + MethodMeta *methodMeta = GetMethod(classObj, methodName, arrayClass, false); + if (methodMeta == nullptr) { + return nullptr; + } + MMethod *methodObject = MMethod::NewMMethodObject(*methodMeta); + return methodObject; +} + +MObject *ReflectClassGetDeclaredMethods(const MClass &classObj) { + MObject *methodArray = ReflectClassGetDeclaredMethodsUnchecked(classObj, false); + return methodArray; +} + +MObject *ReflectClassGetMethods(const MClass &classObj) { + std::vector methodsVector; + for (const MClass *superClass = &classObj; superClass != nullptr; superClass = superClass->GetSuperClass()) { + superClass->GetDeclaredMethods(methodsVector, true); + } + std::vector interfaceList; + classObj.GetInterfaces(interfaceList); + for (auto it = interfaceList.begin(); it != interfaceList.end(); ++it) { + MClass *interface = *it; + interface->GetDeclaredMethods(methodsVector, true); + } + + std::vector methodsUnique; + for (auto mth : methodsVector) { + char *methodName0 = mth->GetName(); + char *sigName0 = mth->GetSignature(); + auto itMethod = methodsUnique.begin(); + for (; itMethod != methodsUnique.end(); ++itMethod) { + char *methodName1 = (*itMethod)->GetName(); + char *sigName1 = (*itMethod)->GetSignature(); + if (!strcmp(methodName0, methodName1) && !strcmp(sigName0, sigName1)) { + break; + } + } + if (itMethod == methodsUnique.end()) { + methodsUnique.push_back(mth); + } + } + ScopedHandles sHandles; + uint32_t numOfMethod = static_cast(methodsUnique.size()); + ObjHandle methodArray(MArray::NewObjectArray(numOfMethod, *WellKnown::GetMClassAMethod())); + uint32_t currentIndex = 0; + for (auto methodMeta : methodsUnique) { + MMethod *methodObject = MMethod::NewMMethodObject(*methodMeta); + if (UNLIKELY(methodObject == nullptr)) { + return nullptr; + } + methodArray->SetObjectElementNoRc(currentIndex++, methodObject); + } + return methodArray.ReturnObj(); +} + +void ReflectClassGetPublicMethodsInternal(const MClass &classObj, MObject *listObject) { + std::vector methodsVector; + for (const MClass *superClass = &classObj; superClass != nullptr; superClass = superClass->GetSuperClass()) { + superClass->GetDeclaredMethods(methodsVector, true); + } + std::vector interfaceList; + classObj.GetInterfaces(interfaceList); + for (auto interface : interfaceList) { + interface->GetDeclaredMethods(methodsVector, true); + } + MClass *collectionsClass = MClass::GetClassFromDescriptor(nullptr, "Ljava/util/ArrayList;"); + MethodMeta *addMethod = collectionsClass->GetMethod("add", "(Ljava/lang/Object;)Z"); + if (UNLIKELY(addMethod == nullptr)) { + return; + } + ScopedHandles sHandles; + for (auto m : methodsVector) { + ObjHandle methodObject(MMethod::NewMMethodObject(*m)); + if (UNLIKELY(methodObject() == nullptr)) { + return; + } + jvalue arg[1]; + arg[0].l = methodObject.AsJObj(); + bool isSuccess = 
addMethod->Invoke(listObject, arg); + if (isSuccess == false) { + return; + } + } +} + +MObject *ReflectClassGetDeclaredMethodsUnchecked(const MClass &classObj, bool publicOnly) { + ScopedHandles sHandles; + std::vector methodsVector; + classObj.GetDeclaredMethods(methodsVector, publicOnly); + uint32_t numOfMethod = static_cast(methodsVector.size()); + ObjHandle methodArray(MArray::NewObjectArray(numOfMethod, *WellKnown::GetMClassAMethod())); + uint32_t currentIndex = 0; + for (auto methodMeta : methodsVector) { + MMethod *methodObject = MMethod::NewMMethodObject(*methodMeta); + if (UNLIKELY(methodObject == nullptr)) { + return nullptr; + } + methodArray->SetObjectElementNoRc(currentIndex++, methodObject); + } + return methodArray.ReturnObj(); +} + +jobject MRT_ReflectClassGetDeclaredMethods(jclass classObj, jboolean publicOnly) { + MClass *klass = MClass::JniCastNonNull(classObj); + MObject *m = ReflectClassGetDeclaredMethodsUnchecked(*klass, publicOnly == JNI_TRUE); + return m->AsJobject(); +} + +MObject *ReflectClassGetInstanceMethod(const MClass &classObj, const MString *methodName, const MArray *arrayClass) { + if (UNLIKELY((methodName) == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "name == null"); + return nullptr; + } + MethodMeta *method = nullptr; + for (const MClass *superClass = &classObj; superClass != nullptr; superClass = superClass->GetSuperClass()) { + method = superClass->GetDeclaredMethod(methodName, arrayClass); + if ((method != nullptr) && (!method->IsStatic()) && (!method->IsConstructor())) { + MMethod *methodObject = MMethod::NewMMethodObject(*method); + return methodObject; + } + } + method = FindInterfaceMethod(classObj, methodName, arrayClass); + if (method == nullptr) { + return nullptr; + } + MMethod *methodObject = MMethod::NewMMethodObject(*method); + return methodObject; +} + +MObject *ReflectClassGetDeclaredConstructorInternal(const MClass &classObj, const MArray *arrayClass) { + MethodMeta *constructor = classObj.GetDeclaredConstructor(arrayClass); + if (constructor == nullptr) { + return nullptr; + } + MMethod *methodObject = MMethod::NewMMethodObject(*constructor); + return methodObject; +} + +MObject *ReflectClassGetDeclaredConstructor(const MClass &classObj, const MArray *arrayClass) { + if (UNLIKELY((arrayClass != nullptr) && arrayClass->HasNullElement())) { + MRT_ThrowNewException("java/lang/NoSuchMethodException", "parameter type is null"); + return nullptr; + } + + MethodMeta *method = classObj.GetDeclaredConstructor(arrayClass); + if (method != nullptr) { + MMethod *methodObject = MMethod::NewMMethodObject(*method); + return methodObject; + } + + ThrowNoSuchMethodException(classObj, nullptr, arrayClass, true); + return nullptr; +} + +static MObject *GetDeclaredConstructors(const MClass &classObj, bool publicOnly) { + std::vector constructorsVector; + uint32_t numOfMethod = classObj.GetNumOfMethods(); + MethodMeta *methodS = classObj.GetMethodMetas(); + for (uint32_t i = 0; i < numOfMethod; ++i) { + MethodMeta *method = &methodS[i]; + if (method->IsConstructor() && !method->IsStatic()) { + if (!publicOnly || (method->IsPublic())) { + constructorsVector.push_back(method); + } + } + } + ScopedHandles sHandles; + numOfMethod = static_cast(constructorsVector.size()); + ObjHandle constructorArray(MArray::NewObjectArray(numOfMethod, *WellKnown::GetMClassAConstructor())); + uint32_t currentIndex = 0; + for (auto methodMeta : constructorsVector) { + MMethod *constructorObject = MMethod::NewMMethodObject(*methodMeta); + if 
(UNLIKELY(constructorObject == nullptr)) { + return nullptr; + } + constructorArray->SetObjectElementNoRc(currentIndex++, constructorObject); + } + return constructorArray.ReturnObj(); +} + +MObject *ReflectClassGetDeclaredConstructorsInternal(const MClass &classObj, bool publicOnly) { + MObject *constructorArray = GetDeclaredConstructors(classObj, publicOnly); + return constructorArray; +} + +MObject *ReflectClassGetDeclaredConstructors(const MClass &classObj) { + MObject *constructorArray = GetDeclaredConstructors(classObj, false); + return constructorArray; +} + +jobject MRT_ReflectClassGetDeclaredConstructors(jclass classObj, jboolean publicOnly) { + MClass *mClassObj = MClass::JniCastNonNull(classObj); + MObject *ret = GetDeclaredConstructors(*mClassObj, publicOnly == JNI_TRUE); + return ret->AsJobject(); +} + +MObject *ReflectClassGetConstructor(const MClass &classObj, const MArray *arrayClass) { + if (UNLIKELY((arrayClass != nullptr) && arrayClass->HasNullElement())) { + MRT_ThrowNewException("java/lang/NoSuchMethodException", "parameter type is null"); + return nullptr; + } + + MethodMeta *constructor = classObj.GetDeclaredConstructor(arrayClass); + if ((constructor != nullptr) && (constructor->IsPublic())) { + MMethod *constructorObject = MMethod::NewMMethodObject(*constructor); + return constructorObject; + } + + ThrowNoSuchMethodException(classObj, nullptr, arrayClass, true); + return nullptr; +} + +MObject *ReflectClassGetConstructors(const MClass &classObj) { + MObject *constructorArray = GetDeclaredConstructors(classObj, true); + return constructorArray; +} + +jmethodID MRT_ReflectGetCharMethod(jclass classObj, const char *methodName, const char *signatureName) { + MClass *mClass = MClass::JniCastNonNull(classObj); + MethodMeta *methodMeta = mClass->GetMethod(methodName, signatureName); + return methodMeta->AsJmethodID(); +} + +jmethodID MRT_ReflectGetMethodFromMethodID(jclass clazz, jmethodID methodID, const char *signature ATTR_UNUSED) { + MethodMeta *methodMeta = MethodMeta::JniCastNonNull(methodID); + MClass *mClass = MClass::JniCastNonNull(clazz); + methodMeta = mClass->GetVirtualMethod(*methodMeta); + CHECK(methodMeta != nullptr); + return methodMeta->AsJmethodID(); +} + +jmethodID MRT_ReflectGetStaticCharMethod(jclass classObj, const char *methodName, const char *signatureName) { + MClass *mClass = MClass::JniCastNonNull(classObj); + MethodMeta *methodMeta = mClass->GetMethod(methodName, signatureName); + if (methodMeta != nullptr && (methodMeta->IsStatic())) { + return methodMeta->AsJmethodID(); + } + return nullptr; +} + +jint MRT_ReflectClassGetAccessFlags(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + uint32_t mod = mClass->GetModifier(); + return static_cast(mod & 0x7FFFFFFF); +} + +jclass MRT_ReflectClassGetComponentType(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + MClass *component = mClass->GetComponentClass(); + return component->AsJclass(); +} + +jclass MRT_ReflectClassGetSuperClass(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + MClass *supercls = mClass->GetSuperClass(); + return supercls->AsJclass(); +} + +static MArray *GetInterfacesInternal(const MClass &klass) { + uint32_t numOfInterface = klass.GetNumOfInterface(); + MClass *interfaceVector[numOfInterface]; + klass.GetDirectInterfaces(interfaceVector, numOfInterface); + MArray *interfacesArray = MArray::NewObjectArray(numOfInterface, *WellKnown::GetMClassAClass()); + for (uint32_t index = 0; index < numOfInterface; 
++index) { + interfacesArray->SetObjectElementOffHeap(index, interfaceVector[index]); + } + return interfacesArray; +} + +jobjectArray MRT_ReflectClassGetInterfaces(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + MArray *interfacesArray = GetInterfacesInternal(*mClass); + return interfacesArray->AsJobjectArray(); +} + +MObject *ReflectClassGetInterfacesInternal(const MClass &classObj) { + MArray *interfacesArray = nullptr; + if (!classObj.IsArrayClass()) { + uint32_t numOfInterface = classObj.GetNumOfInterface(); + if (numOfInterface != 0) { + interfacesArray = GetInterfacesInternal(classObj); + } + } + return interfacesArray; +} + +jint MRT_ReflectClassGetModifiers(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + const uint32_t kJavaFlagsMask = 0xFFFF; + return mClass->GetModifier() & kJavaFlagsMask; +} + +jstring MRT_ReflectClassGetName(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + MString *res = NewStringUtfFromPoolForClassName(*mClass); + return res->AsJstring(); +} + +MObject *ReflectClassGetSignatureAnnotation(const MClass &classObj) { + MObject *ret = classObj.GetSignatureAnnotation(); + return ret; +} + +MObject *ReflectClassGetEnclosingMethodNative(const MClass &classObj) { + MethodMeta *ret = MethodMeta::Cast(AnnotationUtil::Get(kEnclosingMethod, const_cast(&classObj))); + if ((ret != nullptr) && (!ret->IsConstructor())) { + MMethod *methodObj = MMethod::NewMMethodObject(*ret); + return methodObj; + } + return nullptr; +} + +MObject *ReflectClassGetEnclosingConstructorNative(const MClass &classObj) { + MethodMeta *mthMeta = MethodMeta::Cast(AnnotationUtil::Get(kEnclosingMethod, const_cast(&classObj))); + if ((mthMeta != nullptr) && (mthMeta->IsConstructor())) { + MMethod *methodObj = MMethod::NewMMethodObject(*mthMeta); + return methodObj; + } + return nullptr; +} + +MObject *ReflectClassGetEnclosingClass(const MClass &classObj) { + MObject *result = MObject::Cast(AnnotationUtil::Get(kEnclosingClass, const_cast(&classObj))); + return result; +} + +jclass MRT_ReflectClassGetDeclaringClass(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + if (mClass->IsAnonymousClass() || mClass->IsProxy()) { + VLOG(reflect) << "Enter MRT_ReflectClassGetDeclaringClass, return NULL " << maple::endl; + return nullptr; + } + return MObject::Cast(AnnotationUtil::Get(kDeclaringClass, mClass))->AsJclass(); +} + +bool ReflectClassIsMemberClass(const MClass &classObj) { + char *annotation = classObj.GetRawAnnotation(); + bool isValid = false; + bool result = (annotation != nullptr) ? 
AnnoParser::IsMemberClass(annotation, isValid) : + AnnoParser::IsMemberClass("", isValid); + if (isValid) { + return result; + } + return MRT_ReflectClassGetDeclaringClass(classObj) != nullptr; +} + + +bool ReflectClassIsLocalClass(const MClass &classObj) { + if (classObj.IsAnonymousClass()) { + return false; + } + bool isValid = modifier::IsLocalClassVaild(classObj.GetModifier()); + if (isValid) { + return modifier::IsLocalClass(classObj.GetModifier()); + } + MObject *enclosingMethod = ReflectClassGetEnclosingMethodNative(classObj); + if (enclosingMethod != nullptr) { + MRT_DecRef(reinterpret_cast(enclosingMethod)); + return true; + } + MObject *enclosingConstructor = ReflectClassGetEnclosingConstructorNative(classObj); + if (enclosingConstructor != nullptr) { + MRT_DecRef(reinterpret_cast(enclosingConstructor)); + return true; + } + return false; +} + +MObject *ReflectClassGetClassLoader(const MClass &classObj) { + jobject classLoader = MRT_GetClassLoader(reinterpret_cast(&const_cast(classObj))); + if (classLoader == nullptr) { + classLoader = MRT_GetBootClassLoader(); + } + if (classLoader != nullptr) { + RC_LOCAL_INC_REF(classLoader); + } + return MObject::JniCast(classLoader); // inc ref by runtime and dec ref by caller +} + +static bool CheckNewInstanceAccess(const MClass &classObj) { + uint32_t mod = classObj.GetModifier(); + uint32_t flag = classObj.GetFlag(); + if (modifier::IsAbstract(mod) || modifier::IsInterface(mod) || + modifier::IsArrayClass(flag) || modifier::IsPrimitiveClass(flag)) { + std::string classPrettyName; + std::ostringstream msg; + classObj.GetPrettyClass(classPrettyName); + msg << classPrettyName << " cannot be instantiated"; + MRT_ThrowNewException("java/lang/InstantiationException", msg.str().c_str()); + return false; + } + // check access the class. + if (!modifier::IsPublic(mod)) { + MClass *callerClass = reflection::GetCallerClass(1); + if (callerClass && !reflection::CanAccess(classObj, *callerClass)) { + std::string callerClassStr, classObjStr, msg; + callerClass->GetPrettyClass(callerClassStr); + classObj.GetPrettyClass(classObjStr); + msg = classObjStr + " is not accessible from " + callerClassStr; + MRT_ThrowNewException("java/lang/IllegalAccessException", msg.c_str()); + return false; + } + } + return true; +} + +MObject *ReflectClassNewInstance(const MClass &classObj) { + if (!CheckNewInstanceAccess(classObj)) { + return nullptr; + } + + if (UNLIKELY(!classObj.InitClassIfNeeded())) { + return nullptr; + } + + // find constructor, an empty argument list + MethodMeta *constructor = classObj.GetDefaultConstructor(); + if (UNLIKELY(constructor == nullptr)) { + std::string classPrettyName; + std::ostringstream msg; + classObj.GetPrettyClass(classPrettyName); + msg << classPrettyName << " has no zero argument constructor"; + MRT_ThrowNewException("java/lang/InstantiationException", msg.str().c_str()); + return nullptr; + } + + // Invoke the string allocator to return an empty string for the string class. + if (classObj.IsStringClass()) { + return MString::NewEmptyStringObject(); + } + + { + ScopedHandles sHandles; + ObjHandle newInstance(MObject::NewObject(classObj)); + // Verify that we can access the constructor. 
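The non-public branch below defers to reflection::VerifyAccess, which presumably enforces the standard Java member-access rule (JLS 6.6). A minimal sketch of that rule follows; the helper, its parameters, and the masks are illustrative only (the mask values are the standard JVM ACC_* flags, which may differ from this runtime's internal encoding), not the runtime's actual implementation.

#include <cstdint>

// Illustration only: classic Java access decision for a member with modifier word 'mod'.
constexpr uint32_t kAccPublic = 0x0001;     // JVM ACC_PUBLIC
constexpr uint32_t kAccPrivate = 0x0002;    // JVM ACC_PRIVATE
constexpr uint32_t kAccProtected = 0x0004;  // JVM ACC_PROTECTED

static bool CanAccessMember(uint32_t mod, bool sameClass, bool samePackage, bool callerIsSubclass) {
  if ((mod & kAccPublic) != 0) {
    return true;  // public members are always accessible
  }
  if ((mod & kAccPrivate) != 0) {
    return sameClass;  // private: only from the declaring class
  }
  if ((mod & kAccProtected) != 0) {
    return samePackage || callerIsSubclass;  // protected: same package or a subclass
  }
  return samePackage;  // package-private: same package only
}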
+ if (!constructor->IsPublic()) { + MClass *callerClass = nullptr; + if (!reflection::VerifyAccess(newInstance(), &classObj, constructor->GetMod(), callerClass, 1)) { + std::string constructorStr, msg, callerClassStr; + constructor->GetPrettyName(true, constructorStr); + callerClass->GetPrettyClass(callerClassStr); + msg = constructorStr + " is not accessible from " + callerClassStr; + MRT_ThrowNewException("java/lang/IllegalAccessException", msg.c_str()); + return nullptr; + } + } + + // constructor no return value + (void)constructor->InvokeJavaMethodFast(newInstance()); + if (!MRT_HasPendingException()) { + return newInstance.ReturnObj(); + } + } + + if (UNLIKELY(MRT_HasPendingException())) { + MRT_CheckThrowPendingExceptionUnw(); + } + return nullptr; +} + +jboolean MRT_ReflectClassIsPrimitive(jclass classObj) { + MClass *mClassObj = MClass::JniCastNonNull(classObj); + return mClassObj->IsPrimitiveClass(); +} + +jboolean MRT_ReflectClassIsInterface(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + return mClass->IsInterface(); +} + +jboolean MRT_ReflectClassIsArray(jclass classObj) { + MClass *mClass = MClass::JniCastNonNull(classObj); + return mClass->IsArrayClass(); +} + +jboolean MRT_ReflectClassIsAssignableFrom(jclass superClass, jclass subClass) { + MClass *mSuperClass = MClass::JniCastNonNull(superClass); + MClass *mSubClass = MClass::JniCastNonNull(subClass); + return mSuperClass->IsAssignableFrom(*mSubClass); +} + +#if PLATFORM_SDK_VERSION >= 27 +MObject *ReflectClassGetPrimitiveClass(const MClass &classObj __attribute__((unused)), const MString *name) { + if (name == nullptr) { + MRT_ThrowNullPointerExceptionUnw(); + } + std::string className = name->GetChars(); + char hash = className[0] ^ ((className[1] & 0x10) << 1); + switch (hash) { + case 'i': + return WellKnown::GetMClassI(); + case 'f': + return WellKnown::GetMClassF(); + case 'B': + return WellKnown::GetMClassB(); + case 'c': + return WellKnown::GetMClassC(); + case 's': + return WellKnown::GetMClassS(); + case 'l': + return WellKnown::GetMClassJ(); + case 'd': + return WellKnown::GetMClassD(); + case 'b': + return WellKnown::GetMClassZ(); + case 'v': + return WellKnown::GetMClassV(); + default: + MRT_ThrowNewException("java/lang/ClassNotFoundException", nullptr); + } + BUILTIN_UNREACHABLE(); +} +#endif + +jboolean MRT_ReflectIsString(const jclass classObj) { + return MClass::JniCastNonNull(classObj)->IsStringClass(); +} + +jboolean MRT_ReflectIsClass(const jobject obj) { + return MObject::JniCastNonNull(obj)->IsClass() ? JNI_TRUE : JNI_FALSE; +} + +jboolean MRT_ReflectIsInstanceOf(jobject jobj, jclass javaClass) { + MObject *mObj = MObject::JniCastNonNull(jobj); + MClass *mJavaClass = MClass::JniCastNonNull(javaClass); + return mObj->IsInstanceOf(*mJavaClass) ? JNI_TRUE : JNI_FALSE; +} + +jint MRT_ReflectGetObjSize(const jclass classObj) { + return static_cast(MClass::JniCastNonNull(classObj)->GetObjectSize()); +} + +jint MRT_ReflectGetArrayIndexScaleForComponentType(jclass componentClassObj) { + MClass *classObj = MClass::JniCastNonNull(componentClassObj); + return classObj->IsPrimitiveClass() ? 
classObj->GetObjectSize() : MObject::GetReffieldSize(); +} + +jclass MRT_ReflectGetOrCreateArrayClassObj(jclass elementClass) { + MClass *elementCls = MClass::JniCastNonNull(elementClass); + MClass *arrayClass = maplert::WellKnown::GetCacheArrayClass(*elementCls); + return arrayClass->AsJclass(); +} + +char *MRT_ReflectGetClassCharName(const jclass javaClass) { + return MClass::JniCastNonNull(javaClass)->GetName(); +} + +jobject MRT_ReflectAllocObject(const jclass javaClass, bool isJNI) { + MClass *mClass = MClass::JniCastNonNull(javaClass); + MObject *o = MObject::NewObject(*mClass, isJNI); + return o->AsJobject(); +} + +jobject MRT_ReflectNewObjectA(const jclass javaClass, const jmethodID mid, const jvalue *args, bool isJNI) { + const MethodMeta *constructor = MethodMeta::JniCastNonNull(mid); + const MClass *klass = MClass::JniCastNonNull(javaClass); + MObject *obj = MObject::NewObject(*klass, *constructor, *args, isJNI); + if (obj == nullptr) { + return nullptr; + } + return obj->AsJobject(); +} + +#ifdef DISABLE_MCC_FAST_FUNCS +// used in compiler, be careful if change name +void MCC_Array_Boundary_Check(jobjectArray javaArray, jint index) { + if (javaArray == nullptr) { + MRT_ThrowNullPointerExceptionUnw(); + return; + } + MArray *mArrayObj = MArray::JniCast(javaArray); + int32_t length = static_cast(mArrayObj->GetLength()); + if (index < 0) { + MRT_ThrowArrayIndexOutOfBoundsException(length, index); + return; + } else if (index >= length) { + MRT_ThrowArrayIndexOutOfBoundsException(length, index); + return; + } +} +#endif // DISABLE_MCC_FAST_FUNCS + +void MCC_ThrowCastException(jclass targetClass, jobject castObj) { + std::ostringstream msg; + MClass *mTargetClass = MClass::JniCast(targetClass); + MObject *mCastObj = MObject::JniCast(castObj); + if (mCastObj == nullptr || mTargetClass == nullptr) { + return; + } + std::string targetClassName, objClassName; + mTargetClass->GetTypeName(targetClassName); + mCastObj->GetClass()->GetTypeName(objClassName); + msg << objClassName << " cannot be cast to " << targetClassName; + MRT_ThrowClassCastExceptionUnw(msg.str().c_str()); +} + +static void ReflectThrowCastException(const MClass *sourceInfo, const MObject &targetObject, int dim) { + std::ostringstream msg; + if (sourceInfo != nullptr) { + std::string sourceName, targerName; + sourceInfo->GetTypeName(sourceName); + while (dim--) { + sourceName += "[]"; + } + targetObject.GetClass()->GetTypeName(targerName); + msg << targerName << " cannot be cast to " << sourceName; + } + MRT_ThrowClassCastExceptionUnw(msg.str().c_str()); +} + +void MCC_Reflect_ThrowCastException(const jclass sourceinfo, jobject targetObject, jint dim) { + MObject *mTargetObject = MObject::JniCast(targetObject); + if (mTargetObject == nullptr) { + return; + } + ReflectThrowCastException(MClass::JniCast(sourceinfo), *mTargetObject, dim); +} + +void MCC_Reflect_Check_Arraystore(jobject arrayObject, jobject elemObject){ + if (UNLIKELY(elemObject == nullptr || arrayObject == nullptr)) { + return; + } + MObject *mArrayObject = MObject::JniCast(arrayObject); + MObject *mElemObject = MObject::JniCast(elemObject); + MClass *arryElemInfo = mArrayObject->GetClass()->GetComponentClass(); + if (arryElemInfo == nullptr) { + return; + } + if (!mElemObject->IsInstanceOf(*arryElemInfo)) { + std::string elemName; + mElemObject->GetClass()->GetTypeName(elemName); + MRT_ThrowArrayStoreExceptionUnw(elemName.c_str()); + } +} + +void MCC_Reflect_Check_Casting_Array(jclass sourceClass, jobject targetObject, jint arrayDim) { + if 
(UNLIKELY(targetObject == nullptr || sourceClass == nullptr)) { + return; + } + MClass *sourceInfo = MClass::JniCast(sourceClass); + MObject *mTargetObject = MObject::JniCast(targetObject); + MClass *targetInfo = mTargetObject->GetClass(); + int dim = arrayDim; + while (arrayDim--) { + uint32_t targetFlag = targetInfo->GetFlag(); + if (!modifier::IsArrayClass(targetFlag)) { + ReflectThrowCastException(sourceInfo, *mTargetObject, dim); + } + targetInfo = targetInfo->GetComponentClass(); + } + DCHECK(targetInfo != nullptr) << "MCC_Reflect_Check_Casting_Array: targetInfo is nullptr!" << maple::endl; + if (!sourceInfo->IsAssignableFrom(*targetInfo)) { + ReflectThrowCastException(sourceInfo, *mTargetObject, dim); + } +} + +void MCC_Reflect_Check_Casting_NoArray(jclass sourceClass, jobject targetObject) { + MClass *sourceInfo = MClass::JniCastNonNull(sourceClass); + MObject *mTargetObject = MObject::JniCastNonNull(targetObject); + + if (!mTargetObject->IsInstanceOf(*sourceInfo)) { + ReflectThrowCastException(sourceInfo, *mTargetObject, 0); + } + return; +} + +// used in compiler, be careful if change name +jboolean MCC_Reflect_IsInstance(jobject sourceClass, jobject targetObject) { + if (sourceClass == nullptr || targetObject == nullptr) { + return JNI_FALSE; + } + MObject *o = MObject::JniCastNonNull(targetObject); + MClass *c = MClass::JniCastNonNull(sourceClass); + return o->IsInstanceOf(*c); +} + +extern "C" +uintptr_t MCC_getFuncPtrFromItabSlow64(const MObject *obj, uintptr_t hashCode, + uintptr_t secondHashCode, const char *signature) { + if (UNLIKELY(obj == nullptr)) { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassNullPointerException(), "unknown reason"); + return 0; + } + + const MClass *klass = obj->GetClass(); + auto itab = reinterpret_cast(klass->GetItab()); + auto addr = reinterpret_cast(itab + hashCode); + if (*addr != 0) { + return *addr; + } else { + return MCC_getFuncPtrFromItabSecondHash64(itab, secondHashCode, signature); + } +} + +static uintptr_t SearchConflictTable64(const uintptr_t *itabConflictBegin, const char *signature) { + constexpr uint8_t addrOffset = 2; + constexpr uint8_t slotSize = 2; + // search conflict table + const uintptr_t *conflict = + itabConflictBegin + (((*itabConflictBegin) & kLowBitOfItabLength) * slotSize) + addrOffset; +#if defined(__aarch64__) + // Get the high 32bit and set the highest bit zero + uintptr_t conflictLength = (itabConflictBegin[0] & kHighBitOfItabLength) >> 32; +#elif defined(__arm__) + // Get the high 16bit and set the highest bit zero + uintptr_t conflictLength = (itabConflictBegin[0] & kHighBitOfItabLength) >> 16; +#endif + for (uintptr_t index = 0; index < conflictLength * slotSize; index += slotSize) { + if (!strcmp(signature, reinterpret_cast(*(conflict + index)))) { + uintptr_t addr = conflict[index + 1]; + return addr; + } + } + MRT_ThrowNewExceptionUnw("java/lang/AbstractMethodError", signature); + return 0; +} + +extern "C" +uintptr_t MCC_getFuncPtrFromItabSecondHash64(const uintptr_t *itab, uintptr_t hashCode, const char *signature) { + DCHECK(itab != nullptr) << "MCC_getFuncPtrFromItabSecondHash64: itab is nullptr!" << maple::endl; + DCHECK(signature != nullptr) << "MCC_getFuncPtrFromItabSecondHash64: signature is nullptr!" 
<< maple::endl; + constexpr uint8_t itabConflictBeginOffset = 2; + constexpr uint8_t slotSize = 2; + auto itabConflictBegin = reinterpret_cast(*(itab + kItabFirstHashSize)); + auto itabActualBegin = itabConflictBegin + itabConflictBeginOffset; + if (itabConflictBegin == nullptr) { + MRT_ThrowNewExceptionUnw("java/lang/AbstractMethodError", signature); + return 0; + } + + // search normal + uintptr_t low = 0; + // Get the low 32bit + uintptr_t high = itabConflictBegin[0] & kLowBitOfItabLength; + uintptr_t index = 0; + while (low <= high) { + index = (low + high) >> 1; + uintptr_t srchash = itabActualBegin[index << 1]; + if (srchash == hashCode) { + // find + break; + } + if (srchash < hashCode) { + low = index + 1; + } else { + high = index - 1; + } + } + uintptr_t addr = itabActualBegin[index * slotSize + 1]; + if (LIKELY(addr != 1)) { + return addr; + } + return SearchConflictTable64(itabConflictBegin, signature); +} + +#ifndef USE_ARM32_MACRO +#ifdef USE_32BIT_REF +extern "C" +uintptr_t MCC_getFuncPtrFromItabInlineCache(uint64_t *cacheEntryAddr, const MClass *klass, + uint32_t hashCode, uint32_t secondHashCode, const char *signature) { + if (UNLIKELY(cacheEntryAddr == nullptr || klass == nullptr || signature == nullptr)) { + MRT_ThrowNewException("java/lang/NullPointerException", "cacheEntryAddr or klass or signature == null"); + return 0; + } + uint32_t *itab = reinterpret_cast(klass->GetItab()); + uint32_t *addr = itab + hashCode; + uint64_t result = *addr; + if (result == 0) { +#if defined(__arm__) + result = MCC_getFuncPtrFromItabSecondHash32(itab, secondHashCode, signature); +#else // ~__arm__ + result = MCC_getFuncPtrFromItab(itab, secondHashCode, signature); +#endif // ~__arm__ + } + *cacheEntryAddr = (reinterpret_cast(klass)) | (result << leftShift32Bit); + return static_cast(result); +} +#endif // ~USE_32BIT_REF +#endif // ~USE_ARM32_MACRO + +extern "C" +uintptr_t MCC_getFuncPtrFromItabSlow32(const MObject *obj, uint32_t hashCode, uint32_t secondHashCode, + const char *signature) { + if (UNLIKELY(obj == nullptr)) { + ThrowNewExceptionInternalTypeUnw(*WellKnown::GetMClassNullPointerException(), "unknown reason"); + return 0; + } + + const MClass *klass = obj->GetClass(); + uint32_t *itab = reinterpret_cast(klass->GetItab()); + uint32_t *addr = itab + hashCode; + if (*addr != 0) { + return *addr; + } else { + return MCC_getFuncPtrFromItabSecondHash32(itab, secondHashCode, signature); + } +} + +static uintptr_t SearchConflictTable32(const uint32_t *itabConflictBegin, const char *signature) { + constexpr uint8_t addrOffset = 2; + constexpr uint8_t slotSize = 2; + // search conflict table + uint32_t conflictIndex = ((*itabConflictBegin) & static_cast(kLowBitOfItabLength)) * slotSize + addrOffset; + auto conflict = itabConflictBegin + conflictIndex; + uint32_t conflictLength = 0; + // This check is for compatible the old version + if (itabConflictBegin[0] & 0x80000000) { + conflictLength = (itabConflictBegin[0] & kHighBitOfItabLength) >> 16; // high 16 bit is conflictLength + } else { + // The max value + conflictLength = 0x7fffffff; + } + for (uint32_t index = 0; index < conflictLength * slotSize; index += slotSize) { + if (!strcmp(signature, reinterpret_cast(*(conflict + index)))) { + return *(conflict + index + 1); + } + } + MRT_ThrowNewExceptionUnw("java/lang/AbstractMethodError", signature); + return 0; +} + +extern "C" +uintptr_t MCC_getFuncPtrFromItabSecondHash32(const uint32_t *itab, uint32_t hashCode, const char *signature) { + DCHECK(itab != nullptr) << 
"MCC_getFuncPtrFromItabSecondHash32: itab is nullptr!" << maple::endl; + DCHECK(signature != nullptr) << "MCC_getFuncPtrFromItabSecondHash32: signature is nullptr!" << maple::endl; + constexpr uint8_t itabConflictBeginOffset = 2; + constexpr uint8_t slotSize = 2; + uint32_t *itabConflictBegin = reinterpret_cast(*(itab + kItabFirstHashSize)); + uint32_t *itabActualBegin = itabConflictBegin + itabConflictBeginOffset; + if (itabConflictBegin == nullptr) { + MRT_ThrowNewExceptionUnw("java/lang/AbstractMethodError", signature); + return 0; + } + // search normal + uint32_t low = 0; + uint32_t high = itabConflictBegin[0] & kLowBitOfItabLength; + uint32_t index = 0; + while (low <= high) { + index = (low + high) >> 1; + uint32_t srchash = itabActualBegin[index << 1]; + if (srchash == hashCode) { + break; + } + (srchash < hashCode) ? (low = index + 1) : (high = index - 1); + } + uint32_t actualIndex = index * slotSize + 1; + uint32_t retFunc = *(itabActualBegin + actualIndex); + if (LIKELY(retFunc != 1)) { + return retFunc; + } + return SearchConflictTable32(itabConflictBegin, signature); +} + +#if defined(__arm__) +extern "C" uintptr_t MCC_getFuncPtrFromItab(const uint32_t *itab, const char *signature, uint32_t hashCode) { + return MCC_getFuncPtrFromItabSecondHash32(itab, hashCode, signature); +} +#endif + +template +static uintptr_t GetFuncPtrFromVtab(const MObject *obj, uint32_t offset) { + if (obj == nullptr) { + MRT_ThrowNullPointerExceptionUnw(); + return 0; + } + + const MClass *klass = obj->GetClass(); + T *vtab = reinterpret_cast(klass->GetVtab()); + T *funcAddr = vtab + offset; + return *funcAddr; +} + +extern "C" uintptr_t MCC_getFuncPtrFromVtab64(const MObject *obj, uint32_t offset) { + return GetFuncPtrFromVtab(obj, offset); +} + +extern "C" uintptr_t MCC_getFuncPtrFromVtab32(const MObject *obj, uint32_t offset) { + return GetFuncPtrFromVtab(obj, offset); +} + +jboolean MRT_ReflectIsInit(const jclass classObj) { + return (MRT_ClassInitialized(classObj) ? JNI_TRUE : JNI_FALSE); +} + +jboolean MRT_ReflectInitialized(jclass classObj) { + MClass *classInfo = MClass::JniCastNonNull(classObj); + if (classInfo == nullptr) { + LOG(ERROR) << "NULL class object!" << maple::endl; + return JNI_FALSE; + } + ClassInitState state = MRT_TryInitClass(*classInfo); + if ((state == kClassUninitialized) || (state == kClassInitFailed)) { + LOG(ERROR) << "MRT_ReflectInitialized failed! 
class: " << classInfo->GetName() << maple::endl; + return JNI_FALSE; + } + return JNI_TRUE; +} + +bool MRT_IsValidOffHeapObject(jobject obj __MRT_UNUSED) { +#if __MRT_DEBUG + MObject *mObj = MObject::JniCastNonNull(obj); + MClass *clazz = mObj->GetClass(); + // NEED: using interface from perm-space allocator +#ifdef __ANDROID__ + return ((clazz == WellKnown::GetMClassString()) || + (MRT_IsMetaObject(obj)) || + (MRT_IsPermJavaObj(mObj->AsUintptr()))); +#else // !__ANDROID__ + return ((MRT_IsMetaObject(obj)) || + (clazz == WellKnown::GetMClassString()) || + (clazz == WellKnown::GetMClassAI()) || + (MRT_IsPermJavaObj(mObj->AsUintptr()))); // used by "native_binding_utils.cpp" +#endif // __ANDROID__ +#else + return true; +#endif // __MRT_DEBUG +} + +bool MRT_IsMetaObject(jobject obj) { + MObject *mObj = MObject::JniCastNonNull(obj); + return mObj->GetClass() == WellKnown::GetMClassClass(); +} + +bool MRT_IsValidClass(jclass jclazz) { + MClass *mClassObj = MClass::JniCastNonNull(jclazz); + return mClassObj->GetClass() == WellKnown::GetMClassClass(); +} + +bool MRT_IsValidMethod(jmethodID jmid) { + MethodMeta *methodMeta = MethodMeta::JniCastNonNull(jmid); + MClass *declaringClass = methodMeta->GetDeclaringClass(); + return declaringClass->GetClass() == WellKnown::GetMClassClass(); +} + +bool MRT_IsValidField(jfieldID jfid) { + FieldMeta *fieldMeta = FieldMeta::JniCastNonNull(jfid); + MClass *declaringClass = fieldMeta->GetDeclaringclass(); + return declaringClass->GetClass() == WellKnown::GetMClassClass(); +} + +// guard some macro +#if defined(__aarch64__) +GUARD_OFFSETOF_MEMBER(ClassMetadata, monitor, kLockWordOffset); +#elif defined(__arm__) +#endif + +static std::map stringToInt; +void ArrayMapStringIntInit () { +} + +void MCC_ArrayMap_String_Int_put(jstring key, jint value) { + MString *input = MString::JniCast(key); + unsigned long size = stringToInt.size(); + size_t objSize = MRT_GetStringObjectSize(key); + MString *localJstring = MObject::Cast(calloc(sizeof(char), objSize + 1)); + if (localJstring != nullptr) { + if (memcpy_s(reinterpret_cast(localJstring), objSize, + reinterpret_cast(input), objSize) != EOK) { + LOG(ERROR) << "copy string" << maple::endl; + } + stringToInt[input] = value; + if (size == stringToInt.size()) { + free(localJstring); + } + } +} + +jint MCC_ArrayMap_String_Int_size() { + return static_cast(stringToInt.size()); +} + +jint MCC_ArrayMap_String_Int_getOrDefault(jstring key, jint defaultValue) { + MString *input = MString::JniCast(key); + auto it = stringToInt.find(input); + if (it != stringToInt.end()) { + return it->second; + } else { + return defaultValue; + } +} + +void MCC_ArrayMap_String_Int_clear() { + std::vector tmpVector; + for (auto it = stringToInt.begin(); it != stringToInt.end(); ++it) { + tmpVector.push_back(it->first); + } + stringToInt.clear(); + for (auto it = tmpVector.begin(); it != tmpVector.end(); ++it) { + free(*it); + *it = nullptr; + } +} + +size_t MRT_ReflectClassGetComponentSize(jclass classObj) { + MClass *mClassObj = MClass::JniCastNonNull(classObj); + return mClassObj->GetComponentSize(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_reflection_constructor.cpp b/src/mrt/maplert/src/mrt_reflection_constructor.cpp new file mode 100644 index 0000000000..88d7e51169 --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_constructor.cpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_reflection_constructor.h" +#include "mrt_reflection_method.h" +#include "methodmeta_inline.h" +#include "mmethod_inline.h" +#include "exception/mrt_exception.h" +namespace maplert { +jobject MRT_ReflectConstructorNewInstanceFromSerialization(jclass ctorClass, const jclass allocClass) { + MClass *ctorMClass = MClass::JniCastNonNull(ctorClass); + MethodMeta *ctor = ctorMClass->GetDeclaredConstructor("()V"); + if (ctor == nullptr) { + return nullptr; + } + MObject *newObject = MObject::NewObject(*MClass::JniCastNonNull(allocClass), ctor); + return newObject->AsJobject(); +} + +static MObject *ConstructorNewInstance(const MMethod &methodObject, const MClass &classObj, + const MethodMeta &methodMeta, const MArray *javaArgs, uint8_t numFrames) { + ScopedHandles sHandles; + if (classObj.IsStringClass()) { + MethodMeta *stringFactroyCon = WellKnown::GetStringFactoryConstructor(methodMeta); + ObjHandle stringFactroyConObject(MMethod::NewMMethodObject(*stringFactroyCon)); + bool isAccessible = methodObject.IsAccessible(); + stringFactroyConObject->SetAccessible(isAccessible); + MObject *stringObj = ReflectInvokeJavaMethodFromArrayArgsJobject(nullptr, + *stringFactroyConObject(), javaArgs, numFrames); + return stringObj; + } + ObjHandle o(MObject::NewObject(classObj)); + ReflectInvokeJavaMethodFromArrayArgsVoid(o(), methodObject, javaArgs, numFrames); + if (UNLIKELY(MRT_HasPendingException())) { + return nullptr; + } + return o.ReturnObj(); +} + +static MObject *ConstructorNewInstance0(const MMethod &methodObject, const MArray *argsArray) { + MethodMeta *methodMeta = methodObject.GetMethodMeta(); + MClass *classObj = methodMeta->GetDeclaringClass(); + if (UNLIKELY(classObj->IsAbstract())) { + std::string classPrettyName, str; + std::ostringstream msg; + classObj->GetPrettyClass(classPrettyName); + str = (classObj->IsInterface()) ? "interface " : "abstract class "; + msg << "Can't instantiate " << str << classPrettyName; + MRT_ThrowNewException("java/lang/InstantiationException", msg.str().c_str()); + return nullptr; + } + + // Verify that we can access the class. + if (!methodObject.IsAccessible() && !classObj->IsPublic()) { + MClass *callerClass = reflection::GetCallerClass(2); // 2 means unwind step + if (callerClass && !reflection::CanAccess(*classObj, *callerClass)) { + std::string classPrettyName; + classObj->GetPrettyClass(classPrettyName); + if (classPrettyName == "java.lang.Class") { + LOG(WARNING) << "The dalvik.system.DexPathList$Element constructor is not accessible by " + "default. This is a temporary workaround for backwards compatibility " + "with class-loader hacks. Please update your application." 
<< maple::endl; + } else { + std::string classObjStr; + std::string callerStr; + std::string msg; + classObj->GetPrettyClass(classObjStr); + callerClass->GetPrettyClass(callerStr); + msg = classObjStr + " is not accessible from " + callerStr; + MRT_ThrowNewException("java/lang/IllegalAccessException", msg.c_str()); + return nullptr; + } + } + } + // NewInstance 2 java frame + return ConstructorNewInstance(methodObject, *classObj, *methodMeta, argsArray, 2); +} + +jobject MRT_ReflectConstructorNewInstance0(jobject javaMethod, jobjectArray javaArgs) { + MMethod *methodObject = MMethod::JniCastNonNull(javaMethod); + MArray *argsArray = MArray::JniCast(javaArgs); + return ConstructorNewInstance0(*methodObject, argsArray)->AsJobject(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_reflection_executable.cpp b/src/mrt/maplert/src/mrt_reflection_executable.cpp new file mode 100644 index 0000000000..b4b1e206a0 --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_executable.cpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_reflection_executable.h" +#include "mrt_reflection_method.h" +#include "mmethod_inline.h" +#include "mstring_inline.h" +#include "exception/mrt_exception.h" +#include "mrt_classloader_api.h" + +namespace maplert { +jobject MRT_ReflectExecutableGetAnnotationNative(jobject methodObj, jclass annoClass) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + string annoStr = methodMeta->GetAnnotation(); + if (annoStr.empty()) { + return nullptr; + } + annoStr = AnnoParser::RemoveParameterAnnoInfo(annoStr); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), methodMeta->GetDeclaringClass()); + std::unique_ptr parser(&annoParser); + MObject *ret = parser->AllocAnnoObject(methodMeta->GetDeclaringClass(), MClass::JniCast(annoClass)); + return ret->AsJobject(); +} + +jobjectArray MRT_ReflectExecutableGetDeclaredAnnotationsNative(jobject methodObj) { + MethodMeta *methodMeta = MMethod::JniCastNonNull(methodObj)->GetMethodMeta(); + string annoStr = methodMeta->GetAnnotation(); + if (annoStr.empty()) { + return MArray::NewObjectArray(0, *WellKnown::GetMClassAAnnotation())->AsJobjectArray(); + } + MClass *declCls = methodMeta->GetDeclaringClass(); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), declCls); + std::unique_ptr parser(&annoParser); + uint32_t annoNum = static_cast(parser->ParseNum(annoconstant::kValueInt)); + uint32_t annoMemberCntArray[annoNum]; + parser->InitAnnoMemberCntArray(annoMemberCntArray, annoNum); + ScopedHandles sHandles; + ObjHandle prepareAnnotations(MArray::NewObjectArray(annoNum, *WellKnown::GetMClassAAnnotation())); + uint32_t realCount = 0; + for (uint32_t j = 0; j < annoNum; ++j) { + string retArr = parser->ParseStr(annoconstant::kDefParseStrType); + if (parser->ExceptAnnotationJudge(retArr) || parser->IsVerificationAnno(retArr)) { + parser->SkipAnnoMember(annoMemberCntArray[j]); + 
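+      // Internal/verification annotations are not exposed to callers; their members were
+      // skipped above so the parser cursor stays aligned for the next annotation entry.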
continue; + } + + MClass *annotationInfo = MClass::JniCast(MRT_GetClassByContextClass(*declCls, retArr)); + if (annotationInfo == nullptr) { + MArray *nullArray = MArray::NewObjectArray(0, *WellKnown::GetMClassAAnnotation()); + return nullArray->AsJobjectArray(); + } +#ifdef __OPENJDK__ + ObjHandle hashMapInst( + parser->GenerateMemberValueHashMap(declCls, annotationInfo, annoMemberCntArray[j])); + ObjHandle proxyInstance( + parser->InvokeAnnotationParser(hashMapInst.AsObject(), annotationInfo)); +#else + ObjHandle proxyInstance( + parser->GenerateAnnotationProxyInstance(declCls, annotationInfo, annoMemberCntArray[j])); +#endif + prepareAnnotations->SetObjectElement(realCount, proxyInstance.AsObject()); + realCount++; + } + if (annoNum != realCount) { + MArray *retAnnotations = MArray::NewObjectArray(realCount, *WellKnown::GetMClassAAnnotation()); + for (uint32_t i = 0; i < realCount; ++i) { + MObject *obj = prepareAnnotations->GetObjectElementNoRc(i); + retAnnotations->SetObjectElement(i, obj); + } + return retAnnotations->AsJobjectArray(); + } + return static_cast(prepareAnnotations.ReturnObj())->AsJobjectArray(); +} + +jobject MRT_ReflectExecutableGetSignatureAnnotation(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + return methodMeta->GetSignatureAnnotation()->AsJobject(); +} + +jint MRT_ReflectExecutableCompareMethodParametersInternal(jobject methodObj1, jobject methodObj2) { + MMethod *method1 = MMethod::JniCastNonNull(methodObj1); + MethodMeta *methodMeta1 = method1->GetMethodMeta(); + MMethod *method2 = MMethod::JniCastNonNull(methodObj2); + MethodMeta *methodMeta2 = method2->GetMethodMeta(); + uint32_t thisSize = methodMeta1->GetParameterCount(); + uint32_t otherSize = methodMeta2->GetParameterCount(); + if (thisSize != otherSize) { + return (thisSize - otherSize); + } + char *signature1 = methodMeta1->GetSignature(); + char *signature2 = methodMeta2->GetSignature(); + uint32_t signature1Len = 0; + uint32_t signature2Len = 0; + while ((signature1[signature1Len] != '\0') && (signature1[signature1Len++] != ')')) {} + while ((signature2[signature2Len] != '\0') && (signature2[signature2Len++] != ')')) {} + uint32_t sigLenMin = (signature1Len > signature2Len) ? signature2Len : signature1Len; + int cmp = strncmp(signature1, signature2, sigLenMin); + if (cmp != 0) { + return (cmp < 0) ? 
-1 : 1; + } + return 0; +} + +jboolean MRT_ReflectExecutableIsAnnotationPresentNative(jobject methodObj, jclass annoObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + string annoStr = methodMeta->GetAnnotation(); + if (annoStr.empty() || annoObj == nullptr) { + return JNI_FALSE; + } + + char *annotationTypeName = MClass::JniCast(annoObj)->GetName(); + return AnnotationUtil::GetIsAnnoPresent(annoStr, annotationTypeName, reinterpret_cast(methodMeta), + MClass::JniCast(annoObj), annoconstant::kMethodAnnoPresent); +} + +jobject MRT_ReflectExecutableGetParameterAnnotationsNative(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + string executableAnnoStr = methodMeta->GetAnnotation(); + if (executableAnnoStr.empty()) { + return nullptr; + } + string annoStr = AnnoParser::GetParameterAnnotationInfo(executableAnnoStr); + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), methodMeta->GetDeclaringClass()); + unique_ptr parser(&annoParser); + return parser->GetParameterAnnotationsNative(methodMeta)->AsJobject(); +} + +jobject MRT_ReflectExecutableGetParameters0(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + string annotationStr = methodMeta->GetAnnotation(); + + AnnoParser &parser = AnnoParser::ConstructParser(annotationStr.c_str(), method->GetDeclaringClass()); + std::unique_ptr uniqueParser(&parser); + return uniqueParser->GetParameters0(method)->AsJobject(); +} + +jobjectArray MRT_ReflectExecutableGetParameterTypesInternal(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + std::vector parameterTypes; + methodMeta->GetParameterTypes(parameterTypes); + if (UNLIKELY(MRT_HasPendingException())) { + return nullptr; + } + uint32_t size = static_cast(parameterTypes.size()); + if (size == 0) { + return nullptr; + } + MArray *parameterTypesArray = MArray::NewObjectArray(size, *WellKnown::GetMClassAClass()); + uint32_t currentIndex = 0; + for (auto it = parameterTypes.begin(); it != parameterTypes.end(); ++it) { + parameterTypesArray->SetObjectElementNoRc(currentIndex++, *it); + } + return parameterTypesArray->AsJobjectArray(); +} + +jint MRT_ReflectExecutableGetParameterCountInternal(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + return static_cast(methodMeta->GetParameterCount()); +} + +jclass MRT_ReflectExecutableGetMethodReturnTypeInternal(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = method->GetMethodMeta(); + MClass *rtType = methodMeta->GetReturnType(); + if (UNLIKELY(rtType == nullptr)) { + CHECK(MRT_HasPendingException()) << "must pending exception." << maple::endl; + } + return rtType->AsJclass(); +} + +jstring MRT_ReflectExecutableGetMethodNameInternal(jobject methodObj) { + MMethod *method = MMethod::JniCastNonNull(methodObj); + return ReflectMethodGetName(*method)->AsJstring(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_reflection_field.cpp b/src/mrt/maplert/src/mrt_reflection_field.cpp new file mode 100644 index 0000000000..7ba7ed3523 --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_field.cpp @@ -0,0 +1,644 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_reflection_field.h" +#include "fieldmeta_inline.h" +#include "mfield_inline.h" +#include "exception/mrt_exception.h" +#include "mrt_reflection_api.h" +namespace maplert { +static inline char GetFieldPrimitiveType(const MField &field) { + MClass *fieldType = field.GetType(); + return primitiveutil::GetPrimitiveType(*fieldType); +} + +template +static void ThrowIllegalAccessException(const MField &field, const FieldMeta &fieldMeta, const MClass *callingClass) { + uint32_t modifier = fieldMeta.GetMod(); + MClass *declaringClass = field.GetDeclaringClass(); + std::ostringstream msg; + if (isSetFinal) { + std::string modifierStr, className; + modifier::JavaAccessFlagsToString(modifier, modifierStr); + std::string fieldName = fieldMeta.GetFullName(declaringClass, true); + declaringClass->GetTypeName(className); + msg << "Cannot set " << modifierStr << " field " << fieldName << " of class " << className; + } else { + std::string callingClassStr, fieldStr, modifierStr, declearClassStr, fieldstr; + callingClass->GetPrettyClass(callingClassStr); + modifier::JavaAccessFlagsToString(modifier, modifierStr); + declaringClass->GetPrettyClass(declearClassStr); + fieldstr = fieldMeta.GetFullName(declaringClass, true); + msg << "Class " << callingClassStr << " cannot access " << modifierStr << " field " << fieldstr << + " of class " << declearClassStr; + } + MRT_ThrowNewException("java/lang/IllegalAccessException", msg.str().c_str()); +} + +template +static ALWAYS_INLINE inline bool CheckIsAccess(const MField &field, const FieldMeta &fieldMeta, const MObject *obj) { + if (field.IsAccessible()) { + return true; + } + + uint32_t modifier = fieldMeta.GetMod(); + MClass *declaringClass = field.GetDeclaringClass(); + if (isSet && (fieldMeta.IsFinal())) { + ThrowIllegalAccessException(field, fieldMeta, nullptr); + return false; + } + MClass *callingClass = nullptr; + obj = fieldMeta.IsStatic() ? 
declaringClass : obj; + if (!reflection::VerifyAccess(obj, declaringClass, modifier, callingClass, 1)) { + ThrowIllegalAccessException(field, fieldMeta, callingClass); + return false; + } + return true; +} + +static inline MObject *GetObjectOrBoxPrimitive(const MObject *mObj, const FieldMeta &fieldMeta, char fieldType) { + size_t offset = fieldMeta.GetOffset(); + const bool isVolatile = fieldMeta.IsVolatile(); + mObj = fieldMeta.GetRealMObject(mObj); + if (UNLIKELY(mObj == nullptr)) { + return nullptr; + } + MObject *boxObject = nullptr; + switch (fieldType) { + case 'Z': + boxObject = primitiveutil::BoxPrimitiveJboolean(mObj->Load(offset, isVolatile)); + break; + case 'B': + boxObject = primitiveutil::BoxPrimitiveJbyte(mObj->Load(offset, isVolatile)); + break; + case 'C': + boxObject = primitiveutil::BoxPrimitiveJchar(mObj->Load(offset, isVolatile)); + break; + case 'I': + boxObject = primitiveutil::BoxPrimitiveJint(mObj->Load(offset, isVolatile)); + break; + case 'F': + boxObject = primitiveutil::BoxPrimitiveJfloat(mObj->Load(offset, isVolatile)); + break; + case 'J': + boxObject = primitiveutil::BoxPrimitiveJlong(mObj->Load(offset, isVolatile)); + break; + case 'D': + boxObject = primitiveutil::BoxPrimitiveJdouble(mObj->Load(offset, isVolatile)); + break; + case 'S': + boxObject = primitiveutil::BoxPrimitiveJshort(mObj->Load(offset, isVolatile)); + break; + case 'N': + boxObject = fieldMeta.GetObjectValue(mObj); + break; + default:; + } + return boxObject; +} + +static MObject *GetObjectValueOfField(const MField &field, const MObject *object) { + MClass *declaringClass = field.GetDeclaringClass(); + FieldMeta *fieldMeta = field.GetFieldMeta(); + if (fieldMeta->IsStatic()) { + if (!declaringClass->InitClassIfNeeded()) { + return nullptr; + } + } else { + if (!reflection::CheckIsInstaceOf(*declaringClass, object)) { + return nullptr; + } + } + if (!CheckIsAccess(field, *fieldMeta, object)) { + return nullptr; + } + + char fieldType = GetFieldPrimitiveType(field); + MObject *retObject = GetObjectOrBoxPrimitive(object, *fieldMeta, fieldType); + return retObject; +} + +template +static inline T GetPrimitiveValueOfField(const MField &field, const MObject *object) { + MClass *declaringClass = field.GetDeclaringClass(); + FieldMeta *fieldMeta = field.GetFieldMeta(); + if (fieldMeta->IsStatic()) { + if (!declaringClass->InitClassIfNeeded()) { + return 0; + } + } else { + if (!reflection::CheckIsInstaceOf(*declaringClass, object)) { + return 0; + } + } + if (!CheckIsAccess(field, *fieldMeta, object)) { + return 0; + } + + char fieldType = GetFieldPrimitiveType(field); + if (UNLIKELY(fieldType == 'N')) { + std::string fieldTypeName = fieldMeta->GetFullName(declaringClass, true); + std::string msg = "Not a primitive field: " + fieldTypeName; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + return 0; + } + + T value = fieldMeta->GetPrimitiveValue(object, fieldType); + if (fieldType == kPrimitiveType || + primitiveutil::CanConvertNarrowToWide(fieldType, kPrimitiveType)) { + return value; + } + + // Throw Exception + std::string srcDescriptor, dstDescriptor; + maple::Primitive::PrettyDescriptor_forField(maple::Primitive::GetType(fieldType), srcDescriptor); + maple::Primitive::PrettyDescriptor_forField(maple::Primitive::GetType(kPrimitiveType), dstDescriptor); + std::ostringstream msg; + msg << "Invalid primitive conversion from " << srcDescriptor << " to " << dstDescriptor; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.str().c_str()); + return 0; +} + 
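+// The typed getters below are thin wrappers that instantiate GetPrimitiveValueOfField with the
+// matching primitive tag; a widening conversion is accepted via CanConvertNarrowToWide, anything
+// else raises IllegalArgumentException. Illustrative sketch only (not part of this patch);
+// `field` and `obj` are assumed to wrap an int-typed instance field and its owning object:
+//   int64_t widened = ReflectGetFieldNativeInt64(*field, obj);  // int -> long, widening allowed
+//   int8_t narrowed = ReflectGetFieldNativeInt8(*field, obj);   // int -> byte, throws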
+uint8_t ReflectGetFieldNativeUint8(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +int8_t ReflectGetFieldNativeInt8(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +uint16_t ReflectGetFieldNativeUint16(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +double ReflectGetFieldNativeDouble(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +float ReflectGetFieldNativeFloat(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +int32_t ReflectGetFieldNativeInt32(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +int64_t ReflectGetFieldNativeInt64(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +int16_t ReflectGetFieldNativeInt16(const MField &field, const MObject *object) { + return GetPrimitiveValueOfField(field, object); +} + +MObject *ReflectGetFieldNativeObject(const MField &field, const MObject *object) { + return GetObjectValueOfField(field, object); +} + +static void UnBoxAndSetPrimitiveField(const FieldMeta &fieldMeta, char type, const MObject *obj, const MObject &val) { + const bool isVolatile = fieldMeta.IsVolatile(); + MObject *object = fieldMeta.GetRealMObject(obj); + DCHECK(object != nullptr); + uint32_t offset = fieldMeta.GetOffset(); + switch (type) { + case 'Z': { + uint8_t z = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, z, isVolatile); + break; + } + case 'B': { + int8_t b = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, b, isVolatile); + break; + } + case 'C': { + uint16_t c = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, c, isVolatile); + break; + } + case 'S': { + int16_t s = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, s, isVolatile); + break; + } + case 'I': { + int32_t i = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, i, isVolatile); + break; + } + case 'F': { + float f = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, f, isVolatile); + break; + } + case 'J': { + int64_t l = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, l, isVolatile); + break; + } + case 'D': { + double d = primitiveutil::UnBoxPrimitive(val); + object->Store(offset, d, isVolatile); + break; + } + default: + LOG(ERROR) << "UnBoxAndSetPrimitiveField fail" << maple::endl; + } +} + +static inline void SetObjectOrUnBoxPrimitive(const MField &field, MObject *object, const MObject *value) { + FieldMeta *fieldMeta = field.GetFieldMeta(); + MClass *fieldType = field.GetType(); + MClass *declaringClass = field.GetDeclaringClass(); + char fieldPrimitiveType = primitiveutil::GetPrimitiveType(*fieldType); + if (fieldPrimitiveType == 'N') { + // ref, set object + if (value == nullptr || value->IsInstanceOf(*fieldType)) { + fieldMeta->SetObjectValue(object, value); + } else { + std::string fieldTypeName, valueClassName; + std::string fieldName = fieldMeta->GetFullName(declaringClass, false); + fieldType->GetTypeName(fieldTypeName); + value->GetClass()->GetTypeName(valueClassName); + std::string msg = "field " + fieldName + " has type " + fieldTypeName + ", got " + valueClassName; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + } + return; + } + + // Primitive, UnBox first, then set Primitive + if (UNLIKELY(value == nullptr)) { + 
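+    // A primitive-typed field cannot be set from a null box (nothing to unbox), so this is
+    // reported as an IllegalArgumentException below.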
std::string fieldTypeName; + std::string fieldName = fieldMeta->GetFullName(declaringClass, false); + fieldType->GetTypeName(fieldTypeName); + std::string msg = "field " + fieldName + " has type " + fieldTypeName + ", got null"; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + return; + } + + MClass *valueClass = value->GetClass(); + char valuePrimitiveType = primitiveutil::GetPrimitiveTypeFromBoxType(*valueClass); + if (valuePrimitiveType == 'N') { + // not Box object + std::string fieldTypeName, valueClassName; + std::string fieldName = fieldMeta->GetFullName(declaringClass, false); + fieldType->GetTypeName(fieldTypeName); + valueClass->GetTypeName(valueClassName); + std::string msg = "field " + fieldName + " has type " + fieldTypeName + ", got " + valueClassName; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + return; + } + + // check can convert + if (!primitiveutil::CanConvertNarrowToWide(valuePrimitiveType, fieldPrimitiveType)) { + std::string srcDescriptor, dstDescriptor; + maple::Primitive::PrettyDescriptor_forField(maple::Primitive::GetType(valuePrimitiveType), srcDescriptor); + maple::Primitive::PrettyDescriptor_forField(maple::Primitive::GetType(fieldPrimitiveType), dstDescriptor); + std::ostringstream msg; + msg << "Invalid primitive conversion from " << srcDescriptor << " to " << dstDescriptor; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.str().c_str()); + return; + } + + // set Primitive + UnBoxAndSetPrimitiveField(*fieldMeta, fieldPrimitiveType, object, *value); +} + +static bool CheckFieldIsValid(const MField &field, const MObject *object) { + MClass *declaringClass = field.GetDeclaringClass(); + FieldMeta *fieldMeta = field.GetFieldMeta(); + if (fieldMeta->IsStatic()) { + if (!declaringClass->InitClassIfNeeded()) { + return false; + } + } else { + if (!reflection::CheckIsInstaceOf(*declaringClass, object)) { + return false; + } + } + if (!CheckIsAccess(field, *fieldMeta, object)) { + return false; + } + return true; +} +static void SetObjectValueOfField(const MField &field, MObject *object, const MObject *value) { + if (!CheckFieldIsValid(field, object)) { + return; + } + SetObjectOrUnBoxPrimitive(field, object, value); +} + +template +static void SetPrimitiveValueOfField(const MField &field, MObject *object, valueType value) { + MClass *declaringClass = field.GetDeclaringClass(); + FieldMeta *fieldMeta = field.GetFieldMeta(); + if (!CheckFieldIsValid(field, object)) { + return; + } + + char fieldPrimitiveType = GetFieldPrimitiveType(field); + if (UNLIKELY(fieldPrimitiveType == 'N')) { + std::string fieldTypeName = fieldMeta->GetFullName(declaringClass, true); + std::string msg = "Not a primitive field: " + fieldTypeName; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.c_str()); + return; + } + + // check can convert + if (!primitiveutil::CanConvertNarrowToWide(srcType, fieldPrimitiveType)) { + std::string srcDescriptor, dstDescriptor; + maple::Primitive::PrettyDescriptor_forField(maple::Primitive::GetType(srcType), srcDescriptor); + maple::Primitive::PrettyDescriptor_forField(maple::Primitive::GetType(fieldPrimitiveType), dstDescriptor); + std::ostringstream msg; + msg << "Invalid primitive conversion from " << srcDescriptor << " to " << dstDescriptor; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.str().c_str()); + return; + } + fieldMeta->SetPrimitiveValue(object, fieldPrimitiveType, value); +} + +void ReflectSetFieldNativeUint8(const MField &fieldObj, 
MObject *obj, uint8_t z) { + SetPrimitiveValueOfField(fieldObj, obj, z); +} + +void ReflectSetFieldNativeInt8(const MField &fieldObj, MObject *obj, int8_t b) { + SetPrimitiveValueOfField(fieldObj, obj, b); +} + +void ReflectSetFieldNativeUint16(const MField &fieldObj, MObject *obj, uint16_t c) { + SetPrimitiveValueOfField(fieldObj, obj, c); +} + +void ReflectSetFieldNativeInt16(const MField &fieldObj, MObject *obj, int16_t s) { + SetPrimitiveValueOfField(fieldObj, obj, s); +} + +void ReflectSetFieldNativeInt32(const MField &fieldObj, MObject *obj, int32_t i) { + SetPrimitiveValueOfField(fieldObj, obj, i); +} + +void ReflectSetFieldNativeInt64(const MField &fieldObj, MObject *obj, int64_t j) { + SetPrimitiveValueOfField(fieldObj, obj, j); +} + +void ReflectSetFieldNativeFloat(const MField &fieldObj, MObject *obj, float f) { + SetPrimitiveValueOfField(fieldObj, obj, f); +} + +void ReflectSetFieldNativeDouble(const MField &fieldObj, MObject *obj, double d) { + SetPrimitiveValueOfField(fieldObj, obj, d); +} + +void ReflectSetFieldNativeObject(const MField &fieldObj, MObject *obj, const MObject *value) { + SetObjectValueOfField(fieldObj, obj, value); +} + +jint MRT_ReflectFieldGetOffset(jobject fieldObj) { + MField *f = MField::JniCastNonNull(fieldObj); +#ifdef __OPENJDK__ + FieldMeta *fieldMeta = f->GetFieldMeta(); + int32_t offset = fieldMeta->IsStatic() ? + static_cast((fieldMeta->GetStaticAddr() - fieldMeta->GetDeclaringclass()->AsUintptr())) : + static_cast(fieldMeta->GetOffset()); + return offset; +#else + return static_cast(f->GetOffset()); +#endif +} + +MObject *ReflectFieldGetSignatureAnnotation(const MField &fieldObj) { + FieldMeta *fieldMeta = fieldObj.GetFieldMeta(); + MObject *ret = fieldMeta->GetSignatureAnnotation(); + return ret; +} + +bool ReflectFieldIsAnnotationPresentNative(const MField &fieldObj, const MClass *annoObj) { + if (annoObj == nullptr) { + MRT_ThrowNewException("java/lang/NullPointerException", nullptr); + return false; + } + FieldMeta *fieldMeta = fieldObj.GetFieldMeta(); + string annoStr = fieldMeta->GetAnnotation(); + char *annotationTypeName = annoObj->GetName(); + if (annoStr.empty() || annotationTypeName == nullptr) { + return JNI_FALSE; + } + return AnnotationUtil::GetIsAnnoPresent(annoStr, annotationTypeName, reinterpret_cast(fieldMeta), + const_cast(annoObj), annoconstant::kFieldAnnoPresent); +} + +MObject *ReflectFieldGetAnnotation(const MField &fieldObj, const MClass *annoClass) { + FieldMeta *fieldMeta = fieldObj.GetFieldMeta(); + string annoStr = fieldMeta->GetAnnotation(); + if (annoStr.empty()) { + return nullptr; + } + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), fieldObj.GetDeclaringClass()); + std::unique_ptr parser(&annoParser); + MObject *ret = parser->AllocAnnoObject(fieldObj.GetDeclaringClass(), const_cast(annoClass)); + return ret; +} + +MObject *ReflectFieldGetDeclaredAnnotations(const MField &fieldObj) { + FieldMeta *fieldMeta = fieldObj.GetFieldMeta(); + string annoStr = fieldMeta->GetAnnotation(); + if (annoStr.empty()) { + return nullptr; + } + + MObject *ret = AnnotationUtil::GetDeclaredAnnotations(annoStr, fieldObj.GetDeclaringClass()); + return ret; +} + +MObject *ReflectFieldGetNameInternal(const MField &fieldObj) { + FieldMeta *fieldMeta = fieldObj.GetFieldMeta(); + char *fieldName = fieldMeta->GetName(); + MString *fieldNameObj = MString::InternUtf(std::string(fieldName)); + return fieldNameObj; +} + +FieldMeta *ReflectFieldGetArtField(const MField &fieldObj) { + FieldMeta *fieldMeta = 
fieldObj.GetFieldMeta(); + return fieldMeta; +} + +#define MRT_REFLECT_SETFIELD(TYPE) \ +void MRT_ReflectSetField##TYPE(jfieldID fieldMeta, jobject obj, TYPE value) { \ + FieldMeta *field = FieldMeta::JniCastNonNull(fieldMeta); \ + MObject *mObj = MObject::JniCast(obj); \ + mObj = field->GetRealMObject(mObj); \ + if (UNLIKELY(mObj == nullptr)) { \ + return; \ + } \ + uint32_t offset = field->GetOffset(); \ + mObj->Store(offset, value, field->IsVolatile()); \ +} + +#define MRT_REFLECT_GETFIELD(TYPE) \ +TYPE MRT_ReflectGetField##TYPE(jfieldID fieldMeta, jobject obj) { \ + FieldMeta *field = FieldMeta::JniCastNonNull(fieldMeta); \ + MObject *mObj = MObject::JniCast(obj); \ + mObj = field->GetRealMObject(mObj); \ + if (UNLIKELY(mObj == nullptr)) { \ + return 0; \ + } \ + uint32_t offset = field->GetOffset(); \ + return mObj->Load(offset, field->IsVolatile()); \ +} + +// ONLY use in hprof.cc for heap dump +jboolean MRT_ReflectGetFieldjbooleanUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjboolean"))); + +jbyte MRT_ReflectGetFieldjbyteUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjbyte"))); + +jchar MRT_ReflectGetFieldjcharUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjchar"))); + +jdouble MRT_ReflectGetFieldjdoubleUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjdouble"))); +jfloat MRT_ReflectGetFieldjfloatUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjfloat"))); + +jint MRT_ReflectGetFieldjintUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjint"))); + +jlong MRT_ReflectGetFieldjlongUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjlong"))); + +jshort MRT_ReflectGetFieldjshortUnsafe(jfieldID fieldMeta, jobject javaObj) + __attribute__ ((alias ("MRT_ReflectGetFieldjshort"))); + +// ONLY use in hprof.cc for dump heap +// Returns the object referenced by an instance field (if 'obj' is not null) +// or a static field (if 'obj' is not null)without RC increment +jobject MRT_ReflectGetFieldjobjectUnsafe(jfieldID fieldMeta, jobject obj) { + FieldMeta *field = FieldMeta::JniCastNonNull(fieldMeta); + MObject *mObj = MObject::JniCast(obj); + mObj = field->GetRealMObject(mObj); + if (UNLIKELY(mObj == nullptr)) { + return nullptr; + } + uint32_t offset = field->GetOffset(); + return mObj->LoadObjectNoRc(offset)->AsJobject(); +} + +// TYPE MRT_Reflect_GetField_##TYPE(jobject fieldObj, jobject obj) +MRT_REFLECT_GETFIELD(jboolean) +MRT_REFLECT_GETFIELD(jbyte) +MRT_REFLECT_GETFIELD(jchar) +MRT_REFLECT_GETFIELD(jdouble) +MRT_REFLECT_GETFIELD(jfloat) +MRT_REFLECT_GETFIELD(jint) +MRT_REFLECT_GETFIELD(jlong) +MRT_REFLECT_GETFIELD(jshort) + +// void MRT_Reflect_SetField_##TYPE(jobject fieldObj, jobject obj, TYPE value) +MRT_REFLECT_SETFIELD(jboolean) +MRT_REFLECT_SETFIELD(jbyte) +MRT_REFLECT_SETFIELD(jchar) +MRT_REFLECT_SETFIELD(jdouble) +MRT_REFLECT_SETFIELD(jfloat) +MRT_REFLECT_SETFIELD(jint) +MRT_REFLECT_SETFIELD(jlong) +MRT_REFLECT_SETFIELD(jshort) + +void MRT_ReflectSetFieldjobject(jfieldID fieldMeta, jobject obj, jobject value) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + MObject *mObj = MObject::JniCast(obj); + MObject *mValue = MObject::JniCast(value); + fm->SetObjectValue(mObj, mValue); +} + +jobject MRT_ReflectGetFieldjobject(jfieldID fieldMeta, jobject obj) { + FieldMeta *fm = 
FieldMeta::JniCastNonNull(fieldMeta); + MObject *mObj = MObject::JniCast(obj); + return fm->GetObjectValue(mObj)->AsJobject(); +} + +char *MRT_ReflectFieldGetCharFieldName(jfieldID fieldMeta) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + return fm->GetName(); +} + +jboolean MRT_ReflectFieldIsStatic(jfieldID fieldMeta) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + return fm->IsStatic(); +} + +jclass MRT_ReflectFieldGetType(jfieldID fieldMeta) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + return fm->GetType()->AsJclass(); +} + +char *MRT_ReflectFieldGetTypeName(jfieldID fieldMeta) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + return fm->GetTypeName(); +} + +jclass MRT_ReflectFieldGetDeclaringClass(jfieldID fieldMeta) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + return fm->GetDeclaringclass()->AsJclass(); +} + +jfieldID MRT_ReflectFromReflectedField(jobject fieldObj) { + MField *mField = MField::JniCastNonNull(fieldObj); + FieldMeta *fieldMeta = mField->GetFieldMeta(); + return fieldMeta->AsJfieldID(); +} + +jobject MRT_ReflectToReflectedField(jclass clazz __attribute__((unused)), jfieldID fid) { + FieldMeta *fieldMeta = FieldMeta::JniCastNonNull(fid); + MField *mField = MField::NewMFieldObject(*fieldMeta); + return mField->AsJobject(); +} + +uint32_t MRT_GetFieldOffset(jfieldID fieldMeta) { + FieldMeta *fm = FieldMeta::JniCastNonNull(fieldMeta); + return fm->GetOffset(); +} + +uint32_t ReflectFieldGetSize(const FieldMeta &fieldMeta) { + char *fieldTypeName = fieldMeta.GetTypeName(); + return ReflectCompactFieldGetSize(fieldTypeName); +} + +uint32_t ReflectCompactFieldGetSize(const std::string &fieldTypeName) { + switch (fieldTypeName[0]) { + case 'Z': + case 'B': + return 1; // jboolean, jbyte size + case 'C': + case 'S': + return 2; // jchar, jshort size + case 'I': + case 'F': + return 4; // jint, jfloat size + case 'J': + case 'D': + return 8; // jlong, jdouble size + case '[': + case 'L': +#ifdef USE_32BIT_REF + return 4; // ref size +#else + return 8; // ref size +#endif // USE_32BIT_REF + default: + __MRT_ASSERT(false, "Unknown Field Type For GetSize!"); + return 0; + } +} +} // namespace maplert \ No newline at end of file diff --git a/src/mrt/maplert/src/mrt_reflection_method.cpp b/src/mrt/maplert/src/mrt_reflection_method.cpp new file mode 100644 index 0000000000..7aae488bfd --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_method.cpp @@ -0,0 +1,443 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_reflection_method.h" +#include "mmethod_inline.h" +#include "methodmeta_inline.h" +#include "exception/mrt_exception.h" +namespace maplert { +MString *ReflectMethodGetName(const MMethod &methodObj) { + MethodMeta *methodMeta = methodObj.GetMethodMeta(); + char *methodName = methodMeta->GetName(); + MString *ret = MString::InternUtf(std::string(methodName)); + return ret; +} + +MObject *ReflectMethodGetReturnType(const MMethod &methodObj) { + MethodMeta *methodMeta = methodObj.GetMethodMeta(); + MClass *rtType = methodMeta->GetReturnType(); + if (UNLIKELY(rtType == nullptr)) { + CHECK(MRT_HasPendingException()) << "must pending exception." << maple::endl; + } + return rtType; +} + +jclass MRT_ReflectMethodGetDeclaringClass(jmethodID methodId) { + MethodMeta *methodMeta = MethodMeta::JniCastNonNull(methodId); + return methodMeta->GetDeclaringClass()->AsJclass(); +} + +jobjectArray MRT_ReflectMethodGetExceptionTypes(jobject methodObj) { + MMethod *methodObject = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = methodObject->GetMethodMeta(); + std::vector types; + methodMeta->GetExceptionTypes(types); + if (MRT_HasPendingException()) { + return nullptr; + } + MArray *excetpionArray = MArray::NewObjectArray(static_cast(types.size()), *WellKnown::GetMClassAClass()); + uint32_t index = 0; + for (auto type : types) { + excetpionArray->SetObjectElementOffHeap(index++, type); + } + return excetpionArray->AsJobjectArray(); +} + +static void ThrowInvocationTargetException() { + ScopedHandles sHandles; + ObjHandle exception(MRT_PendingException()); + MRT_ClearPendingException(); + + MClass *invocationTargetExceptionClass = + MClass::GetClassFromDescriptor(nullptr, "Ljava/lang/reflect/InvocationTargetException;"); + CHECK(invocationTargetExceptionClass != nullptr) << "Not find InvocationTargetException class" << maple::endl; + MethodMeta *exceptionConstruct = + invocationTargetExceptionClass->GetDeclaredConstructor("(Ljava/lang/Throwable;)V"); + CHECK(exceptionConstruct != nullptr) << "Not find InvocationTargetException's Constructor" << maple::endl; + + ObjHandle exceptionInstance( + MObject::NewObject(*invocationTargetExceptionClass, exceptionConstruct, exception.AsRaw())); + if (UNLIKELY(exceptionInstance() == 0)) { + CHECK(MRT_HasPendingException()) << "Must Has PendingException" << maple::endl; + return; + } + + MRT_ThrowExceptionSafe(exceptionInstance.AsJObj()); + return; +} + +static void FailCheckParameter(const MethodMeta &methodMeta, const MClass *receObjectCls, + const MClass ¶mType, uint32_t num) { + std::string methodName, srcClassName, dstClassName; + methodMeta.GetPrettyName(false, methodName); + if (receObjectCls != nullptr) { + receObjectCls->GetTypeName(srcClassName); + } else { + srcClassName = "null"; + } + paramType.GetTypeName(dstClassName); + std::ostringstream msg; + msg << "method " << methodName << " argument " << std::to_string(num) << " has type " << + dstClassName << ", got " << srcClassName; + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.str().c_str()); +} + +static bool DecodeArgs(const MethodMeta &methodMeta, MObject *receObject, jvalue &arg, + const MClass ¶mType, uint32_t index) { + if (!paramType.IsPrimitiveClass()) { + if ((receObject != nullptr) && (!receObject->IsInstanceOf(paramType))) { + MClass *receObjectCls = receObject->GetClass(); + FailCheckParameter(methodMeta, receObjectCls, paramType, index + 1); + return false; + } else { + // copy ref parameter. 
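+      // A null argument is accepted for any reference-typed parameter; only non-null values
+      // are type-checked against the declared parameter type above.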
+ arg.l = reinterpret_cast(receObject); + } + } else { + // Primitive Parameter + if (receObject == nullptr) { + FailCheckParameter(methodMeta, nullptr, paramType, index + 1); + return false; + } + MClass *receObjectClass = receObject->GetClass(); + char srcType = primitiveutil::GetPrimitiveTypeFromBoxType(*receObjectClass); + if (srcType != 'N') { + char dstType = primitiveutil::GetPrimitiveType(paramType); + jvalue src; + // copy primitive parameter, ingore return value, we will check again next. + (void)primitiveutil::UnBoxPrimitive(*receObject, src); + if (!primitiveutil::ConvertNarrowToWide(srcType, dstType, src, arg)) { + FailCheckParameter(methodMeta, receObjectClass, paramType, index + 1); + return false; + } + } else { + FailCheckParameter(methodMeta, receObjectClass, paramType, index + 1); + return false; + } + } + return true; +} + +static ALWAYS_INLINE bool CheckReceiveParameterAndDecodeArgs(const MethodMeta &methodMeta, const MArray *receiveParam, + jvalue decodeArgs[], uint32_t parameterCount) { + // here decodeArgs length equals parameterTypes.size + DCHECK(parameterCount == methodMeta.GetParameterCount()) << "parameterCount is wrong." << maple::endl; + MClass *parameterTypes[parameterCount]; + uint32_t receNum = (receiveParam != nullptr) ? receiveParam->GetLength() : 0; + if (UNLIKELY(parameterCount != receNum)) { + std::ostringstream msg; + msg << "Wrong number of arguments; expected " << std::to_string(parameterCount) << + ", got " << std::to_string(receNum); + MRT_ThrowNewException("java/lang/IllegalArgumentException", msg.str().c_str()); + return false; + } + bool isSuccess = methodMeta.GetParameterTypes(parameterTypes, parameterCount); + if (UNLIKELY(!isSuccess)) { + return false; + } + + for (uint32_t i = 0; i < parameterCount; ++i) { + MObject *receObject = receiveParam->GetObjectElementNoRc(i); + MClass *paramType = parameterTypes[i]; + if (!DecodeArgs(methodMeta, receObject, decodeArgs[i], *paramType, i)) { + return false; + } + } + return true; +} + +static ALWAYS_INLINE bool CheckAccess(const MethodMeta &methodMeta, const MObject *obj, + const MClass &declarClass, uint8_t numFrames) { + uint32_t mod = methodMeta.GetMod(); + MClass *caller = nullptr; + if (!reflection::VerifyAccess(obj, &declarClass, mod, caller, static_cast(numFrames))) { + std::string declaringClassStr, callerStr, modifyStr, prettyMethodStr, retTypeStr; + MClass *declaringClass = methodMeta.GetDeclaringClass(); + MClass *retType = methodMeta.GetReturnType(); + if (UNLIKELY(retType == nullptr)) { + return false; + } + modifier::JavaAccessFlagsToString(mod, modifyStr); + declaringClass->GetPrettyClass(declaringClassStr); + caller->GetPrettyClass(callerStr); + retType->GetBinaryName(retTypeStr); + methodMeta.GetPrettyName(true, prettyMethodStr); + std::ostringstream msg; + msg << "Class " << callerStr << " cannot access " << modifyStr << " method " << prettyMethodStr << " of class " << + declaringClassStr; + MRT_ThrowNewException("java/lang/IllegalAccessException", msg.str().c_str()); + return false; + } + return true; +} + +template +static T InvokeJavaMethodFromArrayArgs(MObject *obj, const MMethod &methodObj, const MArray *arrayObj, uint8_t frames) { + MethodMeta *methodMeta = methodObj.GetMethodMeta(); + bool isStaticMethod = methodMeta->IsStatic(); + MClass *declarClass = methodMeta->GetDeclaringClass(); + if (!isStaticMethod) { + if (!reflection::CheckIsInstaceOf(*declarClass, obj)) { + return 0; + } + MClass *classObj = obj->GetClass(); + methodMeta = classObj->GetVirtualMethod(*methodMeta); 
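+    // Instance calls are re-resolved against the receiver's runtime class above, so an
+    // overriding implementation (rather than the declared one) is the method that gets invoked.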
+ CHECK(methodMeta != nullptr); + declarClass = methodMeta->GetDeclaringClass(); + } + + if (UNLIKELY(!declarClass->InitClassIfNeeded())) { + return 0; + } + + uint32_t parameterCount = methodMeta->GetParameterCount(); + jvalue decodeArgs[parameterCount]; + if (!CheckReceiveParameterAndDecodeArgs(*methodMeta, arrayObj, decodeArgs, parameterCount)) { + return 0; + } + + const bool accessible = methodObj.IsAccessible(); + if (!accessible && !CheckAccess(*methodMeta, obj, *declarClass, frames)) { + return 0; + } + + T result = methodMeta->Invoke(obj, decodeArgs); + if (UNLIKELY(MRT_HasPendingException())) { + ThrowInvocationTargetException(); + return 0; + } + return result; +} + +void ReflectInvokeJavaMethodFromArrayArgsVoid(MObject *mObj, const MMethod &methodObject, + const MArray *argsArrayObj, uint8_t numFrames) { + (void)InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsArrayObj, numFrames); +} + +MObject *ReflectInvokeJavaMethodFromArrayArgsJobject(MObject *mObj, const MMethod &methodObject, + const MArray *argsArrayObj, uint8_t numFrames) { + return reinterpret_cast( + InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsArrayObj, numFrames)); +} + +MObject *ReflectMethodInvoke(const MMethod &methodObject, MObject *mObj, const MArray *argsObj, uint8_t frames) { + MObject *retObj = nullptr; + jvalue retJvalue; + retJvalue.l = 0UL; + char returnTypeName = methodObject.GetMethodMeta()->GetReturnPrimitiveType(); + switch (returnTypeName) { + case 'V': + (void)InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + break; + case 'Z': + retJvalue.z = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJboolean(retJvalue.z); + break; + case 'I': + retJvalue.i = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJint(retJvalue.i); + break; + case 'B': + retJvalue.b = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJbyte(retJvalue.b); + break; + case 'C': + retJvalue.c = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJchar(retJvalue.c); + break; + case 'D': + retJvalue.d = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJdouble(retJvalue.d); + break; + case 'F': + retJvalue.f = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJfloat(retJvalue.f); + break; + case 'J': + retJvalue.j = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJlong(retJvalue.j); + break; + case 'S': + retJvalue.s = InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames); + retObj = primitiveutil::BoxPrimitiveJshort(retJvalue.s); + break; + default: + retObj = reinterpret_cast(InvokeJavaMethodFromArrayArgs(mObj, methodObject, argsObj, frames)); + } + if (UNLIKELY(MRT_HasPendingException())) { + RC_LOCAL_DEC_REF(retObj); + return nullptr; + } + return retObj; +} + +char *MRT_ReflectGetMethodName(jmethodID methodMeta) { + MethodMeta *method = MethodMeta::JniCastNonNull(methodMeta); + return method->GetName(); +} + +char *MRT_ReflectGetMethodSig(jmethodID methodMeta) { + MethodMeta *method = MethodMeta::JniCastNonNull(methodMeta); + return method->GetSignature(); +} + +jint MRT_ReflectGetMethodArgsize(jmethodID methodMeta) { + MethodMeta *method = MethodMeta::JniCastNonNull(methodMeta); + return 
method->GetArgSize(); +} + +jboolean MRT_ReflectMethodIsStatic(jmethodID methodMeta) { + MethodMeta *method = MethodMeta::JniCastNonNull(methodMeta); + return method->IsStatic(); +} + +jboolean MRT_ReflectMethodIsConstructor(jmethodID methodMeta) { + MethodMeta *method = MethodMeta::JniCastNonNull(methodMeta); + return method->IsConstructor(); +} + +void MRT_ReflectGetMethodArgsType(const char *signame, const jint argSize, char *shorty) { + MethodMeta::GetShortySignature(signame, shorty, static_cast(argSize)); +} + +template +static T InvokeJavaMethodFromJvalue(MObject *obj, const MethodMeta *methodMeta, const jvalue *args, + uintptr_t calleeFuncAddr = 0) { + if (UNLIKELY(methodMeta->IsAbstract() && (obj != nullptr))) { + char *methodName = methodMeta->GetName(); + char *sigName = methodMeta->GetSignature(); + methodMeta = obj->GetClass()->GetMethod(methodName, sigName); + } + + DCHECK(methodMeta != nullptr) << "method must not be nullptr!" << maple::endl; + T result = methodMeta->Invoke(obj, args, calleeFuncAddr); + // Interpreter should skip MRT_HasPendingException + // and handle exception in the place where ExecuteSwitchImplCpp. + if (UNLIKELY(!methodMeta->NeedsInterp() && MRT_HasPendingException())) { + return 0; + } + return result; +} + +void MRT_ReflectInvokeMethodAvoid(jobject obj, const jmethodID methodMeta, const jvalue *args) { + (void)InvokeJavaMethodFromJvalue( + MObject::JniCast(obj), MethodMeta::JniCastNonNull(methodMeta), args); +} + +void MRT_ReflectInvokeMethodAZvoid(jobject obj, const jmethodID methodMeta, const jvalue *args, + uintptr_t calleeFuncAddr) { + (void)InvokeJavaMethodFromJvalue( + MObject::JniCast(obj), MethodMeta::JniCastNonNull(methodMeta), args, calleeFuncAddr); +} + +#define MRT_REFLECT_INVOKE_A(TYPE) \ +TYPE MRT_ReflectInvokeMethodA##TYPE(jobject obj, const jmethodID methodMeta, const jvalue *args) { \ + return (TYPE)InvokeJavaMethodFromJvalue( \ + MObject::JniCast(obj), MethodMeta::JniCastNonNull(methodMeta), args);} + +#define MRT_REFLECT_INVOKE_AZ(TYPE) \ +TYPE MRT_ReflectInvokeMethodAZ##TYPE(jobject obj, const jmethodID methodMeta, const jvalue *args, \ + uintptr_t calleeFuncAddr) { \ + return (TYPE)InvokeJavaMethodFromJvalue( \ + MObject::JniCast(obj), MethodMeta::JniCastNonNull(methodMeta), args, calleeFuncAddr);} + +#define TRIPLE_MRT_REFLECT_INVOKE(TYPE) \ +MRT_REFLECT_INVOKE_A(TYPE) \ +MRT_REFLECT_INVOKE_AZ(TYPE) + +TRIPLE_MRT_REFLECT_INVOKE(jboolean) +TRIPLE_MRT_REFLECT_INVOKE(jbyte) +TRIPLE_MRT_REFLECT_INVOKE(jchar) +TRIPLE_MRT_REFLECT_INVOKE(jint) +TRIPLE_MRT_REFLECT_INVOKE(jlong) +TRIPLE_MRT_REFLECT_INVOKE(jobject) +TRIPLE_MRT_REFLECT_INVOKE(jshort) +TRIPLE_MRT_REFLECT_INVOKE(jfloat) +TRIPLE_MRT_REFLECT_INVOKE(jdouble) + +jobject MRT_ReflectMethodGetDefaultValue(jobject methodObj) { + MMethod *methodObject = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = methodObject->GetMethodMeta(); + MClass *declClass = methodMeta->GetDeclaringClass(); + if (!modifier::IsAnnotation(declClass->GetModifier())) { + return nullptr; + } + + string annoStr = declClass->GetAnnotation(); + if (annoStr.empty()) { + return nullptr; + } + AnnoParser &parser = AnnoParser::ConstructParser(annoStr.c_str(), declClass); + std::unique_ptr uniqueParser(&parser); + int32_t loc = uniqueParser->Find(uniqueParser->GetAnnoDefaultStr()); + if (loc == annoconstant::kNPos) { + return nullptr; + } + MethodDefaultUtil mthDefaultUtil(*methodMeta, declClass); + if (!MethodDefaultUtil::HasDefaultValue(methodMeta->GetName(), *uniqueParser)) { + return nullptr; + } + 
return mthDefaultUtil.GetDefaultValue(uniqueParser)->AsJobject(); +} + +jobject MRT_ReflectMethodGetAnnotationNative(jobject executable, jint index, jclass annoClass) { + MMethod *methodObject = MMethod::JniCastNonNull(executable); + MethodMeta *methodMeta = methodObject->GetMethodMeta(); + string executableAnnoStr = methodMeta->GetAnnotation(); + if (executableAnnoStr.empty()) { + return nullptr; + } + string annoStr = AnnoParser::GetParameterAnnotationInfo(executableAnnoStr); + VLOG(reflect) << "Enter MRT_ReflectMethodGetAnnotationNative, annostr: " << annoStr << maple::endl; + AnnoParser &annoParser = AnnoParser::ConstructParser(annoStr.c_str(), methodMeta->GetDeclaringClass()); + std::unique_ptr parser(&annoParser); + return parser->GetAnnotationNative(index, MClass::JniCast(annoClass))->AsJobject(); +} + +jmethodID MRT_ReflectFromReflectedMethod(jobject methodObj) { + MMethod *mMethod = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = mMethod->GetMethodMeta(); + return methodMeta->AsJmethodID(); +} + +jobject MRT_ReflectToReflectedMethod(jclass clazz __attribute__((unused)), jmethodID methodMeta) { + MethodMeta *method = MethodMeta::JniCastNonNull(methodMeta); + MMethod *mMethod = MMethod::NewMMethodObject(*method); + return mMethod->AsJobject(); +} + +void MRT_ReflectMethodForward(jobject from, jobject to) { + MethodMeta *methodFrom = MMethod::JniCastNonNull(from)->GetMethodMeta(); + MethodMeta *methodTo = MMethod::JniCastNonNull(to)->GetMethodMeta(); + if (LinkerAPI::Instance().UpdateMethodSymbolAddress(methodFrom->AsJmethodID(), methodTo->GetFuncAddress())) { + methodFrom->SetAddress(methodTo->GetFuncAddress()); + } +} + +jobject MRT_ReflectMethodClone(jobject methodObj) { + MMethod *mMethod = MMethod::JniCastNonNull(methodObj); + MethodMeta *methodMeta = mMethod->GetMethodMeta(); + + MethodMeta *newMeta = MethodMeta::Cast(MRT_AllocFromMeta(sizeof(MethodMeta), kMethodMetaData)); + // force to be direct method, this method will NOT be discarded in InvokeJavaMethodFromArrayArgs() + newMeta->SetMod(methodMeta->GetMod() | modifier::kModifierPrivate); + newMeta->SetName(methodMeta->GetName()); + newMeta->SetSignature(methodMeta->GetSignature()); + newMeta->SetAddress(methodMeta->GetFuncAddress()); + newMeta->SetDeclaringClass(*methodMeta->GetDeclaringClass()); + newMeta->SetFlag(methodMeta->GetFlag()); + newMeta->SetArgsSize(methodMeta->GetArgSize()); + return MMethod::NewMMethodObject(*newMeta)->AsJobject(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_reflection_proxy.cpp b/src/mrt/maplert/src/mrt_reflection_proxy.cpp new file mode 100644 index 0000000000..da0bcbf8f5 --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_proxy.cpp @@ -0,0 +1,592 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_reflection_proxy.h" +#include +#include +#include "itab_util.h" +#include "mclass_inline.h" +#include "mstring_inline.h" +#include "mmethod_inline.h" +#include "methodmeta_inline.h" +#include "exception/mrt_exception.h" +#include "mrt_reflection_stubfuncforproxy.def" +#ifdef USE_32BIT_REF +using VOID_PTR = uint32_t; +#else +using VOID_PTR = uint64_t; +#endif + +using namespace maple; +namespace maplert { +MethodMeta *GetProxySuperMethod(const MObject &obj, uint32_t num); +jvalue ProcessProxyMethodInvoke(MObject &obj, const MethodMeta &proxySuperMethod, jvalue args[]); +MObject *InvokeInvocationHandler(MObject &obj, jvalue args[], const MethodMeta &proxySuperMethod); +void DoProxyThrowableException(const MObject &proxyObj, const MObject &exceptionObj, const MethodMeta &method); +extern "C" +int64_t EnterProxyMethodInvoke(intptr_t *stack, uint32_t num) { + // this proxy object always put index 0 + CHECK(stack != nullptr); + MObject *obj = reinterpret_cast(stack[0]); + CHECK(obj != nullptr); + MethodMeta *proxySuperMethod = GetProxySuperMethod(*obj, num); + CHECK(proxySuperMethod != nullptr) << "proxySuperMethod return nullptr." << maple::endl; + jvalue ret; + ret.j = 0L; + { + DecodeStackArgs stackArgs(stack); + std::string prefix("L"); + proxySuperMethod->BuildJValuesArgsFromStackMemeryPrefixSigNature(stackArgs, prefix); + ret = ProcessProxyMethodInvoke(*obj, *proxySuperMethod, &(stackArgs.GetData()[prefix.length()])); + } + if (MRT_HasPendingException()) { + MRT_CheckThrowPendingExceptionUnw(); + return ret.j; + } + return ret.j; +} + +jvalue ProcessProxyMethodInvoke(MObject &obj, const MethodMeta &proxySuperMethod, jvalue args[]) { + ScopedHandles sHandles; + jvalue retValue; + retValue.l = 0UL; + MObject *value = InvokeInvocationHandler(obj, args, proxySuperMethod); + ObjHandle valueRef(value); + if (UNLIKELY(MRT_HasPendingException())) { + ObjHandle th(MRT_PendingException()); + DoProxyThrowableException(obj, *(th()), proxySuperMethod); + return retValue; + } + char retPrimitiveType = proxySuperMethod.GetReturnPrimitiveType(); + switch (retPrimitiveType) { + case 'V': + return retValue; + case 'L': + case '[': { + MClass *retType = proxySuperMethod.GetReturnType(); + if (retType == nullptr) { + return retValue; + } + if (valueRef() != 0 && !valueRef->IsInstanceOf(*retType)) { + MRT_ThrowNewException("java/lang/ClassCastException", nullptr); + return retValue; + } + retValue.l = valueRef.ReturnJObj(); + return retValue; + } + default: { // primitive value + if (UNLIKELY(valueRef() == 0)) { + MRT_ThrowNewException("java/lang/NullPointerException", nullptr); + return retValue; + } + if (!primitiveutil::IsBoxObject(*value, retPrimitiveType)) { + MRT_ThrowNewException("java/lang/ClassCastException", nullptr); + return retValue; + } + bool success = primitiveutil::UnBoxPrimitive(*value, retValue); + if (!success) { + MRT_ThrowNewException("java/lang/ClassCastException", nullptr); + return retValue; + } + return retValue; + } + } +} + +static void ThrowUndeclaredThrowableException(const MObject &exceptionObj) { + MRT_ClearPendingException(); + MClass *exceptionClass = WellKnown::GetMClassUndeclaredThrowableException(); + MethodMeta *exceptionConstruct = exceptionClass->GetDeclaredConstructor("(Ljava/lang/Throwable;)V"); + DCHECK(exceptionConstruct != nullptr) << "exception Construct nullptr." 
<< maple::endl; + MObject *exceptionInstance = MObject::NewObject(*exceptionClass, exceptionConstruct, &exceptionObj); + if (UNLIKELY(exceptionInstance == nullptr)) { + return; + } + MRT_ThrowExceptionSafe(exceptionInstance->AsJobject()); + RC_LOCAL_DEC_REF(exceptionInstance); + return; +} + +void DoProxyThrowableException(const MObject &proxyObj, const MObject &exceptionObj, const MethodMeta &method) { + if (exceptionObj.IsInstanceOf(*WellKnown::GetMClassError()) || + exceptionObj.IsInstanceOf(*WellKnown::GetMClassRuntimeException())) { + return; + } + ScopedHandles sHandles; + MClass *proxyCls = proxyObj.GetClass(); + char *methodName = method.GetName(); + char *methodSig = method.GetSignature(); + MClass **superClassArray = proxyCls->GetSuperClassArray(); + DCHECK(superClassArray != nullptr) << "Proxy Cls super class array nullptr." << maple::endl; + uint32_t numofsuper = proxyCls->GetNumOfSuperClasses(); + uint32_t assignableNum = 0; + uint32_t duplicatedMethodNum = 0; + for (uint32_t i = 1; i < numofsuper; ++i) { + MClass *interfaceclass = MClass::ResolveSuperClass(&superClassArray[i]); + MethodMeta *methodMeta = interfaceclass->GetDeclaredMethod(methodName, methodSig); + if (methodMeta != nullptr) { + ++duplicatedMethodNum; + std::vector types; + methodMeta->GetExceptionTypes(types); + bool isCurrentAssignable = false; + size_t numOfException = types.size(); + for (size_t j = 0; j < numOfException; ++j) { + MClass *decExCls = types[j]; + if (exceptionObj.IsInstanceOf(*decExCls)) { + isCurrentAssignable = true; + break; + } + } + if (isCurrentAssignable == true) { + ++assignableNum; + } + } + } + if (assignableNum != duplicatedMethodNum) { + ThrowUndeclaredThrowableException(exceptionObj); + } + return; +} + +static MethodMeta *GetHandlerInvokeMethod(const MClass &handlerClass) { + MethodMeta *invokeMethod = handlerClass.GetMethod("invoke", + "(Ljava/lang/Object;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;"); + if (UNLIKELY(invokeMethod == nullptr || (invokeMethod->GetFuncAddress() == 0))) { + std::string msg = "No Method: jobject invoke(Object, Method, Object[]) in class "; + msg += handlerClass.GetName(); + msg += " or super class"; + MRT_ThrowNewException("java/lang/NoSuchMethodError", msg.c_str()); + return nullptr; + } + return invokeMethod; +} + +static MethodMeta *GetSuperMethod(const MethodMeta &srcMethod, const MClass &proxyClass) { + MethodMeta *superMethod = nullptr; + char *methodName = srcMethod.GetName(); + char *methodSig = srcMethod.GetSignature(); + MClass *objectClass = WellKnown::GetMClassObject(); + if (!strcmp("equals", methodName) && !strcmp("(Ljava/lang/Object;)Z", methodSig)) { + superMethod = objectClass->GetDeclaredMethod(methodName, methodSig); + } else if (!strcmp("hashCode", methodName) && !strcmp("()I", methodSig)) { + superMethod = objectClass->GetDeclaredMethod(methodName, methodSig); + } else if (!strcmp("toString", methodName) && !strcmp("()Ljava/lang/String;", methodSig)) { + superMethod = objectClass->GetDeclaredMethod(methodName, methodSig); + } else { + MClass **superClassArray = proxyClass.GetSuperClassArray(); + CHECK(superClassArray != nullptr) << "Super Class Array is nullptr in proxy class." 
<< maple::endl; + uint32_t numofsuper = proxyClass.GetNumOfSuperClasses(); + for (uint32_t i = 1; i < numofsuper; ++i) { + MClass *interfaceclass = MClass::ResolveSuperClass(&superClassArray[i]); + MethodMeta *method = interfaceclass->GetMethod(methodName, methodSig); + if (method != nullptr) { + superMethod = method; + break; + } + } + } + return superMethod; +} + +MethodMeta *GetProxySuperMethod(const MObject &obj, uint32_t num) { + MClass *proxyClass = obj.GetClass(); + MethodMeta *proxyMethod = &(proxyClass->GetMethodMetas()[num]); + MethodMeta *superMethod = GetSuperMethod(*proxyMethod, *proxyClass); + return superMethod; +} + +MObject *InvokeInvocationHandler(MObject &obj, jvalue args[], const MethodMeta &proxySuperMethod) { + ScopedHandles sHandles; + FieldMeta *hfield = WellKnown::GetMClassProxy()->GetDeclaredField("h"); + CHECK(hfield != nullptr) << "Can't find h field in proxy class." << maple::endl; + ObjHandle hobj(hfield->GetObjectValue(&obj)); + CHECK(hobj() != 0) << "proxy's h field is nullptr." << maple::endl; + MethodMeta *invokeMethod = GetHandlerInvokeMethod(*(hobj()->GetClass())); + if (invokeMethod == nullptr) { + return nullptr; + } + + uint32_t parameterSize = proxySuperMethod.GetParameterCount(); + MArray *arrayP = nullptr; + if (parameterSize > 0) { + arrayP = MArray::NewObjectArray(parameterSize, *WellKnown::GetMClassAObject()); + CHECK(arrayP != nullptr) << "NewObjectArray return nullptr." << maple::endl; + proxySuperMethod.BuildMArrayArgsFromJValues(*arrayP, args); + } + ObjHandle arrayPRef(arrayP); + ObjHandle interfaceMethodObj(MMethod::NewMMethodObject(proxySuperMethod)); + jvalue methodArgs[3]; // invokeMethod has 3 args + methodArgs[0].l = reinterpret_cast(&obj); + methodArgs[1].l = interfaceMethodObj.AsJObj(); + methodArgs[2].l = arrayPRef.AsJObj(); + jobject retvalue = invokeMethod->Invoke(hobj(), methodArgs); + return reinterpret_cast(retvalue); +} + +static void SortMethodFromProxyClassbyName(MClass &proxyCls, std::vector &objectMethod) { + uint32_t numOfMethods = proxyCls.GetNumOfMethods(); + MethodMeta *objectMethods = proxyCls.GetMethodMetas(); + DCHECK(objectMethods != nullptr) << "GetMethodMetas() fail in SortMethodFromProxyClassbyName." << maple::endl; + for (uint32_t i = 0; i < numOfMethods; ++i) { + objectMethod.push_back(&objectMethods[i]); + } + + std::sort(objectMethod.begin(), objectMethod.end(), [](MethodMeta *a, MethodMeta *b) { + char *nameA = a->GetName(); + std::string sa(nameA); + char *nameB = b->GetName(); + std::string sb(nameB); + return sa < sb; + }); +} + +static void GenProxyVtab(MClass &proxyCls, MArray &methods) { + const uint32_t numVMethodsObjectVtab = 11; // 11 is number of methods in objectVtab + MClass *objectMClass = WellKnown::GetMClassObject(); + const static int16_t equalsVtabSlot = objectMClass->GetMethod("equals", "(Ljava/lang/Object;)Z")->GetVtabIndex(); + const static int16_t hashCodeVtabSlot = objectMClass->GetMethod("hashCode", "()I")->GetVtabIndex(); + const static int16_t toStringVtabSlot = objectMClass->GetMethod("toString", "()Ljava/lang/String;")->GetVtabIndex(); + uint32_t numVirtualMethods = methods.GetLength(); + auto vtab = reinterpret_cast(calloc(numVMethodsObjectVtab + numVirtualMethods, sizeof(VOID_PTR))); + CHECK(vtab != nullptr) << "calloc fail in GenProxyVtab." << maple::endl; + if (vtab == nullptr) { + return; + } + + uint32_t vtabCnt = 0; + auto objectVtab = reinterpret_cast(objectMClass->GetVtab()); + DCHECK(objectVtab != nullptr) << "object Vtab is nullptr." 
<< maple::endl; + for (uint32_t i = 0; i < numVMethodsObjectVtab; ++i) { + vtab[i] = objectVtab[i]; + } + vtabCnt += numVMethodsObjectVtab; + + std::vector proxyMethods; + SortMethodFromProxyClassbyName(proxyCls, proxyMethods); + for (auto it = proxyMethods.begin(); it != proxyMethods.end(); ++it) { + MethodMeta *method = *it; + char *methodName = method->GetName(); + char *sigName = method->GetSignature(); + uintptr_t address = method->GetFuncAddress(); + if (!strcmp("equals", methodName) && !strcmp("(Ljava/lang/Object;)Z", sigName)) { + vtab[equalsVtabSlot] = static_cast(address); + continue; + } else if (!strcmp("hashCode", methodName) && !strcmp("()I", sigName)) { + vtab[hashCodeVtabSlot] = static_cast(address); + continue; + } else if (!strcmp("toString", methodName) && !strcmp("()Ljava/lang/String;", sigName)) { + vtab[toStringVtabSlot] = static_cast(address); + continue; + } + vtab[vtabCnt++] = static_cast(address); + } + proxyCls.SetVtable(reinterpret_cast(vtab)); +} + +static void SortMethodWithHashCode(std::vector &methodVec) { + std::sort(methodVec.begin(), methodVec.end(), [](const MethodMeta *a, const MethodMeta *b) { + uint16_t hashCodeA = a->GetHashCode(); + uint32_t hashCodeB = b->GetHashCode(); + if (a->IsFinalizeMethod() && !b->IsFinalizeMethod()) { + return false; + } + if (!a->IsFinalizeMethod() && b->IsFinalizeMethod()) { + return true; + } + return hashCodeA < hashCodeB; + }); +} + +static void GenProxyConstructor(MClass &proxyCls, MethodMeta &proxyClsMth, MethodMeta &constructor) { + char *mthMethodName = constructor.GetName(); + MethodSignature *mthSig = constructor.GetMethodSignature(); + uint32_t modifier = (constructor.GetMod() & (~modifier::kModifierProtected)) | modifier::kModifierPublic; + proxyClsMth.FillMethodMeta(false, mthMethodName, mthSig, constructor.GetAnnotationRaw(), + constructor.GetVtabIndex(), proxyCls, + reinterpret_cast(constructor.GetFuncAddress()), + modifier, constructor.GetFlag(), constructor.GetArgSize()); +} + +static uintptr_t FillItab(const std::map> &itabMap, + const std::vector> &itabConflictVector, + const std::map> &itabSecondMap, + const std::vector> &itabSecondConflictVector) { + size_t size1 = itabConflictVector.size(); + size_t itabSecondMapSize = itabSecondMap.size(); + size_t itabSecondConflictSize = itabSecondConflictVector.size(); + unsigned long itabSize = 0; + if (size1 == 0) { + itabSize = kItabFirstHashSize; + } else { + // 1 is first tab, 2 is second tab, * 2 for hash value and methods adress in second tab, + // itabSecondConflict * 2 name and signature, function addr + itabSize = kItabFirstHashSize + 1 + 2 + itabSecondMapSize * 2 + itabSecondConflictSize * 2; + } + VOID_PTR *itab = reinterpret_cast(MRT_AllocFromMeta(itabSize * sizeof(VOID_PTR), kITabMetaData)); + for (auto item : itabMap) { + if (item.second.first == 1) { + continue; + } + *(itab + item.first) = item.second.first; + } + if (size1 != 0) { + unsigned long tmp = reinterpret_cast(itab + kItabFirstHashSize + 1); + *(itab + kItabFirstHashSize) = tmp; + } + uint32_t index = 0; + if (size1 != 0) { + index = kItabFirstHashSize + 1; +#ifdef USE_32BIT_REF + uint64_t shiftCountBit = 4 * 4; +#else + uint64_t shiftCountBit = 8 * 4; +#endif + itab[index++] = ((itabSecondConflictSize | (1UL << (shiftCountBit - 1))) << shiftCountBit) + itabSecondMapSize; + itab[index++] = 1; + for (auto item : itabSecondMap) { + itab[index++] = item.first; + itab[index++] = item.second.first; + } + for (auto item : itabSecondConflictVector) { + std::string methodName = item.second; + 
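+      // Each conflicting entry occupies two slots: a pointer to a metadata copy of its
+      // "name|signature" string followed by the corresponding stub address (item.first),
+      // presumably so the interface-call resolver can fall back to name comparison at runtime.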
size_t len = methodName.length() + 1; + char *name = reinterpret_cast(MRT_AllocFromMeta(len, kNativeStringData)); + if (strcpy_s(name, len, methodName.c_str()) != EOK) { + LOG(FATAL) << "FillItab strcpy_s() not return 0" << maple::endl; + return 0; + } + itab[index++] = reinterpret_cast(name); + itab[index++] = item.first; + } + } + return reinterpret_cast(itab); +} + +static uintptr_t GenProxyItab(const std::map> itabMap, + std::vector> itabConflictVector) { + size_t size1 = itabConflictVector.size(); + std::map> itabSecondMap; + std::vector> itabSecondConflictVector; + if (size1 != 0) { + for (auto item :itabConflictVector) { + std::string methodName = item.second; + unsigned long hashIdex = GetSecondHashIndex(methodName.c_str()); + if (itabSecondMap.find(hashIdex) == itabSecondMap.end()) { + std::pair > hashPair(hashIdex, item); + itabSecondMap.insert(hashPair); + } else { + if (itabSecondMap[hashIdex].first == 1) { + itabSecondConflictVector.push_back(item); + } else { + itabSecondConflictVector.push_back(item); + auto oldItem = itabSecondMap[hashIdex]; + itabSecondConflictVector.push_back(oldItem); + itabSecondMap[hashIdex].first = 1; + } + } + } + } + return FillItab(itabMap, itabConflictVector, itabSecondMap, itabSecondConflictVector); +} + +static void ResolveConstructHashConflict(MethodMeta &constructor, std::vector &methodVec, + std::vector &methodHashConflict) { + bool isConflict = false; + for (auto it = methodVec.begin(); it != methodVec.end(); ++it) { + MethodMeta *methodMeta = *it; + if ((methodMeta != &constructor) && ((methodMeta->GetHashCode() == constructor.GetHashCode()) || + (methodMeta->GetHashCode() == modifier::kHashConflict))) { + if (isConflict == false) { + methodHashConflict.push_back(&constructor); + isConflict = true; + } + methodHashConflict.push_back(methodMeta); + } + } + + for (auto methodConflict : methodHashConflict) { + auto it = std::find(methodVec.begin(), methodVec.end(), methodConflict); + if (it != methodVec.end()) { + methodVec.erase(it); + } + } +} + +static void ReomveRepeatMethod(const MArray &methodsArray, std::vector &methodVec, uint32_t &rmNum) { + uint32_t numMethods = methodsArray.GetLength(); + for (uint32_t i = numMethods; i > 0; --i) { + MMethod *method = methodsArray.GetObjectElementNoRc(i - 1)->AsMMethod(); + if (method == nullptr) { + continue; + } + MethodMeta *methodMeta = method->GetMethodMeta(); + char *signature = methodMeta->GetSignature(); + char *name = methodMeta->GetName(); + bool hasGen = false; + for (auto it = methodVec.begin(); it != methodVec.end(); ++it) { + MethodMeta *temp = *it; + if (!strcmp(signature, temp->GetSignature()) && !strcmp(name, temp->GetName())) { + hasGen = true; + break ; + } + } + if (hasGen) { + ++rmNum; + continue; + } + methodVec.push_back(methodMeta); + } +} + +static void GenProxyMethod(const MClass &proxyCls, MethodMeta &proxyClsMth, const MethodMeta &mobj, + const uint32_t ¤tNum, + std::map> &itabMap, + std::vector> &itabConflictVector) { + constexpr uint32_t kRemoveFlags = modifier::kModifierAbstract | modifier::kModifierDefault; + constexpr uint32_t kAddFlags = modifier::kModifierFinal; + char *methodName = mobj.GetName(); + MethodSignature *mthSig = mobj.GetMethodSignature(); + char *sigName = mobj.GetSignature(); + std::string innerMethodname = std::string(methodName) + "|" + std::string(sigName); + unsigned long hashIdex = GetHashIndex(innerMethodname.c_str()); + auto address = reinterpret_cast(gstubfunc[currentNum]); + uint32_t modifier = (mobj.GetMod() & (~kRemoveFlags)) | kAddFlags; 
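+  // The generated method reuses the assembly stub gstubfunc[currentNum] as its entry point.
+  // The (stub address, "name|signature") pair is then recorded in itabMap under its hash;
+  // colliding entries are diverted to itabConflictVector and the slot's stored address is
+  // replaced with 1, which FillItab later treats as the conflict marker.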
+ proxyClsMth.FillMethodMeta(false, methodName, mthSig, mobj.GetAnnotationRaw(), + mobj.GetVtabIndex(), proxyCls, address, modifier, + mobj.GetFlag(), mobj.GetArgSize()); + std::pair addressPair(address, innerMethodname); + auto hasExist = itabMap.find(hashIdex); + if (hasExist != itabMap.end()) { + // conflict + if (hasExist->second.first == 1) { + itabConflictVector.push_back(addressPair); + } else { + auto tmp = hasExist->second; + itabConflictVector.push_back(tmp); + hasExist->second.first = 1; + itabConflictVector.push_back(addressPair); + } + } else { + std::pair> hashcodePair(hashIdex, addressPair); + itabMap.insert(hashcodePair); + } +} + +static void GenProxyMethodAndConstructor(MClass &proxyCls, MethodMeta *&proxyClsMth, uint32_t ¤tNum, + std::vector &methodVec, + std::map> &itabMap, + std::vector> &itabConflictVector, + bool isConflictVector) { + for (auto mobj : methodVec) { + if (mobj->IsConstructor()) { + GenProxyConstructor(proxyCls, *proxyClsMth, *mobj); + } else { + GenProxyMethod(proxyCls, *proxyClsMth, *mobj, currentNum, itabMap, itabConflictVector); + } + if (isConflictVector) { + proxyClsMth->SetHashCode(modifier::kHashConflict); + } + ++currentNum; + ++proxyClsMth; + } +} + +static void GenerateProxyMethods(MClass &proxyCls, MArray &methods) { + uint32_t currentNum = 0; + const uint32_t numDirectMethods = 1; // for constructor + uint32_t numVirtualMethods = methods.GetLength(); + uint32_t methodNumSum = numVirtualMethods + numDirectMethods; + uint32_t rmNum = 0; + if (methodNumSum > kSupportMaxInterfaceMethod) { + LOG(FATAL) << " number: " << methodNumSum << maple::endl; + } + + std::vector methodVec; + ReomveRepeatMethod(methods, methodVec, rmNum); + + // constructor + MClass *reflectProxyMCls = WellKnown::GetMClassProxy(); + MethodMeta *constructor = reflectProxyMCls->GetDeclaredConstructor("(Ljava/lang/reflect/InvocationHandler;)V"); + methodVec.push_back(constructor); + SortMethodWithHashCode(methodVec); + std::vector methodHashConflict; + ResolveConstructHashConflict(*constructor, methodVec, methodHashConflict); + + MethodMeta *proxyClsMthBase = MethodMeta::CastNonNull(MRT_AllocFromMeta(methodNumSum * sizeof(MethodMeta), + kMethodMetaData)); + MethodMeta *proxyClsMth = proxyClsMthBase; + std::map> itabMap; + std::vector> itabConflictVector; + + GenProxyMethodAndConstructor(proxyCls, proxyClsMth, currentNum, methodVec, itabMap, itabConflictVector, false); + GenProxyMethodAndConstructor(proxyCls, proxyClsMth, currentNum, methodHashConflict, + itabMap, itabConflictVector, true); + uintptr_t itab = GenProxyItab(itabMap, itabConflictVector); + proxyCls.SetItable(itab); + proxyCls.SetMethods(*proxyClsMthBase); + proxyCls.SetNumOfMethods(methodNumSum - rmNum); + GenProxyVtab(proxyCls, methods); +} + +static void GenerateProxyFields(MClass &proxyCls) { + // set proxy declaring field 0 + proxyCls.SetNumOfFields(0); +} + +jclass MRT_ReflectProxyGenerateProxy(jstring name, jobjectArray interfaces, jobject loader, + jobjectArray methods, jobjectArray throws __attribute__((unused))) { + MString *proxyName = MString::JniCastNonNull(name); + MArray *interfacesArray = MArray::JniCastNonNull(interfaces); + MArray *methodArray = MArray::JniCastNonNull(methods); + MClass *proxyCls = MClass::NewMClass(); + std::string classNameStr = proxyName->GetChars(); + const char *classNameStrMeta = strdup(classNameStr.c_str()); // Does not need to be free-ed. See below. + // Note: Why not std::string? It is used as the name of a MClass, and it needs to be a C-style char*. 
+ if (classNameStrMeta == nullptr) { + int myErrno = errno; + LOG(FATAL) << "strdup: Failed to allocate classNameStrMeta. errno: " << myErrno << maple::endl; + MRT_Panic(); + } + proxyCls->SetName(classNameStrMeta); // This keeps classNameStrMeta alive. proxyCls is permanent. + constexpr uint32_t accessFlag = modifier::kModifierProxy | modifier::kModifierPublic | modifier::kModifierFinal; + proxyCls->SetModifier(accessFlag); + MClass *javaProxy = WellKnown::GetMClassProxy(); + proxyCls->SetObjectSize(javaProxy->GetObjectSize()); + uintptr_t gctib = reinterpret_cast(javaProxy->GetGctib()); + proxyCls->SetGctib(gctib); + // proxy super class always java.lang.proxy. + uint32_t numOfSuper = interfacesArray->GetLength() + 1; + MObject **itf = reinterpret_cast(calloc(numOfSuper, sizeof(MObject*))); + CHECK(itf != nullptr) << "calloc Proxy itf fail." << maple::endl; + itf[0] = javaProxy; + // interfaces is class object which is out of heap. + uint32_t srcIndex = 0; + for (uint32_t i = 1; i < numOfSuper; ++i) { + itf[i] = interfacesArray->GetObjectElementOffHeap(srcIndex++); + } + proxyCls->SetSuperClassArray(reinterpret_cast(itf)); + proxyCls->SetNumOfSuperClasses(numOfSuper); + + GenerateProxyMethods(*proxyCls, *methodArray); + GenerateProxyFields(*proxyCls); + // flag 0 + // annotation 0 + // clinitAddr 0 + MRT_RegisterDynamicClass(loader, *proxyCls); + // mark this generated class as initialized + proxyCls->SetInitStateRawValue(reinterpret_cast(proxyCls)); + std::atomic_thread_fence(std::memory_order_release); + return proxyCls->AsJclass(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_reflection_reference.cpp b/src/mrt/maplert/src/mrt_reflection_reference.cpp new file mode 100644 index 0000000000..f81d194631 --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_reference.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "collector/rp_base.h" + +namespace maplert { +address_t MRT_ReferenceGetReferent(address_t javaThis) { + if (javaThis != 0) { + SetReferenceActive(javaThis); + } + address_t referent = MRT_LOAD_JOBJECT_INC_REFERENT(javaThis, WellKnown::kReferenceReferentOffset); + if (referent != 0) { + // Apply barrier to ensure that concurrent marking sees the referent and + // does not prematurely reclaim it. + MRT_WeakRefGetBarrier(referent); + } else { + ClearReferenceActive(javaThis); + } + return referent; +} + +void MRT_ReferenceClearReferent(address_t javaThis) { + bool clearResurrectWeak = false; + if (javaThis != 0) { + MClass *klass = reinterpret_cast(javaThis)->GetClass(); + uint32_t classFlag = klass->GetFlag(); + clearResurrectWeak = ((classFlag & (modifier::kClassCleaner | modifier::kClassPhantomReference)) == 0); + } + MRT_WRITE_REFERENT(javaThis, WellKnown::kReferenceReferentOffset, nullptr, clearResurrectWeak); +} + + +// Only one thread can invoke MRT_RunFinalization at same time +// 1. Only one runFinalizationFinalizers list in system. +// 2. 
Swap runFinalizationFinalizers and finalizers list two time, might cause same object finalize two times. +void MCC_RunFinalization() __attribute__((alias("MRT_RunFinalization"))); +void MRT_RunFinalization() { + ReferenceProcessor::Instance().RunFinalization(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_reflection_stubfuncforproxy.def b/src/mrt/maplert/src/mrt_reflection_stubfuncforproxy.def new file mode 100644 index 0000000000..e970a18baf --- /dev/null +++ b/src/mrt/maplert/src/mrt_reflection_stubfuncforproxy.def @@ -0,0 +1,928 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +namespace maplert { +constexpr int kSupportMaxInterfaceMethod = 600; + +// this stub is defined in proxy_stub_arm.S/roxy_stub_arm64.S +#define STUBFUNCPROXY_TYPE_NUM(TYPE, NUM) \ +extern "C" int64_t sfp_##TYPE##_##NUM(); + +#define STUBFUNCPROXY_NUM(NUM)\ +STUBFUNCPROXY_TYPE_NUM(jlong, NUM)\ + +STUBFUNCPROXY_NUM(0) STUBFUNCPROXY_NUM(1) +STUBFUNCPROXY_NUM(2) STUBFUNCPROXY_NUM(3) +STUBFUNCPROXY_NUM(4) STUBFUNCPROXY_NUM(5) +STUBFUNCPROXY_NUM(6) STUBFUNCPROXY_NUM(7) +STUBFUNCPROXY_NUM(8) STUBFUNCPROXY_NUM(9) +STUBFUNCPROXY_NUM(10) STUBFUNCPROXY_NUM(11) +STUBFUNCPROXY_NUM(12) STUBFUNCPROXY_NUM(13) +STUBFUNCPROXY_NUM(14) STUBFUNCPROXY_NUM(15) +STUBFUNCPROXY_NUM(16) STUBFUNCPROXY_NUM(17) +STUBFUNCPROXY_NUM(18) STUBFUNCPROXY_NUM(19) +STUBFUNCPROXY_NUM(20) STUBFUNCPROXY_NUM(21) +STUBFUNCPROXY_NUM(22) STUBFUNCPROXY_NUM(23) +STUBFUNCPROXY_NUM(24) STUBFUNCPROXY_NUM(25) +STUBFUNCPROXY_NUM(26) STUBFUNCPROXY_NUM(27) +STUBFUNCPROXY_NUM(28) STUBFUNCPROXY_NUM(29) +STUBFUNCPROXY_NUM(30) STUBFUNCPROXY_NUM(31) +STUBFUNCPROXY_NUM(32) STUBFUNCPROXY_NUM(33) +STUBFUNCPROXY_NUM(34) STUBFUNCPROXY_NUM(35) +STUBFUNCPROXY_NUM(36) STUBFUNCPROXY_NUM(37) +STUBFUNCPROXY_NUM(38) STUBFUNCPROXY_NUM(39) +STUBFUNCPROXY_NUM(40) STUBFUNCPROXY_NUM(41) +STUBFUNCPROXY_NUM(42) STUBFUNCPROXY_NUM(43) +STUBFUNCPROXY_NUM(44) STUBFUNCPROXY_NUM(45) +STUBFUNCPROXY_NUM(46) STUBFUNCPROXY_NUM(47) +STUBFUNCPROXY_NUM(48) STUBFUNCPROXY_NUM(49) +STUBFUNCPROXY_NUM(50) STUBFUNCPROXY_NUM(51) +STUBFUNCPROXY_NUM(52) STUBFUNCPROXY_NUM(53) +STUBFUNCPROXY_NUM(54) STUBFUNCPROXY_NUM(55) +STUBFUNCPROXY_NUM(56) STUBFUNCPROXY_NUM(57) +STUBFUNCPROXY_NUM(58) STUBFUNCPROXY_NUM(59) +STUBFUNCPROXY_NUM(60) STUBFUNCPROXY_NUM(61) +STUBFUNCPROXY_NUM(62) STUBFUNCPROXY_NUM(63) +STUBFUNCPROXY_NUM(64) STUBFUNCPROXY_NUM(65) +STUBFUNCPROXY_NUM(66) STUBFUNCPROXY_NUM(67) +STUBFUNCPROXY_NUM(68) STUBFUNCPROXY_NUM(69) +STUBFUNCPROXY_NUM(70) STUBFUNCPROXY_NUM(71) +STUBFUNCPROXY_NUM(72) STUBFUNCPROXY_NUM(73) +STUBFUNCPROXY_NUM(74) STUBFUNCPROXY_NUM(75) +STUBFUNCPROXY_NUM(76) STUBFUNCPROXY_NUM(77) +STUBFUNCPROXY_NUM(78) STUBFUNCPROXY_NUM(79) +STUBFUNCPROXY_NUM(80) STUBFUNCPROXY_NUM(81) +STUBFUNCPROXY_NUM(82) STUBFUNCPROXY_NUM(83) +STUBFUNCPROXY_NUM(84) STUBFUNCPROXY_NUM(85) +STUBFUNCPROXY_NUM(86) STUBFUNCPROXY_NUM(87) +STUBFUNCPROXY_NUM(88) STUBFUNCPROXY_NUM(89) +STUBFUNCPROXY_NUM(90) STUBFUNCPROXY_NUM(91) +STUBFUNCPROXY_NUM(92) STUBFUNCPROXY_NUM(93) 
+STUBFUNCPROXY_NUM(94) STUBFUNCPROXY_NUM(95) +STUBFUNCPROXY_NUM(96) STUBFUNCPROXY_NUM(97) +STUBFUNCPROXY_NUM(98) STUBFUNCPROXY_NUM(99) +STUBFUNCPROXY_NUM(100) STUBFUNCPROXY_NUM(101) +STUBFUNCPROXY_NUM(102) STUBFUNCPROXY_NUM(103) +STUBFUNCPROXY_NUM(104) STUBFUNCPROXY_NUM(105) +STUBFUNCPROXY_NUM(106) STUBFUNCPROXY_NUM(107) +STUBFUNCPROXY_NUM(108) STUBFUNCPROXY_NUM(109) +STUBFUNCPROXY_NUM(110) STUBFUNCPROXY_NUM(111) +STUBFUNCPROXY_NUM(112) STUBFUNCPROXY_NUM(113) +STUBFUNCPROXY_NUM(114) STUBFUNCPROXY_NUM(115) +STUBFUNCPROXY_NUM(116) STUBFUNCPROXY_NUM(117) +STUBFUNCPROXY_NUM(118) STUBFUNCPROXY_NUM(119) +STUBFUNCPROXY_NUM(120) STUBFUNCPROXY_NUM(121) +STUBFUNCPROXY_NUM(122) STUBFUNCPROXY_NUM(123) +STUBFUNCPROXY_NUM(124) STUBFUNCPROXY_NUM(125) +STUBFUNCPROXY_NUM(126) STUBFUNCPROXY_NUM(127) +STUBFUNCPROXY_NUM(128) STUBFUNCPROXY_NUM(129) +STUBFUNCPROXY_NUM(130) STUBFUNCPROXY_NUM(131) +STUBFUNCPROXY_NUM(132) STUBFUNCPROXY_NUM(133) +STUBFUNCPROXY_NUM(134) STUBFUNCPROXY_NUM(135) +STUBFUNCPROXY_NUM(136) STUBFUNCPROXY_NUM(137) +STUBFUNCPROXY_NUM(138) STUBFUNCPROXY_NUM(139) +STUBFUNCPROXY_NUM(140) STUBFUNCPROXY_NUM(141) +STUBFUNCPROXY_NUM(142) STUBFUNCPROXY_NUM(143) +STUBFUNCPROXY_NUM(144) STUBFUNCPROXY_NUM(145) +STUBFUNCPROXY_NUM(146) STUBFUNCPROXY_NUM(147) +STUBFUNCPROXY_NUM(148) STUBFUNCPROXY_NUM(149) +STUBFUNCPROXY_NUM(150) STUBFUNCPROXY_NUM(151) +STUBFUNCPROXY_NUM(152) STUBFUNCPROXY_NUM(153) +STUBFUNCPROXY_NUM(154) STUBFUNCPROXY_NUM(155) +STUBFUNCPROXY_NUM(156) STUBFUNCPROXY_NUM(157) +STUBFUNCPROXY_NUM(158) STUBFUNCPROXY_NUM(159) +STUBFUNCPROXY_NUM(160) STUBFUNCPROXY_NUM(161) +STUBFUNCPROXY_NUM(162) STUBFUNCPROXY_NUM(163) +STUBFUNCPROXY_NUM(164) STUBFUNCPROXY_NUM(165) +STUBFUNCPROXY_NUM(166) STUBFUNCPROXY_NUM(167) +STUBFUNCPROXY_NUM(168) STUBFUNCPROXY_NUM(169) +STUBFUNCPROXY_NUM(170) STUBFUNCPROXY_NUM(171) +STUBFUNCPROXY_NUM(172) STUBFUNCPROXY_NUM(173) +STUBFUNCPROXY_NUM(174) STUBFUNCPROXY_NUM(175) +STUBFUNCPROXY_NUM(176) STUBFUNCPROXY_NUM(177) +STUBFUNCPROXY_NUM(178) STUBFUNCPROXY_NUM(179) +STUBFUNCPROXY_NUM(180) STUBFUNCPROXY_NUM(181) +STUBFUNCPROXY_NUM(182) STUBFUNCPROXY_NUM(183) +STUBFUNCPROXY_NUM(184) STUBFUNCPROXY_NUM(185) +STUBFUNCPROXY_NUM(186) STUBFUNCPROXY_NUM(187) +STUBFUNCPROXY_NUM(188) STUBFUNCPROXY_NUM(189) +STUBFUNCPROXY_NUM(190) STUBFUNCPROXY_NUM(191) +STUBFUNCPROXY_NUM(192) STUBFUNCPROXY_NUM(193) +STUBFUNCPROXY_NUM(194) STUBFUNCPROXY_NUM(195) +STUBFUNCPROXY_NUM(196) STUBFUNCPROXY_NUM(197) +STUBFUNCPROXY_NUM(198) STUBFUNCPROXY_NUM(199) +STUBFUNCPROXY_NUM(200) STUBFUNCPROXY_NUM(201) +STUBFUNCPROXY_NUM(202) STUBFUNCPROXY_NUM(203) +STUBFUNCPROXY_NUM(204) STUBFUNCPROXY_NUM(205) +STUBFUNCPROXY_NUM(206) STUBFUNCPROXY_NUM(207) +STUBFUNCPROXY_NUM(208) STUBFUNCPROXY_NUM(209) +STUBFUNCPROXY_NUM(210) STUBFUNCPROXY_NUM(211) +STUBFUNCPROXY_NUM(212) STUBFUNCPROXY_NUM(213) +STUBFUNCPROXY_NUM(214) STUBFUNCPROXY_NUM(215) +STUBFUNCPROXY_NUM(216) STUBFUNCPROXY_NUM(217) +STUBFUNCPROXY_NUM(218) STUBFUNCPROXY_NUM(219) +STUBFUNCPROXY_NUM(220) STUBFUNCPROXY_NUM(221) +STUBFUNCPROXY_NUM(222) STUBFUNCPROXY_NUM(223) +STUBFUNCPROXY_NUM(224) STUBFUNCPROXY_NUM(225) +STUBFUNCPROXY_NUM(226) STUBFUNCPROXY_NUM(227) +STUBFUNCPROXY_NUM(228) STUBFUNCPROXY_NUM(229) +STUBFUNCPROXY_NUM(230) STUBFUNCPROXY_NUM(231) +STUBFUNCPROXY_NUM(232) STUBFUNCPROXY_NUM(233) +STUBFUNCPROXY_NUM(234) STUBFUNCPROXY_NUM(235) +STUBFUNCPROXY_NUM(236) STUBFUNCPROXY_NUM(237) +STUBFUNCPROXY_NUM(238) STUBFUNCPROXY_NUM(239) +STUBFUNCPROXY_NUM(240) STUBFUNCPROXY_NUM(241) +STUBFUNCPROXY_NUM(242) STUBFUNCPROXY_NUM(243) +STUBFUNCPROXY_NUM(244) 
STUBFUNCPROXY_NUM(245) +STUBFUNCPROXY_NUM(246) STUBFUNCPROXY_NUM(247) +STUBFUNCPROXY_NUM(248) STUBFUNCPROXY_NUM(249) +STUBFUNCPROXY_NUM(250) STUBFUNCPROXY_NUM(251) +STUBFUNCPROXY_NUM(252) STUBFUNCPROXY_NUM(253) +STUBFUNCPROXY_NUM(254) STUBFUNCPROXY_NUM(255) +STUBFUNCPROXY_NUM(256) STUBFUNCPROXY_NUM(257) +STUBFUNCPROXY_NUM(258) STUBFUNCPROXY_NUM(259) +STUBFUNCPROXY_NUM(260) STUBFUNCPROXY_NUM(261) +STUBFUNCPROXY_NUM(262) STUBFUNCPROXY_NUM(263) +STUBFUNCPROXY_NUM(264) STUBFUNCPROXY_NUM(265) +STUBFUNCPROXY_NUM(266) STUBFUNCPROXY_NUM(267) +STUBFUNCPROXY_NUM(268) STUBFUNCPROXY_NUM(269) +STUBFUNCPROXY_NUM(270) STUBFUNCPROXY_NUM(271) +STUBFUNCPROXY_NUM(272) STUBFUNCPROXY_NUM(273) +STUBFUNCPROXY_NUM(274) STUBFUNCPROXY_NUM(275) +STUBFUNCPROXY_NUM(276) STUBFUNCPROXY_NUM(277) +STUBFUNCPROXY_NUM(278) STUBFUNCPROXY_NUM(279) +STUBFUNCPROXY_NUM(280) STUBFUNCPROXY_NUM(281) +STUBFUNCPROXY_NUM(282) STUBFUNCPROXY_NUM(283) +STUBFUNCPROXY_NUM(284) STUBFUNCPROXY_NUM(285) +STUBFUNCPROXY_NUM(286) STUBFUNCPROXY_NUM(287) +STUBFUNCPROXY_NUM(288) STUBFUNCPROXY_NUM(289) +STUBFUNCPROXY_NUM(290) STUBFUNCPROXY_NUM(291) +STUBFUNCPROXY_NUM(292) STUBFUNCPROXY_NUM(293) +STUBFUNCPROXY_NUM(294) STUBFUNCPROXY_NUM(295) +STUBFUNCPROXY_NUM(296) STUBFUNCPROXY_NUM(297) +STUBFUNCPROXY_NUM(298) STUBFUNCPROXY_NUM(299) +STUBFUNCPROXY_NUM(300) STUBFUNCPROXY_NUM(301) +STUBFUNCPROXY_NUM(302) STUBFUNCPROXY_NUM(303) +STUBFUNCPROXY_NUM(304) STUBFUNCPROXY_NUM(305) +STUBFUNCPROXY_NUM(306) STUBFUNCPROXY_NUM(307) +STUBFUNCPROXY_NUM(308) STUBFUNCPROXY_NUM(309) +STUBFUNCPROXY_NUM(310) STUBFUNCPROXY_NUM(311) +STUBFUNCPROXY_NUM(312) STUBFUNCPROXY_NUM(313) +STUBFUNCPROXY_NUM(314) STUBFUNCPROXY_NUM(315) +STUBFUNCPROXY_NUM(316) STUBFUNCPROXY_NUM(317) +STUBFUNCPROXY_NUM(318) STUBFUNCPROXY_NUM(319) +STUBFUNCPROXY_NUM(320) STUBFUNCPROXY_NUM(321) +STUBFUNCPROXY_NUM(322) STUBFUNCPROXY_NUM(323) +STUBFUNCPROXY_NUM(324) STUBFUNCPROXY_NUM(325) +STUBFUNCPROXY_NUM(326) STUBFUNCPROXY_NUM(327) +STUBFUNCPROXY_NUM(328) STUBFUNCPROXY_NUM(329) +STUBFUNCPROXY_NUM(330) STUBFUNCPROXY_NUM(331) +STUBFUNCPROXY_NUM(332) STUBFUNCPROXY_NUM(333) +STUBFUNCPROXY_NUM(334) STUBFUNCPROXY_NUM(335) +STUBFUNCPROXY_NUM(336) STUBFUNCPROXY_NUM(337) +STUBFUNCPROXY_NUM(338) STUBFUNCPROXY_NUM(339) +STUBFUNCPROXY_NUM(340) STUBFUNCPROXY_NUM(341) +STUBFUNCPROXY_NUM(342) STUBFUNCPROXY_NUM(343) +STUBFUNCPROXY_NUM(344) STUBFUNCPROXY_NUM(345) +STUBFUNCPROXY_NUM(346) STUBFUNCPROXY_NUM(347) +STUBFUNCPROXY_NUM(348) STUBFUNCPROXY_NUM(349) +STUBFUNCPROXY_NUM(350) STUBFUNCPROXY_NUM(351) +STUBFUNCPROXY_NUM(352) STUBFUNCPROXY_NUM(353) +STUBFUNCPROXY_NUM(354) STUBFUNCPROXY_NUM(355) +STUBFUNCPROXY_NUM(356) STUBFUNCPROXY_NUM(357) +STUBFUNCPROXY_NUM(358) STUBFUNCPROXY_NUM(359) +STUBFUNCPROXY_NUM(360) STUBFUNCPROXY_NUM(361) +STUBFUNCPROXY_NUM(362) STUBFUNCPROXY_NUM(363) +STUBFUNCPROXY_NUM(364) STUBFUNCPROXY_NUM(365) +STUBFUNCPROXY_NUM(366) STUBFUNCPROXY_NUM(367) +STUBFUNCPROXY_NUM(368) STUBFUNCPROXY_NUM(369) +STUBFUNCPROXY_NUM(370) STUBFUNCPROXY_NUM(371) +STUBFUNCPROXY_NUM(372) STUBFUNCPROXY_NUM(373) +STUBFUNCPROXY_NUM(374) STUBFUNCPROXY_NUM(375) +STUBFUNCPROXY_NUM(376) STUBFUNCPROXY_NUM(377) +STUBFUNCPROXY_NUM(378) STUBFUNCPROXY_NUM(379) +STUBFUNCPROXY_NUM(380) STUBFUNCPROXY_NUM(381) +STUBFUNCPROXY_NUM(382) STUBFUNCPROXY_NUM(383) +STUBFUNCPROXY_NUM(384) STUBFUNCPROXY_NUM(385) +STUBFUNCPROXY_NUM(386) STUBFUNCPROXY_NUM(387) +STUBFUNCPROXY_NUM(388) STUBFUNCPROXY_NUM(389) +STUBFUNCPROXY_NUM(390) STUBFUNCPROXY_NUM(391) +STUBFUNCPROXY_NUM(392) STUBFUNCPROXY_NUM(393) +STUBFUNCPROXY_NUM(394) STUBFUNCPROXY_NUM(395) 
+STUBFUNCPROXY_NUM(396) STUBFUNCPROXY_NUM(397) +STUBFUNCPROXY_NUM(398) STUBFUNCPROXY_NUM(399) +STUBFUNCPROXY_NUM(400) STUBFUNCPROXY_NUM(401) +STUBFUNCPROXY_NUM(402) STUBFUNCPROXY_NUM(403) +STUBFUNCPROXY_NUM(404) STUBFUNCPROXY_NUM(405) +STUBFUNCPROXY_NUM(406) STUBFUNCPROXY_NUM(407) +STUBFUNCPROXY_NUM(408) STUBFUNCPROXY_NUM(409) +STUBFUNCPROXY_NUM(410) STUBFUNCPROXY_NUM(411) +STUBFUNCPROXY_NUM(412) STUBFUNCPROXY_NUM(413) +STUBFUNCPROXY_NUM(414) STUBFUNCPROXY_NUM(415) +STUBFUNCPROXY_NUM(416) STUBFUNCPROXY_NUM(417) +STUBFUNCPROXY_NUM(418) STUBFUNCPROXY_NUM(419) +STUBFUNCPROXY_NUM(420) STUBFUNCPROXY_NUM(421) +STUBFUNCPROXY_NUM(422) STUBFUNCPROXY_NUM(423) +STUBFUNCPROXY_NUM(424) STUBFUNCPROXY_NUM(425) +STUBFUNCPROXY_NUM(426) STUBFUNCPROXY_NUM(427) +STUBFUNCPROXY_NUM(428) STUBFUNCPROXY_NUM(429) +STUBFUNCPROXY_NUM(430) STUBFUNCPROXY_NUM(431) +STUBFUNCPROXY_NUM(432) STUBFUNCPROXY_NUM(433) +STUBFUNCPROXY_NUM(434) STUBFUNCPROXY_NUM(435) +STUBFUNCPROXY_NUM(436) STUBFUNCPROXY_NUM(437) +STUBFUNCPROXY_NUM(438) STUBFUNCPROXY_NUM(439) +STUBFUNCPROXY_NUM(440) STUBFUNCPROXY_NUM(441) +STUBFUNCPROXY_NUM(442) STUBFUNCPROXY_NUM(443) +STUBFUNCPROXY_NUM(444) STUBFUNCPROXY_NUM(445) +STUBFUNCPROXY_NUM(446) STUBFUNCPROXY_NUM(447) +STUBFUNCPROXY_NUM(448) STUBFUNCPROXY_NUM(449) +STUBFUNCPROXY_NUM(450) STUBFUNCPROXY_NUM(451) +STUBFUNCPROXY_NUM(452) STUBFUNCPROXY_NUM(453) +STUBFUNCPROXY_NUM(454) STUBFUNCPROXY_NUM(455) +STUBFUNCPROXY_NUM(456) STUBFUNCPROXY_NUM(457) +STUBFUNCPROXY_NUM(458) STUBFUNCPROXY_NUM(459) +STUBFUNCPROXY_NUM(460) STUBFUNCPROXY_NUM(461) +STUBFUNCPROXY_NUM(462) STUBFUNCPROXY_NUM(463) +STUBFUNCPROXY_NUM(464) STUBFUNCPROXY_NUM(465) +STUBFUNCPROXY_NUM(466) STUBFUNCPROXY_NUM(467) +STUBFUNCPROXY_NUM(468) STUBFUNCPROXY_NUM(469) +STUBFUNCPROXY_NUM(470) STUBFUNCPROXY_NUM(471) +STUBFUNCPROXY_NUM(472) STUBFUNCPROXY_NUM(473) +STUBFUNCPROXY_NUM(474) STUBFUNCPROXY_NUM(475) +STUBFUNCPROXY_NUM(476) STUBFUNCPROXY_NUM(477) +STUBFUNCPROXY_NUM(478) STUBFUNCPROXY_NUM(479) +STUBFUNCPROXY_NUM(480) STUBFUNCPROXY_NUM(481) +STUBFUNCPROXY_NUM(482) STUBFUNCPROXY_NUM(483) +STUBFUNCPROXY_NUM(484) STUBFUNCPROXY_NUM(485) +STUBFUNCPROXY_NUM(486) STUBFUNCPROXY_NUM(487) +STUBFUNCPROXY_NUM(488) STUBFUNCPROXY_NUM(489) +STUBFUNCPROXY_NUM(490) STUBFUNCPROXY_NUM(491) +STUBFUNCPROXY_NUM(492) STUBFUNCPROXY_NUM(493) +STUBFUNCPROXY_NUM(494) STUBFUNCPROXY_NUM(495) +STUBFUNCPROXY_NUM(496) STUBFUNCPROXY_NUM(497) +STUBFUNCPROXY_NUM(498) STUBFUNCPROXY_NUM(499) +STUBFUNCPROXY_NUM(500) STUBFUNCPROXY_NUM(501) +STUBFUNCPROXY_NUM(502) STUBFUNCPROXY_NUM(503) +STUBFUNCPROXY_NUM(504) STUBFUNCPROXY_NUM(505) +STUBFUNCPROXY_NUM(506) STUBFUNCPROXY_NUM(507) +STUBFUNCPROXY_NUM(508) STUBFUNCPROXY_NUM(509) +STUBFUNCPROXY_NUM(510) STUBFUNCPROXY_NUM(511) +STUBFUNCPROXY_NUM(512) STUBFUNCPROXY_NUM(513) +STUBFUNCPROXY_NUM(514) STUBFUNCPROXY_NUM(515) +STUBFUNCPROXY_NUM(516) STUBFUNCPROXY_NUM(517) +STUBFUNCPROXY_NUM(518) STUBFUNCPROXY_NUM(519) +STUBFUNCPROXY_NUM(520) STUBFUNCPROXY_NUM(521) +STUBFUNCPROXY_NUM(522) STUBFUNCPROXY_NUM(523) +STUBFUNCPROXY_NUM(524) STUBFUNCPROXY_NUM(525) +STUBFUNCPROXY_NUM(526) STUBFUNCPROXY_NUM(527) +STUBFUNCPROXY_NUM(528) STUBFUNCPROXY_NUM(529) +STUBFUNCPROXY_NUM(530) STUBFUNCPROXY_NUM(531) +STUBFUNCPROXY_NUM(532) STUBFUNCPROXY_NUM(533) +STUBFUNCPROXY_NUM(534) STUBFUNCPROXY_NUM(535) +STUBFUNCPROXY_NUM(536) STUBFUNCPROXY_NUM(537) +STUBFUNCPROXY_NUM(538) STUBFUNCPROXY_NUM(539) +STUBFUNCPROXY_NUM(540) STUBFUNCPROXY_NUM(541) +STUBFUNCPROXY_NUM(542) STUBFUNCPROXY_NUM(543) +STUBFUNCPROXY_NUM(544) STUBFUNCPROXY_NUM(545) +STUBFUNCPROXY_NUM(546) 
STUBFUNCPROXY_NUM(547) +STUBFUNCPROXY_NUM(548) STUBFUNCPROXY_NUM(549) +STUBFUNCPROXY_NUM(550) STUBFUNCPROXY_NUM(551) +STUBFUNCPROXY_NUM(552) STUBFUNCPROXY_NUM(553) +STUBFUNCPROXY_NUM(554) STUBFUNCPROXY_NUM(555) +STUBFUNCPROXY_NUM(556) STUBFUNCPROXY_NUM(557) +STUBFUNCPROXY_NUM(558) STUBFUNCPROXY_NUM(559) +STUBFUNCPROXY_NUM(560) STUBFUNCPROXY_NUM(561) +STUBFUNCPROXY_NUM(562) STUBFUNCPROXY_NUM(563) +STUBFUNCPROXY_NUM(564) STUBFUNCPROXY_NUM(565) +STUBFUNCPROXY_NUM(566) STUBFUNCPROXY_NUM(567) +STUBFUNCPROXY_NUM(568) STUBFUNCPROXY_NUM(569) +STUBFUNCPROXY_NUM(570) STUBFUNCPROXY_NUM(571) +STUBFUNCPROXY_NUM(572) STUBFUNCPROXY_NUM(573) +STUBFUNCPROXY_NUM(574) STUBFUNCPROXY_NUM(575) +STUBFUNCPROXY_NUM(576) STUBFUNCPROXY_NUM(577) +STUBFUNCPROXY_NUM(578) STUBFUNCPROXY_NUM(579) +STUBFUNCPROXY_NUM(580) STUBFUNCPROXY_NUM(581) +STUBFUNCPROXY_NUM(582) STUBFUNCPROXY_NUM(583) +STUBFUNCPROXY_NUM(584) STUBFUNCPROXY_NUM(585) +STUBFUNCPROXY_NUM(586) STUBFUNCPROXY_NUM(587) +STUBFUNCPROXY_NUM(588) STUBFUNCPROXY_NUM(589) +STUBFUNCPROXY_NUM(590) STUBFUNCPROXY_NUM(591) +STUBFUNCPROXY_NUM(592) STUBFUNCPROXY_NUM(593) +STUBFUNCPROXY_NUM(594) STUBFUNCPROXY_NUM(595) +STUBFUNCPROXY_NUM(596) STUBFUNCPROXY_NUM(597) +STUBFUNCPROXY_NUM(598) STUBFUNCPROXY_NUM(599) + +void (*gstubfunc[kSupportMaxInterfaceMethod])(void) = { + (void (*)(void))sfp_jlong_0, + (void (*)(void))sfp_jlong_1, + (void (*)(void))sfp_jlong_2, + (void (*)(void))sfp_jlong_3, + (void (*)(void))sfp_jlong_4, + (void (*)(void))sfp_jlong_5, + (void (*)(void))sfp_jlong_6, + (void (*)(void))sfp_jlong_7, + (void (*)(void))sfp_jlong_8, + (void (*)(void))sfp_jlong_9, + (void (*)(void))sfp_jlong_10, + (void (*)(void))sfp_jlong_11, + (void (*)(void))sfp_jlong_12, + (void (*)(void))sfp_jlong_13, + (void (*)(void))sfp_jlong_14, + (void (*)(void))sfp_jlong_15, + (void (*)(void))sfp_jlong_16, + (void (*)(void))sfp_jlong_17, + (void (*)(void))sfp_jlong_18, + (void (*)(void))sfp_jlong_19, + (void (*)(void))sfp_jlong_20, + (void (*)(void))sfp_jlong_21, + (void (*)(void))sfp_jlong_22, + (void (*)(void))sfp_jlong_23, + (void (*)(void))sfp_jlong_24, + (void (*)(void))sfp_jlong_25, + (void (*)(void))sfp_jlong_26, + (void (*)(void))sfp_jlong_27, + (void (*)(void))sfp_jlong_28, + (void (*)(void))sfp_jlong_29, + (void (*)(void))sfp_jlong_30, + (void (*)(void))sfp_jlong_31, + (void (*)(void))sfp_jlong_32, + (void (*)(void))sfp_jlong_33, + (void (*)(void))sfp_jlong_34, + (void (*)(void))sfp_jlong_35, + (void (*)(void))sfp_jlong_36, + (void (*)(void))sfp_jlong_37, + (void (*)(void))sfp_jlong_38, + (void (*)(void))sfp_jlong_39, + (void (*)(void))sfp_jlong_40, + (void (*)(void))sfp_jlong_41, + (void (*)(void))sfp_jlong_42, + (void (*)(void))sfp_jlong_43, + (void (*)(void))sfp_jlong_44, + (void (*)(void))sfp_jlong_45, + (void (*)(void))sfp_jlong_46, + (void (*)(void))sfp_jlong_47, + (void (*)(void))sfp_jlong_48, + (void (*)(void))sfp_jlong_49, + (void (*)(void))sfp_jlong_50, + (void (*)(void))sfp_jlong_51, + (void (*)(void))sfp_jlong_52, + (void (*)(void))sfp_jlong_53, + (void (*)(void))sfp_jlong_54, + (void (*)(void))sfp_jlong_55, + (void (*)(void))sfp_jlong_56, + (void (*)(void))sfp_jlong_57, + (void (*)(void))sfp_jlong_58, + (void (*)(void))sfp_jlong_59, + (void (*)(void))sfp_jlong_60, + (void (*)(void))sfp_jlong_61, + (void (*)(void))sfp_jlong_62, + (void (*)(void))sfp_jlong_63, + (void (*)(void))sfp_jlong_64, + (void (*)(void))sfp_jlong_65, + (void (*)(void))sfp_jlong_66, + (void (*)(void))sfp_jlong_67, + (void (*)(void))sfp_jlong_68, + (void (*)(void))sfp_jlong_69, + (void 
(*)(void))sfp_jlong_70, + (void (*)(void))sfp_jlong_71, + (void (*)(void))sfp_jlong_72, + (void (*)(void))sfp_jlong_73, + (void (*)(void))sfp_jlong_74, + (void (*)(void))sfp_jlong_75, + (void (*)(void))sfp_jlong_76, + (void (*)(void))sfp_jlong_77, + (void (*)(void))sfp_jlong_78, + (void (*)(void))sfp_jlong_79, + (void (*)(void))sfp_jlong_80, + (void (*)(void))sfp_jlong_81, + (void (*)(void))sfp_jlong_82, + (void (*)(void))sfp_jlong_83, + (void (*)(void))sfp_jlong_84, + (void (*)(void))sfp_jlong_85, + (void (*)(void))sfp_jlong_86, + (void (*)(void))sfp_jlong_87, + (void (*)(void))sfp_jlong_88, + (void (*)(void))sfp_jlong_89, + (void (*)(void))sfp_jlong_90, + (void (*)(void))sfp_jlong_91, + (void (*)(void))sfp_jlong_92, + (void (*)(void))sfp_jlong_93, + (void (*)(void))sfp_jlong_94, + (void (*)(void))sfp_jlong_95, + (void (*)(void))sfp_jlong_96, + (void (*)(void))sfp_jlong_97, + (void (*)(void))sfp_jlong_98, + (void (*)(void))sfp_jlong_99, + (void (*)(void))sfp_jlong_100, + (void (*)(void))sfp_jlong_101, + (void (*)(void))sfp_jlong_102, + (void (*)(void))sfp_jlong_103, + (void (*)(void))sfp_jlong_104, + (void (*)(void))sfp_jlong_105, + (void (*)(void))sfp_jlong_106, + (void (*)(void))sfp_jlong_107, + (void (*)(void))sfp_jlong_108, + (void (*)(void))sfp_jlong_109, + (void (*)(void))sfp_jlong_110, + (void (*)(void))sfp_jlong_111, + (void (*)(void))sfp_jlong_112, + (void (*)(void))sfp_jlong_113, + (void (*)(void))sfp_jlong_114, + (void (*)(void))sfp_jlong_115, + (void (*)(void))sfp_jlong_116, + (void (*)(void))sfp_jlong_117, + (void (*)(void))sfp_jlong_118, + (void (*)(void))sfp_jlong_119, + (void (*)(void))sfp_jlong_120, + (void (*)(void))sfp_jlong_121, + (void (*)(void))sfp_jlong_122, + (void (*)(void))sfp_jlong_123, + (void (*)(void))sfp_jlong_124, + (void (*)(void))sfp_jlong_125, + (void (*)(void))sfp_jlong_126, + (void (*)(void))sfp_jlong_127, + (void (*)(void))sfp_jlong_128, + (void (*)(void))sfp_jlong_129, + (void (*)(void))sfp_jlong_130, + (void (*)(void))sfp_jlong_131, + (void (*)(void))sfp_jlong_132, + (void (*)(void))sfp_jlong_133, + (void (*)(void))sfp_jlong_134, + (void (*)(void))sfp_jlong_135, + (void (*)(void))sfp_jlong_136, + (void (*)(void))sfp_jlong_137, + (void (*)(void))sfp_jlong_138, + (void (*)(void))sfp_jlong_139, + (void (*)(void))sfp_jlong_140, + (void (*)(void))sfp_jlong_141, + (void (*)(void))sfp_jlong_142, + (void (*)(void))sfp_jlong_143, + (void (*)(void))sfp_jlong_144, + (void (*)(void))sfp_jlong_145, + (void (*)(void))sfp_jlong_146, + (void (*)(void))sfp_jlong_147, + (void (*)(void))sfp_jlong_148, + (void (*)(void))sfp_jlong_149, + (void (*)(void))sfp_jlong_150, + (void (*)(void))sfp_jlong_151, + (void (*)(void))sfp_jlong_152, + (void (*)(void))sfp_jlong_153, + (void (*)(void))sfp_jlong_154, + (void (*)(void))sfp_jlong_155, + (void (*)(void))sfp_jlong_156, + (void (*)(void))sfp_jlong_157, + (void (*)(void))sfp_jlong_158, + (void (*)(void))sfp_jlong_159, + (void (*)(void))sfp_jlong_160, + (void (*)(void))sfp_jlong_161, + (void (*)(void))sfp_jlong_162, + (void (*)(void))sfp_jlong_163, + (void (*)(void))sfp_jlong_164, + (void (*)(void))sfp_jlong_165, + (void (*)(void))sfp_jlong_166, + (void (*)(void))sfp_jlong_167, + (void (*)(void))sfp_jlong_168, + (void (*)(void))sfp_jlong_169, + (void (*)(void))sfp_jlong_170, + (void (*)(void))sfp_jlong_171, + (void (*)(void))sfp_jlong_172, + (void (*)(void))sfp_jlong_173, + (void (*)(void))sfp_jlong_174, + (void (*)(void))sfp_jlong_175, + (void (*)(void))sfp_jlong_176, + (void (*)(void))sfp_jlong_177, + (void 
(*)(void))sfp_jlong_178, + (void (*)(void))sfp_jlong_179, + (void (*)(void))sfp_jlong_180, + (void (*)(void))sfp_jlong_181, + (void (*)(void))sfp_jlong_182, + (void (*)(void))sfp_jlong_183, + (void (*)(void))sfp_jlong_184, + (void (*)(void))sfp_jlong_185, + (void (*)(void))sfp_jlong_186, + (void (*)(void))sfp_jlong_187, + (void (*)(void))sfp_jlong_188, + (void (*)(void))sfp_jlong_189, + (void (*)(void))sfp_jlong_190, + (void (*)(void))sfp_jlong_191, + (void (*)(void))sfp_jlong_192, + (void (*)(void))sfp_jlong_193, + (void (*)(void))sfp_jlong_194, + (void (*)(void))sfp_jlong_195, + (void (*)(void))sfp_jlong_196, + (void (*)(void))sfp_jlong_197, + (void (*)(void))sfp_jlong_198, + (void (*)(void))sfp_jlong_199, + (void (*)(void))sfp_jlong_200, + (void (*)(void))sfp_jlong_201, + (void (*)(void))sfp_jlong_202, + (void (*)(void))sfp_jlong_203, + (void (*)(void))sfp_jlong_204, + (void (*)(void))sfp_jlong_205, + (void (*)(void))sfp_jlong_206, + (void (*)(void))sfp_jlong_207, + (void (*)(void))sfp_jlong_208, + (void (*)(void))sfp_jlong_209, + (void (*)(void))sfp_jlong_210, + (void (*)(void))sfp_jlong_211, + (void (*)(void))sfp_jlong_212, + (void (*)(void))sfp_jlong_213, + (void (*)(void))sfp_jlong_214, + (void (*)(void))sfp_jlong_215, + (void (*)(void))sfp_jlong_216, + (void (*)(void))sfp_jlong_217, + (void (*)(void))sfp_jlong_218, + (void (*)(void))sfp_jlong_219, + (void (*)(void))sfp_jlong_220, + (void (*)(void))sfp_jlong_221, + (void (*)(void))sfp_jlong_222, + (void (*)(void))sfp_jlong_223, + (void (*)(void))sfp_jlong_224, + (void (*)(void))sfp_jlong_225, + (void (*)(void))sfp_jlong_226, + (void (*)(void))sfp_jlong_227, + (void (*)(void))sfp_jlong_228, + (void (*)(void))sfp_jlong_229, + (void (*)(void))sfp_jlong_230, + (void (*)(void))sfp_jlong_231, + (void (*)(void))sfp_jlong_232, + (void (*)(void))sfp_jlong_233, + (void (*)(void))sfp_jlong_234, + (void (*)(void))sfp_jlong_235, + (void (*)(void))sfp_jlong_236, + (void (*)(void))sfp_jlong_237, + (void (*)(void))sfp_jlong_238, + (void (*)(void))sfp_jlong_239, + (void (*)(void))sfp_jlong_240, + (void (*)(void))sfp_jlong_241, + (void (*)(void))sfp_jlong_242, + (void (*)(void))sfp_jlong_243, + (void (*)(void))sfp_jlong_244, + (void (*)(void))sfp_jlong_245, + (void (*)(void))sfp_jlong_246, + (void (*)(void))sfp_jlong_247, + (void (*)(void))sfp_jlong_248, + (void (*)(void))sfp_jlong_249, + (void (*)(void))sfp_jlong_250, + (void (*)(void))sfp_jlong_251, + (void (*)(void))sfp_jlong_252, + (void (*)(void))sfp_jlong_253, + (void (*)(void))sfp_jlong_254, + (void (*)(void))sfp_jlong_255, + (void (*)(void))sfp_jlong_256, + (void (*)(void))sfp_jlong_257, + (void (*)(void))sfp_jlong_258, + (void (*)(void))sfp_jlong_259, + (void (*)(void))sfp_jlong_260, + (void (*)(void))sfp_jlong_261, + (void (*)(void))sfp_jlong_262, + (void (*)(void))sfp_jlong_263, + (void (*)(void))sfp_jlong_264, + (void (*)(void))sfp_jlong_265, + (void (*)(void))sfp_jlong_266, + (void (*)(void))sfp_jlong_267, + (void (*)(void))sfp_jlong_268, + (void (*)(void))sfp_jlong_269, + (void (*)(void))sfp_jlong_270, + (void (*)(void))sfp_jlong_271, + (void (*)(void))sfp_jlong_272, + (void (*)(void))sfp_jlong_273, + (void (*)(void))sfp_jlong_274, + (void (*)(void))sfp_jlong_275, + (void (*)(void))sfp_jlong_276, + (void (*)(void))sfp_jlong_277, + (void (*)(void))sfp_jlong_278, + (void (*)(void))sfp_jlong_279, + (void (*)(void))sfp_jlong_280, + (void (*)(void))sfp_jlong_281, + (void (*)(void))sfp_jlong_282, + (void (*)(void))sfp_jlong_283, + (void (*)(void))sfp_jlong_284, + (void 
(*)(void))sfp_jlong_285, + (void (*)(void))sfp_jlong_286, + (void (*)(void))sfp_jlong_287, + (void (*)(void))sfp_jlong_288, + (void (*)(void))sfp_jlong_289, + (void (*)(void))sfp_jlong_290, + (void (*)(void))sfp_jlong_291, + (void (*)(void))sfp_jlong_292, + (void (*)(void))sfp_jlong_293, + (void (*)(void))sfp_jlong_294, + (void (*)(void))sfp_jlong_295, + (void (*)(void))sfp_jlong_296, + (void (*)(void))sfp_jlong_297, + (void (*)(void))sfp_jlong_298, + (void (*)(void))sfp_jlong_299, + (void (*)(void))sfp_jlong_300, + (void (*)(void))sfp_jlong_301, + (void (*)(void))sfp_jlong_302, + (void (*)(void))sfp_jlong_303, + (void (*)(void))sfp_jlong_304, + (void (*)(void))sfp_jlong_305, + (void (*)(void))sfp_jlong_306, + (void (*)(void))sfp_jlong_307, + (void (*)(void))sfp_jlong_308, + (void (*)(void))sfp_jlong_309, + (void (*)(void))sfp_jlong_310, + (void (*)(void))sfp_jlong_311, + (void (*)(void))sfp_jlong_312, + (void (*)(void))sfp_jlong_313, + (void (*)(void))sfp_jlong_314, + (void (*)(void))sfp_jlong_315, + (void (*)(void))sfp_jlong_316, + (void (*)(void))sfp_jlong_317, + (void (*)(void))sfp_jlong_318, + (void (*)(void))sfp_jlong_319, + (void (*)(void))sfp_jlong_320, + (void (*)(void))sfp_jlong_321, + (void (*)(void))sfp_jlong_322, + (void (*)(void))sfp_jlong_323, + (void (*)(void))sfp_jlong_324, + (void (*)(void))sfp_jlong_325, + (void (*)(void))sfp_jlong_326, + (void (*)(void))sfp_jlong_327, + (void (*)(void))sfp_jlong_328, + (void (*)(void))sfp_jlong_329, + (void (*)(void))sfp_jlong_330, + (void (*)(void))sfp_jlong_331, + (void (*)(void))sfp_jlong_332, + (void (*)(void))sfp_jlong_333, + (void (*)(void))sfp_jlong_334, + (void (*)(void))sfp_jlong_335, + (void (*)(void))sfp_jlong_336, + (void (*)(void))sfp_jlong_337, + (void (*)(void))sfp_jlong_338, + (void (*)(void))sfp_jlong_339, + (void (*)(void))sfp_jlong_340, + (void (*)(void))sfp_jlong_341, + (void (*)(void))sfp_jlong_342, + (void (*)(void))sfp_jlong_343, + (void (*)(void))sfp_jlong_344, + (void (*)(void))sfp_jlong_345, + (void (*)(void))sfp_jlong_346, + (void (*)(void))sfp_jlong_347, + (void (*)(void))sfp_jlong_348, + (void (*)(void))sfp_jlong_349, + (void (*)(void))sfp_jlong_350, + (void (*)(void))sfp_jlong_351, + (void (*)(void))sfp_jlong_352, + (void (*)(void))sfp_jlong_353, + (void (*)(void))sfp_jlong_354, + (void (*)(void))sfp_jlong_355, + (void (*)(void))sfp_jlong_356, + (void (*)(void))sfp_jlong_357, + (void (*)(void))sfp_jlong_358, + (void (*)(void))sfp_jlong_359, + (void (*)(void))sfp_jlong_360, + (void (*)(void))sfp_jlong_361, + (void (*)(void))sfp_jlong_362, + (void (*)(void))sfp_jlong_363, + (void (*)(void))sfp_jlong_364, + (void (*)(void))sfp_jlong_365, + (void (*)(void))sfp_jlong_366, + (void (*)(void))sfp_jlong_367, + (void (*)(void))sfp_jlong_368, + (void (*)(void))sfp_jlong_369, + (void (*)(void))sfp_jlong_370, + (void (*)(void))sfp_jlong_371, + (void (*)(void))sfp_jlong_372, + (void (*)(void))sfp_jlong_373, + (void (*)(void))sfp_jlong_374, + (void (*)(void))sfp_jlong_375, + (void (*)(void))sfp_jlong_376, + (void (*)(void))sfp_jlong_377, + (void (*)(void))sfp_jlong_378, + (void (*)(void))sfp_jlong_379, + (void (*)(void))sfp_jlong_380, + (void (*)(void))sfp_jlong_381, + (void (*)(void))sfp_jlong_382, + (void (*)(void))sfp_jlong_383, + (void (*)(void))sfp_jlong_384, + (void (*)(void))sfp_jlong_385, + (void (*)(void))sfp_jlong_386, + (void (*)(void))sfp_jlong_387, + (void (*)(void))sfp_jlong_388, + (void (*)(void))sfp_jlong_389, + (void (*)(void))sfp_jlong_390, + (void (*)(void))sfp_jlong_391, + (void 
(*)(void))sfp_jlong_392, + (void (*)(void))sfp_jlong_393, + (void (*)(void))sfp_jlong_394, + (void (*)(void))sfp_jlong_395, + (void (*)(void))sfp_jlong_396, + (void (*)(void))sfp_jlong_397, + (void (*)(void))sfp_jlong_398, + (void (*)(void))sfp_jlong_399, + (void (*)(void))sfp_jlong_400, + (void (*)(void))sfp_jlong_401, + (void (*)(void))sfp_jlong_402, + (void (*)(void))sfp_jlong_403, + (void (*)(void))sfp_jlong_404, + (void (*)(void))sfp_jlong_405, + (void (*)(void))sfp_jlong_406, + (void (*)(void))sfp_jlong_407, + (void (*)(void))sfp_jlong_408, + (void (*)(void))sfp_jlong_409, + (void (*)(void))sfp_jlong_410, + (void (*)(void))sfp_jlong_411, + (void (*)(void))sfp_jlong_412, + (void (*)(void))sfp_jlong_413, + (void (*)(void))sfp_jlong_414, + (void (*)(void))sfp_jlong_415, + (void (*)(void))sfp_jlong_416, + (void (*)(void))sfp_jlong_417, + (void (*)(void))sfp_jlong_418, + (void (*)(void))sfp_jlong_419, + (void (*)(void))sfp_jlong_420, + (void (*)(void))sfp_jlong_421, + (void (*)(void))sfp_jlong_422, + (void (*)(void))sfp_jlong_423, + (void (*)(void))sfp_jlong_424, + (void (*)(void))sfp_jlong_425, + (void (*)(void))sfp_jlong_426, + (void (*)(void))sfp_jlong_427, + (void (*)(void))sfp_jlong_428, + (void (*)(void))sfp_jlong_429, + (void (*)(void))sfp_jlong_430, + (void (*)(void))sfp_jlong_431, + (void (*)(void))sfp_jlong_432, + (void (*)(void))sfp_jlong_433, + (void (*)(void))sfp_jlong_434, + (void (*)(void))sfp_jlong_435, + (void (*)(void))sfp_jlong_436, + (void (*)(void))sfp_jlong_437, + (void (*)(void))sfp_jlong_438, + (void (*)(void))sfp_jlong_439, + (void (*)(void))sfp_jlong_440, + (void (*)(void))sfp_jlong_441, + (void (*)(void))sfp_jlong_442, + (void (*)(void))sfp_jlong_443, + (void (*)(void))sfp_jlong_444, + (void (*)(void))sfp_jlong_445, + (void (*)(void))sfp_jlong_446, + (void (*)(void))sfp_jlong_447, + (void (*)(void))sfp_jlong_448, + (void (*)(void))sfp_jlong_449, + (void (*)(void))sfp_jlong_450, + (void (*)(void))sfp_jlong_451, + (void (*)(void))sfp_jlong_452, + (void (*)(void))sfp_jlong_453, + (void (*)(void))sfp_jlong_454, + (void (*)(void))sfp_jlong_455, + (void (*)(void))sfp_jlong_456, + (void (*)(void))sfp_jlong_457, + (void (*)(void))sfp_jlong_458, + (void (*)(void))sfp_jlong_459, + (void (*)(void))sfp_jlong_460, + (void (*)(void))sfp_jlong_461, + (void (*)(void))sfp_jlong_462, + (void (*)(void))sfp_jlong_463, + (void (*)(void))sfp_jlong_464, + (void (*)(void))sfp_jlong_465, + (void (*)(void))sfp_jlong_466, + (void (*)(void))sfp_jlong_467, + (void (*)(void))sfp_jlong_468, + (void (*)(void))sfp_jlong_469, + (void (*)(void))sfp_jlong_470, + (void (*)(void))sfp_jlong_471, + (void (*)(void))sfp_jlong_472, + (void (*)(void))sfp_jlong_473, + (void (*)(void))sfp_jlong_474, + (void (*)(void))sfp_jlong_475, + (void (*)(void))sfp_jlong_476, + (void (*)(void))sfp_jlong_477, + (void (*)(void))sfp_jlong_478, + (void (*)(void))sfp_jlong_479, + (void (*)(void))sfp_jlong_480, + (void (*)(void))sfp_jlong_481, + (void (*)(void))sfp_jlong_482, + (void (*)(void))sfp_jlong_483, + (void (*)(void))sfp_jlong_484, + (void (*)(void))sfp_jlong_485, + (void (*)(void))sfp_jlong_486, + (void (*)(void))sfp_jlong_487, + (void (*)(void))sfp_jlong_488, + (void (*)(void))sfp_jlong_489, + (void (*)(void))sfp_jlong_490, + (void (*)(void))sfp_jlong_491, + (void (*)(void))sfp_jlong_492, + (void (*)(void))sfp_jlong_493, + (void (*)(void))sfp_jlong_494, + (void (*)(void))sfp_jlong_495, + (void (*)(void))sfp_jlong_496, + (void (*)(void))sfp_jlong_497, + (void (*)(void))sfp_jlong_498, + (void 
(*)(void))sfp_jlong_499, + (void (*)(void))sfp_jlong_500, + (void (*)(void))sfp_jlong_501, + (void (*)(void))sfp_jlong_502, + (void (*)(void))sfp_jlong_503, + (void (*)(void))sfp_jlong_504, + (void (*)(void))sfp_jlong_505, + (void (*)(void))sfp_jlong_506, + (void (*)(void))sfp_jlong_507, + (void (*)(void))sfp_jlong_508, + (void (*)(void))sfp_jlong_509, + (void (*)(void))sfp_jlong_510, + (void (*)(void))sfp_jlong_511, + (void (*)(void))sfp_jlong_512, + (void (*)(void))sfp_jlong_513, + (void (*)(void))sfp_jlong_514, + (void (*)(void))sfp_jlong_515, + (void (*)(void))sfp_jlong_516, + (void (*)(void))sfp_jlong_517, + (void (*)(void))sfp_jlong_518, + (void (*)(void))sfp_jlong_519, + (void (*)(void))sfp_jlong_520, + (void (*)(void))sfp_jlong_521, + (void (*)(void))sfp_jlong_522, + (void (*)(void))sfp_jlong_523, + (void (*)(void))sfp_jlong_524, + (void (*)(void))sfp_jlong_525, + (void (*)(void))sfp_jlong_526, + (void (*)(void))sfp_jlong_527, + (void (*)(void))sfp_jlong_528, + (void (*)(void))sfp_jlong_529, + (void (*)(void))sfp_jlong_530, + (void (*)(void))sfp_jlong_531, + (void (*)(void))sfp_jlong_532, + (void (*)(void))sfp_jlong_533, + (void (*)(void))sfp_jlong_534, + (void (*)(void))sfp_jlong_535, + (void (*)(void))sfp_jlong_536, + (void (*)(void))sfp_jlong_537, + (void (*)(void))sfp_jlong_538, + (void (*)(void))sfp_jlong_539, + (void (*)(void))sfp_jlong_540, + (void (*)(void))sfp_jlong_541, + (void (*)(void))sfp_jlong_542, + (void (*)(void))sfp_jlong_543, + (void (*)(void))sfp_jlong_544, + (void (*)(void))sfp_jlong_545, + (void (*)(void))sfp_jlong_546, + (void (*)(void))sfp_jlong_547, + (void (*)(void))sfp_jlong_548, + (void (*)(void))sfp_jlong_549, + (void (*)(void))sfp_jlong_550, + (void (*)(void))sfp_jlong_551, + (void (*)(void))sfp_jlong_552, + (void (*)(void))sfp_jlong_553, + (void (*)(void))sfp_jlong_554, + (void (*)(void))sfp_jlong_555, + (void (*)(void))sfp_jlong_556, + (void (*)(void))sfp_jlong_557, + (void (*)(void))sfp_jlong_558, + (void (*)(void))sfp_jlong_559, + (void (*)(void))sfp_jlong_560, + (void (*)(void))sfp_jlong_561, + (void (*)(void))sfp_jlong_562, + (void (*)(void))sfp_jlong_563, + (void (*)(void))sfp_jlong_564, + (void (*)(void))sfp_jlong_565, + (void (*)(void))sfp_jlong_566, + (void (*)(void))sfp_jlong_567, + (void (*)(void))sfp_jlong_568, + (void (*)(void))sfp_jlong_569, + (void (*)(void))sfp_jlong_570, + (void (*)(void))sfp_jlong_571, + (void (*)(void))sfp_jlong_572, + (void (*)(void))sfp_jlong_573, + (void (*)(void))sfp_jlong_574, + (void (*)(void))sfp_jlong_575, + (void (*)(void))sfp_jlong_576, + (void (*)(void))sfp_jlong_577, + (void (*)(void))sfp_jlong_578, + (void (*)(void))sfp_jlong_579, + (void (*)(void))sfp_jlong_580, + (void (*)(void))sfp_jlong_581, + (void (*)(void))sfp_jlong_582, + (void (*)(void))sfp_jlong_583, + (void (*)(void))sfp_jlong_584, + (void (*)(void))sfp_jlong_585, + (void (*)(void))sfp_jlong_586, + (void (*)(void))sfp_jlong_587, + (void (*)(void))sfp_jlong_588, + (void (*)(void))sfp_jlong_589, + (void (*)(void))sfp_jlong_590, + (void (*)(void))sfp_jlong_591, + (void (*)(void))sfp_jlong_592, + (void (*)(void))sfp_jlong_593, + (void (*)(void))sfp_jlong_594, + (void (*)(void))sfp_jlong_595, + (void (*)(void))sfp_jlong_596, + (void (*)(void))sfp_jlong_597, + (void (*)(void))sfp_jlong_598, + (void (*)(void))sfp_jlong_599 +}; +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_string.cpp b/src/mrt/maplert/src/mrt_string.cpp new file mode 100644 index 0000000000..db6fe9ae40 --- /dev/null +++ b/src/mrt/maplert/src/mrt_string.cpp @@ -0,0 
+1,1646 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "mrt_string.h" +#include +#include "jni.h" +#include "allocator/page_allocator.h" +#include "mstring_inline.h" +#include "base/low_mem_set.h" +#include "libs.h" +#include "exception/mrt_exception.h" +#include "literalstrname.h" +using namespace std; + +namespace maplert { +enum ZeroCodeT { + kZeroNotcode = 0, + kZeroDocode +}; + +int GetMUtf8ByteCount(const uint16_t *utf16Raw, size_t charCount, ZeroCodeT zCode, int charSet); +void MUtf8Encode(uint8_t *utf8Res, const uint16_t *utf16Raw, size_t charCount, ZeroCodeT zCode, int charSet); +void MUtf8Decode(const uint8_t *utf8, size_t utf8Len, uint16_t *utf16Res); +uint32_t CountInMutfLen(const char *utf8, size_t utf8Len); +extern "C" char *MRT_GetStringContentsPtrRaw(jstring jstr) { + MString *stringObj = MString::JniCastNonNull(jstr); + return reinterpret_cast(stringObj->GetContentsPtr()); +} + +extern "C" jchar *MRT_GetStringContentsPtrCopy(jstring jstr, jboolean *isCopy) { + MString *stringObj = MString::JniCastNonNull(jstr); + if (stringObj->IsCompress()) { + uint32_t len = stringObj->GetLength(); + jchar *chars = reinterpret_cast(malloc(len * sizeof(jchar) + 1)); + if (UNLIKELY(chars == nullptr)) { + return nullptr; + } + const uint8_t *src = stringObj->GetContentsPtr(); + jchar *pos = chars; + jchar *end = pos + len; + while (pos < end) { + *pos++ = *src++; + } + if (isCopy != nullptr) { + *isCopy = JNI_TRUE; + } + return chars; + } else { + if (isCopy != nullptr) { + *isCopy = JNI_FALSE; + } + return reinterpret_cast(stringObj->GetContentsPtr()); + } +} + +extern "C" jstring MRT_NewHeapJStr(const jchar *ca, jint len, bool isJNI) { + MString *res = MString::NewStringObject(ca, static_cast(len), isJNI); + if (res == nullptr) { + return nullptr; + } + return res->AsJstring(); +} + +extern "C" jstring MCC_CStrToJStr(const char *ca, jint len) { + MString *res = MString::NewStringObject(reinterpret_cast(ca), static_cast(len)); + return reinterpret_cast(res); +} + +extern "C" jstring CStrToJStr(const char *ca, jint len) +__attribute__((alias("MCC_CStrToJStr"))); + +MString *StringNewStringFromString(const MString &stringObj) { + uint32_t length = stringObj.GetLength(); + uint8_t *contents = stringObj.GetContentsPtr(); + MString *res = nullptr; + if (stringObj.IsCompress()) { + res = MString::NewStringObject(contents, length); + } else { + res = MString::NewStringObject(reinterpret_cast(contents), length); + } + return res; +} + +MString *StringNewStringFromSubstring(const MString &stringObj, int32_t start, uint32_t length) { + MString *res = nullptr; + const uint8_t *src = stringObj.GetContentsPtr(); + if (stringObj.IsCompress()) { + res = MString::NewStringObject(src + start, length); + } else { + const uint16_t *srcStart = reinterpret_cast(src) + start; + bool isCompress = false; + if (stringObj.GetCountValue() < static_cast(start)) { + isCompress = false; + } else { + isCompress = MString::IsCompressChars(srcStart, static_cast(length)); + } + res = 
isCompress ? MString::NewStringObject(srcStart, length) : + MString::NewStringObject(srcStart, length); + } + return res; +} + +MString *StringNewStringFromCharArray(int32_t offset, uint32_t charCount, const MArray &arrayObj) { + uint16_t *arrayData = reinterpret_cast(arrayObj.ConvertToCArray()); + uint16_t *content = arrayData + offset; + const bool compressible = MString::IsCompressChars(content, charCount); + MString *res = compressible ? MString::NewStringObject(content, charCount) : + MString::NewStringObject(content, charCount); + return res; +} + +MString *StringNewStringFromByteArray(const MArray &arrayObj, int32_t highByteT, int32_t offset, uint32_t byteLength) { + uint8_t *arrayData = reinterpret_cast(arrayObj.ConvertToCArray()); + uint8_t *content = arrayData + offset; + const bool compressible = (highByteT == 0) && MString::IsCompressChars(content, byteLength); + MString *res = nullptr; + if (compressible) { + res = MString::NewStringObject(content, byteLength); + } else { + res = MString::NewStringObject(byteLength, [&](MString &stringObj) { + uint32_t highByte = (static_cast(highByteT)) << 8; // shift one byte to obtain highbyte + uint16_t *resDst = reinterpret_cast(stringObj.GetContentsPtr()); + for (uint32_t i = 0; i < byteLength; ++i) { + resDst[i] = highByte | content[i]; + } + }); + } + return res; +} + +template +static inline void MStringDoReplace(const MString &srcStrObj, const MString &dstStrObj, uint16_t oldChar, + uint16_t newChar, uint32_t len) { + srcType *src = reinterpret_cast(srcStrObj.GetContentsPtr()); + dstType *dst = reinterpret_cast(dstStrObj.GetContentsPtr()); + for (uint32_t i = 0; i < len; ++i) { + srcType c = src[i]; + dst[i] = (c == oldChar) ? newChar : c; + } +} + +MString *JStringDoReplace(const MString &stringObj, uint16_t oldChar, uint16_t newChar) { + bool srcIsCompressible = stringObj.IsCompress(); + uint32_t srcLength = stringObj.GetLength(); + uint8_t *srcValue = stringObj.GetContentsPtr(); + bool compressible = MString::GetCompressFlag() && MString::IsCompressChar(newChar) && (srcIsCompressible || + (!MString::IsCompressChar(oldChar) && + MString::IsCompressCharsExcept(reinterpret_cast(srcValue), srcLength, oldChar))); + MString *res = nullptr; + if (compressible) { + res = MString::NewStringObject(srcLength, [&](MString &newStringObj) { + if (srcIsCompressible) { + MStringDoReplace(stringObj, newStringObj, oldChar, newChar, srcLength); + } else { + MStringDoReplace(stringObj, newStringObj, oldChar, newChar, srcLength); + } + }); + } else { + res = MString::NewStringObject(srcLength, [&](MString &newStringObj) { + if (srcIsCompressible) { + MStringDoReplace(stringObj, newStringObj, oldChar, newChar, srcLength); + } else { + MStringDoReplace(stringObj, newStringObj, oldChar, newChar, srcLength); + } + }); + } + return res; +} + +static void GetCompressedNewValue(const bool stringCompress, const MString &stringObj, uint32_t stringLen, + uint32_t string1Len, uint16_t *newValue) { + if (stringCompress) { + uint8_t *value = stringObj.GetContentsPtr(); + for (uint32_t i = 0; i < stringLen; ++i) { + newValue[i + string1Len] = value[i]; + } + } else { + const int doubleSize = 2; + uint16_t *value = reinterpret_cast(stringObj.GetContentsPtr()); + uint32_t cpyLen = 0; + if (stringLen <= UINT32_MAX / doubleSize) { + cpyLen = stringLen * doubleSize; + } else { + LOG(FATAL) << "stringLen * doubleSize > UINT32_MAX" << maple::endl; + } + if (memcpy_s(newValue + string1Len, cpyLen, value, cpyLen) != EOK) { + LOG(ERROR) << "memcpy_s() not return 0 in 
GetCompressedNewValue()" << maple::endl; + } + } +} + +// This Function is to concat two String, the String maybe compress or uncompress, when it's compress, +// the String is saved as uint8_t and when it's uncompress, it's saved double the storage as uint16_t +MString *StringConcat(MString &stringObj1, MString &stringObj2) { + uint32_t string1Count = stringObj1.GetCountValue(); + uint32_t string2Count = stringObj2.GetCountValue(); + uint32_t string1Len = string1Count >> 1; + uint32_t string2Len = string2Count >> 1; + if (string1Len == 0) { + RC_LOCAL_INC_REF(&stringObj2); + return &stringObj2; + } + if (string2Len == 0) { + RC_LOCAL_INC_REF(&stringObj1); + return &stringObj1; + } + + const bool string1Compress = MString::IsCompressedFromCount(string1Count); + const bool string2Compress = MString::IsCompressedFromCount(string2Count); + const bool compressible = MString::GetCompressFlag() && string1Compress && string2Compress; + uint32_t len = string1Len + string2Len; + MString *res = nullptr; + if (compressible) { + res = MString::NewStringObject(len, [&](MString &newStringObj) { + uint8_t *newValue = newStringObj.GetContentsPtr(); + uint8_t *value1 = stringObj1.GetContentsPtr(); + uint8_t *value2 = stringObj2.GetContentsPtr(); + errno_t returnValueOfMemcpyS1 = memcpy_s(newValue, string1Len, value1, string1Len); + errno_t returnValueOfMemcpyS2 = memcpy_s(newValue + string1Len, string2Len, value2, string2Len); + if (returnValueOfMemcpyS1 != EOK || returnValueOfMemcpyS2 != EOK) { + LOG(ERROR) << "memcpy_s() not return 0 in StringConcat()" << maple::endl; + } + }); + } else { + res = MString::NewStringObject(len, [&](MString &newStringObj) { + uint16_t *newValue = reinterpret_cast(newStringObj.GetContentsPtr()); + GetCompressedNewValue(string1Compress, stringObj1, string1Len, 0, newValue); + GetCompressedNewValue(string2Compress, stringObj2, string2Len, string1Len, newValue); + }); + } + return res; +} + +template +static inline int32_t FastIndexOf(const MemoryType &chars, int32_t ch, int32_t start, int32_t length) { + const MemoryType *p = &chars + start; + const MemoryType *end = &chars + length; + while (p < end) { + if (*p++ == ch) { + return static_cast((p - 1) - &chars); + } + } + return -1; +} + +int32_t StringFastIndexOf(const MString &stringObj, int32_t ch, int32_t start) { + uint32_t count = stringObj.GetCountValue(); + int32_t len = static_cast(count >> 1); + if (start < 0) { + start = 0; + } else if (start > len) { + start = len; + } + + if (MString::IsCompressedFromCount(count)) { + uint8_t *value = stringObj.GetContentsPtr(); + return FastIndexOf(*value, ch, start, len); + } else { + uint16_t *value = reinterpret_cast(stringObj.GetContentsPtr()); + return FastIndexOf(*value, ch, start, len); + } +} + +// Create and return Java string from c string (in UTF-8) +MString *NewStringUTF(const char *kCStr, size_t cStrLen) { + MString *res = MString::NewStringObject( + reinterpret_cast(kCStr), static_cast(cStrLen)); + return res; +} + +// Create and return Java string from UTF-16/UTF-8 +MString *NewStringFromUTF16(const std::string &str) { + return MString::NewStringObjectFromUtf16(str); +} + +extern "C" void MRT_ReleaseStringUTFChars(jstring jstr __attribute__((unused)), const char *chars) { + free(const_cast(chars)); + return; +} + +extern "C" void MRT_ReleaseStringChars(jstring jstr, const jchar *chars) { + MString *stringObj = MString::JniCastNonNull(jstr); + jchar *contents = reinterpret_cast(stringObj->GetContentsPtr()); + if (stringObj->IsCompress() || chars != contents) { + 
free(const_cast(chars)); + } +} + +extern "C" jint MRT_StringGetStringLength(jstring jstr) { + MString *stringObj = MString::JniCastNonNull(jstr); + return stringObj->GetLength(); +} + +size_t MRT_GetStringObjectSize(jstring jstr) { + MString *stringObj = MString::JniCast(jstr); + return stringObj == nullptr ? MString::GetStringBaseSize() : stringObj->GetStringSize(); +} + +MArray *MStringToBytes(const MString &stringObj, int32_t offset, int32_t length, uint16_t maxValidChar, int compress) { + char *src = reinterpret_cast(stringObj.GetContentsPtr()); + uint32_t srcContentSize = stringObj.GetLength(); + if (UNLIKELY(offset < 0 || length < 0 || static_cast(offset + length) > srcContentSize)) { + LOG(FATAL) << "the index to access stringObj is illegal" << maple::endl; + return nullptr; + } + MArray *dst = MArray::NewPrimitiveArray(static_cast(length), + *WellKnown::GetPrimitiveArrayClass(maple::Primitive::kByte)); + int8_t *buf = reinterpret_cast(dst->ConvertToCArray()); + int8_t *end = buf + length; + uint16_t tmp = 0; + if (compress != 0) { + src = src + offset; + while (buf < end) { + tmp = *src++; + tmp = (tmp > maxValidChar) ? '?' : tmp; + *buf++ = static_cast(tmp); + } + } else { + src = src + static_cast(offset) * 2; // not compress, offset should be twice + while (buf < end) { + tmp = *(reinterpret_cast(src)); + src += 2; // uint16_t is u16, when read char*, char ptr need to move 2 at a time + tmp = (tmp > maxValidChar) ? '?' : tmp; + *buf++ = static_cast(tmp); + } + } + return dst; +} + +// MUTF-8 format string start +MArray *MStringToMutf8(const MString &stringObj, int32_t offset, int32_t length, int compress) { + char *src = reinterpret_cast(stringObj.GetContentsPtr()); + MArray *dst = nullptr; + uint32_t srcContentSize = stringObj.GetLength(); + if (UNLIKELY(offset < 0 || length < 0 || static_cast(offset + length) > srcContentSize)) { + LOG(FATAL) << "the index to access stringObj is illegal" << maple::endl; + return nullptr; + } + if (compress != 0) { + src += offset; + dst = MArray::NewPrimitiveArray(static_cast(length), + *WellKnown::GetPrimitiveArrayClass(maple::Primitive::kByte)); + int8_t *buf = reinterpret_cast(dst->ConvertToCArray()); + for (int i = 0; i < length; ++i) { + buf[i] = src[i]; + } + } else { + uint16_t *pos = reinterpret_cast(src) + offset; + int count = GetMUtf8ByteCount(pos, length, kZeroNotcode, 1); + dst = MArray::NewPrimitiveArray(static_cast(count), + *WellKnown::GetPrimitiveArrayClass(maple::Primitive::kByte)); + uint8_t *buf = reinterpret_cast(dst->ConvertToCArray()); + MUtf8Encode(buf, pos, static_cast(length), kZeroNotcode, 1); + } + return dst; +} + +extern "C" jstring MRT_NewStringMUTF(const char *inMutf, size_t inMutfLen, bool isJNI) { + MString *res = nullptr; + uint32_t length = CountInMutfLen(inMutf, inMutfLen); + const bool kCompressible = MString::GetCompressFlag() && (length > 0) && (inMutfLen == length); + if (kCompressible) { + res = MString::NewStringObject(reinterpret_cast(inMutf), length, isJNI); + if (res == nullptr) { + return nullptr; + } + } else { + res = MString::NewStringObject(length, [&](MString &newStringObj) { + uint16_t *outUnicode = reinterpret_cast(newStringObj.GetContentsPtr()); + MUtf8Decode(reinterpret_cast(inMutf), inMutfLen, outUnicode); + }, isJNI); + } + return res->AsJstring(); +} + +extern "C" jsize MRT_GetStringMUTFLength(jstring jstr) { + MString *stringObj = MString::JniCastNonNull(jstr); + uint32_t length = stringObj->GetLength(); + uint16_t *utf16Raw = nullptr; + jint res; + if (stringObj->IsCompress()) { + 
res = static_cast(length); + } else { + utf16Raw = reinterpret_cast(stringObj->GetContentsPtr()); + res = GetMUtf8ByteCount(utf16Raw, length, kZeroDocode, 0); + } + return res; +} + +extern "C" char *MRT_GetStringMUTFChars(jstring jstr, jboolean *isCopy) { + MString *stringObj = MString::JniCastNonNull(jstr); + if (isCopy != nullptr) { + *isCopy = JNI_TRUE; + } + uint32_t count = stringObj->GetCountValue(); + char *res = nullptr; + uint32_t len = count >> 1; + if (MString::IsCompressedFromCount(count)) { + res = reinterpret_cast(malloc((len + 1) * sizeof(char))); + if (UNLIKELY(res == nullptr)) { + if (isCopy != nullptr) { + *isCopy = JNI_FALSE; + } + return nullptr; + } + res[len] = 0; + const char *data = reinterpret_cast(stringObj->GetContentsPtr()); + for (uint32_t i = 0; i < len; ++i) { + res[i] = data[i]; + } + } else { + uint16_t *utf16Raw = reinterpret_cast(stringObj->GetContentsPtr()); + uint32_t length = static_cast(GetMUtf8ByteCount(utf16Raw, len, kZeroDocode, 0)); + uint8_t *mutfStr = nullptr; + mutfStr = reinterpret_cast(malloc((length + 1) * sizeof(uint8_t))); + if (UNLIKELY(mutfStr == nullptr)) { + if (isCopy != nullptr) { + *isCopy = JNI_FALSE; + } + return nullptr; + } + mutfStr[length] = 0; + if (length == 0) { + return reinterpret_cast(mutfStr); + } + MUtf8Encode(mutfStr, utf16Raw, len, kZeroDocode, 0); + res = reinterpret_cast(mutfStr); + } + return res; +} + +extern "C" void MRT_GetStringMUTFRegion(jstring jstr, jsize start, jsize length, char *buf) { + MString *stringObj = MString::JniCastNonNull(jstr); + if (UNLIKELY(buf == nullptr)) { + return; + } + if (stringObj->IsCompress()) { + const char *data = reinterpret_cast(stringObj->GetContentsPtr()); + data = data + start; + for (jint i = 0; i < length; ++i) { + buf[i] = data[i]; + } + } else { + char *src = reinterpret_cast(stringObj->GetContentsPtr()); + uint16_t *utf16Raw = nullptr; + utf16Raw = reinterpret_cast(src) + start; + uint8_t *mutfStr = reinterpret_cast(buf); + if (length == 0) { + return; + } + MUtf8Encode(mutfStr, utf16Raw, length, kZeroDocode, 0); + } +} // MUTF-8 format string end + +// literals in MFile (with only jstring_payload) cannot be inserted into MapleStringPool directly, +// instead, a new string in PERM-space will be created. +// thus, all jstring_payload_p points to the middle of a valid java string object. 
the begin of +// the MString object can be obtained by a negtive offset from jstring_payload_p +const int kZygotePoolBucketNum = 997; // prime number +const int kAppPoolBucketNum = 113; // prime number +const int kPoolNum = 47; // prime number +const int kClassNameNum = 1024; +static maple::SpinLock mtx[kPoolNum]; +using MapleStringPool = maple::LowMemSet; +static MapleStringPool zygoteStringPool[kPoolNum]; +static MapleStringPool *appStringAppPool = nullptr; + +enum SweepState : uint8_t { + kClean, + kReadyToSweep +}; + +using MUnorderedMap = std::unordered_map, + std::equal_to, StdContainerAllocator, kClassNameStringPool>>; +static MUnorderedMap classnameMap(kClassNameNum); +static maple::SpinLock mtxForClassname; +uint8_t sweepingStateForClassName; +static size_t deadClassNameNum = 0; + +void RemoveDeadClassNameFromPoolLocked(MUnorderedMap &pool, uint8_t &state) { + if (LIKELY(state != static_cast(kReadyToSweep))) { + if (UNLIKELY(state != static_cast(kClean))) { + LOG(FATAL) << "[StringPool] Concurrent Sweeping ilegal state" << maple::endl; + } + return; + } + + size_t curDead = 0; + for (auto it = pool.begin(); it != pool.end();) { + address_t addr = (it->second).GetRef(); + if (IS_HEAP_OBJ(addr) && MRT_IsGarbage(addr)) { + it = pool.erase(it); + ++curDead; + } else { + ++it; + } + } + deadClassNameNum += curDead; + state = static_cast(kClean); +} + +MString *NewStringUtfFromPoolForClassName(const MClass &classObj) { + char *cStr = classObj.GetName(); + if (UNLIKELY(cStr == nullptr)) { + return nullptr; + } + if (!MRT_EnterSaferegion()) { + __MRT_ASSERT(false, "calling NewStringUtfFromPoolForClassName from saferegion"); + } + mtxForClassname.Lock(); + (void)MRT_LeaveSaferegion(); + RemoveDeadClassNameFromPoolLocked(classnameMap, sweepingStateForClassName); + auto it = classnameMap.find(cStr); + MStringRef newStrObj; + if (it == classnameMap.end()) { + mtxForClassname.Unlock(); + string binaryName; + classObj.GetBinaryName(binaryName); + newStrObj.SetRef(NewStringFromUTF16(binaryName)); + ScopedHandles sHandles; + ObjHandle stringInst(newStrObj.GetRef()); + if (!MRT_EnterSaferegion()) { + __MRT_ASSERT(false, "calling NewStringUtfFromPoolForClassName from saferegion"); + } + mtxForClassname.Lock(); + (void)MRT_LeaveSaferegion(); + it = classnameMap.find(cStr); + } + + if (it == classnameMap.end()) { + RC_LOCAL_INC_REF(reinterpret_cast(newStrObj.GetRef())); + classnameMap[cStr] = newStrObj; + mtxForClassname.Unlock(); + return reinterpret_cast(newStrObj.GetRef()); + } else { + // When we reuse a string from pool, the string may have been dead in java world, + // we should set it as renewed to make concurrent marking happy. 
+ if (reinterpret_cast(newStrObj.GetRef()) != nullptr) { + RC_LOCAL_DEC_REF(reinterpret_cast(newStrObj.GetRef())); + } + MRT_PreRenewObject((it->second).GetRef()); + RC_LOCAL_INC_REF(reinterpret_cast((it->second).GetRef())); + mtxForClassname.Unlock(); + return reinterpret_cast((it->second).GetRef()); + } +} + +void CreateAppStringPool() { + if (appStringAppPool == nullptr) { + appStringAppPool = new (std::nothrow) MapleStringPool[kPoolNum]; + __MRT_ASSERT(appStringAppPool != nullptr, "fail to allocate app const string pool!"); + for (int i = 0; i < kPoolNum; ++i) { + appStringAppPool[i].Reserve(kAppPoolBucketNum); + } + } +} + +int InitializeMapleStringPool() { + for (int i = 0; i < kPoolNum; ++i) { + zygoteStringPool[i].Reserve(kZygotePoolBucketNum); + } + sweepingStateForClassName = static_cast(kClean); + return 0; +} + +static int initialize = InitializeMapleStringPool(); + +void StringPrepareConcurrentSweeping() { + sweepingStateForClassName = static_cast(kReadyToSweep); + deadClassNameNum = 0; +} + +size_t ConstStringPoolSize(bool literal) { + size_t cspSize = 0; + auto visitor = [literal, &cspSize](const MStringRef strRef) { + MString *jstr = reinterpret_cast(strRef.GetRef()); + if (literal == jstr->IsLiteral()) { + cspSize += jstr->GetStringSize(); + } + }; + for (int i = 0; i < kPoolNum; ++i) { + maple::SpinAutoLock guard(mtx[i]); + zygoteStringPool[i].ForEach(visitor); + if (appStringAppPool != nullptr) { + appStringAppPool[i].ForEach(visitor); + } + } + return cspSize; +} + +size_t ConstStringPoolNum(bool literal) { + size_t cspNum = 0; + auto visitor = [literal, &cspNum](const MStringRef strRef) { + MString *s = reinterpret_cast(strRef.GetRef()); + if (literal == s->IsLiteral()) { + ++cspNum; + } + }; + for (int i = 0; i < kPoolNum; ++i) { + maple::SpinAutoLock guard(mtx[i]); + zygoteStringPool[i].ForEach(visitor); + if (appStringAppPool != nullptr) { + appStringAppPool[i].ForEach(visitor); + } + } + return cspNum; +} + +size_t ConstStringAppPoolNum(bool literal) { + size_t cspNum = 0; + if (appStringAppPool != nullptr) { + for (int i = 0; i < kPoolNum; ++i) { + maple::SpinAutoLock guard(mtx[i]); + appStringAppPool[i].ForEach([literal, &cspNum](const MStringRef strRef) { + MString *s = reinterpret_cast(strRef.GetRef()); + if (literal == s->IsLiteral()) { + ++cspNum; + } + }); + } + } + return cspNum; +} + +void DumpJString(std::ostream &os, const MString &stringObj, bool dumpName) { + uint32_t len = stringObj.GetLength(); + const uint8_t *data = stringObj.GetContentsPtr(); + std::ios::fmtflags f(os.flags()); + + if (dumpName) { + if (stringObj.IsCompress()) { + // need to convert to utf16 to calculate LiteralStrName + int constexpr maxLiteralLength = 1024; // up to 2KB local stack + if (len > maxLiteralLength) { + return; // just skip it + } + + uint16_t utf16Raw[maxLiteralLength] = { 0 }; + for (uint32_t i = 0; i < len; ++i) { + utf16Raw[i] = data[i]; + } + os << LiteralStrName::GetLiteralStrName(reinterpret_cast(utf16Raw), len << 1); + } else { + os << LiteralStrName::GetLiteralStrName(data, len << 1); + } + } else { + if (!stringObj.IsCompress()) { + len = len << 1; + } + for (uint32_t i = 0; i < len; ++i) { + os << std::setfill('0') << std::setw(2) << std::hex << static_cast(data[i]); + } + } + os << "\n"; + os.flags(f); +} + +void DumpConstStringPool(std::ostream &os, bool literal) { + auto visitor = [literal, &os](const MStringRef strRef) { + MString *s = reinterpret_cast(strRef.GetRef()); + if (literal == s->IsLiteral()) { + DumpJString(os, *s, true); // uuid name only + 
} + }; + for (int i = 0; i < kPoolNum; ++i) { + maple::SpinAutoLock guard(mtx[i]); + zygoteStringPool[i].ForEach(visitor); + if (appStringAppPool != nullptr) { + appStringAppPool[i].ForEach(visitor); + } + } +} + +void VisitStringPool(const RefVisitor &visitor) { + for (auto iter = classnameMap.begin(); iter != classnameMap.end(); ++iter) { + visitor(reinterpret_cast((iter->second).GetRawRef())); // pass direct root ref for moving gc + } +} + +void MRT_ScanLiteralPoolRoots(function visitRoot) { + auto visitor = [&visitRoot](const MStringRef strRef) { + MString *s = reinterpret_cast(strRef.GetRef()); + if (s->IsLiteral()) { + visitRoot(reinterpret_cast(s)); + } + }; + for (int i = 0; i < kPoolNum; ++i) { + zygoteStringPool[i].ForEach(visitor); + if (appStringAppPool != nullptr) { + appStringAppPool[i].ForEach(visitor); + } + } +} + +size_t ConcurrentSweepDeadStrings(maplert::MplThreadPool*) { + maple::SpinAutoLock guard(mtxForClassname); + RemoveDeadClassNameFromPoolLocked(classnameMap, sweepingStateForClassName); + return deadClassNameNum; +} + +size_t RemoveDeadStringFromPool() { + size_t count = 0; + for (auto it = classnameMap.begin(); it != classnameMap.end();) { + address_t addr = (it->second).GetRef(); + // dead string might already cleared in early sweep + // object already cleared, only fast check is enough + if ((IS_HEAP_OBJ(addr) && MRT_IsGarbage(addr))) { + it = classnameMap.erase(it); + MRT_DecRefUnsync(addr); + ++count; + } else { + ++it; + } + } + return count; +} + +// non-literal strings using this interface, it always returns a off-heap MString. +MString *GetOrInsertStringPool(MString &stringObj) { + uint32_t count = stringObj.GetCountValue(); + const uint8_t *data = stringObj.GetContentsPtr(); + + uint32_t hash = stringObj.GetHash(); + if (hash == 0) { + hash = LiteralStrName::CalculateHash(reinterpret_cast(data), + count >> 1, MString::IsCompressedFromCount(count)); + // this is not thead safe + stringObj.SetHash(hash); + } + + uint32_t num = hash % kPoolNum; + maple::SpinAutoLock guard(mtx[num]); + // try to find existing string first + MStringRef retStrObj; + auto iter = zygoteStringPool[num].Find(&stringObj); + if (iter != zygoteStringPool[num].End()) { // found + retStrObj = zygoteStringPool[num].Element(iter); + return reinterpret_cast(retStrObj.GetRef()); + } else if (appStringAppPool != nullptr) { + iter = appStringAppPool[num].Find(&stringObj); + if (iter != appStringAppPool[num].End()) { // found + retStrObj = appStringAppPool[num].Element(iter); + return reinterpret_cast(retStrObj.GetRef()); + } + } + + // not found: create a new stringObj in perm-spaceto and insert + uint32_t strObjSize = stringObj.GetStringSize(); + // is there possibly a deadlock here (with mtx[num].lock)? 
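+  // Descriptive note: the code below makes a byte-for-byte copy of the whole MString
+  // object into the newly allocated perm-space memory, then records that copy in the
+  // app pool when one exists, otherwise in the zygote pool.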
+ address_t newStringObj = MRT_AllocFromPerm(strObjSize); + if (UNLIKELY(newStringObj == 0)) { + return nullptr; + } + retStrObj.SetRef(newStringObj); + char *pDstStr = reinterpret_cast(retStrObj.GetRef()); + char *pSrcStr = reinterpret_cast(&stringObj); + for (uint32_t i = 0; i < strObjSize; ++i) { + pDstStr[i] = pSrcStr[i]; + } + + if (appStringAppPool != nullptr) { + appStringAppPool[num].Insert(retStrObj); + } else { + zygoteStringPool[num].Insert(retStrObj); + } + mtx[num].Unlock(); + return reinterpret_cast(retStrObj.GetRef()); +} +// Note:only used to insert literal (stored in static fields) into pool, +// currently only used by mpl-linker +// literal might be a full MString object, or only the jstring_payload +MString *GetOrInsertLiteral(MString &literalObj) { + // all literals already has hash set ready. + uint32_t num = literalObj.GetHash() % kPoolNum; + mtx[num].Lock(); + literalObj.SetStringClass(); + MStringRef retStrObj; + auto iter = zygoteStringPool[num].Find(&literalObj); + if (iter != zygoteStringPool[num].End()) { // found + retStrObj = zygoteStringPool[num].Element(iter); + mtx[num].Unlock(); + return reinterpret_cast(retStrObj.GetRef()); + } else if (appStringAppPool != nullptr) { + iter = appStringAppPool[num].Find(&literalObj); + if (iter != appStringAppPool[num].End()) { + retStrObj = appStringAppPool[num].Element(iter); + mtx[num].Unlock(); + return reinterpret_cast(retStrObj.GetRef()); + } + } + + retStrObj.SetRef(&literalObj); + if (appStringAppPool != nullptr) { + appStringAppPool[num].Insert(retStrObj); + } else { + zygoteStringPool[num].Insert(retStrObj); + } + mtx[num].Unlock(); + return reinterpret_cast(retStrObj.GetRef()); +} + +// Note: used to insert literal into pool, only for compiler generated code +extern "C" jstring MCC_GetOrInsertLiteral(jstring literal) { + MString *literalObj = reinterpret_cast(literal); + DCHECK(literalObj != nullptr); + jstring retJstr = reinterpret_cast(GetOrInsertLiteral(*literalObj)); + return retJstr; +} + +void RemoveStringFromPool(MString &stringObj) { + uint32_t judgeHash = stringObj.GetHash(); + uint32_t num = judgeHash % kPoolNum; + mtx[num].Lock(); + auto iter = zygoteStringPool[num].Find(&stringObj); + if (iter != zygoteStringPool[num].End()) { + zygoteStringPool[num].Erase(iter); + } else if (appStringAppPool != nullptr) { + iter = appStringAppPool[num].Find(&stringObj); + if (iter != appStringAppPool[num].End()) { // found + appStringAppPool[num].Erase(iter); + } + } + mtx[num].Unlock(); + return; +} // String pool function interface end + +// String compress start +int GetMUtf8ByteCount(const uint16_t *utf16Raw, size_t charCount, ZeroCodeT zCode, int charSet) { + DCHECK(utf16Raw != nullptr); + int res = 0; + for (size_t i = 0; i < charCount; ++i) { + uint16_t ch = utf16Raw[i]; + if ((!static_cast(zCode) || ch != 0) && ch <= 0x7f) { + ++res; + } else if (ch <= 0x7ff) { + res += 2; + } else { + if (!charSet) { + if ((ch >= 0xd800 && ch <= 0xdbff) && (i < charCount - 1)) { + uint16_t ch1 = utf16Raw[i + 1]; + if (ch1 >= 0xdc00 && ch1 <= 0xdfff) { + ++i; + res += 4; + continue; + } + } + res += 3; + } else { + if ((ch & 0xfffff800) == 0xd800) { + uint16_t ch1 = (i < charCount - 1) ? 
utf16Raw[i + 1] : 0; + if (!((ch & 0x400) == 0) || !((ch1 & 0x400) != 0)) { + ++res; + continue; + } + ++i; + res += 4; + } else { + res += 3; + } + } + } + } + return res; +} + +const int kCodepointOffset1 = 6; // U+0080 - U+07FF 110xxxxx 10xxxxxx +const int kCodepointOffset2 = 12; // U+0800 - U+FFFF 1110xxxx 10xxxxxx 10xxxxxx +const int kCodepointOffset3 = 18; // U+10000- U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx +const int kCodeAfterMinusOffset = 10; // codepoint equals itself minus 0x10000 +const int kUtf16Bits = 16; // UTF_16 16 bits +void MUtf8Encode(uint8_t *utf8Res, const uint16_t *utf16Raw, size_t charCount, ZeroCodeT zCode, int charSet) { + DCHECK(utf16Raw != nullptr); + DCHECK(utf8Res != nullptr); + uint32_t offset = 0; + for (size_t i = 0; i < charCount; ++i) { + uint16_t ch = utf16Raw[i]; + if ((!static_cast(zCode) || ch != 0) && ch <= 0x7f) { + utf8Res[offset++] = ch; + } else if (ch <= 0x7ff) { + utf8Res[offset++] = (0xc0 | (0x1f & (ch >> kCodepointOffset1))); + utf8Res[offset++] = (0x80 | (0x3f & ch)); + } else { + if (!charSet) { + if ((ch >= 0xd800 && ch <= 0xdbff) && (i < charCount - 1)) { + uint16_t ch1 = utf16Raw[i + 1]; + if (ch1 >= 0xdc00 && ch1 <= 0xdfff) { + i++; + const uint32_t tmp = (ch << kCodeAfterMinusOffset) + ch1 - 0x035fdc00; + utf8Res[offset++] = (tmp >> kCodepointOffset3) | 0xf0; + utf8Res[offset++] = ((tmp >> kCodepointOffset2) & 0x3f) | 0x80; + utf8Res[offset++] = ((tmp >> kCodepointOffset1) & 0x3f) | 0x80; + utf8Res[offset++] = (tmp & 0x3f) | 0x80; + continue; + } + } + utf8Res[offset++] = (0xe0 | (0x0f & (ch >> kCodepointOffset2))); + utf8Res[offset++] = (0x80 | (0x3f & (ch >> kCodepointOffset1))); + utf8Res[offset++] = (0x80 | (0x3f & ch)); + } else { + if ((ch & 0xfffff800) == 0xd800) { + uint16_t ch1 = (i < charCount - 1) ? 
utf16Raw[i + 1] : 0; + if (!((ch & 0x400) == 0) || !((ch1 & 0x400) != 0)) { + utf8Res[offset++] = 0x3f; + continue; + } + ++i; + const uint32_t tmp = (ch << kCodeAfterMinusOffset) + ch1 - 0x035fdc00; + utf8Res[offset++] = (tmp >> kCodepointOffset3) | 0xf0; + utf8Res[offset++] = ((tmp >> kCodepointOffset2) & 0x3f) | 0x80; + utf8Res[offset++] = ((tmp >> kCodepointOffset1) & 0x3f) | 0x80; + utf8Res[offset++] = (tmp & 0x3f) | 0x80; + } else { + utf8Res[offset++] = (0xe0 | (0x0f & (ch >> kCodepointOffset2))); + utf8Res[offset++] = (0x80 | (0x3f & (ch >> kCodepointOffset1))); + utf8Res[offset++] = (0x80 | (0x3f & ch)); + } + } + } + } +} + +uint32_t CountInMutfLen(const char *utf8, size_t utf8Len) { + DCHECK(utf8 != nullptr); + size_t count = 0; + uint32_t res = 0; + while (true) { + uint8_t ch = utf8[count] & 0xff; + ++count; + if (count > utf8Len) { + return res; + } + if (ch < 0x80) { + ++res; + } else if ((ch & 0xe0) == 0xc0) { + ++count; + ++res; + } else if ((ch & 0xf0) == 0xe0) { + count += 2; + ++res; + } else { + count += 3; + res += 2; + } + } +} + +void MUtf8Decode(const uint8_t *utf8, size_t utf8Len, uint16_t *utf16Res) { + DCHECK(utf8 != nullptr); + DCHECK(utf16Res != nullptr); + size_t count = 0; + int outCount = 0; + while (true) { + uint8_t ch = utf8[count++] & 0xff; + if (count > utf8Len) { + return; + } + if (ch < 0x80) { + utf16Res[outCount++] = ch; + } else if ((ch & 0xe0) == 0xc0) { + uint8_t sch = utf8[count++] & 0xff; + utf16Res[outCount++] = (static_cast(ch & 0x1f) << kCodepointOffset1) | (sch & 0x3f); + } else if ((ch & 0xf0) == 0xe0) { + uint8_t sch = utf8[count++] & 0xff; + uint8_t tch = utf8[count++] & 0xff; + utf16Res[outCount++] = (static_cast(ch & 0x0f) << kCodepointOffset2) | + (static_cast(sch & 0x3f) << kCodepointOffset1) | (tch & 0x3f); + } else { + uint8_t sch = utf8[count++] & 0xff; + uint8_t tch = utf8[count++] & 0xff; + uint8_t fch = utf8[count++] & 0xff; + const uint32_t tmp = ((ch & 0x0f) << kCodepointOffset3) | ((sch & 0x3f) << kCodepointOffset2) | + ((tch & 0x3f) << kCodepointOffset1) | (fch & 0x3f); + uint32_t pair = 0; + pair |= ((tmp >> kCodeAfterMinusOffset) + 0xd7c0) & 0xffff; + pair |= ((tmp & 0x03ff) + 0xdc00) << kUtf16Bits; + utf16Res[outCount++] = static_cast(pair & 0xffff); + utf16Res[outCount++] = static_cast(pair >> kUtf16Bits); + } + } +} + +extern "C" bool MRT_IsStrCompressed(const jstring jstr) { + MString *strObj = MString::JniCastNonNull(jstr); + return strObj->IsCompress(); +} + +// invoke by compiler +bool MCC_String_Equals_NotallCompress(jstring thisStr, jstring anotherStr) { + MString *thisStrObj = MString::JniCastNonNull(thisStr); + MString *anotherStrObj = MString::JniCastNonNull(anotherStr); + bool thisIsCompress = thisStrObj->IsCompress(); + uint8_t *thisStrSrc = thisStrObj->GetContentsPtr(); + uint8_t *anotherStrSrc = anotherStrObj->GetContentsPtr(); + uint32_t thisStrLen = thisStrObj->GetLength(); + uint8_t *compressChars = nullptr; + uint16_t *uncompressChars = nullptr; + if (thisIsCompress) { + compressChars = thisStrSrc; + uncompressChars = reinterpret_cast(anotherStrSrc); + } else { + compressChars = anotherStrSrc; + uncompressChars = reinterpret_cast(thisStrSrc); + } + for (uint32_t i = 0; i < thisStrLen; ++i) { + if (compressChars[i] != uncompressChars[i]) { + return false; + } + } + return true; +} + +static MString *StringAppend(uint8_t *stringsContent[], const uint32_t stringsLen[], const bool isStringsCompress[], + int32_t sumLength, uint32_t numOfStringAppend) { + if (UNLIKELY(stringsContent == nullptr)) { + return 
nullptr; + } else if (UNLIKELY(sumLength == 0)) { + return MString::NewConstEmptyStringObject(); + } else if (UNLIKELY(sumLength < 0)) { + MRT_ThrowNewExceptionUnw("java/lang/OutOfMemoryError"); + return nullptr; + } + MString *newStringObj = nullptr; + bool isAllCompress = true; + for (uint32_t i = 0; i < numOfStringAppend; ++i) { + isAllCompress = isAllCompress && isStringsCompress[i]; + } + if (isAllCompress) { + newStringObj = MString::NewStringObject(static_cast(sumLength), [&](MString &stringObj) { + uint8_t *newContent = stringObj.GetContentsPtr(); + for (uint32_t i = 0; i < numOfStringAppend; ++i) { + if (stringsLen[i] != 0) { + errno_t tmpResult = memcpy_s(newContent, stringsLen[i], stringsContent[i], stringsLen[i]); + if (UNLIKELY(tmpResult != EOK)) { + LOG(FATAL) << "memcpy_s()#1 in StringAppend() return " << tmpResult << "rather than 0."; + } + newContent += stringsLen[i]; + } + } + }); + } else { + newStringObj = MString::NewStringObject(static_cast(sumLength), [&](MString &stringObj) { + uint16_t *newContent = reinterpret_cast(stringObj.GetContentsPtr()); + for (uint32_t i = 0; i < numOfStringAppend; ++i) { + if (stringsLen[i] != 0) { + if (isStringsCompress[i]) { + uint8_t *content = stringsContent[i]; + for (uint32_t j = 0; j < stringsLen[i]; ++j) { + newContent[j] = content[j]; + } + } else { + uint32_t copyLen = stringsLen[i] << 1; + errno_t tmpResult = memcpy_s(newContent, copyLen, reinterpret_cast(stringsContent[i]), copyLen); + CHECK(tmpResult == EOK); + } + newContent += stringsLen[i]; + } + } + }); + } + return newStringObj; +} + +enum TYPESTRINGAPPEND { + kString = 0x01, + kChar = 0x02, + kBoolean = 0x03, + kInt = 0x04 +}; +const int kMaxIntcharLen = 13; +const int kMaxNunmberString = 20; +const int kSwitchTypeOffset = 3; + +uint8_t nullBuff[4] = { 'n', 'u', 'l', 'l' }; +uint8_t trueBuff[4] = { 't', 'r', 'u', 'e' }; +uint8_t falseBuff[5] = { 'f', 'a', 'l', 's', 'e' }; +extern "C" jstring MCC_StringAppend(uint64_t toStringFlag, ...) 
{ + uint32_t numOfString = 0; + uint8_t *stringsContent[kMaxNunmberString]; + uint32_t stringsLen[kMaxNunmberString]; + bool isStringsCompress[kMaxNunmberString]; + uint16_t charbuff[kMaxNunmberString]; + char intBuff[kMaxNunmberString][kMaxIntcharLen]; + jint sumLength = 0; + + va_list args; + va_start(args, toStringFlag); + + uint32_t type = toStringFlag & 0x07; + while (toStringFlag) { + switch (type) { + case kChar: { + jchar c = static_cast(va_arg(args, int32_t)); + charbuff[numOfString] = c; + stringsContent[numOfString] = reinterpret_cast(&charbuff[numOfString]); + stringsLen[numOfString] = 1; + sumLength += 1; + isStringsCompress[numOfString] = MString::IsCompressChar(c); + break; + } + case kBoolean: { + jboolean b = static_cast(va_arg(args, int32_t)); + if (b) { + stringsContent[numOfString] = trueBuff; + stringsLen[numOfString] = 4; + sumLength += 4; + } else { + stringsContent[numOfString] = falseBuff; + stringsLen[numOfString] = 5; + sumLength += 5; + } + isStringsCompress[numOfString] = true; + break; + } + case kInt: { + jint intValue = static_cast(va_arg(args, int32_t)); + char *intChar = intBuff[numOfString]; + stringsContent[numOfString] = reinterpret_cast(intChar); + int len = sprintf_s(intChar, kMaxIntcharLen, "%d", intValue); + if (UNLIKELY(len < 0)) { + LOG(ERROR) << "MCC_StringAppend sprintf_s fail" << maple::endl; + len = 0; + } + stringsLen[numOfString] = len; + sumLength += len; + isStringsCompress[numOfString] = true; + break; + } + case kString: { + jstring tmpString = reinterpret_cast(va_arg(args, jstring)); + uint32_t count; + if (tmpString == nullptr) { + stringsContent[numOfString] = nullBuff; + count = 9; + } else { + MString *string = MString::JniCastNonNull(tmpString); + stringsContent[numOfString] = string->GetContentsPtr(); + count = string->GetCountValue(); + } + bool isCompressTmp = (count == 0) ? true : MString::IsCompressedFromCount(count); + isStringsCompress[numOfString] = isCompressTmp; + uint32_t len = count >> 1; + sumLength += len; + stringsLen[numOfString] = len; + break; + } + default: { + LOG(FATAL) << "Unexpected primitive type: " << type; + } + } + + toStringFlag = toStringFlag >> kSwitchTypeOffset; + type = toStringFlag & 0x07; + ++numOfString; + } + va_end(args); + MString *newStringObj = StringAppend(stringsContent, stringsLen, isStringsCompress, sumLength, numOfString); + if (newStringObj == nullptr) { + return nullptr; + } + return newStringObj->AsJstring(); +} + +extern "C" jstring MCC_StringAppend_StringString(jstring strObj1, jstring strObj2) { + MString *stringObj1 = MString::JniCast(strObj1); + MString *stringObj2 = MString::JniCast(strObj2); + const int arrayLen = 2; + uint8_t *stringsContent[arrayLen]; + uint32_t stringsLen[arrayLen]; + bool isStringsCompress[arrayLen]; + jint sumLength = 0; + + for (uint32_t i = 0; i < arrayLen; ++i) { + MString *tmpString = (i == 0) ? stringObj1 : stringObj2; + uint32_t count; + if (tmpString == nullptr) { + stringsContent[i] = nullBuff; + count = 9; + } else { + stringsContent[i] = tmpString->GetContentsPtr(); + count = tmpString->GetCountValue(); + } + bool isCompressTmp = (count == 0) ? 
true : MString::IsCompressedFromCount(count); + isStringsCompress[i] = isCompressTmp; + uint32_t len = count >> 1; + sumLength += len; + stringsLen[i] = len; + } + MString *newStringObj = StringAppend(stringsContent, stringsLen, isStringsCompress, sumLength, arrayLen); + if (newStringObj == nullptr) { + return nullptr; + } + return newStringObj->AsJstring(); +} + +// count value of String is the length shift left 1 bit and use the last bit to save whether the string is compress +extern "C" jstring MCC_StringAppend_StringInt(jstring strObj1, jint intValue) { + MString *stringObj = MString::JniCast(strObj1); + const int arraySize = 2; + uint8_t *stringsContent[arraySize]; + char intBuff[kMaxIntcharLen]; + uint32_t stringsLen[arraySize]; + bool isStringsCompress[arraySize]; + jint sumLength = 0; + + MString *tmpString = stringObj; + uint32_t count; + if (tmpString == nullptr) { + stringsContent[0] = nullBuff; + count = 9; + } else { + stringsContent[0] = tmpString->GetContentsPtr(); + count = tmpString->GetCountValue(); + } + bool isCompressTmp = (count == 0) ? true : MString::IsCompressedFromCount(count); + isStringsCompress[0] = isCompressTmp; + uint32_t len = count >> 1; + sumLength += len; + stringsLen[0] = len; + + stringsContent[1] = reinterpret_cast(intBuff); + int intLen = sprintf_s(intBuff, kMaxIntcharLen, "%d", intValue); + if (UNLIKELY(intLen < 0)) { + LOG(ERROR) << "MCC_StringAppend sprintf_s fail" << maple::endl; + intLen = 0; + } + stringsLen[1] = intLen; + sumLength += intLen; + isStringsCompress[1] = true; + MString *newStringObj = StringAppend(stringsContent, stringsLen, isStringsCompress, sumLength, arraySize); + if (newStringObj == nullptr) { + return nullptr; + } + return newStringObj->AsJstring(); +} + +extern "C" jstring MCC_StringAppend_StringJcharString(jstring strObj1, uint16_t charValue, jstring strObj2) { + MString *stringObj1 = MString::JniCast(strObj1); + MString *stringObj2 = MString::JniCast(strObj2); + const int arraySize = 3; + uint8_t *stringsContent[arraySize]; + uint32_t stringsLen[arraySize]; + bool isStringsCompress[arraySize]; + jint sumLength = 0; + + for (uint32_t i = 0; i < arraySize; ++i) { + if (i == 1) { + continue; + } + MString *tmpString = (i == 0) ? stringObj1 : stringObj2; + uint32_t count; + if (tmpString == nullptr) { + stringsContent[i] = nullBuff; + count = 9; + } else { + stringsContent[i] = tmpString->GetContentsPtr(); + count = tmpString->GetCountValue(); + } + bool isCompressTmp = (count == 0) ? true : MString::IsCompressedFromCount(count); + isStringsCompress[i] = isCompressTmp; + uint32_t len = count >> 1; + sumLength += len; + stringsLen[i] = len; + } + + stringsContent[1] = reinterpret_cast(&charValue); + stringsLen[1] = 1; + sumLength += 1; + isStringsCompress[1] = MString::IsCompressChar(charValue); + MString *newStringObj = StringAppend(stringsContent, stringsLen, isStringsCompress, sumLength, arraySize); + if (newStringObj == nullptr) { + return nullptr; + } + return newStringObj->AsJstring(); +} + +#ifdef __OPENJDK__ +template +int32_t StringNativeIndexOf(MString &subStrObj, const srcType *srcArrayData, + int32_t srcOffset, int32_t srcCount, + int32_t fromIndex, int32_t subLen) { + if (UNLIKELY(srcArrayData == nullptr)) { + return -1; + } + if (fromIndex >= srcCount) { + return (subLen == 0 ? 
srcCount : -1); + } + if (fromIndex < 0) { + fromIndex = 0; + } + if (subLen == 0) { + return fromIndex; + } + subType *subStrArrayData = reinterpret_cast(subStrObj.GetContentsPtr()); + subType first = subStrArrayData[0]; + int32_t max = srcOffset + (srcCount - subLen); + + for (int i = srcOffset + fromIndex; i <= max; ++i) { + if (srcArrayData[i] != first) { + while ((++i <= max) && (srcArrayData[i] != first)) {} + } + + if (i <= max) { + int j = i + 1; + int end = j + subLen - 1; + for (int k = 1; (j < end) && (srcArrayData[j] == subStrArrayData[k]); ++j, ++k) {} + if (j == end) { + return i - srcOffset; + } + } + } + return -1; +} + +int32_t StringNativeIndexOfP3(MString &subStrObj, MString &srcStrObj, int32_t fromIndex) { + bool srcIsCompress = srcStrObj.IsCompress(); + bool subStrIsCompress = subStrObj.IsCompress(); + char *srcChar = reinterpret_cast(srcStrObj.GetContentsPtr()); + uint32_t srcCount = srcStrObj.GetLength(); + uint32_t subLen = subStrObj.GetLength(); + uint32_t res = -1; + if (srcIsCompress && !subStrIsCompress) { + res = StringNativeIndexOf(subStrObj, srcChar, 0, srcCount, fromIndex, subLen); + } else if (srcIsCompress && subStrIsCompress) { + res = StringNativeIndexOf(subStrObj, srcChar, 0, srcCount, fromIndex, subLen); + } else { + uint16_t *srcArrayData = reinterpret_cast(srcChar); + if (subStrIsCompress) { + res = StringNativeIndexOf(subStrObj, srcArrayData, 0, srcCount, fromIndex, subLen); + } else { + res = StringNativeIndexOf(subStrObj, srcArrayData, 0, srcCount, fromIndex, subLen); + } + } + return res; +} + +int32_t StringNativeIndexOfP5(MString &subStrObj, MArray &srcArray, + int32_t srcOffset, int32_t srcCount, int32_t fromIndex) { + uint16_t *srcArrayData = reinterpret_cast(srcArray.ConvertToCArray()); + uint32_t subLen = subStrObj.GetLength(); + bool subStrIsCompress = subStrObj.IsCompress(); + int32_t res = -1; + if (subStrIsCompress) { + res = StringNativeIndexOf(subStrObj, srcArrayData, srcOffset, srcCount, fromIndex, subLen); + } else { + res = StringNativeIndexOf(subStrObj, srcArrayData, srcOffset, srcCount, fromIndex, subLen); + } + return res; +} + +template +int32_t StringNativeLastIndexOf(MString &subStrObj, const srcType *srcArrayData, int32_t srcOffset, + int32_t srcCount, int32_t fromIndex, int32_t subLen) { + if (UNLIKELY(srcArrayData == nullptr)) { + return -1; + } + int32_t rightIndex = srcCount - subLen; + if (fromIndex < 0) { + return -1; + } + if (fromIndex > rightIndex) { + fromIndex = rightIndex; + } + if (subLen == 0) { + return fromIndex; + } + + subType *subStrArrayData = reinterpret_cast(subStrObj.GetContentsPtr()); + int32_t strLastIndex = subLen - 1; + uint16_t strLastChar = subStrArrayData[strLastIndex]; + int32_t min = srcOffset + subLen - 1; + int i = min + fromIndex; + + while (true) { + bool flag = false; + while (i >= min && srcArrayData[i] != strLastChar) { + --i; + } + if (i < min) { + return -1; + } + int j = i - 1; + int start = j - (subLen - 1); + int k = strLastIndex - 1; + while (j > start) { + if (srcArrayData[j--] != subStrArrayData[k--]) { + --i; + flag = true; + break; + } + } + if (flag) { + continue; + } + return start - srcOffset + 1; + } +} + +int32_t StringNativeLastIndexOfP3(MString &subStrObj, MString &srcStrObj, int32_t fromIndex) { + uint32_t subLen = subStrObj.GetLength(); + uint32_t srcCount = srcStrObj.GetLength(); + int32_t res = 0; + char *srcChar = reinterpret_cast(srcStrObj.GetContentsPtr()); + bool subStrIsCompress = subStrObj.IsCompress(); + bool srcStrIsCompress = srcStrObj.IsCompress(); + if 
(srcStrIsCompress && !subStrIsCompress) { + res = StringNativeLastIndexOf(subStrObj, srcChar, 0, srcCount, fromIndex, subLen); + } else if (srcStrIsCompress && subStrIsCompress) { + res = StringNativeLastIndexOf(subStrObj, srcChar, 0, srcCount, fromIndex, subLen); + } else { + uint16_t *srcArrayData = reinterpret_cast(srcChar); + if (subStrIsCompress) { + res = StringNativeLastIndexOf(subStrObj, srcArrayData, 0, srcCount, fromIndex, subLen); + } else { + res = StringNativeLastIndexOf(subStrObj, srcArrayData, 0, srcCount, fromIndex, subLen); + } + } + return res; +} + +int32_t StringNativeLastIndexOfP5(MString &subStrObj, MArray &srcArray, + int32_t srcOffset, int32_t srcCount, int32_t fromIndex) { + uint16_t *srcArrayData = reinterpret_cast(srcArray.ConvertToCArray()); + uint32_t subLen = subStrObj.GetLength(); + bool subStrIsCompress = subStrObj.IsCompress(); + int32_t res = 0; + if (subStrIsCompress) { + res = StringNativeLastIndexOf(subStrObj, srcArrayData, srcOffset, + srcCount, fromIndex, subLen); + } else { + res = StringNativeLastIndexOf(subStrObj, srcArrayData, srcOffset, + srcCount, fromIndex, subLen); + } + return res; +} + +const int kSurrogatesBits = 10; + +static int minSupplementaryCodePoint = 0x010000; +static uint16_t minHighSurrogate = 0xd800; +static uint16_t minLowSurrogate = 0xdc00; +static uint16_t maxHighSurrogate = 0xdbff; +static uint16_t maxLowSurrogate = 0xdfff; + +static uint32_t validMaxCodePoint = ((0X10FFFF + 1) >> kUtf16Bits); +static uint16_t validMinHighSurrogate = (minHighSurrogate - (minSupplementaryCodePoint >> kSurrogatesBits)); +static uint16_t validMinLowSurrogatee = minLowSurrogate; + +// codepoint hava two part, each part is u16 +// the leading surrogate code unit used to represent the character in the UTF-16 encoding +MString *StringNewStringFromCodePoints(MArray &mArray, int32_t offset, int32_t count) { + uint32_t *jintArray = reinterpret_cast(mArray.ConvertToCArray()); + if (count <= 0) { + return nullptr; + } + uint16_t *jcharArray = reinterpret_cast(malloc(count * 2 * sizeof(uint16_t))); // codepoint hava two part + if (UNLIKELY(jcharArray == nullptr)) { + return nullptr; + } + int32_t end = offset + count; + int32_t length = 0; + + for (int i = offset; i < end; ++i) { + if ((jintArray[i] >> kUtf16Bits) == 0) { + jcharArray[length] = static_cast(jintArray[i]); + ++length; + } else if((jintArray[i] >> kUtf16Bits) < validMaxCodePoint) { + jcharArray[length++] = ((jintArray[i] >> kSurrogatesBits) + validMinHighSurrogate); + jcharArray[length++] = ((jintArray[i] & 0x3ff) + validMinLowSurrogatee); + } else { + free(jcharArray); + jcharArray = nullptr; + MRT_ThrowNewException("java/lang/IllegalArgumentException", + "Exception IllegalArgumentException in newStringFromCodePoints"); + return nullptr; + } + } + + const bool compressible = MString::IsCompressChars(jcharArray, length); + MString *res = nullptr; + if (compressible) { + res = MString::NewStringObject(jcharArray, length); + } else { + res = MString::NewStringObject(jcharArray, length); + } + free(jcharArray); + jcharArray = nullptr; + return res; +} + +static inline bool IsHighSurrogate(uint16_t ch) { + return (ch >= minHighSurrogate && ch < (maxHighSurrogate + 1)); +} + +static inline bool IsLowSurrogate(uint16_t ch) { + return (ch >= minLowSurrogate && ch < (maxLowSurrogate + 1)); +} + +static inline int32_t ToCodePoint(uint16_t high, uint16_t low) { + return ((high << kSurrogatesBits) + low) + + (minSupplementaryCodePoint - (minHighSurrogate << kSurrogatesBits) - minLowSurrogate); +} + 
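+// Worked example (illustrative only, not taken from the original sources) of the
+// surrogate-pair arithmetic used by StringNewStringFromCodePoints() and ToCodePoint()
+// above, using the constants defined in this file:
+//   code point U+1F600 (0x1f600) is split into
+//     high = (0x1f600 >> kSurrogatesBits) + validMinHighSurrogate = 0x7d  + 0xd7c0 = 0xd83d
+//     low  = (0x1f600 & 0x3ff)            + validMinLowSurrogatee = 0x200 + 0xdc00 = 0xde00
+//   and ToCodePoint(0xd83d, 0xde00)
+//     = ((0xd83d << 10) + 0xde00) + (0x10000 - (0xd800 << 10) - 0xdc00) = 0x1f600,
+//   i.e. the round trip recovers the original code point.
+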
+int32_t StringNativeCodePointAt(MString &strObj, int32_t index) { + uint32_t length = strObj.GetLength(); + if (index < 0 || static_cast(index) >= length) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeCodePointAt(): input index out of Bounds"); + return -1; + } + bool strIsCompress = strObj.IsCompress(); + if (strIsCompress) { + char *strDataC = reinterpret_cast(strObj.GetContentsPtr()); + char ch = strDataC[index]; + return ch; + } + + uint16_t *strData = reinterpret_cast(strObj.GetContentsPtr()); + uint16_t c1 = strData[index]; + if (IsHighSurrogate(c1) && static_cast(++index) < length) { + uint16_t c2 = strData[index]; + if (IsLowSurrogate(c2)) { + return ToCodePoint(c1, c2); + } + } + return c1; +} + +int32_t StringNativeCodePointBefore(MString &strObj, int32_t index) { + uint32_t length = strObj.GetLength(); + if (index < 1 || static_cast(index) > length) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeCodePointBefore(): input index out of Bounds"); + return -1; + } + bool strIsCompress = strObj.IsCompress(); + if (strIsCompress) { + char *strDataC = reinterpret_cast(strObj.GetContentsPtr()); + char ch = strDataC[--index]; + return ch; + } + + uint16_t *strData = reinterpret_cast(strObj.GetContentsPtr()); + uint16_t c2 = strData[--index]; + if (IsLowSurrogate(c2) && index > 0) { + uint16_t c1 = strData[--index]; + if (IsHighSurrogate(c1)) { + return ToCodePoint(c1, c2); + } + } + return c2; +} + +int32_t StringNativeCodePointCount(MString &strObj, int32_t beginIndex, int32_t endIndex) { + uint32_t length = strObj.GetLength(); + if (beginIndex < 0 || static_cast(endIndex) > length || beginIndex > endIndex) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeCodePointCount(): input index out of Bounds"); + return -1; + } + int n = endIndex - beginIndex; + bool strIsCompress = strObj.IsCompress(); + if (strIsCompress) { + return n; + } + uint16_t *strData = reinterpret_cast(strObj.GetContentsPtr()); + for (int i = beginIndex; i < endIndex;) { + if (IsHighSurrogate(strData[i++]) && i < endIndex && IsLowSurrogate(strData[i])) { + --n; + ++i; + } + } + return n; +} + +int32_t StringNativeOffsetByCodePoint(MString &strObj, int32_t index, int32_t codePointOffset) { + uint32_t length = strObj.GetLength(); + if (index < 0 || static_cast(index) > length) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeOffsetByCodePoint(): input index out of Bounds"); + return -1; + } + bool strIsCompress = strObj.IsCompress(); + uint16_t *strData = reinterpret_cast(strObj.GetContentsPtr()); + int x = index; + int resCount = x + codePointOffset; + if (codePointOffset >= 0) { + if (strIsCompress) { + if (static_cast(resCount) < length) { + return resCount; + } else { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeOffsetByCodePoint(): input index out of Bounds"); + return -1; + } + } + int i = 0; + while (static_cast(x) < length && i < codePointOffset) { + if (IsHighSurrogate(strData[x++]) && static_cast(x) < length && IsLowSurrogate(strData[x])) { + ++x; + } + ++i; + } + if (i < codePointOffset) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeOffsetByCodePoint(): input index out of Bounds"); + return -1; + } + } else { + if (strIsCompress) { + if (resCount < 0) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeOffsetByCodePoint(): input index out of 
Bounds"); + return -1; + } else { + return resCount; + } + } + int j = codePointOffset; + while (x > 0 && j < 0) { + if (IsLowSurrogate(strData[--x]) && x > 0 && IsHighSurrogate(strData[x - 1])) { + --x; + } + ++j; + } + if (j < 0) { + MRT_ThrowNewException("java/lang/StringIndexOutOfBoundsException", + "StringNativeOffsetByCodePoint(): input index out of Bounds"); + return -1; + } + } + return x; +} +#endif // __OPENJDK__ +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_util.cpp b/src/mrt/maplert/src/mrt_util.cpp new file mode 100644 index 0000000000..7a47e1a1db --- /dev/null +++ b/src/mrt/maplert/src/mrt_util.cpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mrt_util.h" +#include +#include +#include +#include +#include +#include +#include "namemangler.h" +#include "panic.h" +#include "mrt_reflection_class.h" +#include "itab_util.h" + +namespace maplert { +// string _3B, _7C, _29 is the ascii code for some special character +const int kAsciiStrLength = 3; + +// typechar +// V void +// Z boolean u8 +// B byte i8 +// S short i16 +// C char u16 +// I int i32 +// J long i64 +// F float f32 +// D double f64 +// L ref u64 +// A array +// _ not array + +// two usages: +// short form: when typenames passed as NULL +// return a string for its prototype +// each argument is represented by two charactors, A/_ + typechar +// followed by 2 charactors for return type +// normal form: +// typenames contains char* for arg types and return type +// in this case, return is NULL +extern "C" char *MRT_FuncnameToPrototypeNames(char *funcName, int argNum, char **typeNames) { + bool shortForm = true; + if (typeNames != nullptr) { + shortForm = false; + } + DCHECK(funcName != nullptr) << "MRT_FuncnameToPrototypeNames: funcName is nullptr" << maple::endl; + // arg type names and return type name + char *name = funcName; + unsigned index = 0; + int protoArgNum = 0; + unsigned endPos = 0; + while (*name) { + if (*name == '_') { + if (strncmp(name, "_28", kAsciiStrLength) == 0) { + name += kAsciiStrLength; + break; + } else if (endPos == 0 && strncmp(name, "_7C", kAsciiStrLength) == 0) { + endPos = static_cast(name - funcName); + } + } + name++; + } + + // collect protoArgNum, whether need to insert this + char *name0 = name; + while (*name) { + if (*name == '[') { + name++; + } + if (*name == 'L') { + while (!(*name == '_' && strncmp(name, "_3B", kAsciiStrLength) == 0)) { + name++; + } + // when the current pointer meet substring of "_3B", pointer skip 2 steps firstly. 
+ // then skip more 1 step to skip over substring of "_3B" for code name++; + name += 2; + } else if (*name == '_' && strncmp(name, "_29", kAsciiStrLength) == 0) { + break; + } + name++; + protoArgNum++; + } + + if (protoArgNum != argNum) { + printf("argnum = %d; protoargnum = %d\n", argNum, protoArgNum); + __MRT_ASSERT(0, ""); + } + + char *retPtr = nullptr; + size_t size; + if (shortForm) { + size = static_cast(argNum) * 2 + 3; // args*2 + ret*2 + '\0' + } else { + size = strlen(funcName) + protoArgNum; + } + if (size == 0) { + return nullptr; + } + retPtr = reinterpret_cast(calloc(size, 1)); + if (retPtr == nullptr) { + return nullptr; + } + char *ret = retPtr; + + name = name0; + while (*name) { + char *ret0 = ret; + if (*name == '[') { + name++; + *ret++ = '['; + } else { + if (shortForm) { + *ret++ = '_'; + } + } + + if (*name == 'L') { + *ret++ = 'L'; + name++; + while (!(*name == '_' && strncmp(name, "_3B", kAsciiStrLength) == 0)) { + if (!shortForm) { + *ret++ = *name; + } + name++; + } + name += 2; // skip 2 steps + if (!shortForm) { + *ret++ = '_'; + *ret++ = '3'; + *ret++ = 'B'; + } + } else if (*name == '_' && strncmp(name, "_29", kAsciiStrLength) == 0) { + if (shortForm) { + ret--; + } + name += 3; // skip 3 steps + continue; + } else { + *ret++ = *name; + } + + name++; + + if (!shortForm) { + ret++; + typeNames[index++] = ret0; + } + } + + return retPtr; +} + +// function GetClassNametoDescriptor calloc space for javaDescriptor +// remember to free when used in another place +// the className after Decode is shorter so that the classNameLength is large Enough +std::string GetClassNametoDescriptor(const std::string &className) { + std::string name = className; + std::string descriptor; + namemangler::DecodeMapleNameToJavaDescriptor(name, descriptor); + return descriptor; +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mrt_well_known.cpp b/src/mrt/maplert/src/mrt_well_known.cpp new file mode 100644 index 0000000000..2538c3cab4 --- /dev/null +++ b/src/mrt/maplert/src/mrt_well_known.cpp @@ -0,0 +1,706 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mrt_well_known.h" +#include "mrt_classloader_api.h" +#include "chelper.h" +#include "mclass_inline.h" +#include "fieldmeta_inline.h" +#include "methodmeta_inline.h" +#include "mrt_primitive_api.h" +#include "mrt_array_api.h" +namespace maplert { +// Init Ljava_2Flang_2FString_3B direct +// mpl-linker use it when load const jstring +extern "C" { + extern void *MRT_CLASSINFO(Ljava_2Flang_2FString_3B); + extern void *MRT_CLASSINFO(Ljava_2Flang_2FClass_3B); +} + +MClass *WellKnown::Ljava_2Flang_2FString_3B = reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FString_3B)); +MClass *WellKnown::Ljava_2Flang_2FClass_3B = reinterpret_cast(&MRT_CLASSINFO(Ljava_2Flang_2FClass_3B)); + +MClass *WellKnown::primitiveClassZ; +MClass *WellKnown::primitiveClassB; +MClass *WellKnown::primitiveClassS; +MClass *WellKnown::primitiveClassC; +MClass *WellKnown::primitiveClassI; +MClass *WellKnown::primitiveClassJ; +MClass *WellKnown::primitiveClassF; +MClass *WellKnown::primitiveClassD; +MClass *WellKnown::primitiveClassV; + +MClass *WellKnown::primitiveClassAZ; +MClass *WellKnown::primitiveClassAB; +MClass *WellKnown::primitiveClassAS; +MClass *WellKnown::primitiveClassAC; +MClass *WellKnown::primitiveClassAI; +MClass *WellKnown::primitiveClassAJ; +MClass *WellKnown::primitiveClassAF; +MClass *WellKnown::primitiveClassAD; + +MClass *WellKnown::primitiveClassAAZ; +MClass *WellKnown::primitiveClassAAB; +MClass *WellKnown::primitiveClassAAS; +MClass *WellKnown::primitiveClassAAC; +MClass *WellKnown::primitiveClassAAI; +MClass *WellKnown::primitiveClassAAJ; +MClass *WellKnown::primitiveClassAAF; +MClass *WellKnown::primitiveClassAAD; + +MClass *WellKnown::Ljava_2Flang_2FVoid_3B; +MClass *WellKnown::Ljava_2Flang_2FBoolean_3B; +MClass *WellKnown::Ljava_2Flang_2FByte_3B; +MClass *WellKnown::Ljava_2Flang_2FCharacter_3B; +MClass *WellKnown::Ljava_2Flang_2FShort_3B; +MClass *WellKnown::Ljava_2Flang_2FInteger_3B; +MClass *WellKnown::Ljava_2Flang_2FLong_3B; +MClass *WellKnown::Ljava_2Flang_2FFloat_3B; +MClass *WellKnown::Ljava_2Flang_2FDouble_3B; +MClass *WellKnown::Ljava_2Flang_2FNumber_3B; + +MClass *WellKnown::Ljava_2Flang_2FObject_3B; +MClass *WellKnown::Ljava_2Flang_2FClassLoader_3B; +MClass *WellKnown::Ljava_2Flang_2Freflect_2FField_3B; +MClass *WellKnown::Ljava_2Flang_2Freflect_2FConstructor_3B; +MClass *WellKnown::Ljava_2Flang_2Freflect_2FMethod_3B; +MClass *WellKnown::Ljava_2Flang_2Freflect_2FProxy_3B; +MClass *WellKnown::Ljava_2Flang_2Freflect_2FParameter_3B; +MClass *WellKnown::Llibcore_2Freflect_2FGenericSignatureParser_3B; +MClass *WellKnown::Ljava_2Flang_2Fref_2FReference_3B; +MClass *WellKnown::Ljava_2Flang_2Fref_2FFinalizerReference_3B; + +MClass *WellKnown::Ljava_2Flang_2FCloneable_3B; +MClass *WellKnown::Ljava_2Fio_2FSerializable_3B; + +MClass *WellKnown::Ljava_2Flang_2FStringFactory_3B; +MClass *WellKnown::Ljava_2Flang_2FError_3B; +MClass *WellKnown::Ljava_2Flang_2FThrowable_3B; +MClass *WellKnown::Ljava_2Flang_2FArithmeticException_3B; +MClass *WellKnown::Ljava_2Flang_2FInterruptedException_3B; +MClass *WellKnown::Ljava_2Flang_2FClassCastException_3B; +MClass *WellKnown::Ljava_2Flang_2FUnsatisfiedLinkError_3B; +MClass *WellKnown::Ljava_2Flang_2FStringIndexOutOfBoundsException_3B; +MClass *WellKnown::Ljava_2Flang_2FNoClassDefFoundError_3B; +MClass *WellKnown::Ljava_2Flang_2FNoSuchMethodError_3B; +MClass *WellKnown::Ljava_2Flang_2FNoSuchFieldError_3B; +MClass *WellKnown::Ljava_2Flang_2FVerifyError_3B; +MClass *WellKnown::Ljava_2Flang_2FExceptionInInitializerError_3B; +MClass 
*WellKnown::Ljava_2Flang_2FRuntimeException_3B; +MClass *WellKnown::Ljava_2Flang_2FSecurityException_3B; +MClass *WellKnown::Ljava_2Flang_2Freflect_2FUndeclaredThrowableException_3B; +MClass *WellKnown::Ljava_2Flang_2FArrayStoreException_3B; +MClass *WellKnown::Ljava_2Flang_2FArrayIndexOutOfBoundsException_3B; +MClass *WellKnown::Ljava_2Flang_2FNullPointerException_3B; + +MClass *WellKnown::Ljava_2Flang_2FEnum_3B; +MClass *WellKnown::Ljava_2Flang_2Fannotation_2FAnnotation_3B; +MClass *WellKnown::Llibcore_2Freflect_2FAnnotationMember_3B; +MClass *WellKnown::Llibcore_2Freflect_2FAnnotationFactory_3B; +MClass *WellKnown::Ldalvik_2Fsystem_2FDelegateLastClassLoader_3B; +MClass *WellKnown::Ldalvik_2Fsystem_2FPathClassLoader_3B; +MClass *WellKnown::Ldalvik_2Fsystem_2FDexClassLoader_3B; +MClass *WellKnown::Ldalvik_2Fsystem_2FInMemoryDexClassLoader_3B; + +MClass *WellKnown::Ljava_2Flang_2Finvoke_2FMethodType_3B; +MClass *WellKnown::Ljava_2Flang_2Finvoke_2FMethodHandle_3B; + +#ifdef __OPENJDK__ +MClass *WellKnown::Ljava_2Futil_2FHashMap_3B; +MClass *WellKnown::Lsun_2Freflect_2Fannotation_2FAnnotationParser_3B; +#endif // __OPENJDK__ + +MClass *WellKnown::Ljava_2Flang_2FInteger_24IntegerCache_3B; +MClass *WellKnown::Ljava_2Flang_2FByte_24ByteCache_3B; +MClass *WellKnown::Ljava_2Flang_2FShort_24ShortCache_3B; +MClass *WellKnown::Ljava_2Flang_2FCharacter_24CharacterCache_3B; +MClass *WellKnown::Ljava_2Flang_2FLong_24LongCache_3B; + +MClass *WellKnown::Ljava_2Flang_2Finvoke_2FInvokeData_24BindToData_3B; +MClass *WellKnown::Ljava_2Flang_2Finvoke_2FInvokeData_24DropArgumentsData_3B; +MClass *WellKnown::Ljava_2Flang_2Finvoke_2FInvokeData_24FilterReturnValueData_3B; +MClass *WellKnown::Ljava_2Flang_2Finvoke_2FInvokeData_24PermuteArgumentsData_3B; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_dataArray_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_metaArray_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_typeArray_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_opArray_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_index_offset; +size_t WellKnown::Ljava_2Flang_2FBindToData_3B_receiver_offset; +size_t WellKnown::Ljava_2Flang_2FDropArgumentsData_3B_numDropped_offset; +size_t WellKnown::Ljava_2Flang_2FDropArgumentsData_3B_startPos_offset; +size_t WellKnown::Ljava_2Flang_2FFilterReturnValueData_3B_target_offset; +size_t WellKnown::Ljava_2Flang_2FFilterReturnValueData_3B_filter_offset; +size_t WellKnown::Ljava_2Flang_2FPermuteArgumentsData_3B_target_offset; +size_t WellKnown::Ljava_2Flang_2FPermuteArgumentsData_3B_reorder_offset; + +MClass *WellKnown::Ldalvik_2Fsystem_2FEmulatedStackFrame_3B; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_artFieldOrMethod_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_handleKind_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_nominalType_offset; +size_t WellKnown::Ljava_2Flang_2FMethodHandle_3B_type_offset; +size_t WellKnown::Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_callsiteType_offset; +size_t WellKnown::Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_references_offset; +size_t WellKnown::Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_stackFrame_offset; +size_t WellKnown::Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_type_offset; + +MClass *WellKnown::ALjava_2Flang_2FObject_3B; +MClass *WellKnown::ALjava_2Flang_2FClass_3B; +MClass *WellKnown::ALjava_2Flang_2Freflect_2FField_3B; +MClass *WellKnown::ALjava_2Flang_2Freflect_2FMethod_3B; +MClass *WellKnown::ALjava_2Flang_2Fannotation_2FAnnotation_3B; +MClass 
*WellKnown::ALjava_2Flang_2Freflect_2FConstructor_3B; +MClass *WellKnown::ALjava_2Flang_2Freflect_2FParameter_3B; +MClass *WellKnown::ALjava_2Flang_2FString_3B; +MClass *WellKnown::ALjava_2Flang_2FBoolean_3B; +MClass *WellKnown::ALjava_2Flang_2FByte_3B; +MClass *WellKnown::ALjava_2Flang_2FCharacter_3B; +MClass *WellKnown::ALjava_2Flang_2FShort_3B; +MClass *WellKnown::ALjava_2Flang_2FInteger_3B; +MClass *WellKnown::ALjava_2Flang_2FLong_3B; +MClass *WellKnown::ALjava_2Flang_2FFloat_3B; +MClass *WellKnown::ALjava_2Flang_2FDouble_3B; + +MClass *WellKnown::AALjava_2Flang_2Fannotation_2FAnnotation_3B; + +FieldMeta *WellKnown::Ljava_2Flang_2FBoolean_3B_TRUE; +FieldMeta *WellKnown::Ljava_2Flang_2FBoolean_3B_FALSE; +FieldMeta *WellKnown::Ljava_2Flang_2FByte_24ByteCache_3B_cache; +FieldMeta *WellKnown::Ljava_2Flang_2FShort_24ShortCache_3B_cache; +FieldMeta *WellKnown::Ljava_2Flang_2FCharacter_24CharacterCache_3B_cache; +FieldMeta *WellKnown::Ljava_2Flang_2FLong_24LongCache_3B_cache; +FieldMeta *WellKnown::Ljava_2Flang_2FInteger_24IntegerCache_3B_cache; +FieldMeta *WellKnown::Ljava_2Flang_2FInteger_24IntegerCache_3B_low; +FieldMeta *WellKnown::Ljava_2Flang_2FInteger_24IntegerCache_3B_high; + +uintptr_t WellKnown::Ljava_2Flang_2FClassLoader_3B_LoadClass_Addr; +uintptr_t WellKnown::Llibcore_2Freflect_2FAnnotationMember_3B_7C_3Cinit_Addr; +uintptr_t WellKnown::Llibcore_2Freflect_2FAnnotationFactory_3B_7CcreateAnnotation_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FBoolean_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FByte_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FCharacter_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FShort_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FInteger_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FLong_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FFloat_3B_ValueOf_Addr; +uintptr_t WellKnown::Ljava_2Flang_2FDouble_3B_ValueOf_Addr; + +size_t WellKnown::Ljava_2Flang_2FBoolean_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FByte_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FCharacter_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FShort_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FInteger_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FLong_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FFloat_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FDouble_3B_value_offset; +size_t WellKnown::Ljava_2Flang_2FMethodType_3B_ptypes_offset; +size_t WellKnown::Ljava_2Flang_2FMethodType_3B_rtype_offset; + +size_t WellKnown::kReferenceReferentOffset; +size_t WellKnown::kReferenceQueueOffset; +size_t WellKnown::kReferencePendingnextOffset; +size_t WellKnown::kFinalizereferenceZombieOffset; +uint32_t WellKnown::currentCacheArrayClassIndex = 0; + +std::vector WellKnown::arrayInterface; + +MClass *WellKnown::primitiveArrayClass[kMaxPrimitiveSize]; + +MClass *WellKnown::cacheArrayClasses[kCacheArrayClassSize]; + +// frameworks +// ALandroid_2Fcontent_2Fpm_2FPackageParser_24ActivityIntentInfo_3B 0x01 +// ALandroid_2Fcontent_2Fpm_2FSignature_3B 0x02 +// ALcom_2Fandroid_2Fserver_2Fam_2FBroadcastFilter_3B 0x03 +// ALandroid_2Fcontent_2Fpm_2FPackageParser_24ServiceIntentInfo_3B 0x04 +// ALandroid_2Fcontent_2FIntent_3B 0x05 +// ALcom_2Fandroid_2Finternal_2Fos_2FBatteryStatsImpl_24StopwatchTimer_3B 0x06 +MClass *WellKnown::arrayFrameWorksClasses[kMaxPrimitiveSize] = { nullptr }; + +MClass *WellKnown::GetCacheArrayClass(const MClass &componentClass) { + for (uint32_t i = 0; i < kCacheArrayClassSize; ++i) { + MClass *arrayClass = 
cacheArrayClasses[i]; + if (arrayClass != nullptr && arrayClass->GetComponentClass() == &componentClass) { + return arrayClass; + } + } + + std::string arrayName("["); + arrayName.append(componentClass.GetName()); + MClass *arrayClass = MClass::GetClassFromDescriptor(&componentClass, arrayName.c_str()); + CHECK(arrayClass != nullptr) << "Get array class fail." << maple::endl; + uint32_t index = currentCacheArrayClassIndex; + cacheArrayClasses[index] = arrayClass; + currentCacheArrayClassIndex = (index + 1) % kCacheArrayClassSize; + return arrayClass; +} + +MClass *WellKnown::GetWellKnowClassWithFlag(uint8_t classFlag, const MClass &caller, const char *className) { + MClass *arrayClass = nullptr; + if (classFlag < kMaxFrameworksSize) { + arrayClass = arrayFrameWorksClasses[classFlag]; + if (arrayClass == nullptr) { + arrayClass = MClass::JniCast(MRT_GetClass(caller.AsJclass(), className)); + arrayFrameWorksClasses[classFlag] = arrayClass; + } + } + return arrayClass; +} + +void WellKnown::InitArrayInterfaceVector() { + arrayInterface.push_back(Ljava_2Flang_2FCloneable_3B); + arrayInterface.push_back(Ljava_2Fio_2FSerializable_3B); +} + +void WellKnown::InitCacheClass(MClass *&cls, const char *className) { + // boot class loader + cls = MClass::JniCast(MRT_GetClassByContextClass(nullptr, className)); + CHECK(cls != nullptr) << "InitCacheClass fail, not find class: " << className << maple::endl; +} + +void WellKnown::InitCacheMethodAddr(uintptr_t &methodAddr, const MClass &cls, const char *methodName, + const char *signatureName) { + MethodMeta *methodMeta = cls.GetDeclaredMethod(methodName, signatureName); + if (methodMeta == nullptr) { + LOG(FATAL) << "InitCacheMethodAddr, init fail, " << cls.GetName() << ", " << + methodName << signatureName << maple::endl; + return; + } + methodAddr = methodMeta->GetFuncAddress(); +} + +void WellKnown::InitCacheFieldOffset(size_t &fieldOffset, const MClass &cls, const char *fieldName) { + FieldMeta *fieldMeta = cls.GetDeclaredField(fieldName); + if (fieldMeta == nullptr) { + LOG(FATAL) << "InitCacheFieldOffset, init fail, " << cls.GetName() << ", " << fieldName << maple::endl; + return; + } + fieldOffset = fieldMeta->GetOffset(); +} + +void WellKnown::InitCacheFieldMeta(FieldMeta *&fieldMeta, const MClass &cls, const char *fieldName) { + fieldMeta = cls.GetDeclaredField(fieldName); + if (fieldMeta == nullptr) { + LOG(FATAL) << "InitCacheFieldOffset, init fail, " << cls.GetName() << ", " << fieldName << maple::endl; + return; + } +} + +void WellKnown::InitCachePrimitiveBoxClass() { + InitCacheClass(primitiveClassZ, "Z"); + InitCacheClass(primitiveClassB, "B"); + InitCacheClass(primitiveClassS, "S"); + InitCacheClass(primitiveClassC, "C"); + InitCacheClass(primitiveClassI, "I"); + InitCacheClass(primitiveClassJ, "J"); + InitCacheClass(primitiveClassF, "F"); + InitCacheClass(primitiveClassD, "D"); + InitCacheClass(primitiveClassV, "V"); + + InitCacheClass(primitiveClassAZ, "[Z"); + InitCacheClass(primitiveClassAB, "[B"); + InitCacheClass(primitiveClassAS, "[S"); + InitCacheClass(primitiveClassAC, "[C"); + InitCacheClass(primitiveClassAI, "[I"); + InitCacheClass(primitiveClassAJ, "[J"); + InitCacheClass(primitiveClassAF, "[F"); + InitCacheClass(primitiveClassAD, "[D"); + + InitCacheClass(primitiveClassAAZ, "[[Z"); + InitCacheClass(primitiveClassAAB, "[[B"); + InitCacheClass(primitiveClassAAS, "[[S"); + InitCacheClass(primitiveClassAAC, "[[C"); + InitCacheClass(primitiveClassAAI, "[[I"); + InitCacheClass(primitiveClassAAJ, "[[J"); + 
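// For context: GetCacheArrayClass() above keeps a small ring buffer of recently
// resolved array classes, so repeated requests for the same component type do not
// rebuild the "[" + name descriptor or go back to the loader each time.
// A minimal illustrative sketch of a caller (not part of this patch; only names
// already defined in this file are used, and java.lang.String is an arbitrary
// example component type):
static MClass *LookUpStringArrayTwiceSketch() {
  MClass *component = WellKnown::GetMClassString();
  // First request: on a cache miss the array descriptor is built from the
  // component's name, resolved, and stored in cacheArrayClasses.
  MClass *first = WellKnown::GetCacheArrayClass(*component);
  // Second request: served from the ring buffer until the slot is reused, since
  // currentCacheArrayClassIndex wraps around modulo kCacheArrayClassSize.
  MClass *second = WellKnown::GetCacheArrayClass(*component);
  return (first == second) ? first : second;
}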
InitCacheClass(primitiveClassAAF, "[[F"); + InitCacheClass(primitiveClassAAD, "[[D"); + + InitCacheClass(Ljava_2Flang_2FVoid_3B, "Ljava/lang/Void;"); + InitCacheClass(Ljava_2Flang_2FBoolean_3B, "Ljava/lang/Boolean;"); + InitCacheClass(Ljava_2Flang_2FByte_3B, "Ljava/lang/Byte;"); + InitCacheClass(Ljava_2Flang_2FCharacter_3B, "Ljava/lang/Character;"); + InitCacheClass(Ljava_2Flang_2FShort_3B, "Ljava/lang/Short;"); + InitCacheClass(Ljava_2Flang_2FInteger_3B, "Ljava/lang/Integer;"); + InitCacheClass(Ljava_2Flang_2FLong_3B, "Ljava/lang/Long;"); + InitCacheClass(Ljava_2Flang_2FFloat_3B, "Ljava/lang/Float;"); + InitCacheClass(Ljava_2Flang_2FDouble_3B, "Ljava/lang/Double;"); + InitCacheClass(Ljava_2Flang_2FNumber_3B, "Ljava/lang/Number;"); +} + +void WellKnown::InitCacheArrayClass() { + InitCacheClass(ALjava_2Flang_2FObject_3B, "[Ljava/lang/Object;"); + InitCacheClass(ALjava_2Flang_2FClass_3B, "[Ljava/lang/Class;"); + InitCacheClass(ALjava_2Flang_2Freflect_2FField_3B, "[Ljava/lang/reflect/Field;"); + InitCacheClass(ALjava_2Flang_2Freflect_2FMethod_3B, "[Ljava/lang/reflect/Method;"); + InitCacheClass(ALjava_2Flang_2Fannotation_2FAnnotation_3B, "[Ljava/lang/annotation/Annotation;"); + InitCacheClass(ALjava_2Flang_2Freflect_2FConstructor_3B, "[Ljava/lang/reflect/Constructor;"); + InitCacheClass(ALjava_2Flang_2Freflect_2FParameter_3B, "[Ljava/lang/reflect/Parameter;"); + InitCacheClass(ALjava_2Flang_2FString_3B, "[Ljava/lang/String;"); + InitCacheClass(ALjava_2Flang_2FBoolean_3B, "[Ljava/lang/Boolean;"); + InitCacheClass(ALjava_2Flang_2FByte_3B, "[Ljava/lang/Byte;"); + InitCacheClass(ALjava_2Flang_2FCharacter_3B, "[Ljava/lang/Character;"); + InitCacheClass(ALjava_2Flang_2FShort_3B, "[Ljava/lang/Short;"); + InitCacheClass(ALjava_2Flang_2FInteger_3B, "[Ljava/lang/Integer;"); + InitCacheClass(ALjava_2Flang_2FLong_3B, "[Ljava/lang/Long;"); + InitCacheClass(ALjava_2Flang_2FFloat_3B, "[Ljava/lang/Float;"); + InitCacheClass(ALjava_2Flang_2FDouble_3B, "[Ljava/lang/Double;"); + InitCacheClass(AALjava_2Flang_2Fannotation_2FAnnotation_3B, "[[Ljava/lang/annotation/Annotation;"); +} + +void WellKnown::InitCacheExceptionClass() { + InitCacheClass(Ljava_2Flang_2FError_3B, "Ljava/lang/Error;"); + InitCacheClass(Ljava_2Flang_2FThrowable_3B, "Ljava/lang/Throwable;"); + InitCacheClass(Ljava_2Flang_2FArithmeticException_3B, "Ljava/lang/ArithmeticException;"); + InitCacheClass(Ljava_2Flang_2FInterruptedException_3B, "Ljava/lang/InterruptedException;"); + InitCacheClass(Ljava_2Flang_2FClassCastException_3B, "Ljava/lang/ClassCastException;"); + InitCacheClass(Ljava_2Flang_2FUnsatisfiedLinkError_3B, "Ljava/lang/UnsatisfiedLinkError;"); + InitCacheClass(Ljava_2Flang_2FStringIndexOutOfBoundsException_3B, "Ljava/lang/StringIndexOutOfBoundsException;"); + InitCacheClass(Ljava_2Flang_2FNoClassDefFoundError_3B, "Ljava/lang/NoClassDefFoundError;"); + InitCacheClass(Ljava_2Flang_2FNoSuchMethodError_3B, "Ljava/lang/NoSuchMethodError;"); + InitCacheClass(Ljava_2Flang_2FNoSuchFieldError_3B, "Ljava/lang/NoSuchFieldError;"); + InitCacheClass(Ljava_2Flang_2FVerifyError_3B, "Ljava/lang/VerifyError;"); + InitCacheClass(Ljava_2Flang_2FExceptionInInitializerError_3B, "Ljava/lang/ExceptionInInitializerError;"); + InitCacheClass(Ljava_2Flang_2FRuntimeException_3B, "Ljava/lang/RuntimeException;"); + InitCacheClass(Ljava_2Flang_2FSecurityException_3B, "Ljava/lang/SecurityException;"); + InitCacheClass(Ljava_2Flang_2Freflect_2FUndeclaredThrowableException_3B, + "Ljava/lang/reflect/UndeclaredThrowableException;"); + 
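// The mangled member names used throughout this file mirror the descriptor
// strings passed to InitCacheClass: characters that are not valid in a C++
// identifier are encoded as '_' followed by their two-digit hex code, e.g.
// '/' -> _2F, ';' -> _3B, '$' -> _24 (and '|' -> _7C, '<' -> _3C in the cached
// method-address names). A hypothetical helper, for illustration only, that
// expands the three escapes used in class names:
#include <string>
#include <utility>

static std::string DemangleWellKnownClassName(std::string name) {
  static const std::pair<const char*, char> kEscapes[] = {
      { "_2F", '/' }, { "_3B", ';' }, { "_24", '$' }
  };
  for (const auto &escape : kEscapes) {
    for (size_t pos = name.find(escape.first); pos != std::string::npos;
         pos = name.find(escape.first, pos + 1)) {
      name.replace(pos, 3, 1, escape.second);  // three mangled chars -> one real char
    }
  }
  // e.g. "Ljava_2Flang_2FInteger_24IntegerCache_3B" -> "Ljava/lang/Integer$IntegerCache;"
  return name;
}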
InitCacheClass(Ljava_2Flang_2FArrayStoreException_3B, "Ljava/lang/ArrayStoreException;"); + InitCacheClass(Ljava_2Flang_2FArrayIndexOutOfBoundsException_3B, "Ljava/lang/ArrayIndexOutOfBoundsException;"); + InitCacheClass(Ljava_2Flang_2FNullPointerException_3B, "Ljava/lang/NullPointerException;"); +} + +void WellKnown::InitCacheMethodAddrs() { + InitCacheMethodAddr(Ljava_2Flang_2FClassLoader_3B_LoadClass_Addr, *Ljava_2Flang_2FClassLoader_3B, + "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;"); +#ifndef __OPENJDK__ + InitCacheMethodAddr(Llibcore_2Freflect_2FAnnotationMember_3B_7C_3Cinit_Addr, + *Llibcore_2Freflect_2FAnnotationMember_3B, + "<init>", "(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V"); + InitCacheMethodAddr(Llibcore_2Freflect_2FAnnotationFactory_3B_7CcreateAnnotation_Addr, + *Llibcore_2Freflect_2FAnnotationFactory_3B, "createAnnotation", + "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)Ljava/lang/annotation/Annotation;"); +#endif + InitCacheMethodAddr(Ljava_2Flang_2FBoolean_3B_ValueOf_Addr, *Ljava_2Flang_2FBoolean_3B, + "valueOf", "(Z)Ljava/lang/Boolean;"); + InitCacheMethodAddr(Ljava_2Flang_2FByte_3B_ValueOf_Addr, *Ljava_2Flang_2FByte_3B, "valueOf", "(B)Ljava/lang/Byte;"); + InitCacheMethodAddr(Ljava_2Flang_2FCharacter_3B_ValueOf_Addr, *Ljava_2Flang_2FCharacter_3B, + "valueOf", "(C)Ljava/lang/Character;"); + InitCacheMethodAddr(Ljava_2Flang_2FShort_3B_ValueOf_Addr, *Ljava_2Flang_2FShort_3B, + "valueOf", "(S)Ljava/lang/Short;"); + InitCacheMethodAddr(Ljava_2Flang_2FInteger_3B_ValueOf_Addr, *Ljava_2Flang_2FInteger_3B, + "valueOf", "(I)Ljava/lang/Integer;"); + InitCacheMethodAddr(Ljava_2Flang_2FLong_3B_ValueOf_Addr, *Ljava_2Flang_2FLong_3B, "valueOf", "(J)Ljava/lang/Long;"); + InitCacheMethodAddr(Ljava_2Flang_2FFloat_3B_ValueOf_Addr, *Ljava_2Flang_2FFloat_3B, + "valueOf", "(F)Ljava/lang/Float;"); + InitCacheMethodAddr(Ljava_2Flang_2FDouble_3B_ValueOf_Addr, *Ljava_2Flang_2FDouble_3B, + "valueOf", "(D)Ljava/lang/Double;"); +} + +void WellKnown::InitCacheFieldMethodHandleOffsets() { + InitCacheFieldOffset(Ljava_2Flang_2FMethodType_3B_ptypes_offset, *Ljava_2Flang_2Finvoke_2FMethodType_3B, "ptypes"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodType_3B_rtype_offset, *Ljava_2Flang_2Finvoke_2FMethodType_3B, "rtype"); +#ifdef METHODHANDLE_OPENJDK + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_dataArray_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "dataArray"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_metaArray_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "metaArray"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_typeArray_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "typeArray"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_opArray_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "opArray"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_index_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "index"); + InitCacheFieldOffset(Ljava_2Flang_2FBindToData_3B_receiver_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24BindToData_3B, "receiver"); + InitCacheFieldOffset(Ljava_2Flang_2FDropArgumentsData_3B_numDropped_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24DropArgumentsData_3B, "numDropped"); + InitCacheFieldOffset(Ljava_2Flang_2FDropArgumentsData_3B_startPos_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24DropArgumentsData_3B, "startPos"); + InitCacheFieldOffset(Ljava_2Flang_2FFilterReturnValueData_3B_target_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24FilterReturnValueData_3B, 
"target"); + InitCacheFieldOffset(Ljava_2Flang_2FFilterReturnValueData_3B_filter_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24FilterReturnValueData_3B, "filter"); + InitCacheFieldOffset(Ljava_2Flang_2FPermuteArgumentsData_3B_target_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24PermuteArgumentsData_3B, "target"); + InitCacheFieldOffset(Ljava_2Flang_2FPermuteArgumentsData_3B_reorder_offset, + *Ljava_2Flang_2Finvoke_2FInvokeData_24PermuteArgumentsData_3B, "reorder"); +#else +#ifndef __OPENJDK__ + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_artFieldOrMethod_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "artFieldOrMethod"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_handleKind_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "handleKind"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_nominalType_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "nominalType"); + InitCacheFieldOffset(Ljava_2Flang_2FMethodHandle_3B_type_offset, + *Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "type"); + InitCacheFieldOffset(Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_callsiteType_offset, + *Ldalvik_2Fsystem_2FEmulatedStackFrame_3B, "callsiteType"); + InitCacheFieldOffset(Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_references_offset, + *Ldalvik_2Fsystem_2FEmulatedStackFrame_3B, "references"); + InitCacheFieldOffset(Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_stackFrame_offset, + *Ldalvik_2Fsystem_2FEmulatedStackFrame_3B, "stackFrame"); + InitCacheFieldOffset(Ldalvik_2Fsystem_2FEmulatedStackFrame_3B_type_offset, + *Ldalvik_2Fsystem_2FEmulatedStackFrame_3B, "type"); +#endif // OPENJDK +#endif // METHODHANDLE_OPENJDK +} + +void WellKnown::InitCacheFieldOffsets() { + InitCacheFieldOffset(Ljava_2Flang_2FBoolean_3B_value_offset, *Ljava_2Flang_2FBoolean_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FByte_3B_value_offset, *Ljava_2Flang_2FByte_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FCharacter_3B_value_offset, *Ljava_2Flang_2FCharacter_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FShort_3B_value_offset, *Ljava_2Flang_2FShort_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FInteger_3B_value_offset, *Ljava_2Flang_2FInteger_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FLong_3B_value_offset, *Ljava_2Flang_2FLong_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FFloat_3B_value_offset, *Ljava_2Flang_2FFloat_3B, "value"); + InitCacheFieldOffset(Ljava_2Flang_2FDouble_3B_value_offset, *Ljava_2Flang_2FDouble_3B, "value"); + InitCacheFieldOffset(kReferenceReferentOffset, *Ljava_2Flang_2Fref_2FReference_3B, "referent"); + InitCacheFieldOffset(kReferenceQueueOffset, *Ljava_2Flang_2Fref_2FReference_3B, "queue"); + InitCacheFieldMethodHandleOffsets(); +#ifdef __OPENJDK__ + InitCacheFieldOffset(kReferencePendingnextOffset, *Ljava_2Flang_2Fref_2FReference_3B, "discovered"); +#else // libcore + InitCacheFieldOffset(kReferencePendingnextOffset, *Ljava_2Flang_2Fref_2FReference_3B, "pendingNext"); + InitCacheFieldOffset(kFinalizereferenceZombieOffset, *Ljava_2Flang_2Fref_2FFinalizerReference_3B, "zombie"); +#endif // __OPENJDK__ +} + +void WellKnown::InitCacheMethodHandleClasses() { + InitCacheClass(Ljava_2Flang_2Finvoke_2FMethodHandle_3B, "Ljava/lang/invoke/MethodHandle;"); + InitCacheClass(Ljava_2Flang_2Finvoke_2FMethodType_3B, "Ljava/lang/invoke/MethodType;"); +#ifdef METHODHANDLE_OPENJDK + InitCacheClass(Ljava_2Flang_2Finvoke_2FInvokeData_24BindToData_3B, + "Ljava/lang/invoke/InvokeData$BindToData;"); + InitCacheClass(Ljava_2Flang_2Finvoke_2FInvokeData_24DropArgumentsData_3B, + 
"Ljava/lang/invoke/InvokeData$DropArgumentsData;"); + InitCacheClass(Ljava_2Flang_2Finvoke_2FInvokeData_24FilterReturnValueData_3B, + "Ljava/lang/invoke/InvokeData$FilterReturnValueData;"); + InitCacheClass(Ljava_2Flang_2Finvoke_2FInvokeData_24PermuteArgumentsData_3B, + "Ljava/lang/invoke/InvokeData$PermuteArgumentsData;"); +#else +#ifndef __OPENJDK__ + InitCacheClass(Ldalvik_2Fsystem_2FEmulatedStackFrame_3B, "Ldalvik/system/EmulatedStackFrame;"); +#endif // __OPENJDK__ +#endif // METHODHANDLE_OPENJDK +} + +void WellKnown::InitCacheClasses() { + InitCachePrimitiveBoxClass(); + InitCacheArrayClass(); + InitCacheExceptionClass(); + InitCacheMethodHandleClasses(); + InitCacheClass(Ljava_2Flang_2FObject_3B, "Ljava/lang/Object;"); + InitCacheClass(Ljava_2Flang_2FClassLoader_3B, "Ljava/lang/ClassLoader;"); + InitCacheClass(Ljava_2Flang_2Freflect_2FField_3B, "Ljava/lang/reflect/Field;"); + InitCacheClass(Ljava_2Flang_2Freflect_2FConstructor_3B, "Ljava/lang/reflect/Constructor;"); + InitCacheClass(Ljava_2Flang_2Freflect_2FMethod_3B, "Ljava/lang/reflect/Method;"); + InitCacheClass(Ljava_2Flang_2Freflect_2FProxy_3B, "Ljava/lang/reflect/Proxy;"); + InitCacheClass(Ljava_2Flang_2Freflect_2FParameter_3B, "Ljava/lang/reflect/Parameter;"); +#ifndef __OPENJDK__ + InitCacheClass(Llibcore_2Freflect_2FGenericSignatureParser_3B, "Llibcore/reflect/GenericSignatureParser;"); +#endif // __OPENJDK__ + InitCacheClass(Ljava_2Flang_2FCloneable_3B, "Ljava/lang/Cloneable;"); + InitCacheClass(Ljava_2Fio_2FSerializable_3B, "Ljava/io/Serializable;"); + InitCacheClass(Ljava_2Flang_2FStringFactory_3B, "Ljava/lang/StringFactory;"); + InitCacheClass(Ljava_2Flang_2FEnum_3B, "Ljava/lang/Enum;"); + InitCacheClass(Ljava_2Flang_2Fannotation_2FAnnotation_3B, "Ljava/lang/annotation/Annotation;"); +#ifndef __OPENJDK__ + InitCacheClass(Llibcore_2Freflect_2FAnnotationMember_3B, "Llibcore/reflect/AnnotationMember;"); + InitCacheClass(Llibcore_2Freflect_2FAnnotationFactory_3B, "Llibcore/reflect/AnnotationFactory;"); + InitCacheClass(Ldalvik_2Fsystem_2FDelegateLastClassLoader_3B, "Ldalvik/system/DelegateLastClassLoader;"); + InitCacheClass(Ldalvik_2Fsystem_2FPathClassLoader_3B, "Ldalvik/system/PathClassLoader;"); + InitCacheClass(Ldalvik_2Fsystem_2FDexClassLoader_3B, "Ldalvik/system/DexClassLoader;"); + InitCacheClass(Ldalvik_2Fsystem_2FInMemoryDexClassLoader_3B, "Ldalvik/system/InMemoryDexClassLoader;"); +#endif // __OPENJDK__ +#ifdef __OPENJDK__ + InitCacheClass(Ljava_2Futil_2FHashMap_3B, "Ljava/util/HashMap;"); + InitCacheClass(Lsun_2Freflect_2Fannotation_2FAnnotationParser_3B, "Lsun/reflect/annotation/AnnotationParser;"); +#else // libcore + InitCacheClass(Ljava_2Flang_2Fref_2FFinalizerReference_3B, "Ljava/lang/ref/FinalizerReference;"); +#endif // __OPENJDK__ + InitCacheClass(Ljava_2Flang_2Fref_2FReference_3B, "Ljava/lang/ref/Reference;"); + + InitCacheClass(Ljava_2Flang_2FInteger_24IntegerCache_3B, "Ljava/lang/Integer$IntegerCache;"); + InitCacheClass(Ljava_2Flang_2FByte_24ByteCache_3B, "Ljava/lang/Byte$ByteCache;"); + InitCacheClass(Ljava_2Flang_2FShort_24ShortCache_3B, "Ljava/lang/Short$ShortCache;"); + InitCacheClass(Ljava_2Flang_2FCharacter_24CharacterCache_3B, "Ljava/lang/Character$CharacterCache;"); + InitCacheClass(Ljava_2Flang_2FLong_24LongCache_3B, "Ljava/lang/Long$LongCache;"); + + InitArrayInterfaceVector(); + // keep order with maple::Primitive::Type + primitiveArrayClass[maple::Primitive::kNot] = nullptr; + primitiveArrayClass[maple::Primitive::kBoolean] = primitiveClassAZ; + primitiveArrayClass[maple::Primitive::kByte] 
= primitiveClassAB; + primitiveArrayClass[maple::Primitive::kChar] = primitiveClassAC; + primitiveArrayClass[maple::Primitive::kShort] = primitiveClassAS; + primitiveArrayClass[maple::Primitive::kInt] = primitiveClassAI; + primitiveArrayClass[maple::Primitive::kLong] = primitiveClassAJ; + primitiveArrayClass[maple::Primitive::kFloat] = primitiveClassAF; + primitiveArrayClass[maple::Primitive::kDouble] = primitiveClassAD; +}; + +void WellKnown::InitCacheFieldMetas() { + InitCacheFieldMeta(Ljava_2Flang_2FBoolean_3B_TRUE, *Ljava_2Flang_2FBoolean_3B, "TRUE"); + InitCacheFieldMeta(Ljava_2Flang_2FBoolean_3B_FALSE, *Ljava_2Flang_2FBoolean_3B, "FALSE"); + InitCacheFieldMeta(Ljava_2Flang_2FByte_24ByteCache_3B_cache, *Ljava_2Flang_2FByte_24ByteCache_3B, "cache"); + InitCacheFieldMeta(Ljava_2Flang_2FShort_24ShortCache_3B_cache, *Ljava_2Flang_2FShort_24ShortCache_3B, "cache"); + InitCacheFieldMeta(Ljava_2Flang_2FCharacter_24CharacterCache_3B_cache, + *Ljava_2Flang_2FCharacter_24CharacterCache_3B, "cache"); + InitCacheFieldMeta(Ljava_2Flang_2FLong_24LongCache_3B_cache, *Ljava_2Flang_2FLong_24LongCache_3B, "cache"); + InitCacheFieldMeta(Ljava_2Flang_2FInteger_24IntegerCache_3B_cache, + *Ljava_2Flang_2FInteger_24IntegerCache_3B, "cache"); + InitCacheFieldMeta(Ljava_2Flang_2FInteger_24IntegerCache_3B_low, + *Ljava_2Flang_2FInteger_24IntegerCache_3B, "low"); + InitCacheFieldMeta(Ljava_2Flang_2FInteger_24IntegerCache_3B_high, + *Ljava_2Flang_2FInteger_24IntegerCache_3B, "high"); +} + +MethodMeta *WellKnown::GetStringFactoryConstructor(const MethodMeta &stringConstructor) { + struct StringInitMap { + const char *initSignature; + const char *stringFacName; + const char *stringFacSignature; + MethodMeta *stringFactoryConstructor; + }; + StringInitMap initMap[] = { + { "()V", "newEmptyString", "()Ljava/lang/String;", nullptr }, + { "([B)V", "newStringFromBytes", "([B)Ljava/lang/String;", nullptr }, + { "([BI)V", "newStringFromBytes", "([BI)Ljava/lang/String;", nullptr }, + { "([BII)V", "newStringFromBytes", "([BII)Ljava/lang/String;", nullptr }, + { "([BIII)V", "newStringFromBytes", "([BIII)Ljava/lang/String;", nullptr }, + { "([BIILjava/lang/String;)V", "newStringFromBytes", "([BIILjava/lang/String;)Ljava/lang/String;", nullptr }, + { "([BLjava/lang/String;)V", "newStringFromBytes", "([BLjava/lang/String;)Ljava/lang/String;", nullptr }, + { "([BIILjava/nio/charset/Charset;)V", "newStringFromBytes", + "([BIILjava/nio/charset/Charset;)Ljava/lang/String;", nullptr }, + { "([BLjava/nio/charset/Charset;)V", "newStringFromBytes", + "([BLjava/nio/charset/Charset;)Ljava/lang/String;", nullptr }, + { "([C)V", "newStringFromChars", "([C)Ljava/lang/String;", nullptr }, + { "([CII)V", "newStringFromChars", "([CII)Ljava/lang/String;", nullptr }, + { "(II[C)V", "newStringFromChars", "(II[C)Ljava/lang/String;", nullptr }, + { "(Ljava/lang/String;)V", "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;", nullptr }, + { "(Ljava/lang/StringBuffer;)V", "newStringFromStringBuffer", + "(Ljava/lang/StringBuffer;)Ljava/lang/String;", nullptr }, + { "([III)V", "newStringFromCodePoints", "([III)Ljava/lang/String;", nullptr }, + { "(Ljava/lang/StringBuilder;)V", "newStringFromStringBuilder", + "(Ljava/lang/StringBuilder;)Ljava/lang/String;", nullptr } + }; + MClass *classObj = stringConstructor.GetDeclaringClass(); + CHECK(classObj->IsStringClass()) << "must String Class." << maple::endl; + CHECK(stringConstructor.IsConstructor()) << "must String Constructor." 
<< maple::endl; + const char *signature = stringConstructor.GetSignature(); + constexpr uint32_t length = sizeof(initMap) / sizeof(struct StringInitMap); + uint32_t index = 0; + for (; index < length; ++index) { + if (strcmp(signature, initMap[index].initSignature) == 0) { + break; + } + } + CHECK(index < length) << "Not find String constructor from StringFactory" << maple::endl; + MethodMeta *stringFactoryCtor = initMap[index].stringFactoryConstructor; + if (stringFactoryCtor != nullptr) { + return stringFactoryCtor; + } + MethodMeta *ctor = WellKnown::GetMClassStringFactory()->GetDeclaredMethod(initMap[index].stringFacName, + initMap[index].stringFacSignature); + initMap[index].stringFactoryConstructor = ctor; + return ctor; +} + +void MRT_BootstrapWellKnown(void) { + WellKnown::InitCacheClasses(); + WellKnown::InitCacheMethodAddrs(); + WellKnown::InitCacheFieldOffsets(); + WellKnown::InitCacheFieldMetas(); +} + +jclass MRT_GetPrimitiveClassVoid(void) { + return WellKnown::GetMClassV()->AsJclass(); +} + +jclass MRT_GetClassClass(void) { + return WellKnown::GetMClassClass()->AsJclass(); +} + +jclass MRT_GetClassObject(void) { + return WellKnown::GetMClassObject()->AsJclass(); +} + +jclass MRT_GetClassString(void) { + return WellKnown::GetMClassString()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJboolean(void) { + return WellKnown::GetMClassAZ()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJbyte(void) { + return WellKnown::GetMClassAB()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJchar(void) { + return WellKnown::GetMClassAC()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJdouble(void) { + return WellKnown::GetMClassAD()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJfloat(void) { + return WellKnown::GetMClassAF()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJint(void) { + return WellKnown::GetMClassAI()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJlong(void) { + return WellKnown::GetMClassAJ()->AsJclass(); +} + +jclass MRT_GetPrimitiveArrayClassJshort(void) { + return WellKnown::GetMClassAS()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJboolean(void) { + return WellKnown::GetMClassZ()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJbyte(void) { + return WellKnown::GetMClassB()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJchar(void) { + return WellKnown::GetMClassC()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJdouble(void) { + return WellKnown::GetMClassD()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJfloat(void) { + return WellKnown::GetMClassF()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJint(void) { + return WellKnown::GetMClassI()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJlong(void) { + return WellKnown::GetMClassJ()->AsJclass(); +} + +jclass MRT_GetPrimitiveClassJshort(void) { + return WellKnown::GetMClassS()->AsJclass(); +} +} // namespace maplert diff --git a/src/mrt/maplert/src/mstring.cpp b/src/mrt/maplert/src/mstring.cpp new file mode 100644 index 0000000000..7c86bccd25 --- /dev/null +++ b/src/mrt/maplert/src/mstring.cpp @@ -0,0 +1,153 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mstring.h" +#include "mstring_inline.h" +#include "mrt_string.h" +#include "chelper.h" + +namespace maplert { +std::string MString::GetChars() const { + uint32_t length = GetLength(); + uint8_t *src = GetContentsPtr(); + + // Prepare result string + std::string res; + res.reserve(length); + + if (IsCompress()) { + // If it is compressed, just copy the content. + for (uint32_t i = 0; i < length; ++i) { + // IsCompress() implies each character is within 1 <= ch <= 0x7f + res.push_back(static_cast(src[i])); // safe to cast + } + __MRT_ASSERT(res.length() == length, "Length is different after copying"); + } else if (IsCompressChars(reinterpret_cast(src), length)) { + // If the string itself is not compressed, but every character is compressible, we compress on the fly. + for (uint32_t i = 0; i < length; ++i) { + // IsCompressChars() implies each character is within 1 <= ch <= 0x7f + res.push_back(static_cast(src[i * sizeof(uint16_t)])); // For little endian. Safe to cast. + } + __MRT_ASSERT(res.length() == length, "Length is different after conversion"); + } else { + // It contains non-ASCII characters. We need to handle it with care. + std::u16string str16(reinterpret_cast(src)); + // Note: The last parameter of UTF16ToUTF8 is called "isBigEndian", but it really means whether the endianness of + // each str16 character needs to be swapped. We do not need to swap endianness, thus we pass `true`. + uint32_t ret = namemangler::UTF16ToUTF8(res, str16, 0, true); + CHECK(ret <= str16.length()) << "namemangler::UTF16ToUTF8 in GetChars() fail" << maple::endl; + } + return res; +} + +MString *MString::Intern() { + return GetOrInsertStringPool(*this); +} + +MString *MString::InternUtf8(const std::string &str) { + uint32_t length = static_cast(str.length()); + MString *strObj = NewStringObject(reinterpret_cast(str.c_str()), length); + if (UNLIKELY(strObj == nullptr)) { + return nullptr; + } + MString *internedString = strObj->Intern(); + RC_LOCAL_DEC_REF(strObj); + return internedString; +} + +MString *MString::InternUtf16(const std::string &str) { + MString *strObj = NewStringObjectFromUtf16(str.c_str()); + if (UNLIKELY(strObj == nullptr)) { + return nullptr; + } + MString *internedString = strObj->Intern(); + RC_LOCAL_DEC_REF(strObj); + return internedString; +} + +MString *MString::InternUtf(const std::string &str) { + uint32_t length = static_cast(str.length()); + bool isCompress = IsCompressChars(reinterpret_cast(str.c_str()), length); + return isCompress ? 
InternUtf8(str) : InternUtf16(str); +} + +MString *MString::NewStringObjectFromUtf8(const std::string &str) { + uint32_t length = static_cast(str.length()); + MString *strObj = NewStringObject(reinterpret_cast(str.c_str()), length); + return strObj; +} + +MString *MString::NewStringObjectFromUtf16(const std::string &str) { + std::u16string str16; + (void)namemangler::UTF8ToUTF16(str16, str, 0, true); + MString *jStr = NewStringObject( + reinterpret_cast(str16.c_str()), static_cast(str16.length())); + return jStr; +} + +MString *MString::NewStringObjectFromUtf(const std::string &str) { + uint32_t length = static_cast(str.length()); + bool isCompress = IsCompressChars(reinterpret_cast(str.c_str()), length); + return isCompress ? NewStringObjectFromUtf8(str) : NewStringObjectFromUtf16(str); +} + +MString *MString::NewConstEmptyStringObject() { + static MString *constEmptyString = nullptr; + if (constEmptyString == nullptr) { + MString *emptyStr = NewEmptyStringObject(); + constEmptyString = emptyStr->Intern(); + RC_LOCAL_DEC_REF(emptyStr); + } + return constEmptyString; +} + +bool MString::Equals(const MString &src) const { + if (&src == this) { + return true; + } + uint32_t lenA = this->count; + uint32_t lenB = src.count; + bool aIscompress = (lenA & 0x1u) == 0x1u; + bool bIscompress = (lenB & 0x1u) == 0x1u; + lenA = lenA >> 1; + lenB = lenB >> 1; + if (lenA != lenB) { + return false; + } + // at here hash code should already been the same + const uint8_t *srcA = this->content; + const uint8_t *srcB = src.content; + if (aIscompress && bIscompress) { + return (memcmp(srcA, srcB, lenA) == 0); + } else if (!aIscompress && !bIscompress) { + return (memcmp(srcA, srcB, lenA * sizeof(uint16_t)) == 0); + } else { + const uint8_t *compressChars = nullptr; + const uint16_t *uncompressChars = nullptr; + if (aIscompress) { + compressChars = srcA; + uncompressChars = reinterpret_cast(srcB); + } else { + compressChars = srcB; + uncompressChars = reinterpret_cast(srcA); + } + for (uint32_t i = 0; i < lenA; ++i) { + if (compressChars[i] != uncompressChars[i]) { + return false; + } + } + return true; + } +} +} // namespace maplert -- Gitee From d0ea48fefdd34ca547e9a8fe007db7418c4f62b8 Mon Sep 17 00:00:00 2001 From: binaryfz Date: Tue, 1 Dec 2020 10:36:58 +0800 Subject: [PATCH 5/9] update mapleall lcn header,add dex2mpl --- src/mapleall/BUILD.gn | 10 +++++----- src/mapleall/bin/dex2mpl | Bin 0 -> 3791984 bytes src/mapleall/bin/dex2mpl_android | Bin 0 -> 3810792 bytes src/mapleall/bin/java2jar | 10 +++++----- src/mapleall/bin/jbc2mpl | Bin 3177024 -> 3177024 bytes src/mapleall/huawei_secure_c/BUILD.gn | 10 +++++----- .../huawei_secure_c/include/securec.h | 10 +++++----- .../huawei_secure_c/include/securectype.h | 10 +++++----- src/mapleall/huawei_secure_c/src/fscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/fwscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/gets_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/memcpy_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/memmove_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/memset_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/scanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/secinput.h | 10 +++++----- .../huawei_secure_c/src/securecutil.c | 10 +++++----- .../huawei_secure_c/src/securecutil.h | 10 +++++----- .../huawei_secure_c/src/secureinput_a.c | 10 +++++----- .../huawei_secure_c/src/secureinput_w.c | 10 +++++----- .../huawei_secure_c/src/secureprintoutput.h | 10 +++++----- 
.../huawei_secure_c/src/secureprintoutput_a.c | 10 +++++----- .../huawei_secure_c/src/secureprintoutput_w.c | 10 +++++----- src/mapleall/huawei_secure_c/src/snprintf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/sprintf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/sscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/strcat_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/strcpy_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/strncat_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/strncpy_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/strtok_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/swprintf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/swscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vfscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vfwscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vscanf_s.c | 10 +++++----- .../huawei_secure_c/src/vsnprintf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vsprintf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vsscanf_s.c | 10 +++++----- .../huawei_secure_c/src/vswprintf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vswscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/vwscanf_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wcscat_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wcscpy_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wcsncat_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wcsncpy_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wcstok_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wmemcpy_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wmemmove_s.c | 10 +++++----- src/mapleall/huawei_secure_c/src/wscanf_s.c | 10 +++++----- src/mapleall/maple_be/BUILD.gn | 11 ++++++----- src/mapleall/maple_be/include/ad/mad.h | 16 ++++++++-------- .../maple_be/include/be/aarch64/aarch64_rt.h | 10 +++++----- src/mapleall/maple_be/include/be/bbt.h | 10 +++++----- src/mapleall/maple_be/include/be/becommon.h | 10 +++++----- .../maple_be/include/be/common_utils.h | 10 +++++----- src/mapleall/maple_be/include/be/lower.h | 10 +++++----- src/mapleall/maple_be/include/be/rt.h | 10 +++++----- .../maple_be/include/be/switch_lowerer.h | 10 +++++----- src/mapleall/maple_be/include/be/try_catch.h | 10 +++++----- .../maple_be/include/cg/aarch64/aarch64_abi.h | 10 +++++----- .../include/cg/aarch64/aarch64_args.h | 10 +++++----- .../maple_be/include/cg/aarch64/aarch64_cg.h | 10 +++++----- .../include/cg/aarch64/aarch64_cgfunc.h | 10 +++++----- .../include/cg/aarch64/aarch64_color_ra.h | 10 +++++----- .../include/cg/aarch64/aarch64_dependence.h | 10 +++++----- .../maple_be/include/cg/aarch64/aarch64_ebo.h | 10 +++++----- .../include/cg/aarch64/aarch64_emitter.h | 10 +++++----- .../cg/aarch64/aarch64_fixshortbranch.h | 10 +++++----- .../include/cg/aarch64/aarch64_global.h | 10 +++++----- .../maple_be/include/cg/aarch64/aarch64_ico.h | 10 +++++----- .../include/cg/aarch64/aarch64_immediate.h | 10 +++++----- .../include/cg/aarch64/aarch64_insn.h | 10 +++++----- .../maple_be/include/cg/aarch64/aarch64_isa.h | 10 +++++----- .../include/cg/aarch64/aarch64_live.h | 10 +++++----- .../include/cg/aarch64/aarch64_memlayout.h | 10 +++++----- .../cg/aarch64/aarch64_offset_adjust.h | 10 +++++----- .../include/cg/aarch64/aarch64_operand.h | 10 +++++----- .../cg/aarch64/aarch64_optimize_common.h | 10 +++++----- .../include/cg/aarch64/aarch64_peep.h | 10 +++++----- .../include/cg/aarch64/aarch64_proepilog.h | 10 +++++----- 
.../include/cg/aarch64/aarch64_reaching.h | 10 +++++----- .../include/cg/aarch64/aarch64_reg_alloc.h | 10 +++++----- .../include/cg/aarch64/aarch64_schedule.h | 10 +++++----- .../include/cg/aarch64/aarch64_strldr.h | 10 +++++----- .../include/cg/aarch64/aarch64_yieldpoint.h | 10 +++++----- .../maple_be/include/cg/aarch64/mpl_atomic.h | 10 +++++----- src/mapleall/maple_be/include/cg/args.h | 10 +++++----- src/mapleall/maple_be/include/cg/asm_emit.h | 10 +++++----- src/mapleall/maple_be/include/cg/asm_info.h | 10 +++++----- src/mapleall/maple_be/include/cg/cfgo.h | 10 +++++----- src/mapleall/maple_be/include/cg/cfi.h | 10 +++++----- src/mapleall/maple_be/include/cg/cg.h | 10 +++++----- src/mapleall/maple_be/include/cg/cg_cfg.h | 10 +++++----- src/mapleall/maple_be/include/cg/cg_option.h | 10 +++++----- src/mapleall/maple_be/include/cg/cg_phase.h | 10 +++++----- .../maple_be/include/cg/cg_phasemanager.h | 10 +++++----- src/mapleall/maple_be/include/cg/cgbb.h | 10 +++++----- src/mapleall/maple_be/include/cg/cgfunc.h | 10 +++++----- src/mapleall/maple_be/include/cg/datainfo.h | 10 +++++----- src/mapleall/maple_be/include/cg/dependence.h | 10 +++++----- src/mapleall/maple_be/include/cg/deps.h | 10 +++++----- src/mapleall/maple_be/include/cg/ebo.h | 10 +++++----- src/mapleall/maple_be/include/cg/eh_func.h | 10 +++++----- src/mapleall/maple_be/include/cg/emit.h | 10 +++++----- src/mapleall/maple_be/include/cg/global.h | 10 +++++----- src/mapleall/maple_be/include/cg/ico.h | 10 +++++----- src/mapleall/maple_be/include/cg/insn.h | 10 +++++----- src/mapleall/maple_be/include/cg/isa.h | 10 +++++----- .../maple_be/include/cg/label_creation.h | 10 +++++----- src/mapleall/maple_be/include/cg/live.h | 10 +++++----- src/mapleall/maple_be/include/cg/loop.h | 10 +++++----- src/mapleall/maple_be/include/cg/lsda.h | 10 +++++----- src/mapleall/maple_be/include/cg/memlayout.h | 10 +++++----- .../maple_be/include/cg/offset_adjust.h | 10 +++++----- src/mapleall/maple_be/include/cg/operand.h | 10 +++++----- .../maple_be/include/cg/optimize_common.h | 10 +++++----- src/mapleall/maple_be/include/cg/peep.h | 10 +++++----- src/mapleall/maple_be/include/cg/pressure.h | 10 +++++----- src/mapleall/maple_be/include/cg/proepilog.h | 10 +++++----- src/mapleall/maple_be/include/cg/reaching.h | 10 +++++----- src/mapleall/maple_be/include/cg/reg_alloc.h | 10 +++++----- src/mapleall/maple_be/include/cg/schedule.h | 10 +++++----- src/mapleall/maple_be/include/cg/strldr.h | 10 +++++----- src/mapleall/maple_be/include/cg/yieldpoint.h | 10 +++++----- src/mapleall/maple_be/mdgen/gendef.py | 10 +++++----- .../maple_be/mdgen/include/mdgenerator.h | 10 +++++----- src/mapleall/maple_be/mdgen/include/mdlexer.h | 10 +++++----- .../maple_be/mdgen/include/mdparser.h | 10 +++++----- .../maple_be/mdgen/include/mdrecord.h | 10 +++++----- .../maple_be/mdgen/include/mdtokens.h | 10 +++++----- .../maple_be/mdgen/src/mdgenerator.cpp | 10 +++++----- src/mapleall/maple_be/mdgen/src/mdlexer.cpp | 10 +++++----- src/mapleall/maple_be/mdgen/src/mdmain.cpp | 10 +++++----- src/mapleall/maple_be/mdgen/src/mdparser.cpp | 10 +++++----- src/mapleall/maple_be/mdgen/src/mdrecord.cpp | 10 +++++----- src/mapleall/maple_be/src/ad/mad.cpp | 10 +++++----- src/mapleall/maple_be/src/be/bbt.cpp | 10 +++++----- src/mapleall/maple_be/src/be/becommon.cpp | 10 +++++----- src/mapleall/maple_be/src/be/lower.cpp | 10 +++++----- src/mapleall/maple_be/src/be/rt.cpp | 10 +++++----- .../maple_be/src/be/switch_lowerer.cpp | 10 +++++----- .../maple_be/src/be/trycatchblockslower.cpp 
| 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_abi.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_args.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_cg.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_cgfunc.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_color_ra.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_dependence.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_ebo.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_emitter.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_fixshortbranch.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_global.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_ico.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_immediate.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_insn.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_isa.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_live.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_memlayout.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_offset_adjust.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_operand.cpp | 10 +++++----- .../cg/aarch64/aarch64_optimize_common.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/aarch64_peep.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_proepilog.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_reaching.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_reg_alloc.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_schedule.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_strldr.cpp | 10 +++++----- .../src/cg/aarch64/aarch64_yieldpoint.cpp | 10 +++++----- .../maple_be/src/cg/aarch64/mpl_atomic.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/args.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cfgo.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cfi.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cg.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cg_cfg.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cg_option.cpp | 10 +++++----- .../maple_be/src/cg/cg_phasemanager.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cgbb.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/cgfunc.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/ebo.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/eh_func.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/emit.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/global.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/ico.cpp | 10 +++++----- .../maple_be/src/cg/label_creation.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/live.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/loop.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/memlayout.cpp | 10 +++++----- .../maple_be/src/cg/offset_adjust.cpp | 10 +++++----- .../maple_be/src/cg/optimize_common.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/peep.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/pressure.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/proepilog.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/reaching.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/schedule.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/script/genmop.py | 10 +++++----- src/mapleall/maple_be/src/cg/strldr.cpp | 10 +++++----- src/mapleall/maple_be/src/cg/yieldpoint.cpp | 10 +++++----- src/mapleall/maple_driver/BUILD.gn | 11 ++++++----- src/mapleall/maple_driver/include/compiler.h | 10 +++++----- .../maple_driver/include/compiler_factory.h | 10 +++++----- .../maple_driver/include/compiler_selector.h | 10 +++++----- .../include/driver_option_common.h | 10 +++++----- .../maple_driver/include/driver_runner.h | 10 +++++----- .../maple_driver/include/file_utils.h | 10 +++++----- 
.../maple_driver/include/jbc2mpl_option.h | 10 +++++----- .../maple_driver/include/mpl_options.h | 10 +++++----- .../maple_driver/include/option_descriptor.h | 10 +++++----- .../maple_driver/include/option_parser.h | 10 +++++----- src/mapleall/maple_driver/include/safe_exe.h | 10 +++++----- src/mapleall/maple_driver/src/compiler.cpp | 10 +++++----- .../maple_driver/src/compiler_factory.cpp | 10 +++++----- .../maple_driver/src/compiler_selector.cpp | 10 +++++----- .../maple_driver/src/driver_option_common.cpp | 10 +++++----- .../maple_driver/src/driver_runner.cpp | 10 +++++----- src/mapleall/maple_driver/src/file_utils.cpp | 10 +++++----- .../maple_driver/src/jbc2mpl_compiler.cpp | 10 +++++----- src/mapleall/maple_driver/src/maple.cpp | 10 +++++----- .../maple_driver/src/maple_comb_compiler.cpp | 10 +++++----- src/mapleall/maple_driver/src/mpl_options.cpp | 10 +++++----- .../maple_driver/src/mplcg_compiler.cpp | 10 +++++----- .../maple_driver/src/option_parser.cpp | 10 +++++----- src/mapleall/maple_ipa/BUILD.gn | 10 +++++----- src/mapleall/maple_ipa/include/call_graph.h | 10 +++++----- src/mapleall/maple_ipa/include/clone.h | 10 +++++----- .../maple_ipa/include/interleaved_manager.h | 10 +++++----- src/mapleall/maple_ipa/include/module_phase.h | 10 +++++----- .../maple_ipa/include/module_phase_manager.h | 10 +++++----- src/mapleall/maple_ipa/include/retype.h | 10 +++++----- src/mapleall/maple_ipa/src/call_graph.cpp | 10 +++++----- src/mapleall/maple_ipa/src/clone.cpp | 10 +++++----- .../maple_ipa/src/interleaved_manager.cpp | 10 +++++----- .../maple_ipa/src/module_phase_manager.cpp | 13 +++++++------ src/mapleall/maple_ipa/src/retype.cpp | 10 +++++----- src/mapleall/maple_ir/BUILD.gn | 10 +++++----- src/mapleall/maple_ir/include/bin_mir_file.h | 10 +++++----- .../maple_ir/include/bin_mpl_export.h | 10 +++++----- .../maple_ir/include/bin_mpl_import.h | 10 +++++----- src/mapleall/maple_ir/include/bin_mplt.h | 10 +++++----- .../maple_ir/include/cfg_primitive_types.h | 10 +++++----- src/mapleall/maple_ir/include/global_tables.h | 10 +++++----- src/mapleall/maple_ir/include/intrinsic_op.h | 10 +++++----- src/mapleall/maple_ir/include/intrinsics.h | 10 +++++----- src/mapleall/maple_ir/include/java_eh_lower.h | 10 +++++----- src/mapleall/maple_ir/include/lexer.h | 10 +++++----- .../maple_ir/include/metadata_layout.h | 14 +++++++------- src/mapleall/maple_ir/include/mir_builder.h | 10 +++++----- src/mapleall/maple_ir/include/mir_config.h | 10 +++++----- src/mapleall/maple_ir/include/mir_const.h | 10 +++++----- src/mapleall/maple_ir/include/mir_function.h | 10 +++++----- src/mapleall/maple_ir/include/mir_lower.h | 10 +++++----- src/mapleall/maple_ir/include/mir_module.h | 10 +++++----- src/mapleall/maple_ir/include/mir_nodes.h | 10 +++++----- src/mapleall/maple_ir/include/mir_parser.h | 10 +++++----- src/mapleall/maple_ir/include/mir_pragma.h | 10 +++++----- src/mapleall/maple_ir/include/mir_preg.h | 10 +++++----- src/mapleall/maple_ir/include/mir_symbol.h | 10 +++++----- .../maple_ir/include/mir_symbol_builder.h | 14 +++++++------- src/mapleall/maple_ir/include/mir_type.h | 10 +++++----- src/mapleall/maple_ir/include/opcode_info.h | 10 +++++----- src/mapleall/maple_ir/include/opcodes.h | 10 +++++----- src/mapleall/maple_ir/include/option.h | 10 +++++----- src/mapleall/maple_ir/include/parser_opt.h | 10 +++++----- src/mapleall/maple_ir/include/prim_types.h | 10 +++++----- src/mapleall/maple_ir/include/printing.h | 10 +++++----- src/mapleall/maple_ir/include/tokens.h | 10 +++++----- 
src/mapleall/maple_ir/include/types_def.h | 10 +++++----- src/mapleall/maple_ir/src/bin_mpl_export.cpp | 10 +++++----- src/mapleall/maple_ir/src/bin_mpl_import.cpp | 10 +++++----- src/mapleall/maple_ir/src/driver.cpp | 10 +++++----- src/mapleall/maple_ir/src/global_tables.cpp | 10 +++++----- src/mapleall/maple_ir/src/intrinsics.cpp | 10 +++++----- src/mapleall/maple_ir/src/lexer.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_builder.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_const.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_function.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_lower.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_module.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_nodes.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_parser.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_pragma.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_symbol.cpp | 10 +++++----- .../maple_ir/src/mir_symbol_builder.cpp | 10 +++++----- src/mapleall/maple_ir/src/mir_type.cpp | 10 +++++----- src/mapleall/maple_ir/src/opcode_info.cpp | 10 +++++----- src/mapleall/maple_ir/src/option.cpp | 10 +++++----- src/mapleall/maple_ir/src/parser.cpp | 10 +++++----- src/mapleall/maple_ir/src/printing.cpp | 10 +++++----- src/mapleall/maple_me/BUILD.gn | 10 +++++----- .../maple_me/include/alias_analysis_table.h | 10 +++++----- src/mapleall/maple_me/include/alias_class.h | 10 +++++----- src/mapleall/maple_me/include/bb.h | 10 +++++----- src/mapleall/maple_me/include/dominance.h | 10 +++++----- src/mapleall/maple_me/include/dse.h | 10 +++++----- src/mapleall/maple_me/include/func_emit.h | 10 +++++----- src/mapleall/maple_me/include/hdse.h | 10 +++++----- src/mapleall/maple_me/include/irmap.h | 10 +++++----- src/mapleall/maple_me/include/me_abco.h | 10 +++++----- .../maple_me/include/me_alias_class.h | 10 +++++----- src/mapleall/maple_me/include/me_analyze_rc.h | 10 +++++----- .../maple_me/include/me_analyzector.h | 10 +++++----- src/mapleall/maple_me/include/me_bb_layout.h | 10 +++++----- src/mapleall/maple_me/include/me_builder.h | 14 +++++++------- src/mapleall/maple_me/include/me_bypath_eh.h | 10 +++++----- src/mapleall/maple_me/include/me_cfg.h | 10 +++++----- src/mapleall/maple_me/include/me_cfg_mst.h | 10 +++++----- src/mapleall/maple_me/include/me_cond_based.h | 10 +++++----- .../maple_me/include/me_cond_based_npc.h | 10 +++++----- .../maple_me/include/me_cond_based_rc.h | 10 +++++----- src/mapleall/maple_me/include/me_const.h | 10 +++++----- .../maple_me/include/me_critical_edge.h | 10 +++++----- .../maple_me/include/me_delegate_rc.h | 10 +++++----- src/mapleall/maple_me/include/me_dominance.h | 10 +++++----- src/mapleall/maple_me/include/me_dse.h | 10 +++++----- src/mapleall/maple_me/include/me_emit.h | 10 +++++----- src/mapleall/maple_me/include/me_function.h | 10 +++++----- src/mapleall/maple_me/include/me_hdse.h | 10 +++++----- .../maple_me/include/me_inequality_graph.h | 10 +++++----- src/mapleall/maple_me/include/me_ir.h | 10 +++++----- src/mapleall/maple_me/include/me_irmap.h | 10 +++++----- .../maple_me/include/me_loop_analysis.h | 10 +++++----- src/mapleall/maple_me/include/me_loop_canon.h | 10 +++++----- .../maple_me/include/me_lower_globals.h | 10 +++++----- .../maple_me/include/me_may2dassign.h | 10 +++++----- src/mapleall/maple_me/include/me_option.h | 10 +++++----- .../maple_me/include/me_pgo_instrument.h | 10 +++++----- src/mapleall/maple_me/include/me_phase.h | 10 +++++----- .../maple_me/include/me_phase_manager.h | 10 +++++----- 
.../maple_me/include/me_profile_gen.h | 10 +++++----- .../maple_me/include/me_profile_use.h | 10 +++++----- src/mapleall/maple_me/include/me_prop.h | 10 +++++----- .../maple_me/include/me_rc_lowering.h | 10 +++++----- .../maple_me/include/me_rename2preg.h | 10 +++++----- src/mapleall/maple_me/include/me_ssa.h | 10 +++++----- .../maple_me/include/me_ssa_devirtual.h | 10 +++++----- src/mapleall/maple_me/include/me_ssa_epre.h | 10 +++++----- src/mapleall/maple_me/include/me_ssa_lpre.h | 10 +++++----- src/mapleall/maple_me/include/me_ssa_tab.h | 10 +++++----- src/mapleall/maple_me/include/me_ssa_update.h | 10 +++++----- src/mapleall/maple_me/include/me_ssi.h | 10 +++++----- src/mapleall/maple_me/include/me_ssu_pre.h | 10 +++++----- src/mapleall/maple_me/include/me_stmt_pre.h | 10 +++++----- src/mapleall/maple_me/include/me_store_pre.h | 10 +++++----- src/mapleall/maple_me/include/occur.h | 10 +++++----- src/mapleall/maple_me/include/orig_symbol.h | 10 +++++----- src/mapleall/maple_me/include/preg_renamer.h | 10 +++++----- src/mapleall/maple_me/include/prop.h | 10 +++++----- src/mapleall/maple_me/include/ssa.h | 10 +++++----- src/mapleall/maple_me/include/ssa_devirtual.h | 10 +++++----- src/mapleall/maple_me/include/ssa_epre.h | 10 +++++----- src/mapleall/maple_me/include/ssa_mir_nodes.h | 10 +++++----- src/mapleall/maple_me/include/ssa_pre.h | 10 +++++----- src/mapleall/maple_me/include/ssa_tab.h | 10 +++++----- src/mapleall/maple_me/include/union_find.h | 10 +++++----- src/mapleall/maple_me/include/ver_symbol.h | 10 +++++----- .../maple_me/src/alias_analysis_table.cpp | 10 +++++----- src/mapleall/maple_me/src/alias_class.cpp | 10 +++++----- src/mapleall/maple_me/src/bb.cpp | 10 +++++----- src/mapleall/maple_me/src/dominance.cpp | 10 +++++----- src/mapleall/maple_me/src/dse.cpp | 10 +++++----- src/mapleall/maple_me/src/func_emit.cpp | 10 +++++----- src/mapleall/maple_me/src/hdse.cpp | 10 +++++----- src/mapleall/maple_me/src/irmap.cpp | 10 +++++----- src/mapleall/maple_me/src/irmap_emit.cpp | 10 +++++----- src/mapleall/maple_me/src/me_abco.cpp | 10 +++++----- src/mapleall/maple_me/src/me_alias_class.cpp | 10 +++++----- src/mapleall/maple_me/src/me_analyze_rc.cpp | 10 +++++----- src/mapleall/maple_me/src/me_analyzector.cpp | 10 +++++----- src/mapleall/maple_me/src/me_bb_layout.cpp | 10 +++++----- src/mapleall/maple_me/src/me_builder.cpp | 10 +++++----- src/mapleall/maple_me/src/me_bypath_eh.cpp | 10 +++++----- src/mapleall/maple_me/src/me_cfg.cpp | 10 +++++----- .../maple_me/src/me_cond_based_opt.cpp | 10 +++++----- .../maple_me/src/me_critical_edge.cpp | 10 +++++----- src/mapleall/maple_me/src/me_delegate_rc.cpp | 10 +++++----- src/mapleall/maple_me/src/me_dominance.cpp | 10 +++++----- src/mapleall/maple_me/src/me_dse.cpp | 10 +++++----- src/mapleall/maple_me/src/me_emit.cpp | 10 +++++----- src/mapleall/maple_me/src/me_function.cpp | 10 +++++----- src/mapleall/maple_me/src/me_hdse.cpp | 10 +++++----- .../maple_me/src/me_inequality_graph.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ir.cpp | 10 +++++----- src/mapleall/maple_me/src/me_irmap.cpp | 10 +++++----- .../maple_me/src/me_loop_analysis.cpp | 10 +++++----- src/mapleall/maple_me/src/me_loop_canon.cpp | 10 +++++----- .../maple_me/src/me_lower_globals.cpp | 10 +++++----- src/mapleall/maple_me/src/me_may2dassign.cpp | 10 +++++----- src/mapleall/maple_me/src/me_option.cpp | 10 +++++----- .../maple_me/src/me_phase_manager.cpp | 10 +++++----- src/mapleall/maple_me/src/me_profile_gen.cpp | 10 +++++----- 
src/mapleall/maple_me/src/me_profile_use.cpp | 10 +++++----- src/mapleall/maple_me/src/me_prop.cpp | 10 +++++----- src/mapleall/maple_me/src/me_rc_lowering.cpp | 10 +++++----- src/mapleall/maple_me/src/me_rename2preg.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssa.cpp | 10 +++++----- .../maple_me/src/me_ssa_devirtual.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssa_epre.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssa_lpre.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssa_tab.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssa_update.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssi.cpp | 10 +++++----- src/mapleall/maple_me/src/me_ssu_pre.cpp | 10 +++++----- src/mapleall/maple_me/src/me_stmt_fre.cpp | 10 +++++----- src/mapleall/maple_me/src/me_stmt_pre.cpp | 10 +++++----- src/mapleall/maple_me/src/me_store_pre.cpp | 10 +++++----- src/mapleall/maple_me/src/occur.cpp | 10 +++++----- src/mapleall/maple_me/src/orig_symbol.cpp | 10 +++++----- src/mapleall/maple_me/src/preg_renamer.cpp | 10 +++++----- src/mapleall/maple_me/src/prop.cpp | 10 +++++----- src/mapleall/maple_me/src/ssa.cpp | 10 +++++----- src/mapleall/maple_me/src/ssa_devirtual.cpp | 10 +++++----- src/mapleall/maple_me/src/ssa_epre.cpp | 10 +++++----- src/mapleall/maple_me/src/ssa_mir_nodes.cpp | 10 +++++----- src/mapleall/maple_me/src/ssa_pre.cpp | 10 +++++----- src/mapleall/maple_me/src/ssa_tab.cpp | 10 +++++----- src/mapleall/maple_me/src/ver_symbol.cpp | 10 +++++----- src/mapleall/maple_phase/include/phase.h | 10 +++++----- src/mapleall/maple_phase/include/phase_impl.h | 10 +++++----- .../maple_phase/include/phase_manager.h | 10 +++++----- src/mapleall/maple_util/include/error_code.h | 10 +++++----- src/mapleall/maple_util/include/factory.h | 10 +++++----- src/mapleall/maple_util/include/file_layout.h | 14 +++++++------- src/mapleall/maple_util/include/itab_util.h | 14 +++++++------- .../maple_util/include/literalstrname.h | 10 +++++----- src/mapleall/maple_util/include/mpl_logging.h | 10 +++++----- src/mapleall/maple_util/include/mpl_number.h | 10 +++++----- .../maple_util/include/mpl_scheduler.h | 14 +++++++------- src/mapleall/maple_util/include/mpl_timer.h | 10 +++++----- src/mapleall/maple_util/include/muid.h | 14 +++++++------- src/mapleall/maple_util/include/namemangler.h | 10 +++++----- src/mapleall/maple_util/include/profile.h | 10 +++++----- .../maple_util/include/profile_type.h | 10 +++++----- src/mapleall/maple_util/include/ptr.h | 10 +++++----- .../maple_util/include/ptr_list_ref.h | 10 +++++----- src/mapleall/maple_util/include/safe_cast.h | 10 +++++----- src/mapleall/maple_util/include/safe_ptr.h | 10 +++++----- .../maple_util/include/string_utils.h | 10 +++++----- src/mapleall/maple_util/include/utils.h | 10 +++++----- .../maple_util/include/utils/iterator.h | 10 +++++----- src/mapleall/maple_util/include/utils/meta.h | 14 +++++++------- .../maple_util/include/utils/ref_vector.h | 10 +++++----- src/mapleall/maple_util/include/version.h | 14 +++++++------- src/mapleall/maple_util/src/profile.cpp | 10 +++++----- src/mapleall/mempool/include/maple_string.h | 10 +++++----- src/mapleall/mempool/include/mempool.h | 10 +++++----- .../mempool/include/mempool_allocator.h | 10 +++++----- src/mapleall/mpl2mpl/BUILD.gn | 10 +++++----- .../mpl2mpl/include/annotation_analysis.h | 10 +++++----- .../mpl2mpl/include/class_hierarchy.h | 10 +++++----- src/mapleall/mpl2mpl/include/class_init.h | 10 +++++----- src/mapleall/mpl2mpl/include/coderelayout.h | 10 +++++----- src/mapleall/mpl2mpl/include/constantfold.h | 
 src/mapleall/mpl2mpl/include/gen_check_cast.h | 10 +++++-----
 .../mpl2mpl/include/java_intrn_lowering.h | 10 +++++-----
 .../mpl2mpl/include/muid_replacement.h | 10 +++++-----
 .../mpl2mpl/include/native_stub_func.h | 10 +++++-----
 .../mpl2mpl/include/reflection_analysis.h | 12 ++++++------
 .../mpl2mpl/include/vtable_analysis.h | 10 +++++-----
 src/mapleall/mpl2mpl/include/vtable_impl.h | 10 +++++-----
 .../mpl2mpl/src/annotation_analysis.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/class_hierarchy.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/class_init.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/coderelayout.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/constantfold.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/gen_check_cast.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/java_eh_lower.cpp | 10 +++++-----
 .../mpl2mpl/src/java_intrn_lowering.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/muid_replacement.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/native_stub_func.cpp | 10 +++++-----
 .../mpl2mpl/src/reflection_analysis.cpp | 11 ++++++-----
 src/mapleall/mpl2mpl/src/vtable_analysis.cpp | 10 +++++-----
 src/mapleall/mpl2mpl/src/vtable_impl.cpp | 10 +++++-----
 472 files changed, 2372 insertions(+), 2368 deletions(-)
 create mode 100755 src/mapleall/bin/dex2mpl
 create mode 100755 src/mapleall/bin/dex2mpl_android

diff --git a/src/mapleall/BUILD.gn b/src/mapleall/BUILD.gn
index 4b8a86442e..34e983a3a1 100644
--- a/src/mapleall/BUILD.gn
+++ b/src/mapleall/BUILD.gn
@@ -1,16 +1,16 @@
 #
 # Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
 #
-# OpenArkCompiler is licensed under the Mulan PSL v1.
-# You can use this software according to the terms and conditions of the Mulan PSL v1.
-# You may obtain a copy of Mulan PSL v1 at:
+# OpenArkCompiler is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
 #
-# http://license.coscl.org.cn/MulanPSL
+# http://license.coscl.org.cn/MulanPSL2
 #
 # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
 # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
 # FIT FOR A PARTICULAR PURPOSE.
-# See the Mulan PSL v1 for more details.
+# See the Mulan PSL v2 for more details.
# config("mapleallcompilecfg") { cflags_cc = [] diff --git a/src/mapleall/bin/dex2mpl b/src/mapleall/bin/dex2mpl new file mode 100755 index 0000000000000000000000000000000000000000..2ccee50cd7271fcb59458fc4fdea5d8e4e91caa5 GIT binary patch literal 3791984 zcmeEv2Vhji^Z!c-7>XE*h=O<^D%eP)S&tGVfDn_2STC34k{qO*$t4g}JP{QYG@>G6 zi`YdpVneY+Ma3RFU)wigLq&~<0*dhWb_CRe|hs!}I<~oyD*lBaF`YYcmcqIsh7jWS6_<&w^;ap+93K zUpsu#bN9-9^Jiblr$04?^hwWIUCIApy7i|K$2sY__ViWiW&Vsm=x~)$e+EBCbu4Re6-;g;UGSd%+YJQN=PL<{(N7Vrlw;ICQ0zp;RK z#CQ^p4~JX83jyyIM(YN`~(YlrUiVu1^iqK zc%=pWNelRQ7Vv~e!}D{v1^fgHc!~vlwgvni3wW&syxU{p={y1OaQ$ebMgI1WhsQJ4 z0)CkV{0J0^aG# z@OTDUz$aM1F9SRre{QjWudsl3ekwdYeE}a3M*k;RdVHWV|--XBX)yDAfUoGIQVaMp3;2B&@Fy(bZ&|?Kvw;7{0{&kM_%{~tpDf_6=S->B*fInpcf6)T|t_A#K3-}in@C_F5pDo~9Ea3lGz+>Ro z4JZGdEa3ZCz+XDWj1$>hQe5(a~ zpMAp9Io|?aWdXm{0{*N8`~wU4mlp6zapCE_^J>|-?Cv7gc&KZp+L-8V|XtG*kd zAZm>c-xjYVJ7gcjSj+I=+VKNxfxny4%c#C#qyqG2uu&gNO@rBBbKZMJj zEqJ<1xwo0hP43O~$6d|zbM3G;IfU{17WYsXh6w)4*9hO-HU&Ra;QI^yhfVlx0zdZp z(E3&$#CYt#GyX$`&f_sJAbxIL!t|i}>bl`qsqgS4^?Rh0dzj!KYr;QI+N&m;`A{Kv zj+Xq1ru-|wsQze|&$cjwbvw@7Bz!n7wA`VB=VTKe%0=zFVHvkUuPv@zhP=`mT`%JK-LN*WZjjk85(Y`Y_IWf#JRs2r#ZxsaNF&hSzS3SMYBI{?)$R z-+M`Yk5+dL;}qdTgNYB@I}84^nI4F-y3U>~_@^;^nb7m=B@B-%+F#|%lX8#0lHtY$ zq40I+UnGaNyV1dM4O4#&W17&j%|y?GmE13`-OijJC;2~EDCJ(v_%Y0>>yRpepT}^Z zRoBD+VR-!VL)C9um!)fHPv*}+Q$KPDJv~kEERlygk%#__#`y3+)?rH4=qxnRlP7c@Xo6=P!1ABAAyjT3 zxsJVVbzgy&gORez7&T@XD(0PpTXRV1p1Clv^)eS74cFA9S zo8Z55KSfAAlVjYxo#Amea=pG3_{YMBy6#-BlLcNRe15|upFbhcp}%pUvGyZwFDcfT z^P-enU{e@iPOB>l{tD91Ws5uTZw4D#7Vnn3q5a^2tUtY{nJYjohS5MVWQ^)^b5jYE9Gi=IBPoN zX?U62caGp$D&s=6XL(eGwwLb+fOy>vliqH-_8nNec6wl0VPOc1TqR)&tu9`BlcF`by?cyyQRZ3&vBo z=1_%ah_uU@YQBbkl*Rf-z{V3I&kf&(_UjiFKYJO2Oyf@czKlQZA#O*l-!4uT_+o}@ zJ=v){k2_mUdUf6RoWFVt+Z9?4YefHhO#N$WXVwFTlj)x*{O=_5lvPWaKfMM1^H+?& zrUn9ri>We<+fhwwuec&^uLp&14QNn;*Dc|kE88)m9=|iZ;RJ3ME&s2BZ)DHcnaVv8 z?N4~Bh5o6G#`y6`)|2tV|04w+S1b7+?x!GK1zw}((U2>tukP2m!q48Sugr4y- zPXt+-MK2PxUDQEfbZLhZL@9=V_Sg|J4(k3p{8z@~8Ih!N>iPajlE2K9|4#8^WZgJYm#vE#k4^G>#&Ul9KRm8!y;>^crtO(f{+}auRNTSb?!AR? 
z@q?vaB5zK?KdOxTZ<=ZRnlJR+Yoe#d!})7X{e6MxpIc1&=VHhYwPT&g|I+~*Idl~Vjtn>(aIj{Wvnd;ZLb$6aQ?dO++MnUkGPiM@z3!5 zD@qFZ=S3-Z2DkV&fuAaR#xVKi-Vl2)?Q!Otw!5x6m)oVm)Gk-axKl53tLxhv)gk`Z z3Lh?!cKO52aL-FDSK4mxAaYeD`UGO5uF2@g-7pXG+Zq%m*lF=a49}5zjS{=J+GO{hc01!Y#NUfB z19g2R_CvM!3ANm2OTErC)$2L&Q`MUMRE6jW)Q;7AaeWUK`MK#RhS$mb;&g$}EMmBA z5zF&(8K?I1F}ywp1|%+RM|Bl@&yfD8?b{F09I9VW=%Mmw3;yM&G2C}#DF5@nVR+gl zN2vTW$^N`r?0-Y>YdLuv;~U}koXPk#AGV*)@U(b_V^~zz@H~cBf5h!| zvy^+Cz&#^5|3sd< z)Oz(gre}@NGeGR9hIr=xgM$A88OIA`9Pcmi_djBK>K-{%K^_qJIPjC~Sf9*~G(Cq( zJ6w&xfX_Ti|+VNcevS zw^yOifA(bpFJU>+_SoW^8E(63Kb2F4>A@sZkaV0wK zGZ_D|qR0kk3I0o&&(90|pN$N!?vSW3KwQ-|`ya`FSCaaT;YnSGsJI2JgS)t2aIFpF zYoW7`iOwga-Rm}SxyLgaBQAsSH}qgSwV&iok^hGIoZlh#n=4!L&tmvQZP$G&^gqV^ zOZyFm0il{_y~&)>biSLw`Qv3CB*TmGs*DT2$vSnJ(Ai(+WpSqbib4M0h_vO7-drKGE=!1!!RH` z#`#>+MZ%x+B>$tP{69#0y(VxG2V-47##1f+a6Mihb`HbqpJ)EZ$OD(nWq4IG*XuT+ z^I|FY1e2csMaq58RPJR-jK}ig7- zLx=cjrP}>HlDg)ax{+{|lk#$1H(+S#C#(oSY?mo^Rswa-s7J6P+D} z&WeR$XJy(Unfjbzk{Zzx=V0iuE z165A^RM)l-7@qYTx1-jpbJ1am|FyZ?kF*{8$BzuJX`iT`ZxB3h|IGe}B__Xd7qKgh zrHp@?bof%(S7g7{iv6bT_S@i2QTi&D>1S<;IFmA#bB{@{{vzYGc|GwO>F@Vm6KcnP zBK&{bB%c?ezmq=C68V&7G#)vO=}9xK7oU6_!>d;@orXMcjI?{L*wuQTa-+~wU|Og6 zQs`M{qGvP|IPszSV8&19DbO_r=8ftX{TO~VgN;#8;1r+cF~uR=F6G9W_%I01*^nO# zKZ1cj9z*vb{52wnx?TP}N#HN@xI9swx=--0GOZ8(AoW^ks#hi$Lip=HWIS*esH?k- z`wdRkGiM0?hc*2sz5T1;Uv9#`R^+PABv%V&@wj%8$^Y}X@U6(Z5ald)8WR7fsK5K&aYlUzWESqUSe`Ww>!F>xU?5$J#=v z*D{8W5CPiqGwX*{ruoaOnog5^{v`gf<)(G6e_m$%wNKg<-}LynRK~k{)A+JN{E+ox z&!aii^^TOgWKpPoNDw@MwIul$gRgMEkbYyI%K#IyNi{?eP#8{Y{%^G)>pF7|4j zX}tJF^3O8)AGQg6tqK0R^gDC>Mx9#6>0`tRo#Ta`?V_L4^0{6Y3*6>nKGeU~mt(|UO54hK_7%N-$r#QbKaJro!T%PfC#rv4$NlRk zq35|AhI@(`{vrA&T~Tnv(fnnrslVL0ALqB%aDF|$A1vc(gUOD{%;)@RDV+a2F~}Y% zWjTyDjkA5w(Nuk(V7b+D(oyIcZ<6!A9>!B`vP;*Ay=s{7Z!M?%osDj$dB>wO8UENG zOyD?XgE3R&zuF}K$=W|~2h$1jOkH+NxQPC^{SwsgzXje?(@#N;621)<{DVyWeTCQ$wU=;z z94GzaSm8rj8S_E=*&1dDf4=5+(d!FKWxiHZ#dxMmxz`Wi{5I*oFu&EceJaD_XEFXS z1zz_T!;K|L3IesGu0f)g;!W#l-|xlwJ%4ez+W(W+Mewv|{9u>5?7uMFXOi<08P~E* z>uWtHbN=cJxxRY-`~3ynkGB2D{Ycc3am8}UFaBF?cik!aKP`>h3)NKDc#(%{nb&L= zxbJDkQ}fAz3ZloaxJ?YV$@(DI4*nPzLaINWzOTw(DSR6uvg8ck4@J5s!Zd4vgqfyYZ(uQA$1Ke zXZ<$cwC?}HRxUT~o+On|%Spbh8+uIkNiM{I_)rtgoH_er*@|XK6Q#10Q8QP%nNY3`6SrYc1pPthA}$;{?wv zv6E~5j92g<1>R2NzskhVXGCwzlJPN4J!Kd#3VejUvx9&zJdyrf0MGN#acPy28nLYHndZ zmx$cXFJn2(GL5I>u9tRMi8&rFZJ!@_7sKlcxJ6Ev3S9g?!+ohB7+1W&KhX6u(K$xw zX?UCI`9|dS6U}E+{x1YRNaiEDzNw42+_#p*<1kbOMf0Zf!r=W-8_j*YG<);i|IK+?eWXX8Vy%_!Id}{YThOLI}aD8~{ zb*njl^{1?d*Gst@v|YMC>ly9G`%UzKZ!W`iy!r_xjHmt)rX*MDTRxlhLxHTPGdB$5 z3t9KCl6C)BDfd*-xACHHPZ9Vw;X|z051Mb8C{F2y=$uNrs&3`{ z4J-Fkzw6as!$N$L@z=Ki`-A(Gd*3^ILIkFq$q+Yroop%YBYv0P_k)Dt2 zCwO|B`qyinIDb_q)>jt_osURAjT3*D?q8Q4!};SMWj8LQg2YBz`%tOI@y$7~U|Q+e^#C6N!dQ&{!I4O6D$xk~E@=JVcDB$|87dj8v zi{Wt+&-|poPf2CC?Ovu+^Ydg~FB5*R$W?<$uC^+@*TnR}Z^cjuO0MqV`fB^|>z5c_ z_qa_xul*Hn!86O`=XqS_o1Q(|dn~=W+GdFg${1tqIR-;%^x*dTE5@ zzYy)O#usTv-M?N=WO&+RY!B%9@W_sM=DE$1IYaVb0>GMzeJbo2$RAJ&=tt8ZS; z`D@%vzsA4)Ngj`k&zM4ON6nOa^_Ko?6Z$uaU6&|!9b+?$Ox%wPv;OkeO+x46-0unn z&*HlnZa3{`u|2@>wCUWB^mz2P_H)X9E^cMRm?Lx+nCN^M0|D_TK7n&ik$$%vgc7_? 
z)){sCp7biitKR3DE|c-=Oc~!dnCzBIq}*vHe@;63Daozz9P{&O!Sl}$#&4Inh4X|D zy%U7aYq%YuF4UEmCh)%)uKh5tO%ps9a=pHg{B_SWynggS3Znf**+PGB@%w=->RKT5 z-)o|O#qW%#_IL;$F5O;#&*c95r;Hb4)Looc0DFw;TPyu^tH52bGil#J)=C&2xU}Bb z@+;%78^!o%2!9I14-+STIh%UQFwV1>*Nn#kCADM29gHVl@HB{>>@(TP*GhfYne>Kx z9n<5xoAuA%g6H>b47cCS_%;6z(Q;_ApWRwcOm@gpDR+=bKb)S$_#1pZR8992JIb+y z;WaO_U393>b1)2eWjC7Q!FNF}zy*09rqkZ((@aGHx%hOI`O0{w1bye3`OOIvcZO9z8(vuiT&MH?P0P ziv1Hmg!!;q+T|g^zsjV4Y{G{+@n=f&8Xn=pT9e=4alsR7>W@hWbG;fQen9(M9+Y{m zeGbb{y3q5$J)GZm6Z7pEfuHkVhI=k#Ij@oW)}w)l&$dT6e}lj;8OQK?H{;ji=}e)2 zmWlo?{W*WFXS0){ud!FcpKW1aLNdKF@zuA`1;xbYanQw08ShQMVV z`9scboNzY7?f>EWrU{*kMIP!!9(qguSEQe&Nk7$k@`!sGk59(O(P*NdYjF`lYLOuycTVw=hOe6Y#>UncySCj8O;eZvimr~Vbz4_k!~x4zEs>JM#- z5`=GlT z1K;Uva6ZBoYjiR8mZ#{hPSGf%E0X>f`n@ae5u~fiuk-8!S?UDHK7u6%zXByO^-Kxn zhciN$qxZlqBFyuaLFL}O)+l>{r9>xawU8d^0 zudzvfd)4pm#^3V0r^?^K=yVYCr|KFqAbT1;<@a^!_g=<9_)QnwEe2c!#CQWsVQ4LR zU!omf2x$1mveN))=!)^-585g&#-3m&OUH*PNsP}P6@^DJDH&*^+xA#|jf*8#Q!CLG z2cXY5#IWJ}px6%d8D)zzVxnNKb=K1|z~X#>+36|R=5bX-kP#b?oazgjG7D2c!)_S1 z+M&svY#iSZ7aJ98h;2Svkc z1U4j*)i&CQH*B%dQ3HJT7^BK(_%6b2Tb!-Rm=|5ckM&1gK}l6yySP}Q-v{n>w2w9h z#Q3V)(@!E96hv1?<7*7WG6n_E#A2)se+;{N1b^cQ4RWI5sAkVW_&R8iVN}}y1Pu40 z?9pJPFG>|07i}|adPGh_%x< zAId@5FmdDC9~V;%hz$wY=&ubS;kVDeH76e6K(g=-8kI0MYpS&7j4u=QSGa$=~b1Zsa*%5a6sZltFmH@1hlXbZ;Kfe z9TgWHdy%g9XV55F@L@L$AN|6og4?L-Al440LYd>n#rC(^K`bg8;{!fS?2Vqp zyBNbBZORhMXL`x{f+A%W6V`Q`A_Vn;6I*^jvfpCjgL0QVmW&6y8wze;{M*U?g$hdJ)iRi z{sLjF5oI%|I~uXw$5Qp|23>LVH42i6?;UJ*{H2Lba$Hsi;Gs%f9F0n$&jLSf8&EP3 zYxo9Z2D|mSR}R7>bGu~4sYb7jHu|IJMgJWCdv2@|-9_;W7>%|6^!tU3!-GAeutZ=pfV3w*qKx@SxN0dNozi^dvDxv5y1$)na^u*j znlZqLYY?8q6Qg6|y5XBGcA$a={jsBoe84~*NReYF>Tz{4jPY^p@uk1|NmT|0pAEn4 z1nJh^7J~}LLe6@F zV6-v7_Hc~NhD@kJj8XefKRD0_7=y;+H#m;JV<5_Qd>W7el-LeHAN?HF4f&}qQAXzh zXgnKWU?@IgVz_dYi!M9;A6*sI-WSy_RuOHBCN?TOHk3=ui;3~Y+VF$$bR6ixUB05} zBZ~9J`S6slo%)RmA+ygn2)9XR#t~*8zTmPm#Xh1x##R+&tF{rCFCdJiei1|2z@(Vb zP`~)}0U_@BqW8sZx)j3<=t^}(QPV(g`bcO%k4l_T-N&Q2Km*;wAO}&RYxLNTwf$A4 zeVoHKcJARv8XfpOcr@8JQJ{1l5Y*CPvUcjmbfEydVH^^zs34FJ=m&vcP(kX7 zLsw}J?in;qu`Qy@T{V(nm-iBmEcYOQdg+zDN2A z=~tv}NPi*yjYK`2d~+RV56-Xc>>Z zMtTqF1Ef!p)+2q3^fOWe(yvIrBW*+a8>t<-4Mq|Jox|vkv@g>BNPUrPNCS}$M>+~A z9;xffw+{WH`s1RmJyJe;Y59O8+u8XiRV1|Ec;@{Js}8uQv|H`%t1?$rcRZS4U#86W zEuHh%rZZkSuze+dQ<`i{ul*_R#9O21U*6+9{Eiv(V=R5%z2)d_w(o4GUx4oqO#HGo ze(V9uW5%alNx#!xIC8}s?~GqtaNj21h4?-HtIWizZw9@+GXB>4d;W6V&^ds;R`XG> zcAwwX@0_@&ZytE<`nS*AzwD(s|GYP4+?&Z4tU9#vwd9@&&n`Irk3%j(-naJ295&$Z z>SNz{?}!^#-FEAY?x*j2>cxN`{`Ra--tClngl*w}9^LvuUAu-M?KjQ^eBdd$|BO7- zd()A#wmlcS>Fde!_CF(U6YkB!y%qOu8*SXT>5)T5eEw+d>UMuV_WkddeE;6hT?^_z zy>-|X_CDjvqU^|D9OwKF zeDeWYx=)Yf!SD3HtxM1Kx2FD+W+cygu+tTQACdp)!ErZsJ$u&T`)+Gk`r9oF>3O6# z57{>Qm~Nk!uT03l{_ho!?!6s&t^)kW!C_z8oqnp0J|zTdssBM$4k z{kf|SIcdhkG385&iuXT!_2kdapWkc1Bbk#fimi0jeV6T9X+LGn1wCIKbL#2Yvmbpg zapk<_o8z_}cirlBGbdjE&TaAY`h51~oO@3mmOJ|L#2>4Acc?q{yRHwW&pN*0)o~ZD z*?je_a}Ugp>b~gW=bqe9ye6yLk{_pj^2Axg9{qSW!RKF+)2p(}#=UR&$+_sW^ti7k z-}&mBi5FzPxZ&%UYL?Dl{@L{_wvX=nTCZcCI`q|?wKGO-MVYcG}R+Pn9sct*1Ku6> z?&H_}{mr7NZ#PZNTyvK54twrnm4g>{-!FRZ!t-BPfX}l#-kH5Pt@?zIOOLN#2psp@ zpO~@!^P|6gX~<*V(xajt_PzGW;;R8KOc}qn*Q&w8_MhN*X55m8yBU4=`efd`OAlHE z97o(S_oB%q(~c^*YT}fQw`?w+n1Ar0k+1kMFqZn{lW9^K4YP@80W9d-n$5tUL6fz1H6H zMS0%wcUANo``mz!AKcq9GoxS4r*E?c+=h2*JsHCvekR46+9~CS!>Z3c z=#Sn9KL6IjgNAIVPW>YNjj{Xu{NHW$lg4&Aq+-G?y$|{pxIXRq)T5WJ-_UjIdt2UL zwspmVLp_^T#a?m!mjv_H)%UD;b4u#v_+G!^hN8ni`29Khe%4uUFTVMLLF+f-`780e z%L|8Ydp7HkhkHNQYQF*+*NoD?!SD|_R({_3m<>~g>!G*xV^)tM-Mq}+k@#{JFLB^)4RQq z=UU|H`OaH?s)l{P?9PcVkNBfQQt`aw$_p0(ept7~4<=98ba_Ft(SG(1j}7dde%Fn^ 
zZ+x}m^|&|r{M9KdubUIs;ke~z&weV$`0&INE;#S_zER&?GI2`7{v-OvUDu`P`LmZ7 zPWWTk&kvM0yz7~}XrI0)??u4Q|DmY!3;4Vdc-}hg+*i-J`j~sxZkqSS8i8%<@k;rp z*|$A+`1Yk8Ujl6Infq<*bWxS#WqiL0&m?Al@}up9PZpo`(vKev08WD64EW%RB^{n> zH~EF_Qx_ce_z{DLecbifnf*2#p1M}<->^L=|Ne`IPw)BM(ASqtx%Y{Ic$V(nif14D zWk%Phs%9MYI(}zmtsj2#85dsD=dJyp*!bqQE(fd_f9xB8uRdb(65pnIx7A;2e}#T$ ztc)*y;BDj8vF<}^*3SOtzNRaGyE8k_#$mjAvD_#6Ab z+ip!(V%+_!o^OcVe#4pTzWn~J$1*rtU=+hoOl79!C@8bCxAN0R1{`$0WbzN5tOg=r%z7_Bf06#x}WuM32>5|{YcE=Ul z0PAwh4V`Ny?LG11>wmlJ5B&ZJdA@32y!WzCoFwVaKTzViJGFFja{bG6|3A3@z}?r3-Tv$!efo`CmAAS7abvnq z@}~Wbdw(9+<0;SDujBk~^p&1qi_OqjRt zrytHax%#&o@BitYdrCV4eubyQr*AAfcH6KozfGOB?}7P)lDckO<;acdJEQKQnrq|d zpWA=&-UCi~^4m=lSKqQOd;MKodq45n-gxfLZi}AnHTbq8>3fxH*~EGKT)uMhH?w{F z;P=3exAmKUYr$3Z-&8+Gzpwgc+t*LMI_b?DoImuQ_tH)21MZ#t+%dN&Y`JRw=9-)O z-?RPY>Bm2J)_nyt-p@6yt-Phfaeeaw1U72_A$c<+mwDIxp-speZ z$KS2K{i^N%TzU9!?R#zc`K^OiAN|7VpJ#q^|KXndj=kiqEhoM^=zyme_TM(z)4{eR zYr_*KJhfL<98@e8cboJwPqsEPY;KAh7PCZV<@9YbXPCWsij-@5HjJtOB zw?@yY7cKiOe^~k<%jR#K+4cNUxHtW!ZA<8L4AN}ZCeM;io$g$6d}{B}_&wmIpJsQy zAhz<-|1=zID@%Sf1+XJ+RbP+AXZxd1UGhLy_p`N^Q}w|2Z|#BvnbiFC5?^~YGBN}rRSyW)oa_({lGI*0Q;>ghPX5G|+wXtnis(;I$-wUpw>@;~_l_4wp5VUm{@BE2F4)+i{l-&qzhGNV(HV!OKUaFl)SDajiaEXd z%MtUp9(zvQkGTIc(g7Ft={WwWlU5$^%ZaDNCEW1|!Df9mZNdQisWDGoJ?!~k@O#3F zc`v42T7DgU{}pM$nU$YB@aH2>zkFzEHT|ycc=;o94xB&kH+=VuikfiFl`nj@8Q)Kg zc|Cp6lO^Mi@Benu{jT$p>keQ4V3+;hdM$nUhjHC6zv-6Wk>|jdPN|;WamUF zE&JfE@4xOfa`H)cZP|u=~L_vyQ)5YH0qtiS*HEBmY0pE3&C-@WLXYt}r{?(ES| zk6m@hJooIgH;$itTwKnUv&a4Z;k)mj@7|dZn0m!qi=y$n>#6Ns z2lnofyx@_A-P_^!+DnFBQVQJd@xAu555Jo9U^jbox2mHYhhLenE^6+V=S(WQt{nG` zhkrgLacSQhdUktY^S<@tDs5R`mvN`L zwQ2c(D^AQTe&g=KvOc$ZKHb`RYK(@GaL}>?kjDhP92-QKd)oRkbF{-g}gDJk+eOI;~x6LSbf%Q1{}oa_KW?)>7E z^obeCsC=YYa!OrJuPY_Js3J|GSytes)=!YX0E!cZ>5#Gi(C*tdcxr;DRk#lGP>abBDK6s%8pOrl#qfeAqlN6 zPddwIr4)gQ!`;PRSAHo-%qb}bBZ|GL*^{!7{r|73Cnlik?&306sn=0n>^`^L7*$+(veTPW;3^GO zfUtmEv-C&1S#-ujJnxJUj8CV<9IvK`FesQ@n^OA~P zGab&t!jc^B_S&9+45nrWREBliS=xmuXhi%c4MUoDc&-bUWC;eHG5A%H#+n1_N`@aP zIHH1v0VA3Nax>hKF-mow5JT1^9SOq@1s+lIh48=E$bL*d)5)u!xJx@=8jJ93B`%&eBSa&(T~Xnu=-$;o(YBgtw@mkvrzl z2+g6A!a~@q!8~eFQ9G{F&`EGn!AP z8U#l>&;nz3$e0!o(Q+C_5e;KnLPJ_B8b5Ni8l5U{q+6;y^kLi@TyY;rgyDt`)T##%eT^G$S+!EXokgZqmpY`>1*- zFJU;?>F?vlvOh^n@wUmTmsMgGq(p*P&NqKQDh6wFYzR{Lf zsz9%_as#OR;s~JclQ9TcxB^BwEL{QWR(Mwc(1mpcurR9@N1MlkIWnC|oq!>ihR=*C z#L^V{rKn1FOvW5JpyXj%1iT4qY8JkyLqvk+DgWbk7;cN;{EypV++3}e?N++E{%>l8 zCOu@(DIp#*QUvJb;#c`!57+KHOonK1R34?a8)Dee_3s*B#Ge^pNP=T5T;VR|;3vbx zKT?i_I}`Jr4A;3$+bhZlpb;}AVJHSKUUyL5%(9fUG*YNM$pZ7+vMe%=Xko~$)^uQe zcv57gBea-8m?(weX)DY~(qa)pnSP|nA~MPKtBkVLNk#D~6C%=1Y^J1)NlZ-#qb7)( zpduv&-b5Oz<28Lr<(S#gx>qjMG<7<;7HOqHtzLwzWx%_utswGBrv%n<5|SO`%8PTn zZmg}*at?gYqwxWR#m;>8BKaF1JdDV{Bo>UIOi3p{8W5?~J>|=S6u@lf5*;O2@^KP9 z(m8iFfutIF(BA_Ilu6B=P2e8OL2X6T`p_QP#G1f8nuB|^3=(mic3~4II>un3zhz#R zT_=Dorf!!?UyG>Q<)YVO>UJsbB6RreI?;=Sz&)CSyIcTU%E30#eZvDbV1ctZw-9#U zxa^!z7g@x%AFcMfiz`ZIAnpjuL;=%piiAVK1LikD+q8HkS-YorzufG;;&)T+x#k>` zld|h;uZ!P}wcm|xPepTrHPklDZ_$Xz5p27!>^5xm7S+3ro4mzkw_&|oIsmnClSfkg zZmfM9H+)NL-~7Ig#VHDc^ZUkf5&Gt-OjUXsJ|DTqz#WDUMC8`;4y!<*Errg(i74TQ z2xwBlFBIP7$Vhc$j2GWlo06-2UlEZ#ieec3q>omltJO@9PM=KrL`TMi2uMdfYrwZU zjD4#+bgWX$5SKbC;6_ErTm}Mmuz2c)JJ91Th0UenA4A-?SR@=bYKH}&ap1X~6f6da zvMoW_wS+Jy%8Wunmh@8X?xDe5hM+;$ZlfU?<}C#?<|2+9!J%y9dMiC}0~hC&un%)r zGX}0>73qpNt{i8fD;-jnFqGHbb>Mhe!iciUVy|-+{H@CQOC)39rUm7o_26Ai`Plf~ z)sO*~coI3&BheM)e-DWj*FJZ2C-SaccO_Y2w;?h)&(-K%Y{Nj zWr>ebM~;~z+p+Gz(%Tm3h6%2rxwNhnqLU(6Qld2&>}27nOj@hJGLH&FL_j7N5WHGr zkyGpRi1@@RG6maO{E-=0K7oeBRt?qnXz44ZBB&>|MR6lN<=96;m`?Lk-F^Pmx 
zFNlW07!=GCJT6#;4aK)Gyb%1PuvZn`lq>sb5H#k(HVaw6+if&CGl(@QBEUgp_}9gn zJ8(BGG}NoZi%hHR7sUROwKbf$YbmXCH>|YoJGEdIY^`+td-oDJcub9nsT_%F zj!P323tP(pGGPPqUEhwTg z6>;M!uvAD5;ak)DWGF{lQ`>mg7^YBW!f95ehMzE!x8p5i=}MEKzsRPsig03zBHB(& zgIc56^=sw6ICCW}^oe3FCJwR_le=Lla#W?YP^-FOkOQNH@lT>(oBGjrgW@ z5U4X#)@sut-jS*{G?9g4QaA{5_xEquHrm~UV5g8+v}W!HO;_8rGA8QI9j7hUbe2(fC&q@>-a zT9_?F0$H*tNYPA(TR%6^O{Xk`+6F-zxZSFw08I%e-UuPC?xRg^3YUiQd+79@?MAXdqXo+X&50vx1Aptw*87uo&=gc=Q+wWVKkV4j zxG7p(zt(UaWbI%peuP|ax(A|Ivw~^N2%MI}oxg~5N>P|jPfSN2;c$KYXrs0szsuS> zk}B?!=8k}{*0eZA4eKF=n>Vr{1ya@Ic@JU7N%;Boa1dVcP7gmu7V4)a78W*DA>70? znkv*bx@l(}a}q&)S|glbNH|^8aw?jZ#4wge5yU-6OrUQ}nkTH7KqKh}4=*M~;l(79 z(so)|P4cS9uch@_TZxX09A|N{tCTjFz%SckPpU@L(kKZDQBfyq6**_%KrTAQDQYIRlPBb!0{}4~k2XC9F22hfk+)U}?GtM_RR% z9zD#qlJCk>O6tHxWc~i$%d){~q&JbDUQh^vQg!Lx*Ulog=A0 zyhllD2tB)BQi&Y$5A89M3WC~LQh~9JB{dK=5xLMy3>{XhnE9kdQ(CQ?HCEbR-lj@l z5brTvsS!Q9w<~FK_L#0D1hrvT5{zxtl?WYr=1!1*!n#xBABguT`46FI_sc(#vq#B4 z2x?>b2gWv*|8PUeu)x_rdX2 zhEGe;l)5}PjXTm>@i`RgjARN3EOq7SNV%2~6of}L8A6y5lb$DMLa2R!=9zr6DoXlB z_*LjGb~z}NPoEvGkKb+vW6QjjAZWANSpH|@m@8P^(xDJr|s zTUx2SPmPFcJ{l4NdhJ0eZ3k!t9uh$uqmL8R^W0D-W^Of~AZ0-n!kbo%<6tTp%TCKM z<+{tJi&AR^O-kg@LvbQJotOjSiqsw|-tU@?qiuL87`vx2o;n(P6d@YZG94+?C|X}1 z$>r}2&99L)9cn<|&2-2FDP?!_LuOXHjt-^ENz*~II7pMToJ1~gYL+FVpL)57jL|_t zR}pa!+W~Ttq@99Xi^onw-b%`Y3`X6SVc2fAMVsAu6L*MT(Z7@M`=gg3Nq+TtBbl{v`K3;znooBpVqfb^Xk+3_8|el>D@(!XHTpH>D%1AGWfd2)3&OUm6NQ9UP?u;Ou+;# z$cAs`38`el=g^DllO9yc zoH7h=Vxzs@bZU4iO+z#Q`-n$7rtnV3*1xF+;yl8kURMJKWrpnd1p^~HN3?A)kp^o$ zCat5|22<;4VtRNU+B%vvrnaG}ZTLi3IE8Dw7M+))53Fg-q!yzI&!HBfv1U(;(S)Za z!kDHoJtPuLZ9|halUhnshzvnRLY0@}b(3nhYiiRco+x?2BN)5_2d&VN$<2HY=Nx!A z5}^zM4mhO6>&zFNyZV$3DmEi{&}x-NZapR~C9T60EG4bS)Ownjo`6PFyt#1y1TE6BF7$ntszR;9iJsV|IHgavInEu>xraI()LMU;dZ!rPZi*8QNfKc5 zM0~mzX4iUahni?(8;%Y=mgI0jAI^JK)zafwtQ7N=ws>EM=d26y34#OX0N&1 zU`UjyH-$Z|L3J}afFa&735eBMMJjucytTe)4sC3FnnufavbVNBV#7L)#A$jfH-hk* z+&_%_LEH(br3h-%8qozAdXFAl-GTQLX*Jr~*D2$WF51VdMpbwpVcqe&VuPHsn7sSn z^wo@jQVRZTEma@`BP3c82g&kkQ20Poa{pVsnO<$`hjSKK{M-7|;HdStD)CdFw-cF? zrNJ#|8zbw3#z#BJo5DyJb2mpn^m8<_l7q%M-mME265^W(8ulRPL2{!81AgNb(YvR- zoD;P0F%x_6t2zIboTjzVQWw49jE>n%!NyyQw->`={hxHwL_Z}sxKu14ifd0PD#Fpz zO}{8IazojhROa&1plmi!Y4C=XN&|dQuP4nYhvNnmcAHs@O%l_{m<&CsuGQzkovcUh zu;;;{Fuw+GwE_=jXlv@+N`pnti1Sz6Vgf;YZtBRBJ(D%NcuLW#W^9e62JY{aNk(fb zWPV3!EA5@O>Mld^vdS`-IJ!ukgqtpdpKa|DzyRJnhk!NTf=@iaoBSl;QDf#Vx;ya{D=c3P12tBySs>lfcCFQ(=t zxqP~XgUlp2RtS#GDr4nLZFTZRuDU6A!oQck8Ly>RlNoiSy*?dJ4f~Cbpc=*kgCXDA zVMM3nSEzHA&HY64#SNw;nC2)rZ_aOF(04G6PN@rX;0m8!M@PVgmr$#d>mn$U zGS@Ju8;W-_;-l+YJ$4iO5aHCcy8-jFrrMI+kRcsJn{sVg(bkM^pilcDf~~Gl^2ma1 zB+89+E*&BkI4v%m;E{U7D>WeBO(;m92~m*YITi^?t&|9LlHe{lzOI>Pc(t-N)aq!5 zo#jpo5}@YRr!`_6W_Qp(;peKLKU7t1eg&`zp=tr*ls*^PR0SevJnGN*dbE|9q9 z)@CP2oE{`2GJD91p|>jp2@!d5wa}{X7}v?M8%DjtX-an@P_4U+Xzp&sSmr9vk>v_{ zHxrSa64on#J9_v8TY`duBj&Hnb@2QSLxM*%3JD&GkYMeW(Jw!l4pR%7ggaT|b60m2 zZ<{u?z(=#Gh1Ual%RPm7oy2sSvuHZ$aO1}PhYsu23#=G7$hrh*2gd^=hq=%om7^z1 z=BZm!peVNe0SG7V>UnkeV^boJhj(aA$gHhdgfz`&l-5*@{jbgwnQ5eS*@+)?P%}9R z!*q47JGDSlb;rXytb1f6wZ%BFBTGv0!g`iOiyT@tpfiUZ)dKG-Y!Sm-Sv#?;_Tc)p zX50hYcQ|1%smA7sdDA@$#WvBi$gm5I2nlPxSoEYKnsGNVYzDh*u((KJT^z2FhZ6~K zWW;cH8D1Rdl5+x(V`vU-Dxt$9Xq9HBJJ(f=Q#)|pyV~Q33@!EDBqZj!v2CT$iADaB zJUUgONWBRx5>&$isESKkk8Zd^m**~3@dvHO#~!+{6&xBIBa>a8XB{S!1Ovm*YkyoC z4DK-godoh@7rPLG&Z}uy4dR!ql_Yt}y>j$k$m;`l9k+r13f8>OO(!dWXO)V*NOo0f~ zE-vn8%_3}Ug0RHfyzGfOJKCbzZ{SUfXaiW9vMRBq3!bzw4)`g#TFNaKk@2X_9~zfn z{@^1ea7tHrZ(rcP>}3q_ORLC;NRi@0j)V$%)#8qh+oI)2GP;8X(49h%;!;Y*?jnc! 
zsSf`OdLgAy)y9<<*KW#=Dyu^Gi23R8BCt@l1@D7oO}|bJIw567!N-mp;x1OeBN)j0jgXuV{IjADTSp!vfm&C>BcOvhH;6>>HnK96_dm#^sG^RWHe~S#3u%C=po96F z^pyr)chGbZuvc{vdUX*j{dXS87!M0&cO1#MX9n_K);R<%T}#3h4il zVo{cqSs9bSK%$I*PCSf6N(T}HP=X3zbYxe0U1ez#E0hue7`x(;i|kWv_vTP$bqJHU z)al0Im@p-A(3UoUCAYkZ8*4srndJN&I`MXZY>TR5!kV5z10Ec6IVfFy8;&4SVYR9N4hs%yw5KkYuWNG^oQO z&`NnF3S($4l9MvGMaBR_5}H2{b0Hu*@Cdq~)x;8X=Q`7S32Hdd^=#L!amVWcQSHbhGVv_}XyNO{O1kq~<~ zl$&CCez~(W*Wt`ADMgnIhUDOlAEnTx3X}1q1iVHJjp&Rp`rmK! zfmz`^3f6TYgR=l<;;3q%4xv50W#vn2m^)Yvjm9x|wnEu&cz{(>{=^)(P8jG8$FCEtlxof{WWXo>Dzb)KD^?i+!7j{RRe zr;6Zl5oMZ%@La|XJ%&l}EJ#X@H?@#j*sYhYgWDd1eMsx;ft*~NRthIsIXzL9MT2W? zThf$o3v&cgS!k7JWe<2z{7zOdT{XIYSW(jsH8cXN3^jpb1i-W42q%o{kYNA2u~QR(+BOckB-rLx<-PN=H$n4dvg8893**EFoUq z@}F~#=qSH+6>KBT3APH8mXg} zq``+O(?kuahJb1R{I+&Rj!#ZR!KxCBRjVGzbCxd7<# zZH5cS6l45@?TxZ>3QE-nhE4qPmJ}M^l5!8m0{)rHA9<>)1b1B3rL_aPq_8Z74AaRO z^o=(8qJ;wN3s}!kEkW?00WG2d6gFOw2gkU;9|Y?T=Ao`4{24e;CS?3(kZDjaa}}zZ zMWFG5%4UH<-AQ`IKc5|{)q*!@AzX)6?jgkfeh>(}!2Bo>UaI1TD27yJPq2zDO|T9w zRky;dKA?1<8y?s;MngJHr|6`Vz?mYvkkpFpKL4h=Hn~j)+zNi(#dQCPGO8KF#0moL zw`4-6G?pfea!dtnv{ReOb@&6vB*2T)>fPn!lAW;g!%KG}1E`+`?J7rzwU<_P&MkL2 zXlwb70%P?$GKQA?;&Q0bS+un3Epg0Z-5WfOG?{AFZ3nX~(~&mLora|!c&Fi!OM^A) zsBk)H?=_t>g}rj49XvPp6k5*;tof$q%W5hNZHFCPaO2agswBtAD|IPlUQDx}in3x4 zh7d37Y;&N6_SZz9@)E+Vxx!*}mpKcI%f*G=rt4f|63JbJ1rE2@jUjqA-sz||veQC} z3g1$EP~(o?N@C4D>!y=8Bm=ANY*YZ0G=1n-?C9_-K3aaK6q8CTGh`0aQcajwxJOiI zGxrn+1*DFq!!<{dTs09HO00pDk8|MoP;J)-1;a!{s%}Hz-a(TEHF(NStG5CtdZ76mbw+*$O1Y`Ba00!C>V?rt=d!l{~t%1 zG{wNvU{eeY9c+wQ$qHgiArz3)2!tZpN+^=8g+kp5FBCwxONHX!R`n)ZAIuAB{lt*g z4<8R>Wk*b>p*De51`_px24>A{PxxoA5m|%KNa_&j7M_=cs_7XdMlFn7uDL})GfC0Q8q?G1?0pF%l%2@5k zL=uV{{t{Pysk0EfGO(sx|)aHpZAlSRbUP;n&JQh#*>I44F!pYfgkYfNve!dL9R4Oc{~?vJ*Knyrp5o;b1C-My^E|6lLVNC<27mvD+e=8_TWXLf4fVNU89P zY_Rj#^s?$5hN0sX*^SqWP4N%@KnEyv^{(-ec#$r@YbzCMhZOxVtiv&=!qWjl;eoJGgquyw2=L+t&4Cc5oqqkj50(ximK&tMqCR zIf?O-vt$PC?a~9=?jUQIkUJ!wH?jTZ1N$T()t)Rp$y}0a~w?VEB#4pX7shMmBBnGq6<}EN!v9TtgD^a zI#@(fiZ94C)1!kM3Kb`#h8`Q>U#Z?r+KwD_Sid2Il17IFj-OP5i-l6^HO*JvLF?|j zsWu*u4lUDsjkpuGY^<~xOVpqayBzCPSVKjGcUgI%|DEf528p!vMSWbvfcx2=K^kP! 
zKN1IIYxlP3j{A!{k`A+bD<$1Vxgy8VESwH&fqkbZ^!|ao*)bV3XVB-oRHS99DHqml zRdiYs4~w!9(AtayAGI7dyWAb`#mphnpwtjn)W}fCA>BRBL5>Nmt*h|gP>q=Bfk zh)>5rOTjv-5+h)>z^jzw+M}rEQ8i$;aR^|hBPC&It~(F${Vq_<5w&OyYLGk+M;7JH zEX898{h5{P&UbqeBnqdGS~+VR0-rEU#lVkfDg@p-I7i~&5hITP_@KCW4uRK)HA1*e zt|wX)m(a;zFOmwQqX@X*kO8e@Cdw&+0ryg1u_my7N_0+nhu-aR))hN((k|(iZOfJ&v?AtNZ^hsDEoBj6t@J zFb<}=<-6_MrWJP9xt9^tr!`zf$Y?X1r-YWOOLw><+`r+Uj)aq~<7~AD zIuCDuG#@J$*y`Av&6t6;)#mLM53;TG5fZTTWGm(lY};hzA|mV9!nCEC{cxNvN2fd} z_GOgt2MkW0o;J0w6j^f%(<7!t4P0UFjl|*lAg9{sR_EX>uKnnnCo8TX@bMYr8EBt7sokM@(P;2}a zPO7H%meX(?bn?^KGP7OVOqb3uc$8`Kr?(>qEVanwKUCTw=s}}&$euXeJyc+6aOkY+ z18oyDd$Lu~?5?aE?+j^lel^uQ{Q1?m6V`Ozg+!w>gzVA_+Mt(?hT~qnTSTQv_W;Gm z7~X^`WeGVnXs&B6BWp=7`3 zYb&5ldC8j}{$0G>Ezz|>^7n7#S(B57reCtyrta{(CsUhWs$+kORs^_0vg+?YRCw`+iKb{ji zD$3Z3bmSDwphI+u)q!z2-jX7$b~)5Z{f=xpqsIS>j@JOGx7jb z=Q}uajiPLC$;=!+kf$)$C@)s`*hrPLm-V6rod%}E0dd5z7oq%E%h=(8*acYaQK4ga z)s$6WpEDO{nWI z3e|DckRw1icJb<=QN@*xT&LHGxf?k^sLA;mddC6wZ>U3z$P)lBfxn0X99ao+QWoEsk_TfGoK}f)GzZD}m2Rnejv;>Dk6_(^XRC}Ve@^DUw z(`)3E=70?d4kT|77Q^!puu`nP;h@Gsg2-n|ajyQFiWn#CHC3k-4ofE1sZ*o_zw={oMDs}k)`cF76i_7g7OkR9~kI&XgNxOp#(c&iWC$1@F+)F0ZtWkC`v$b zp7L_3$19ZQtE3objX>b#+z}=5kOpwy-*KAhN=QfwdaZ;1jT7!NgnZ`62`7*yI_1O> z(lpq$ljqJyXd9|+c?U;i;tdF<`;e_NI-n)Yqw17H57j)(O(~I?7`8}ttI0CMJsc=B z++CV?nkwKetqcQ|gRX+b7gCHfZd4;`F#RAPNjDv18NySn2}5(3fZ1i?$jbJHg_h^} z`;0D;N#A@#D{%BB9SC6}5YkbO*px~~cuE7e!xcvT8pj%kyX9{e zQ+X9yD+Je&5oieMb*gs&g}#G5Ql>k|OTDWkFV|TK-7>>9sU#nI^E4H1!%;TDkkA9q zv?yrwRH!pIcbvNrD+cN1;8oZ=8S2EjkbAtjVq9rSk>yR0F$pLBhI!1Pjw1>#iqYY9 zCo@B`E7pa>0~JK)zmCQ!zonJPF#7*7cJJ}7+;zSGN2nSvMaQVhst zpeQL77^q4>uqiDxMQ8(UAzZ|uq7hI=`KbZHGb&0r9%WF_2p$+zBpyHpMhu zjDp7iKICNO`+l#z*QpgXM>pCV_t(Y# zo_E7_`_GBpZ+&>&-hWKq(Vx!j_wMM?d;jhIeKOqj4hNI{2kh-1^bg-TdPF$=NZ;F! zJbC(I>~JqXj1lf^NB?v3jH8oYdHv+tqp#OLeiGgA%)^y?!!wUQ(e}n`_y7OsA>iZ{ zyZ`S2UA6zz%&V_H!*#L$-jnuG`qJMyTK>_rk3N=nUw!$LufN(My!deMR3aaC_}tx) z&OI7ibC&jR-UBxirO+5hJ0nyXIx1<`4G>X%GM|9t1@!xx`=#WRiqq8&K( zQ)mAEsh{6}AanRM?^8c=Ose{Uqu&aicJjkRfB1((H$St!c|69xA4i{P?1%aNXPRF5 z_=!GJ@$gZ{$17E(e*1Lzsl)%?Kb)WOH2X;%eO|@o``m@kv)(`Lc-qll6Yalo_oHu| z`nY6$CyeRx{eMCF@a#wDJynfwT%}K>J=&Z7^RE3M`grN14?uhT4d3epUA+JAJYB#4 z9H0FYtRKAg=|}(Jr{iy(=|}f7JpR6Z^b9|F_?^P(XFd9}vTLq?*o{x+51;wGC!2rO z+t>KkG?$ho&B?kC+t5s{aSxa^U>XM zf8Cy8VTTb8-+uh5=^yZDrpLDbWY5E`X+PH^zxqkn?LWeO{~?^mm6H8GWVzlaJ^qO4 z(}uyk^jX|T58-EA+R>BU8?PRpHj(2X@*CG5e=_HOg4gdq0sNXLTyy2Io%VsGemuKw z|A(i?-`@xA_-FP{Gxay9hnwmX_W!M{Qv>avlU;TB;KK&v*t$IJ|(GBLF$NO=3X>@pKP%S&vM@{ z^?^s9^t%5dJo>cP{n3qwFDLIW`~Q5yCkJAT+ zO-{AT>7_2b)RmXI^itPedR)AZ_I|6xMBxL_} zs^g<4D|^@UG&@aBbJNr`GfhnM(zG-yO-ggplr$qvNb}KjG#gDO%q2`E%p^=C%p*)A z%py!8%ppu6%pgo4oIjjCoIRX8oI9L4oH?BMaNf(pc3u`X^Rlp&mxYbIENtUtC$F?K zU)eV=I>VKXqO)AtH?KO=m3{N7GhNxJI{ivG^|w*`D4eYO5?A)k3(s<8qwq^y*{J-I zS2k)-U(Z>t?8|gz@4p@*v;TUC$o}gg9{aC{`}bwOvXB0UDs^QaiE4Fa@8yu!{ntZc z_g@d0-G4o#cK`K|+x^!=a`#^k+1-CVq<8<-r#K{de|*UB{_7#d`>&d0xU%=hp1>i^ z`{P5N_g@c*-hVx0djB<~I$YV8rJ9{4r@3irnwci1d1+djl_sS*X-bD{>#|f6(tI=> z%|??6a|xXaGYJz3^9a)jvj~$2a|lxiGYAt1=MSe3XAdV2=MJY1XAUPmocH8^Ddy<89plk0_vS;L>8d?h^`#%{Q2YPBB97jh z#PguLKYe_~ssB>$nO}9Dtl9sy;{J8r(c8c9<7a}W`OVJv!1(y@9Zo(WJM}kpXP(6V zkN=K#d*ACbfB)3LXZil|&!1Yrsn^b@*%|6ijeV-_)JyTx?9|U5H{-OQJ!QxL6^>I+ zpHI)tafgp`bJ@*$C_VE!_03b0G8~`%2=-5i&;H~4Z=U?|BRKuXkN(3)hu<=s{`aCs zxS#6zhiA_FPXOOPGuxl(40^_&Ji5-0YWMHF-E4^cU#I^6(wh&w|H;F^M_+F~#Qv?N zoBhb%&R7V%`OiK5Oh0!syxGqkF3Hp0q&e-{*X!PX zYaXZ9&F5Wz-Fb2Jpzpur0T*9>!Fd;)chT|xzVN&YqJ7=|9po$Ta+jmBdxUQdhnHV? z-rZ0C-{;-)#L@ZZpFBKSd%n&PhB@#2{>kE#LtUV$srZ8Pql<^b`#<24%g;aW!u?eD z(|dB5+5UfBedU#xUw?G8KmElQ@8=jFegDeSX1-tGh2d;R|Les6)*SbK>VKVosxRl? 
z{b;ObhS6KmJ)@)lYaRZ76qWVY9sXAv9{qjr4m{rm@6!M1>ks4f;Mo_0$MDH_v=8-t zcy>wf1YSQb_y9h8>o*<7N#Vt}-1^`{c=g2KBY5+vZ$8wI;qBG89DD*#-x_=h@BW|Q zGx+E+ox?bDcy`;{96W<#D|mXJ+aAU*;oV0D zU&HIa3|_%U?+(6!&)y%rhF9m_?r`2MJiT4;2HxBw_zs>uBY5;>Cv)EZ+2AdB_P4>? z@ams~ci{P_f_LF%{9h0A@4@4H2an;|2ZQ(F$&Y{QVVneBye9YnUUk3iP@mGjFZdAN zy;Jv4KY~a14nBr=pB#JwFaA9E6h8Tj;4}E_ox$hu;(ft0`0S?O3wZjK-yR+PUk;z# zEBF#V`=i?*>I?YfO~F_6HwQ1_@ni0A7-tP1{ble9-u_td4Se>8-+36Ph9{dl9()V0 z-gT#gH}LxX!FTZ4IlV)Dbc>Vuj~^Di1<&6RyiFf|*I}Fvyn1W!EJJdeNbN11~PW$H8lO^;O?}@EyE;%aG$pU;VtL7>4H+EqFW*?w@mg zx(MTRP+$D>mmkjCgC{zU;oT>M@%!-jiNXDIJnsPYZ_#;&@QIE`@aRXw_+xnWZ{Z(SeFJy>4u0p)hxtce zaq_PK+b zkKteWAId-6^;7smR6m2eehz=J>NB|O7x2GTeGdQ7FNAq6;lFjY9)IC?yD9hz{`Nl% z%l&h{A9~*hsK3uSUvYR`Na6qEt3#iM@bi~p{v-H*c7l)Lzp8O2 z@O!-~j6a3n`MlsW`0d{od=CHh9}J$s`x<8f|Hv>KSRC8 z^LhQa+(-TW)XxO&e)_A&@xG*}pJ*KS$@M<(2=y;h{TS}^P2shUXYkpN==n1|ds}#) zErS>G1-z8!@cJQPoFzP$7x2-ahx!#fmzVJ7zTy3xH9Wds@CrV8dl-KMpUK@P_J51| z*Pa`$qXxdbb-0eAuR3|Z?YDIdZ|}nUZ*6#2-hsE1P~U}j{cs>mE z6L=w?!sADU`WZZt&*7y!gIDq#zE-Z5@Iu#10k7pNc=6~k&k|nB*YHZdfwwfD8eXfP zTX-XH=o)_qkA67Jv-Pznb6e}YZFr*k4!n|g;pt<-_&xYY-iPmW-UQzKNEl}TpIsh2 zg-4^{BY688!aB$F&xQI4Je5!3v&V({89b9`@Se`QfM*&%hgb3?Ji9WSw}9vJ5}xS1 zYk2$fVVnw{$~W-n@u9wkx8)6dsPpdN`4hr8(SJFaw`3f=1s}=V@QJ(wpUJ!MOx}YR z@)%yq`|wJhz-xI5pQsN*c=435-Vr>#A@~?Rkx$^6dzJe$65D4ne&Of z1)s^=@Lb-37xFH=lK0@XJcdWp(1!#*S8fOJO!tQrp38^u`UPS95j=Wf@G-nCpTN8F zDLj_X;E8+=PvseWBwxTM@*F;sFX5TIfamfRypWghQoe>)@(NzdH}FPY!=s-I{olgd z@&?|O@8GdK((^&TkCDh*@KoM`clEg6g=aq&{?4ZdkMz7NhUY&Y*42lX@&PAtG-qSb*+~cg^uhKXr zysvTAaF0{L->h*q@PWpu;T~rT-)Ni$KGZlnxW|dKpU&rJG)@aX);NA$<$90PLH(`1 zBU~3<_*CQc;2tN2#~P;(pKBby?%{j}sDGHoN#P5PGlY9SBluG^&KSPbIDQ?#`Akv& zVvRF{uQbja?)hZ!|Dkaf@U_Ot;T~rRf4#;j;2Vvzg8TPPC475_unr#zzBANs z;I6OXw^jWX?)nCPzUp^y*GF2P_v=#Cx8SaC!=I@74&3!!_)PUZxa(v1OI6>8yFP)x zPW1!0>r?n&seTA|{RsXs)sNw>pTKW%$FRRsxa()|+pB&KcYOxGzv>ro*XQsnRKJ9~ zzJULf>Q`{rm+)7rehqhh1%H$3H*nY2@OP`ef&1ro@LbQUqi;O<`m}sbc)s0&x8Kn} zJi}<8#XD#5?peHd7LU*3{j+#-79X6&)3f;SEIvAmkI&+hv-tEZK0Axg&*IrxJcoC` zDeUVKp1$M`hkY#I@hgI_;LWGPzauE&$wz~);pIKUzZa%0|wrN5)tz$b4Ef7emN3;7n_$QyX{CjC1g_(=7U&hPmbyE`7P(-u7Y zhA>VWUdlW0k^U~F3!lh)@LV3l3wZ+f-=7TN?pq4)YQ00a|ITLw_uu)9;qKc6?!Hao z?%NFRzRltOJD&{hzw=qZ{dYb&+<)ieK6!tAJr=0{rvDc14=cFuCnfyOs$au>z74$Y zg!@bl@BVAZ)fS$~8~ALiJj2UR29Lh^Cblir! zZyosTQ_3woSKoT@O!YCml=tC{Jb`yV9nL#|rymHO!n;lIA^l&1d)@Av?@MFU|A6{7 zfxGWB_(sQbxciyGW957SFFzFKnbVcqCEPhI;LhO+?i`lz(cg#juHlL1S;2ihdfn`2 zjru35pIf;5xr3*=zeW1$@!iiBJk>ng@J#FKzOH-2KeqBh7OOk3SmvQ@|&xU%@kZ33or&@KW^^JXb$A@LKgX-2L>r+0O>`|Dk^F z;O=MZ+>?18>9`GdKRfV7^X$Si^|J?$J{$TR!xMQQ?tUh4-+u;l^)rQM8h;3PKfP}D zbBy|1$KiF;1nz#$;2Ry!;ko9M!J~f*{aL`%e+-_(OF!R$`+i=)-Om-={Vd^K-DlSD zk>**!-A}KZ{j5zdd+#+mm^2RNsPooDO`Y<1T#kyLUdk zPtt=&Zw?;AC#vtmXYvGIzQ1=Ee*n)^pTcwb5T5;Bn9m5F%g69SK7p6=DZG-;;I(`X zZ{!&~dUcrR0^XMA@UDCbkL3kCk+0x>953Ok^TTz$hNlk=dD_6Iw+mjwQ=NAUcTO7k zNcB7TL>}opzK)!e7QFqTke@c(IqAR)jo*cr@*cdB$8hJQ4|h%yc&%{;aOWh2N2(vf z+wu`Se{#sx7~ZIU0#D@LPxsk5nW6rFD<^ZfbGCr@|7*xe4o`L7CEPhF=tWr93f`8N zaOY$VpC~64+&S663(d2JJ11LsrTPZ$ob2GvNu)lx|ISGZ?wqvY&PfN})_l6~=GxHD z9(<&J#&GAv`{{W)Ckg65sGJPoQ>}jp-|2V+cTUD|=VSt(X#P|9Og@7j zJ^B3PGs;N|-q+7};42+>;m%19?wrK%N_ptRYk2~9P6qJ!BOxa#+&LM-os$vVIT^#9 zlL_28nZliu8QeLU!<~~1?wl;(&PfiBUlj7Ngg3hX6maKc1$RzLc=cEO%E(qha;m%11p6PjG7oN*|@IoHLos&M? 
zIZ5EP#u>n!lN9cp4B^hn2;Nm5#_(7^fjcKtxN|at-(ESH!;hbz!24Z2KY^#3&l2vO z6!27eSiwi~67HO=;pv4TCl%Z|*}x0Uvxb-QExeL9aOY$PcTOVp$vLkzP7CgwwBeEJ zJMgx=3-|Nb9=uU~40levpPr|4lA!*c%E9`9|HJ={bIf>!z-w*5R!@KeX?)%9AKKaLx zlN9cp4B>_5If6SUV|bD+&P)Uos$gioGjpN%_pb7DD-m)Z&Y8vos$*Z zIVs^+`}qmnIoZ%}ujeQ5k)NNyos$MWQ66^inLN^Yd_QzfTJYJ&Lr&Uo=cEJA{QLx- z`}qmH@beS6bJB-9Ckedv^AosplER&nA>26`!Mn=I7#=Gp6S#9Sg*zuR_;dXH1b+Pd zgs$Tpo@sxV@LXQNN1q7$yMp`gqe}R%Xr2{(@f{(z8+f7f*6`@Hx};zJoir zkA+)o7oNy_aOXCLJGXtfbDO|ZjX!`pw<+AY9n!O~t`Xe1 z9m9`v3-|uc;3Lg*4xh*~c=pMVH;?E2YQ03yw^9GQ%3A^Vby3olj}1J(gYK(v=W`2p zJ{$N<>)OFHd8G4rUCw6<-u+bQLmTdVcHquu7w&xa;Lc|ZcRu@Y=QDvjp98q_nZljV zA>8>K(O(?$Ifgr*6S(s^gXcP)!)xad-pCj5ZWH#!<9Q!_-(I5rPn}P=^IgK{-x+ea zhC7E9+&SF9+q!?&@UHKxaObdrXIk$L?i@y%ulwK}w&2cT8}1x-;Lc$e?i}{u&S4C9 z4*PKDFo8RV19G32m-JBKT{b6CPN&3_HgDyzvxIk5U%+Ge3ho@1@L1Q~8txoc@J!=x;O##O{jA~H zD)<)e95!(0a0hn|BlXYw(&%+i3!dw|ZMbvTfmgp2=GlcihdsD+=>7D3oRdE4zxKlL zzI_7s^STs1(eV&oXk83wZX% zFrOSgQ~eTN%L}-3yMmAYPgqw8cW&44;`L!Z6}*&h;I+JlJGWc7bKAh3+a27wjnr4~ zi|?N;c%%8W;n5qyx;k*@whKSbE!_LthqpC;0-wkS@J##S@w_g--Wj6)BIRui_jNIa zPt?Z@KD@K;t8nKthdZB3c&&96@Z?WIpI30_vxKMr5%Rf)JD(NY`P{&r&l>K0ZsE>n z19v`maOX2p|GZz$XAACpw&Bia2R`zB6`s5x^s`6T`%8Ve_cwvJ|5q4i0H4THc=4ZM zUp$`I>-+Wy^$&7B;i>vKg?GLyHW+(+>h@Wyw>~|@J61)ox>$v>n-5U;R>GV z@w9~J@-@7WS8(TW19uKt9?kADHI)~0l z3+|k>;a%mS1CQlhxO38j&!TX@jp5EoAMTtaaOY$IcTQ5cb25ZGCnLCXGKM=R6S#9S zg*zuRxN|axJ0}_3Ia$D+lN|mcYT|-VvxO1|GJ0}%9Q64t%R9?fKlP!F7 zi;$BB?wsu4&Pk+oIOonu3+|k>;m%11?woYt&PfmMoWyYFqz`va5_tN{dVT_TPExpY z;{81C!y@eO2=z-pKY=?ZQ+VfYdVT_TPUdjuB!iEXhXs5h&*9F=5?(7O1>8AV!JU&5 z?wqXQ&PfG#PBw7oq=q{uTex%5z@3vF+&PJ~58mIdpP#^;lQ!Hr@qT(7-%q-zf3u&T zz@3vmJUTDrB!SPgzXNzCPvMPx2v6S>u8R@eIT^!?&-KFl7;xug3is=&8Qia@=5W8B z%HYn)0`8pTaOY$RcTNhpbFzZ_^;8KT>3*_?J0}&~IjP~^-z_{Yb>D_(@*TYXLf99N z#~iklBm4W6!#3Qnr@HV`$33|3S25i8t3JG~f1j1WyYd0tIZWZjEkl2XaOZFY&-D6e z4A12gcp;y{ox>U2Ih@0t!wl{mF5u2#4)^_P2~S@V`cS}~!xh{)EaARit>OQyoK$e% zuWI=0zlHnN7VexhaOY$PkClf=<2kp9yajho+VJKpLVr4N=cEgFPI_?XB!)XDeYkUy zz@3u;+&M|%&dCt&oQ&Yk$rv8JDD+_hcTT2o=VS(VPUi4i+&#Rnp23}y9G=7>Crh|< zQox;)6+BfQO87{=hC3$}Jp1aSw(wLrY2ePu z4(^U{1l$ahj8a=1b42+aOY|Qcdn*z=V}IbuI6y(DuZXb|199nRStKqmhfDCDCl~h zX9a((a#g~e%L+cdFyv_i_kF2`J3m`^p?Nm&Qof_>z7*;FzOUA|3;Aimou4*5)9auP zJePOj&QA~S{KRnQrw@0261ej-fIB}a-1!;8eP0^Eou4t>`I*3dU-CY?&wlf&q5wbxbHt}`nQJq3hw-D;LcACcYd~T=cj=? 
zKRdYd6RA(`zw^_AJ3no>^V5MlKV7)<(}O!dG2Hp-!<`@Rv*+*p3{d|(apA6o7d&tiM?)>C%=Vu9behRqrvw}N6CEWR0!=0ZB z?)+@v&QA?@eztJur-3^^JGk=`X+ND4=g0eOcYfNaUnoBvxbxG4S2~X2&QBkny*2D_ z0&nC4c)AYtDct!P!bczI9iEwu;Lgt&?)*&P&d(I?{LJ9a&m8XjWN_za0e60Kxbw4w zJ3j^7`B}l8pAzo;tl`d21$TZn@K-56HQf1W;EQ{N`_B&U{6zY`a~r=s%6c59| z&EU?@9Pa#NaOY3aQDz}pvu z{H);4PYEwy8S2;Yb``vWcjX(n^Hal}pDo<^Y2eP!4(|L!>bLjN`Dwvp&9eKE5yYQNLBL61a1f!lQeJTn*uc&O3rTS7Ugo`U$*}PvO2V&ES)7 z4E>qIovRG)TrJ>joi~R&S4+5aRluFA72LTh;m*|>?p#%H=V}A5e6qd!JVry+_{>-ovSI_xthV9t2x}c%HYn`0`6SpaOY|XkM(@1fIC+!x^h**OZ9&Z zujCcn_p1&3o0Y2??p!tS{9bw<4tK61ec!otu3GR~^K8Q#c?a%Xb>aCPLausrJ+F)5 z&Q%}oTqSVlY5;exQn+(9ggaLwxN|jzJ6999b2Wue{5%})T+QjqRR+&=-(J9T-_Plb zkcTDwJmso@J69!q@;zakHQc$X;OSq7ec!+fc@1y>O{m|(ovQ|3e^;pA!F|7qG+*c3 zxoW|kt2W%Z>cE|=F5J25!JVrZ?p*cZ&Q$_;t_JWb4}D1C&ef2vT#ew))fnzvdH>x{ z->;^q|6b*426wJ9c>mrZR|~jvmBUNrWC^e31-y~3;LcSE&)*&Txu)xWRlzg8zT3ca zc?~b*Tex%8z@4id+_{R>Kktik)q*=$ZMbvQfhWHn`p|{v@*X@2ehw#>YLViZ@Li;#|`|)l9FI7KX6q`rE;oX-~A`E0`*<+B6#>xC{| zuNQi7zg~#pe!bxR_dK1e1oi)^Tn*rUy)cCLE)KaG!E@zd3@_vpc>Db!|5NxxK7%_~ zbNYS5I2qizTELT=LVXTT=Ya*>xmv-Us}k;9t>Mm91$VACaObLqcYiaiYYX>% zuYvo%w}bn>*U~;aKi>B?e4@T};FY`!kM9@e>G3?iAFpH7-{v0r+&s7+uT!``A8ZUy z9vE^vp?@IEXG;Ia;4^rt`Z+xMV5raF&g}v|`MXe`!=2kD+_^2_&g}~B+?Mc6=Uu~# z?+x>*;Lhy^?%dXJ=XMKsZX0-}-0tAcZKQp3PMq5o+_`PTyAKNM>cX9e9z0inVtD$Y zFisynl6ySQ)A!o}>SN_Oh4-}n5xoDsA-7}rOy`}zGx-!A*J1uMcrBm9o!bna{zDjN z0e5b5xO2ONJGTYgxn030I&TSY9vtSghC8ZZ&Tkz!4Fn$N_JapkR^`{4qKODx1;fdVic|Oi_&*AaMf@g5&b^&*8bGUQ6ggdtdJk@zu@a{vxd`h@;yM{Zr z72LVqz@6I~K2mPCaObvxJGVQya~o-&ouA^NVO?#w^U#5h)SoWAt;gRUyes#3KCg4z zNBs}_`2*a!P3aE`xgEk2J?@X_n$H+s%O~(`8~QVa`+hrvcmFBW&*9E(26t{3aOXCM zJGV=CqVpDTKTlY}o!b)b+^*ryZ3TC3H*h~ssNv4-7Vg|OaOZXhpFKSExuyMgu6jCd z!&CLA1CKrt#_7V_a*yZno!c1oPf>3B@I;R*19)c;a+|{ae$)`|_oGJeSo0ji{XXgh z?%Yn{wceMT!JXSV-0w$aaK9h5fcyQZ9A4`6+7e#*bsXHeUBR8(67Jlt;m&OZ_xq?D zxN}>>o!c$^IJa=`d!+q$t`hBI3!ci`biI!Dcpk_3?4te!%4ZMm_oMpoTE_`|r1zr+ zaK9gw!bh6_5I&KQ;Lhh5p8QC-PA72Za|+LtpBX%t&*6nUgFBxKxbvCAozErQ`7Geh z=L+t8mhj4-Zvb~bE4cHyfjgfy-1*tUU!nXoaKEmPE(!ND=Wyv`f7RzCI6rN;^V6X# zCtY|U@4=m)7@k}a^3#VqKMCCV8Ni*N6z=>C;m*$p?);45&d&ty{7m7_&kXMT%;C;Y z26uiIaOWq7J3mXf^Hae8Ncma8ou3kZ?$3VtAu(%sQl4}03jT8+4)q)O-QF3zhX3_T zgKy#Qxm6gyf#2f&w>+G82mi9`LVfh$lli}|&f9|j*(IUA4Zr-0Vf+sKVOnn&{)7v| z{Cn_U`(MFh_?^|yKKxlaZvwxa)-{0t`Z$cA!f*GE9JT$S*v z|19)#4S&b)h51zQ*Q?JP_-oYX8vYE8zlG1`4ScA5*}?yp`W#()GXI}aKU?sB{Ct>y z8-BZw1ng%hF_xlSOx#!tHZiB@Vn@~RKuU8es19})_EKFXEgo}K35*1??0LU zKi2)K1%Hb6s|~-a^3#ETm-^O)e?fig!J{JdA%@>yedxnq_v&z;N#I|jaR%@oRX$Vr zUueBU_&qfL5&V_9zQ*vo>OL@mKi~O>f6aG>T+QHb(YogFzt%i6_&b%`1$-mV;rG%! 
zm++bPw}8J-*W(Jl)BUi7U#a`s8vX^{H!AqUwZ9wq*FHG(r-uKb@Bi=*>i*EcU!&(0 zJNT_Ne)N!&`G2(L(}Lgf3t>KO_{VhK4tyu?!e5~0Ej{=vb5kLQd|BTLC!yl^o zZ{eTT<5~m%f_w-6%}<2;XXMWX)a%?kE1xa+OYflk!(VZ$;2rqxg3!+{{A;x@J@`*) zo-zC}%6T7t)3xEe3H%Yd?gsGND4!|Z|9!e4{NsAvcl^1D>W}NksQ(YuPvJh_3?4V( zb^RP3>3xz6o~k~FcOMq+UrTuUk~)<6k`DpMpyu3#kr-CQ1 z2BCn#PT=0J0ld+9Q~2ofVg5t7_iF^NwO?bn_iF-=9uxL=3ip1^;NGt}e6|bo z%;4Uy1-yD_IByP59ua&AuQh%FPc?t{+xzABc}moO+5N(Pt>IlAZ{Wi;Tz56x`?!VY znok4oej@Z|2lqZkI*;ey{_fC+7To*Th8H?-2i|^c7^e&OKK9`CkA(Ud?tSdTGtD!B z&psK}JAhAg-KFs4@^D`F+kNmpj!^&2+Q%{6`#6Q~bUcH5ALsDsiZGuH-h3*oYXSE@ z=J4{KVVou0`&hu+I`0bZ>#>A;AJ_0g_sI(GecZrn&9jCVpAPHY!o80TJk|W&Z|-CC z@RQHO@1}ih!M%?ic#vjA0LGTIO`#6P9?icE3@aR6l=Wy?129F*W>KE`tp2Ji55}wNo`juh) z75%EW;xvcya&WL%Q-mf|ok)7+!xN zTo)7gBg>g!FTfT-T z@(P~HH}Fzk!z=j~UdtPJBj3TZr-b=O%ANC^JvDd>UR@u&4bLAO-1+r5zAn0`f2^*H z9=xUb_2IoA2-igdk6sq8ivc{jAYB#=fgvIrFo9vsqWijxUY)|ywvlkDLhd=XYg9*ox{7&3jN98xpNNp`w2PRj~7e0 z-%lvuem`MF*Xx=R9&4WNH}|nZ{fo4Z8@M0$xA5@~hWlFsAL+b1cq5PWbDqy=8yL;NHhBJbSkC46o!d-22#vdmj_H_i+ICKBjQ*9KmbN)BSeed_7K3 z|0?a{6z+YT!&^TT_A!I|dR)Mh9}E4=;m!HMmvHZ60dMp;wSs#eOL(gDuHl)yf_on~ zaPMOc_dagn-p2;+ecaKtkCFO%ykDB9`+e;GTpiTEN&DD=dmnr7rH*5`_puMJHJ=1t zUJ&+i0QWwo@b&{j{SfYb9Kjo%cMOl86ULdqy^m9PqWT%!`#6VtA2Yc3aY5HU=I~VK zb-%ff1?u0YeO$r4k8Akgk>Pr*;O*yz{%qix=2OFG7l!&R-22$TW9{P(?tP3jFR!=M zd0X)IES$Fu_da&uz8<@9?_&?{eT?DW$39*An83T5r~B=7`M#Q>{!`k=A>8{ohHrH| zfqNgP@L2mhgHJ9B{hY(Sj~P7EJ}%(i#~eP=d6)3|C&GCPxc6}d_db?z@8cToeXQW# z#|>TkSi>95)BWZ?HmLtENq8TA2OsD-8iwodc$|BO`WC#9x8bF{1Fz&=crEY28+i@O$Jw{GYA~p1>dR$lwF`4SyFrh5wN1hwyLvo$&Vz zBly!b&KSNtFU)@e|Lj)>pTeK@Kf-)wbd5iU-|+fSpTVc{1^gj#sL$cIzHOM#68<}y ze*u5V=R+S>@IQG#7{7$?UKPe)!>@jOsITCU`la9-_;bD{cnv>S^WVa6FK^&Ku0HSJ z-=Y1D9(^+ZuT~#g@W)&k*42hDKN|Yefj>juh4=0k#_7R-TI-GBU-8?a4}JLZqrnI8 zk&aV%@viXrK7^NY|J?E8;3D*8jQU4EAUqyT;MqB$eg-deJcoDR8|IV26Zrx@k>_y# zy~Gm!a?R7{=eky?f41sNxX)L?8y#=p?VG~7YIrK&!ZW$Y<2-k$UumAv51-7f=h=qG zI_|(rowp0miA>8vE!7I&Y46o%LkMo?O{&Sk=4DNYm z@U4y)@OJ0?!{byA&*e+_=%zaz>I-=J$>1w^B`@K%d<}2p6+HUqF#ZOf+$PMwhPPGk zb+exh>Yw`%J>P~m-yHH9J?3O?*T14;_y;sj8@_x=sPDjkPxU>x&lkge-ah;u4-NBA z;6CpF{#B|U!hOCme5T_Gy#1lj|0z8B_u&3H_uu!uIqDy$`7Ge$$A-S;@b2GVaLC~j zp1m!20ng4?UY#H62k^;-!Bcqq9sNTdhVV%J8Nnx- zkMrfeIiC~MFCG!DuNgf3k&w?heD>F2o*BH9FW}B+4tGA6aObmtJD)4K^I5{3&o$in ztl-Y)1|I!QSZ@t)zdQIA?tC_I=W_>lJ|p$p`{jJL;Lc|o?tFIO&Sw|yeD>hk2g15y zxbxYkr=dQ9=N}9{fERM-%j@;~enZs1TKOEo$Liw*o?ISsJB62j7v?jAw?7no4tH)d zxO2OJJGVL9xn07Y+XC*~uHepX33qPS@a#TeT@`$CQSc4ixvk;O?H2CbHgM;52X}5G z?T`1}xoyFn+cw;}?Z7A6$1Yv>)gHW7e`0uB{prK&dxX9@U!K2nJ3#$kDz_=zxgEjd zQONBW?&kv&_^b}|nZljh8Qi&@!=2j>o!c$kxozOi?GEnTM%qv3#JO$3{d}Me_x-j5&o2)9-i7=5Ko6cjIMh2| zo{w|eNBuu2w+Y<2P2t@uLT-ofT=^WqYxx-N+)m)m?G*0Z&fw1N9PZp^aOZXbcW!gI zbGxJ~w*_6fUBR8(67Jlt;m&OZcWyUu=eCABw_CV#+rXXM9o)H%wExbPbK8PvKx%=VuU?1LnU+@Ipes}NzJe8;L zOg@B{@)5j|kKu{tIfa)xp3(m`%ySNpJ`+5H`*rc+ES{gmmuK z|F3yvsBghP^6z2%HvF|ssPDpuI_|-n$wi0U#_;GV!Ta#+vfv54ln>yOM~C_pKKkL{ zo|o6_{TiWus{NY4mpY!pC*v^w44%p7@KT<^EBOLm%X4@m_q@C=-)9Qc|Gd_Ab2k+w$^?$EEjNrZ>P2j(CZMfd2@H^f+T)#8; z`+hR`9RBC84dZ0+H$FJ{0{#mx3ZBESU5D|P@b~pXeF1;qjiG)8f3?GM?@`1PL%=iR|y{^h}=$Dhpe%Rd#yZ^2)Eo8WEu z=g$q}bm0H-+ECwxzfkk(_@qOyQ57h58x%&Cd%yhkyKo!87*;JlF6~sQ(rG34a#qH}LZ{P7VK@#@WJuL)Uu)f5T_P zx_0o_=z5Hva5DeT(*2$3082((%rw@ON^ADe^ zegJ>H)|dn$H}5lkN{0{LNa| z0{&9nXL9&QHO>-#Z{@9k|Aoe1!TY+;l<*Jhabykul)Qph8h-=-BlWX}KSllA!te77 zAy*B2tlaM4|MV51KDzp3{zv~aTrVy7+m*LA{4-it2mTz*vkSjOeeS_8lgIGK%KPv? 
z|C2EP1pY4q8!fPUv=j& zpE>+S-ETAa4Z43W;EArU9R6WFzgogC8isxr@So6mSMV45{tsX4`Pdr%0_CBC-(CIR z!2eYJui^iq*NI#BdzH@ye&H{Me(vC3_t4UE(HE%>?eHvCiSX9s@1>bvk; z{!|#h2Y;~kD~3Nn_nAKYEqWf3z`vke4d7pXU081l|7G3hhwxwX{U835x<8NM=e#}S zd;-64rToLcPW3bR1J$=V{1v*NXYhBb4-5FiHBJtHtDX-m;V;wUZvlUYa=3#3zUEWH zZ>{Th4gXWsSMZxO{|)@Z%5x2W_KhJwTljs|=LY^BU5`8XpXu=>y5?m5AEd{T7W}^` zKW+HOb>Hj2-=^`q@VD!ATo3*Z-JfIlqnv;ElV1_~kig%o{T;yH!&EX$V4wvvh@ckeD z^gx0Sas{FAyrOyK{d`_dG?)pb3CKU4cYhkso6jST)757YC9HT*NqKm3{6uMPZ(8mESTR(;sQpRaKm_+0(m!Jnz| zqbHrr|G(*VaSOiCIBodnl&cQ>H`V7Z{5swDdhlE8J`ltIQ}^>e{9)Rc1pXVE=K%h1 z%3BKmtR5eS@SpPYANcDu{uur!&1VW<=y(Pn{lYyCzmJ>46L|*DUKr{<9)C}fqdtFX z_`T;69zQwk_X@tzaS8W4*YH&Htl-_B5A*bRoM(;tH*20-xaYZpcQpSC(*@90r z&o;bxQJAO4^Em#!PZ#wc(L8%_&(mK$j(_jgNBvh{ANFMc&vcx^{d=?_JpQ#X{}DX@ z)!<`zA)ml&`4paN{24rYSr~r~_wVJs4)?+RT%bNxA9A>Vk5<6n{*~eN`wD*9ABN`- zCHxDz&#d83xh&LI@JD1}{0;oRzZJZOU#atM;p?vq<2Uec|F`fyzz%-npNH|IYfs*X zAFapt7JQ@e+wga*KV5jD;~w09zY)V{?+W|dhev-A+~awFo$~?e$J*Z^e5&IS-18a3 zqxXjMPT_EG-_ znr8y{JX82Y$3u9f^N!%2=NR6o-s5qeQ`DE5=M3(7&f)%dc^UkvH-yKL1>FBGFNfb- z^#$DLTfu$a68>t9vxfV;75tT|ui-vl1OJ-y!u@auf304ZMU#^`{G#rME%>XxCydjE zZ}h%y2Y#)de|F)|_{DHP@4;WM`NZ)5{#&8G4?pKEVLl1`2Ay{RfBx%2eG0$ylHf!5 zCv^WG!`C{Vz>`1z?!)uXDLj?W;3N4QK9OhenS23{Umwn!!xQ-u9-SBd{apc1RllO^ z-|3g|8=e=g7x#nLX@&X+s(u6a`L^)>Q$j8qxcj_=yU&q+&hv4fTX6Te4Iilw9k~14 zg}cu^xceN#CvOP*<@I@z%we^tlCh-`en*>b*XXzV+dA9VhTi=N-T+c?xg8FRW__ zPvs-{Og@Ge@(JA6=@jn1&EU1h@%q@e4E5)$Zwt8lwuJ9=T)>^T72J6%;m+F{?z~m- zSvTDOH*n{zhP!WDxck-|b?pqFb-%PUPzfpbb!`-(5e4*nM?!FD-?%N3N zzK!AT+XS9yT~oOGHiNrwbGZAK!P|Ph=Jl~}IqF}fzAfSI+X~)#TF6@oci+}<_pO4v zZyUJ#R>Qkm*B0)+HE{QB2Y25ht;79EFA90{`s~hI8}&D-ZymV%)`L%V9K+qWKHPmv z;O^T1?!Kk)k=8YYyKf`7`!CER^0 z;O^TB?!J}qw$`PKSg~T!`-(je68ad+Uacq-^OtF zZ31`Srf~Od2G8yn)-{K_ZyDTuTfp779A4f(jPLb%f8DnN^*i-#1$W=p@Xj+r-YU5J zwt>5EHQasM!riw9UTIxBxce4qUS6;J)`GilZFu|Q&^NEodA2x9Jt%f^qTe$nyz~gZk-|J)FqGz6bUhzWptp#`A zI`FZMyKwid2Y26MxckvG>x)Zgj(djAdX zzK!WRp1|F=DcpUV!QHnx+R%5E!=%;;O^TF?!HAjkN3T+b+zE`TO01ab>Qw>7oI*T^v&z@dEK`d^*^t^ z_2KT@0KU+13U}XzaQAHlci+Zv_iX|nX|2idx2SJR zxcjz(w|*?-t%SR8Yq-Sj=+?!F~(_iX@o-%@xq3F{idowpI(eH+8ww+Xy{N*LekW8Y?| z@4Z0pzro$N1$?XH9PYj?;qF@jci&cU_pO9CTGtxxzEyDdZ3B1TYIuBo=$qHazBQOZEw4dCwE5Wdv$2=2a(;qKc6?!Hao?%NDrXkBx-`G?d zw(<;j-n#HdsBb;E`__lgb)3N6w*lOJOX2R@5bnN>;FZ=jhP!VQxcfGRyKgi2=*L3e zygv8eeale)Q|j9S?!GPII~^Br_iY7t-%7aqwuZZJ6eRp3Xuaws7~YflpNL z^|5c!b5B06c#Znjg1c`W_*lnXxckqvH(jzAfPHTMl>MmT>p2fTvp53hus@aQAHuci$@b=qEzo zygv4=M*Z!7E<7%5;qKcGKKO}{*GOMIzWdgKyKil{`__TGZ(X|9)q}flG2DIY!`-(8 zKKsegH?Pm@^8GDE{lnF_A>4f%({((7yKhst`!<8SZ*#c&mccWvYXNuPa=81pgu8DA zynbHj+Y0W!mGI}OZ)>>wwt@G4GUTm>yKh^#`_{nSw;kMli*z3Ed!u!=;O<)+?!I;4 z?pqf=`I*o+ug~Xo-(uANvHI4ByKe*dLdPlGeH+5vw-MZZ8^hhV34Eq?P2uj_4DPCSn3VAEx?%NvfzEyDdZ3B1TYIve`ZQ<@)1FxPR z`mlq$Z;{sF{xquh`s~hI8};3v5048Sxcko8ypgBy_=REo zA$%eq!JW4;+r+@&f601ycKZgZ3TDUN_eStt>MmF z1$W;zaQCf-7e62N#p`3=8q_~uecQp^x7PDcK2IL%xD9vTI&k-`3wPgoaQ7{Sms(dJ z?!G1P?u$Yn25|Q+g~zJ*`q;M->ffNgjp6Rw6u#E+4DPEu5dwuL%h5FB`ZyUJ#wuSe9I^?f`yKg(V`xfcvJRkS11$W=t@J8$E zz}>el+}CLj?!LwFI@A3b?#G1$e&-j3$AtmheH+4;Iv&B@w=vv(o50<-DcpUV!5gh> z4tL)&xcjz%yKgys_KTr!UZ3~ZeJfD^IQ4A>ci-0V&d-FrRq#yb-N4W{&f5U)zNK*I zZ3uVYMs%%f40qlpaQAHrci(34ViEf0^|5am>euSq0`9&o;X54{aQAHmci;XmYwrRd zS8|qlp8!d0g^Rcw65z~5<90@uwmdV@$d)5nk}b{1Qb&?!lz1ksR`-$A<5qX4yJfd$ z0udkZMG2dYfRGj85)mLVWJ#0|f;U-+fQ1MMMBEL8AeJl`2%u#bFf5Soc`tRk`n6r~aL%Ue&qRcFg^S8m&eyK>txeCtzeD=pA zewRO6Zoz7^UGWcEZifta<#xpIMSUL{?#eA;xGT4$;jY|z40q+$Z@K1c#Bf(`DZ^d4 z%^2>=ZPxJC_ei<9{Mm9VoA3{;$+%EA+?Cs^;WzaCn&F{dcinJTZX1RtHT*ThUAb)< z-ml@W8@{1@%Wzk2+lISx+cA9iy;`3Qcm3P0;b|?mn})k`yJh&uo20%5`q$kTSKkg9 
z?#ivza97`s81Bj~G<;9f0XT)Cx8_%$uJ8N*$< zEn2Sc%Z9shs~hghZN+d`ZmWj7a$7UJU-PwYxGT2}!(F*uGu)Nirs2u=Nx5A&+?Csw z;lH5ewr#j8w;P5h-YoTP*YJM5?oGpImG2q8qWqTOuG|8>PJG`r{2{}4-Y@xWHQbfk z5yM@%g@*5HJT8Cnb=M`oNfZ8-mRpbEuG~fpU)T33!(DxwG2GR+S;Jj@TQuC2TiNhE z%~#!USKn3)cjdNfxGT3c!&^TfRLT z$}M4dUEe1S-_Yy!7`~~z-*8uMBZj+jOBueU@yr;$qkPtIS8j`jyK*ZVUjC4jo6Daq zw-poqjaqK2hP!fGH~gl)-!ObduY1jKSKl@bcjb27a93_yhHq&6+lKG`wB&cka93_O z40q+WYxvrSB_5YQTW)(M{D-vMZW->%?a+UacIo1o(f6%}yLNiSa93`j;jY{ghP!f0 z8or_V>M`84(|*HUxs4d^$}MI1#?MH(x%|2Ox^_Bi!hcrFZP9R7Zgs=A_5F(Bn|j?< z!(F+p8Sct$-EdcK8;0*_{MQUGe?;=TX}Bx5>xR2>+cJFiqY{tHpDni?6aMIT%eZjE za93_O4exo2)W1E$%X-~ghP!eL^m{HnuG|h8?#ivz@HLJ9h~ed*mHdW=yK+ky?#eA` z`0UR~JT8AOovyz1oA6I+xs4d^%5BE*Yx;iHaMw;34R_^MHr$n4-EdcKE0$}%Rt;EHE)x0K4iE{&k@62 z{Gs8SzbXB5!tgESNyB%P_ZYsbyx(wlE_cfC6@5QrxJ%Eh;d>hYqTwz*Wy4*1>V_wO zTk`9!Z~N6%6Mk09dChQFp6iCY{A?J$s_|Si+?{*8Y51Ep{FdSFdfSG(cyxR4QZdk72Hw|~!yKcCP zXUp*SYCJoJyLfIG?$W<&`1>`Un})mV?iv084Ig}0bG?eMXSj=})$k8#JfYz(o`m5p z{Yk^GYdk%MyX*EF{t*qIGTdEn#&8$Utl>Ya@stgB@zf1>>0dGY=QW;H!`*e)4F5$9 zzhSt$-ZjHrJe!99vc|JzxQl1oaF_la!+%xdxna1w?yljV(C~YPyX)OD+{F`ocXNIF zq{h=~xQpkA;V%85;lHi%Bn)@gO&b2Y8ou9fcfAqAT|6nnZ)iNThP!wc4R`4;8~!Pc zr*62r?uy}`*6?eFyX&nR?&8@n{7*HWO~YM0*9~{+-!lAvXgu47yX)>4{udg4*Kl{e zn})l1_6+|kjVJh?=6dDgIb^s?f2-kttMME$++8;`d{4tC4R_b;G2F$|Z}|Vxcv6PD zcxDWD>7Ohz>c_6(FVuMU40rL|GTfDa@V4gqc39&%WVlOztKl!v@S)-E zdI`f_JW0d9RO9J4+{H6uxJ!S^@UPH#W(;@Noi+TcGJX+;|UFS@gxj)=}#K|xW?0CxVvt@;pa7c%5ZnR z8N*#XvxcWMp0eRCp1R>K{VRq~YdoulyX&qQ{-lQAFx*}5n&B>EAJYR^z#0xV!GI;aLs8XSloGEyG{OO~aiow`KUQ>Z;p@yL{~!p45EZFx=&9*Kn7wn}(+}U$+cTs2@(Ce_eiE zdJY+$()6?%?$UF_aF?FY@L5fdyS}@w)0vYd{5!OqdklBwJYu-h_uX?HUHmRzGbVgl z^EGR@%h#geE?;HC%bKsc;iiq;tpcmH#gMhT%J!FP9IOewUw{Cj3YBzV8|C?r)&;6&KHld++}8 z)8T@^-S<|@wS0~k?#d@L+?7wla92J_!&9G;a_BL9<oR4_bzP%w_>Pv(isc%9)o@onYlg4=p5$x8aJTMt&2V?WHVxm?^jtUG z-LEafU3#_+4}M?LbHi}A?zL;UOV3TiTeZFK8Sc_^%W#*T;2q85`_3OpdRi^l_eTuh z)BYqhJatpzNfyvOjH`o7UB2_Pbzoux%9Ypx@E$DrKV@waF?GQ!(BW# z3~$qTb`5v&+%(+9vuAj>#&gT?I32o9?((&w?^_LD`)jFhM-1Ok9vZ%-JZbp4zV9)7 zSHt%kzNdV|@Yc^tdS(oF>;AKb&uTo2hL@GQ@40et^{sBgKd$BJ%E^Uy<-cme&uI8H z!`*#!|$n?@9O>!*6POW)1K8Y6-t+__eo6{AI(pmDdg5JuTr^ z3||>;uJd`xQo@-!eR@_hsAgZ4JL; z_$}qTh9@-rHw{moko4~ve&o2M^OoUTpA;UvQ~tVoUjJHsZ+M`*)$ruYC47(Jt$!-K z-|&RipAo}nG+!yh>t8SN%oyJKb;4&2Usb+n_^e*HZ1~6@NIZ4JS5FFGF}!?C_^RQ_ zDdB5|hm*qB4d4A;;TwjxzDoF};r(AGe9Q21PWZOr8_&r2yJPr~tc1T|_{giJ{_Gn5 zFEhe#8h%K_?-`y@e#`LLCncU>L$2@Y+qA}i$nf=X3D;_PazgkK!*`U2hL=+kK4JLo zwD2Co_q3k(8y;vq95H-H`?r+g$*+<4XAIwbQ24Cj<#yqVh9Bt^UN(HaOL*Pz8ybGa z@Kh+_R}F8yNBElIJIdEBf2oAuu>2c^Uo(8|n}ly#{xad$4R6(WwhT{wgM{BUeEVME zJBF`5ApC~mJNF6SH9XlN{HEcn3E_K&kMsz?W%!Xtg$F+@e_cHfy7j%`YyHAo4PO}$ ze#G$ELE)j{Geg1?hNp&wCoMlCyvOkRD}?tOo;oFb#PGd7;VHv!J}i93@NF%JS;N;( zNccs=XC4w>Hayh$>xL&sB>alux0J6MepBz)n&HWQ>95ue-_ibR!|)C5->w-x`=q32 z)9|e)gkLv2RK8{S$^{9(ZFs--V>^b2+8^F9{E&`EyN0)xJJ^o8X?VwLq#X7PzkXWM zbIb5h>qGD^`P|k2>%IEk@Xberw;DcjR`?OaS6(SRG<;9(kc8pa@0ajN!)KDhdkkNE zt?+)sdosdD3?G>lo-%wUEquoCH60gb4X?WSi{S~aZ>xrX>XQ=xn&F>OzHWH`l7zoz z_}XE4{`jWh8_KU6zFm@dwhUj}5x#BshVmW5%kP$Yb;EKEziar4@|%XY{-#`a&+ttR zAN+`X?&@JYKQ!FUqgxGk^S~p9yLoYFxSRhc40rlQ((qMHe~;lS%KHtU{h*ZFh~aL& znKFD$^@SP3oo+H~xJ&1v;ZEl-8}4+@y5UaeTru4F>#iE^bjUTs*WW7lZ{2XWjxMfWX3KC_KHG)|I{)7>JmGYK&E|S|NZ+>_9(+vnlhE*gc(?2y zNf`d#^RiDQY4{u3h4&c#&8KxAis1|2CgDd6|AJqY@F~MzqM=UAJglu8$O|Y!|)&a4awIv!(a5> zlAcY&hksf4b;BRIBKg`fJb7LCw&Ckqo;!xWU+?=3!(Z1Y*WESzU7G%zhX0htvuF7B zcSw3}8UD{&4}%Xh*XMsze#r0-Yy7Q-f2ZEBBZhyQ*4xnVH@`>9KVkSKtyf9IZ(Wn{ zJ%+dF{pvUT%^4}T5yRiD<(4vhS<89G@JX#dvxc{5ds#I6BU(OX!=Ki8>W07IJ#t@G z4BzMwzH0a<^ggZ`-mCR+-SA(&A?2`P_)od|Z}?mFK5iQR+j`yWhQD3QdCTxi+W%}D 
z{!%Ts9mDs&R?>OH@K67f-1l9>AA6mIziIdnX#3hT{1aLZw+w%S-uK{x&Gr9#v>Xl@ z{^B2(^t2lO)qf`Zh~d9?NYWV^{t2xo3By05;gg2{u;#1B@Q-W1+Hd%0wA@Av|3BJ) zrVM|H*25XYQ(DfmhX2WjC0~n%hkAd@hW~)ple*zQqvOs z-S9uua^5g}K-?;Q+A{plwfwgY|LX;bXUFgl?FqkO_%+Sf zuHp0ApWHP3Q<~pB!xuIDEyI6B>re1g&Gr9XS`LQ{|6>i`YWUx1zK$6Fr&>=!!#}L~ zO&I>?uKpYTCT*uZhQCqAmwv-*WvM?ShTp68GiCTqy z!#}O}rEd67=>1(WyrS1#HT)N~+|~^LCFSdee_Z*7;WxFMuNgk5?PAmL(;EJ|;eXdI z^>)kfquL*C8-7aL(T?H&Tg&Z+;a{Y7$gbhLTAnuz|3{5y&+yM`J-lW3ce?&x*K^#w z@?Ea~H~inU|7p|a-~KV-;q~V9 z-1sx$DZ^cQRt*0&P5-9hAJq7F4S!VcW9xruzV4Lre#0MppQNX3_=1+thT*@Y^mp#KzXguLZo73}ty}v2L zd$r!K82*NTk?U?6epcJjuHh-YZtH(-zU~LLJ@y;^W19Z5;os`Y-|)-YUUm%seZ4Qi z&o*E8Eq^WbIcfNl@7MZo_-FrJ>%ZZz)%PWX=De}6-Gu+@Cs zb-j;C!{4vvHf#7>^!~0HKCkU}%kV$Z{O%e4UcGMk^Udk`OD)fo;age{R}6pQA4tA7 z4S%C6f5Sha<<|NO&DVXE#?x>3-)g-r8~#e|mo^OlEA2mb41dWJavy_VY`*S4Y561# z|BUumvxeV4r2UWK&${wA{5!rv>%ZY&r}ZTKZ_VlX(ll;NMyerd(#;lHKzA^7Fy>mK<|55AV ztl>{;KfGr6gtLDPe^BG!GyFZS|M_@xdU`Z`%J4JFR}6pAXXJiu8XjpncMX5RZ%O#p z?dIzibX@2+{G=;?!#}9$-!S}BT2FQi|1KS8gI{UB?hEU3zmkT(OWWhD;Xmi>Ps8ui zdbnlyuHN@O!+SM+_^Zw7`Dg8KQ-=SN=6A*Lk83}XEoQ#SmT zv;PhMVr^eLhJU+`7lE#ey7}5u+HWKc|0B)Utl^VdZ}$v$y4fwmogNkJH0SphJ}B+( zkl}y%7U8XiKRzn_h~b|s2oDYa#8bi(hHvY27Y*P1Dp~(68}8oM58!S+)|IDwziPre zzGk?4zkUGUFxI+fPk6lF<4KPXczo648IK=%XCpn&cs%s@ zWsfI3e#PTSkJmkZ!@KUY9>3;=|BoKO?(y&OcygnW&hPj5;=3CBCp}*F`1?Fw_xPsA zH$48|J$}pM|KjnzACc?680-ZezvXei9tJ9kB1%? z&vyAR;c-f<`$>Ad#XhTeef3e3?9{)0r&v^W}$7enM4IW?g_*Z$n z?D3a+yzcRrd3?p=-5y`{_yZna^Y~YLeBI+O_xOg#6CS_jale1q^tkkF@}=t@f7pv> z%i|AueB0wkJig=c`#pZc%8zcJ$|3Z_dMR|@mn4bJs$j6qyC@t_#uy< z@OZ1o<(u;15s%;Ng%3Ucl^#!c{2M)<^mv!Ydp!P49`E<~w8uw0{>`Q@g0v(di;jRAMyCE$6w{~n;uVke9z-w@9|q6AN6?f?neE; z;PFEqKj-mQj}LqNh{sQPJoI><#}ghu=JBM*#dAzP?D2S;7rx))?H(WT_~RZ=dHkZs zXFUEIkI#C1#^Z|~f3?TU9)H5)b&o&k@fD9h3=C z*Li%?<2jFC_xQZWw>)0(__oKddVI&@-|X=l9-}Lyh$M-y5^!P20FM2$9 zPow@Xc>IvZzs2LN9?yIHh{v-Y4?TX#;|Y&fJf8IUoX2}SUh;Uq$Di}~h{u;ap7QwX zJwD^{vd3pV9(jDx<8SbI+2bo7uY3Ie@%W0zzs=*T9{*O4uX+62J-+Vo@9_AB$NyiC zU-S5?$2UFxMvq_j_?tYwG2(pzs2J>JpLa%zU%QdkKgq8TRpz#@$dHd zEsuYf$AfE)`v1KiKjiVZd%V@-|H`_4tQ8 ze$(SW?eRU2|BT0PdHlM^gCB3y{||fokjFpb@m7z2)Z<4y{$D&Edi=k7JmK+Q^mx+a zKkxA#kN>R4`#t^(9v|`e&v`uM@hy+fc>H4?pY`~E^Z262f7#<@kN=X#>mL84$5%Z5 z>mFbA_^)|<&Ewl1U-$SYJig)a-|+Y~kN=9tH$DDwk6-uruX=pT|DnfkdHhd29{fb3{{Obe4|)7|Jl^W@Kk)bwkMDRq z^!OinJmK-*@_5qYzv=NFkAKSJ{T}~}$45N=ryfsv{Lef-(&MWh|0|ELdHkH#&ZHBAqW*DUbBy{>~s@--!1=h~IC*?+fl5UU<`cfBo+qZ8xqeYRAIIx>ZVdw(TgTkOb}Gq1;Y?#FLG ze$)7+a;5tB0sQ4Oxi9Enss~?>&)kGbOgWDr{Nu`#-r%czFX%qg zHT+-tjDFnT5U>4T7k_Sfsa(r1MLUSt|NRk!|8o3h@cY&HrFi_`58@5Q@l*JF7=K-S zwWV@+x-U$kfd%gOh2G~boclY!T+9~o)f$4tKa|JBbZK;G_(CC+q28Z!qZ`GF<EKQmNpVDSAh_SdHLY69_4dlW?(Hq&6Px-et_MhP`W1ue z6NC9;zLq{QOmDh-21Z9m)7_8WmSg$O*$mZ!tCZKN#nN&i7gGIND20nrC29$h_3mWT zK+;DI0=M_ss4whWM9pxoqhbzKv6fi?FXW4tgd-mq-1FIB`9yb+yIMp&MI{Y8sF%sk zFZfaSh6F=U+@g9ns$>`@2G5)eQ5OX8lZ#*!cV9rc zm_`TM7mVdHh0FO%s9oG@xs~9}`Dke_s_;dlDi!<~8qQq<oPC?Pw=g`p^$U&<6D7sXtql+WSQe0G`s zMbY3-s<4d0np`eK)v;107wvbQ!At=~jmAP3(j;iZ9LhvX=(y6|4~;~b@^q$pNr)d* zQwviJeQ6~s)^ORWtGHv8Qn9pLebsWb9Qomkocrt|Zc9`^)Muwk*-KFk1xvsWsWFdH ze-j}MC^6i7Wc++7CsCFv)O|ctMSszK_OYpnacNh`X7@u=S&ShZofKZac)4^5(VW$2 zpF~H2v4i48ozD2>T#b;GeC;X%mn)^qc`CQ=v)QOpD`#qpcsWRRXC;#_)~bWlV@SE8 zK{bY>sM^~SoF5!aj~26&(R_M5lfN8I4xY_mbS3;GE)d~^v-zlyqYgfw9WNnlkWM2% zq}3tJUZ-6*&#XQ8B9EBh*Fua=KPRL>M7EJGV{T3sHop!b|5f^?}Mlb$od#sKT6qcjlIWEt0 zFbvP6)0+CyaxE;)hnz$jtIv;4o>`_55o7Kk#?)FgTrXFKXmoA4O9`Q$p$@ETDPKur z#98QE3{Ys)KtlAdqZm<7h37bJ(-};qs=dAEqIk?_lsML@$h{TwCuq5k&5+*OVuTT^ zimooqz_viWMZ$9!>E^?WD5>_P)MpPm3VNTwDBrkRG-UEXo?C6j-+EJvYUCH|6>ES7Ez9F~zaB?tQLYLJZqG>GS 
zRuxMjY^BV6gd16?bKVbe+RP`6`teTcX*TO9za* za=8O`_@&H31h^Ch%f$+2jtfPML%Fb8#<&3s4t1G&jY^0%gFo{ab;U}&f}VRJys`)j zyqu|2Y3N78K^}ARITh$!CVqpQ&MhO&Xl9o)l{}|N?kf+vAqA&+C`7)7Fle-)xd!sE zJV*I}6&?m;1x0lvvlbq0KN`-%mM&c(TkmT4bQtI-82y(rVP>9kKo$aoixO;e^rm_V zBr3$X9e+VSqw=7n#$SnpiG^I{sYQ#s3ho(SA%h~|s!Dn6qJALMF9s>f+f;3-HdHE< zO6BH)LuV=z3z#Nl*kb5;a0j?I4YL`PiM!Ao83``VP$K0%Dq>@FE?=uQ$6UKoay^BM zgF4&@6SHz94?7AsOzu~lLoK<+$L{tn7c&*u)YR|57KziekcD{xb3X)I$l_CdYa?{w zp5ew}LMx2F(*ZLTU8XeFG|HfMiW}_d!~z-(ddN=c9ghVRu&*z?bS7V%y1F!n$p(gB z-0orsLkJ@_%`1!HBw9^4{p!?ki~GX2@HcQvGL>3@k#jgRxX^JLUGO*>VIp~~K4%BRH;k5$nBt*dphoZ4yQ1hqn&908d0KpzSh|iq?2vQhscJ7$)XeXUNpS<`F#Ci z@@XpTWcd2mHQfPF3sn#EeQr?RjC{E$)r{)7%yy;WQRAm>N7_F%w%+jQ z*`wj*YIw9Qdo+LrQp8*jtsof1>}-L1C^WPx>>KGwF!#`J(+u5BzEC<;Y(3%v6Mr-Sm?;H_=zjmI%?=;7cox`>ZM8tKvy?4 zCbCmH_&(`-^w;_=1X77g!1~P@Aw8+C5jQ>!H{3iq)`lMvDZlS zP_~?@siR1E1VQapDW$f!T)af#f(IYOq*3~(*V6n^ zUY_bZpz(@25jR~`E7IJZQ6)yhtz8x4jjAFWX4>%ttQ<8g`7~4$VW^_DOc7cQ*J$LO zD=TT96vt>zR3`p9<_)!yBaNGdc0xhBAxU-OHlc9%ers;i>GPA*X-o>(J_=p~4Izu> zABmO#9l@Ao(uqlraQgJ=a4gDPrrw7KH`XtLb5Zdzn9wA3oQKH@iH;`V=QCwo`Uwo` z^H&GSV2|iNNh+ub`3l=;5iK^KQ_WA>GUXwphA$l(CrcD7OcpS|K8eXXW=p~Nz~ezN zUyOn?XrPxHY83hg43wg@6~jxT)L55l-NzSO?iS|6@f!8>6&l!2L4=~VOx86dydoDr z@cz!mBYNdS@8+cf1qh0VVPQQ81y22jrqFHhGT1Uv^XlA z8Sa0D`+vFzeb4&`)+6<0uqUe3UU^OC2;H&X;J&cyGUPo5n&tV zDtY9(I8iR=Yixj&@+rLI$zIn|rPh_js6vM6r7BFA z5{*nHrIpgs8FY13Rl3f&${A1aVHS~3F1%d3g z8Z1#!=~0)55i7zxlq{D3%LvfX0w_MvnbFr}3MeUzKlu{=VgS@ZIn9MgY45v@Q2t;- z@B7n>&WcBMxeE{p(RuL~0)+2~>X_b3EAR7O-#JE7f~0*!8Q3Ne;$ZSqw5%`m9-I`; zLpwL9cJ3cwb=K6Zu5x;cSm>B<&IlyCQaC;_9b$-Ki;qk=ZkVa*SEms?n1il-iHZh= zMWauU!4$0nH-3S-;*QKRrtYXd)HhJ4P!;)Y5b4fCO2a6D`+ysVZo#T?(6%7Y!N7uQ zah6PORI$DQ2{{W{p8FdVJ;m7I!B*Fp>-7d~vE5RK%>qZ}39lnS-Yx3VF13}6WZ+8Ji+jX@hqAKiF#Dg_q! zc=lwpUM%21C%~oA%VHQCDCTgbF-!@h*L8PSt4=L%aQKOVvEZ?RCkE1kSbiFvP7jU@4?dorf-ngCjEoFga3k#n z9V3j8zA!iq702rd?+crnE{}Ccd9%Vv+TEP!e=KoQ54n8&k^PjrXGIhAs-x?NFHBv2|XY_o6zSKWpI{P-f5D=@ zi*%Zi${TJb){w3=;^$)Vka`jfwY)aY!*aDVj*_RdSWV+q0y35sN~8qFYLQ0PbY)SQ z;vlTQ4MWZ!;PnQwq}>FwNyBW&UKCqHVH+k}FI!`UO(v#RPvfh{*TWnTHi{@$)Fq^A zrxPS<9Yc3QSsjHCi5|om=&2G<|D=H7SD>B??Fh^61nAZIGXU0Q@-?W-EET2avGPie z0o5p|7P-fsPsg4XW8Lw9BvUUPY`QS2qtA!xSi(H)Si6=*JMI94GDy#CCQ%~_1|O+_(DXsG`S&EG-b3%*>vlv#ksM3Lu}R6&gVG+MoY@;!C@k<*E1P`T2b`C|b_ zZA`vN@wphVZJnmMDkQGp=*gq#Rz22usrL&8;_;`+YLw}Hm}pzhk>83_%Y7USojHf$ zYygw?v5CQfF}{y17mVa{xu^(FA)PjJnb%6CkVPgcfhulKgoS>xfaoHqJfzofT@if& z5v5`6EJ+liLNw3C3z-Et+8k}m9SsI)F3C++R)e_8lDh{SCP+GN$Qk;DAty(pOjSyv zPpU_Q^D3@l1mI4V`U@^5zU5M2lpjf)%4w33j?+Bfr8B|$Vq5YNXll4zTeaqE}0wTg|l2MV{nK+3cCC#C)j;U z2cdl(#X19?IJ$;ScAk^;dxKN2JROkWjhxlJStd8>08OHts$K1VjP8c7>cH(PosK8Y z>2xA-1y<*=V^~d*TgHuqrLKlu31=YcFQ;l1KPtAXrCW_>Q}C22BGnDfzfsMezz}hoSol2!k%Vn7`kf)jy-@da2PBucCs8JNu zWN+iU9TQHHhL8cqXj;;f;hQoBIZHbU3pP2>8F>f1|@-1hTHBO%0*{HoX0`5n^?S~l=ucY;vs*hSSrSQGP7~#;6s56 z&?J8po%uW(9lFF(aXX@sjz8^%Py+45jUHG-IT3vGy-%YqX0aSt>AUy7B>&voekIB; zEY|uuPhb_k*mokJsW;hae(E84Ww|FuJTSDVwwy+S4+VaW?c z)WIdn5@Gy`x~+p|Fuf=a6d6x>_zS3VP8^gHzwKYh#nApZu=JC$0LTfzXhYpn-zoa{ zG=IA{lsA2FktIt?ofnR^Uy=9Rb73)P|DRrm7bLW-wH!Dn!y+FHw?%kkihNQyIO{SN zqVI=s!X`?tGo-mYdBnmRAa}gZ09WTRLne(bEuj{iP_$Rn-l)=r^&QkZvdYPo~uv||5cXs&^VE9t~C9*y`>1xtze z7q8(afUALvJ1W_`ul!I$QbdA%3&(~u{$L9s-OvdF$TBx;v|%l8E% z`LWUhcW!Yv8&K!$`;7(zSmlO0D)s-+H!)?P+9e%1{!s6%>!1kGZ)3#tlf_c>Rc^gX zRO!xO8dKE9fCE$1xtv10WdBzTGyAwh4nrx2Lu_$jkh&&nss}h%PO-K(6hTnHq>7|` zapXN9xPNoeV-LX=o}^iS1btyjRIi|l=}|BSqXmA5S4&k$vz$3Z+(p&$BeQ0+VuCF6v8d3>71 zmILy&Eszh8>&ir%9fHx6t&v`R221KHg~nc}aMi<2MK%XfmeDyj@~9*+a5p(_s+4(Q z%y~UNHuSi>9*GLDAh`Gs`jXR8MPB8gDoDnQGS2IgOcR(kvCpNAQpe8=Q|zD$!$Mx5 
z=rU~l@0WJxbc${cUK%jE%Iack@(4qZyeQA)n_O6%QralS0eG$!7rY|XaQkTK-46|u z7dMHuqeubC#C{pX-&j&l2C|p1(m*b<6BW$E1~Kl7j{|>Agm+I9hULGHGw9>d)xm}n z=xsMt3kB8yIpmV`&x02^f{x)8EA6a;kd(kq;}|4wa~LNpbc&q*Y55stui7zJRWTXv zDjY)|dEUZtv%|43LX&NT!C?TF<&{i%6uzgNX4e$7+BAe{_J4m3>&uXxbEw0p7Va&p zL2y)cqCS>x*}*hmw^Hu5=b=lAE)6d^xL3nUfY1SZ5&eSn^RRLX!THn}dy_sfJ#Z#X z+k|5AS-X^)^DiA`$I|qfSEosk4AAexQe%7Xe07030upFi0$RzUHs31)Iz|b2$?(VM z492ji8ZFV}HI`#k)^P?FtQ1}~XAe=twvR?9K22nzK+c71j6{$(yO{hh?`jE-!5!zh z*rPK)A5Zuw8CdD!J_>#p?Y#WgL1P~23)HDQsW!8K4Js}zJP#x#60!gc8kxG%S~B*w zxchftS7rw(Y;Jf=wl?hVomG&ER5E0XHa$EzUwRK(E3TZ8?DL!OSh#1#sV$k!UQP5iAOCM z_SQr(7jk_k$Z%==9HxZuC#;n4muak~HEj27?8#7<%5o*M00%}EW??z;+=xP-YKWhF zM{%d9$BP>S%rF?5yb=yaxP~KeBLIJmye`${ik4SdF4)KMlCBt6B%PXZ5~FFVP9kA6 zJ01>ylRkM7YHTXc4itz#P0nds6&) z9*SHRo>|r91#EJA?Vy#md@i862S53~>{9M{FaGhDNWu{HOyw9dvg_EafTab@{$c6h zu9M}y0L_`ahwt=r7{;S~IqD2Y$HS@NS6!fKW9Qwzr;dQIhTxBTZ|}XG1E`(Q@1YSP zKEBu3`*5dABMZ;WWogz;D-!rE7xYcJkiBcVM~kuW#MNJ9BU&qgz*-<#{caZ*XFF`t zYjT9dJe$^HQI4Lc5JoFVi6mcQmrG<`;8p}rEwi&H zdk2!Y_So4G_3#Vf;V03OVA-}TPJ#>Nxe%K*78=gAxH1++!V{giucX}Ijz33n!b$N8 z%w%~jUxxpnG;+8aV!GnAJd(R$T!lF{jj=4y$tozcPiUd6gcg?Zy0>llY5Za5FI*_k ztB<&-;e`7Rk9>+qdUN#n zmnO>N(G`xog}hsXA#&2muxB1_MsGNnDP*x#vlg+RF*|;56+qexMJ`fsju00qUW=EDGU=6z%u z)2>!bl&R~>@@fM7mn#u%LM1H&0onJi;k*~1Y&u|4v7w*^MSpsExd9(Y?^1i9Mt=Z3P$4ug`&Qc6dMxXzF5D@G?&lQ)t*03Dr*DK4p4P{2XqA~iYWe?75z}09x7-36V5?g<}k7FkZW`tOprujT7AtXiJ0fn)cgtj!vA{d4fXW89zVm={#XW|$HCYGJ3 zZ-q-Z2~IhdKVeOf)sCGc1$HNJ<~G$ySx`Vbe$a{RVSE(C^dg9(rVObUxfM!s4`nL2u7{to- zeX_+55VWuvfL({-`alWle&_=4)!i>)8gxiDLrdZBlQ$R+n5NKnVOfw8Wr#L1U>zQ` zh{=5SS=mk7eRe#;_NvOIVcJ}aU3Qol;D$AcNfI7|ah+$HrXorej5?SYNrl6rM-7}p8y^#@t+A?u#5jTVd&p^D5KsvgGzqfVuWu}wn4OY`pL z@zNyAF=^z2{k{fCV3oknRAEo23ot#FYrNbRh~0-_g4-K5v}JNUPGY?biv>$GWSR}f zLBqL$J{1WGOGg#*8>Hl*Za@WsI10^(oeif)CMU2NYjSjY1U~blgXD@Rdyyy3;9Q(( zdY{6kWO{*3$>YOM@~Sfy+ygjz1hhj2ialHrGjns5=rR-)TE@Q!=b9|`fg#PkL zWyLGt7ui7Y4CPq%Kt14r^dNDlzaei7gtF=sGGCMY*_B2U#EG56{C%ms4?>Tb(i~QvRu%X=noCZ zJeqQmaNB5>ku*O#YoaNJ=wGPnEazt#Gg2Tr*PI9VAQgUN0%SxCBQw(S$ zbOYeul7r+FM=uk$`0LQowHdn*GAB>-n4U;>9Y5B%=u>sKE0s2-UZ_@N2tN`QH;EM~x!6la-B(9oC&>^i?EY_wvFnA@@W$A`UhQL|FVo&x*Z+ zkg2)zBqIisz024uf@yE9w-+m=m|Em`y88qBSR0Cm%l9mRkzivLvO!EfF4xkk*c&<9P7F2Se-t z?!}P}Sf-;6lDBau+IY`ZTMoWy)OxWYh&{{sCZXGj1v9F7e7B&w=yD~NK7-Szu{)YJ zBcmfv(=_OKcTX(ap|GikiQmJC5p!G8h~X>w{Ewx2XeyONXRJq4J=A$TKAK8|umko) zc9t)9@jb@bA()^v9FAmluFDGTvz%i> zZ#vMwxJ_nhxHdfU5dC|(r-Kd{z-$DknZXaL3oBNV4)jgPy4{ydT?I}zJS7$Cm8WUr zxV-KUo~AdldWEf_7ZKBgL9Wm_H^*i>W*nEBH^ib_!e~!6Jk=VyUD6*#)r-lFN1uLq zCyGBH9THy7V*zPU&vRok&x^#7Qg`*^NFiu@#fC%PevXr@@nfjOj=hrspK%ln1=GR2 zot@-)VfCFbD%Ix?}o};p8%A1~gb=5{CWA z=o#?`Tc@4u|O|KaHL|pO+clBqQfxNZ7vp3ZwmjjoV3D3<%YnqyoO!1ywpp6kX+>CRYe<$x$U!{?EI{% zm?1+wejR_=sc`7)c)fXI-ks}5irW1e5s^K^FCu~}gRmCsd!G_p~gPSYP;@qu}s-!+WaLU=&rN{L6 z&(EhP*1eXNF%`5whll8F)$Jyf^YUG`SuFn5)*C2fg zsyCa}ZY6{K@_DT&!a->=yCPdf{pd6)2Sgnw<@1ji0?d9&n;c6=<225!qm7#~bup*a zQ8Fxx9>@{}SG3mr^hfW0XrYyw)sVHMsNI4P))cl`!OMPHwkdAIsv=#^1G zJf)-5LKtu4m?;vge(K`gh=ij9kZC;4oE9*3=Ax#&kRI3{JasQ?{n!o>Me*)YUqPc> zQ3DD_*{aFlk_(+pXRwjU&0-$r?7Byp8Wv`<*i?IVV)A_Y-1r4L9|ea&)q;DmKNefC z;Nge8I?yzrluH)jwgt-`2M&(7)+`G9%IsIau&HHmJwQ z7>dJf3gku))fD}NRI|Y*jp-it2TWsX-f`MF6Tr61$S&Y8!LR9ReZdr+S9TTWr*%q9 z?od41#+g0ZE}J<=7hpG0agYlT4-5xuMQ$R}n7WHdA+BneisYKrorZ~&#&n&=C$8M2 z_cRGSbgwIBPb(EJ1&oym={3+lvf_+VVg(5)hb_w;3JVKdWE?;3dZq4^&up5D(4|5T zg4N3DK)d9Nl5dXGx8^V2#c@j-oTqKE(x%N&R6v~Ght0Qa+RdbKTg;J(65jI^sX`x!!G zWJ0wNIe&JUoY43nxd(Bimpn3~&qm7a8IkKy&WFfzKZZL!Pe)rJrfM+E5t9VSXMO~u zT@&IU8;nd3rYA?o=!~I`Nv!2|O+u6Ez*k_B1-xpDmuP;xpvEnitnOGq3UO=|v{#(u zu8J7>h|N8dRj*+A2u(x|h-0t2D;dL}Mf_np{ 
z^qB(g3Ax2a^wK^*A3L7!lpUq>0*J9v0 zL~1^HQ<238-IZ4SaQn!KK|5KkRHaMmuM5mYf_skU;6- zBJJV5IL6<5P=nDrkXl?Fc1+M#l{P>EhWLibJ@phJvEol+>-H7$vknG>LNt%+jmH>3 zB%8yj3^*7(sL-}a3-iI{;A$`rS6dA109dxwT+XepaYk^JA- zhmI^@(!F4bVe*1Sb9XkQ40Z__+gt>+6&Whh1IO-mM&0q_ICrBxyboOv2A!yW=1jUq z$`MQ=eTzd4$-gADmOpK@sO72Pa$`7Jh?i){=0`$wmoSeBE}L{L=UL>4MLm(xn`J#U z(cFD^y920-EYji&tyVPl2XMWmwj=d}>(^PD%TW=J(`bkr$q8r)=Aq%@R0Te&jl8;O z28_)H&^ih@ylW}yq$-%8$S^O!v4be|OT_sECkzLe!}E!J+$L#yf{tFEt{{!-dH&l1 z$0N8gz%NVX;rUXDw94=zS+X?CgIB0~i38bj3+QkQe9*Uq*Y1ao`{UjS?h|6NR1cCi zLfM{pet7cSu=_gwJ${!jOplLkdge#~(TP3ox7phlkIPP7h8D#rZgKhdc~jIG{rebZaQLAIaA3J6h3D{K9|UrUSGXiiBeFQ; z1!}tSdW)y=(Bv`ZxTOpeO4YCl_?zH_8Em_(yW=Xw5KXX%fN@91Km+bCl@(f=VqS~l91IZhD3}h*iW_TwN9YcqmeV&d3`aC&ut{2y7cJtw|)ew8RN-D4d#v*TCa<6cyPk zN`Ft~r7StYky=YjZ3hP1ck8A2xv}Nl_lS#3D%ywOQX|A=GCK7Ql2f|diElKlqOFl`H>3`5 zaEW*78*CMF)XxQTXeOA^)9!gbi;7jZLG${)Ssv-tei<0W{y z;anQjWq2>ftBA7%P>FZXI$jwa8wWO0oo2r+0Ju7*3{89Z-0&k z2J+*N43a}%OR!vq8HF?Ar^pY5m(ejG!4B1DJ9NgP-+_K?@E0a?j=h=O9!Az-$ci-{ zZ0_k9AsljOhASsv)2xO=y02jxfSbj|N}*`$7@3Vw-CPLmrr;?@eLQm6_=qQHji+j8 z5KbfP>5K3aYNrQu-~#-Rj`#r`Xb$)kXM?xT;br?=9*3{Z;b`%8)Vf@If!-GAQ!MAx zlRX+wzsEG)wI_M*(DOSpF^62|9gHy~Jq_ zL=GCBA{Pf@0c)Bnm74^Mua{H4Xtbv5i^fxti4@Q4#q-QIB)uq2R6?4ez=KI1L4lEo zy03-?hAnYX0aY2t=a+H9-ED4bw8xoh8;7`pJu^5BgSOB?A;km_^Wr!UP8B>w!SAjL zvSXLa-`#_YPa@v~_az!bkQ{o#0vRl@{{o`RWp3;Q<=a2WRegi#g^D9~eEPTH&UVogzE+_%Vp2O|(@zVV+R~+-G~CI1sVy)V%KyrPRfzoN;WV zJKaHr3+Ti-%)jukGTt78WMDRjN(5;VZHqg%YO|*2yoYE3hMr|e)uI~PX$?nfJuA)~ zuIn`Fwq-iXOt+;}NlBvTwuo8Ay9*&_(J}vM-d#A44TpNY|<_>4Dw5(D~R^JoTr_`)lK<=Q-4!?65D2jH?GhCz<} z>FFVlmA*C{m{uGu_Ca{d(4S?Tu)d7vAmAzEqq`{Rt7o=Y6YDhXvsY0iBvt$)}MUiqG3s?^{j&XZAo^wb_mcOa-6a^+3 z4xC=5G2Y?WKgOq(tFBk72n|mM7$ZJEPbNN)k~T(EV}GU?eZCG&l?8Tcs!mhi>F>x-Knh2&wp|h6II7{Wl2pbYG&&gLW1v9~G?kXv<>nAjs!hzxukEC~#kg(~f zCAbG^pCAE`M)`PaA@(=ZgbIE@;`q;!BTWx+>_3|N>EN7w7#Wo?5Bu^UGXE`UxR8ua zhjb!x9xc)=MUTU%NKw}uRpMe|JHC0v`XJXX3hHTvvW)g5c1R530D7wKSO)D4$(!Wm zw|N}N)EH9(TJo8~x*{UOy+*#pX@mAbcK6)wP!xafyyp@|3BkNaO!&S&n!^maW7S~# zxFgl>f#nRXs;RuO31SpY)Oq5b4j~xmk!y5-hSW{#3ngvJST84X(1jyC3 zYIxFD_yA5WW1Gi7jcb}7Rf2u;vU?ZNV$5{@dM}3{Y3>?6#jtX)>(n562=~~AK|Y{m z-w3GC)b8u(kkJClAgvhCTm~7E>s*+YM?6s24mjas2%+H)4Fq%}j!18K1OQg+ zVE@3Wv>30gNlaw5)3O)G&&E=g-S0d56gOi%~s8CIZIV962CFI8}-oYE`3 zPU*qvqG^PN)Lg)zfWtft3r+@x2kOBv>&iyMSa2MKdBg_^z=;jTj}8tGO2xB*=}ZY0 zlX0324giJ|0!t5^p(;B>D2+@s>ttURLwIza45MKxwWUo1-I9iQ@Gg(z?PoB8O9S}~mg=_sJD;1nZB3*m$636BG8Jxo1nniq^hmOtq^TzJY3SzuT@f~ zy4#S=GX*+YtRbWD^HRrjqaqdX9TXlM^nw{8I?Vxo!RI~kJuxS6sZezR3n3T=YgY#f zc(f-3r8|UeR-U|cpbyv6yhg=C$t6&>NRHwUZJEKOhNIWi%ZC>x$7Ih$)80x?9zp{3 z$gPK_qRQpGoPOo5;KVC5Fth~{9>Yf7Av^>goFJ-kB;_S z$kJ&vWjV~BR_dTIV8i0GSGgBt4F*O9Up0P!D|ul_1NBuO)~V<$Qan8o8xt4!feIw) zkOvp*I5@0hh&7aTqe#2xYL-OV7t+x^PCFW+Gw&%ecUOm5|0J^a3R=CmsrNZO$>NuK->N9 zl~=6CG|Ra6+zVf6Qkr1M(!R*&Cos`d5#Vx1&pDt+QP4vh)9z-fAcy$W`z__1tLZM$ zqaQHvrSOPAbOHR3RWVGkI)%aUNzAi!08U`nf*MJ@$OT{04mxdDOh^pSWM)@OWDC&H zqQ~FE)d9MXbQYxW#g1Y$gQJJ$@ECz14nNMHNoj`+T<*Llbm#0prH*Fy`?1r~0pAYj z>)8rU7wV9bQjayha^u>52=sDTKSgHTrTq`Rug916KW}_F&=lj@iiN^H&T;PZ{axqC zXctAO!wFS%gla4WQRhsizS9__sD+UOkQj$LH`ONsN#{=f3M|ji?c%PMBH=NbBE%F9 zH#FeLG1aF2MFNT9!oyr+R~(KJW{a7FQ^Xig`ScZ;RG8~iRS~UH|*{fE4UbaE>X zC3Ue#dohyGJWspHM(3=kgrsNPHIx%FK0{RcD$aP(^I250LbjNbDJ*N#IFCql8@JLV zer2&_4#;LCS65tFW)!zLBXWB zv4M?uUtk&r`wg-H4bV9Bgirm)ldI2;jt$2iPaJ2%EaJmN=JGJYt|HbtRbSkGSY)18 z(Vce8;kbObTn%RJ)b7Z6oV@HC0(KOy7?7+Yv5&# zEe;@5%%cB-mU`1g@A)e$D^E^4nsaJ5HXE}qHF+{q-0w{aW zq5VTLQ4K!`54V#l5_G}dZAc|Bb!i{j#iudiHco)*?M_U5aY zB66?KEw|zO@OjH7*i%B5OG7k?fm)%@cN3V?Oak)!Kx;u&pt-3pc$~s1a;&_b#$k2R 
zyRs$1?JqvF4vmO=Iy`VNOgtg}NZe*{KFI{U!0+P#QGD_go<<~c?kSFv)dc6s&EvPI zD}&Uvj+bg@>Ewl2FgOYM49{u~#)Bt=OR@Q?!~KENbz>Ir-YarZBV(Wg_nJIfRlcB& zj_l$Zw6Na@uZJTC9po2``jam*ZgK86sPD2zuN7A6x-}tcq0MAg)*G|KB zz`0X$<`f=!4zGil@+ba|F!}JdJz;M3;e zT1Pc7Jb|?b3>JL2PH+W=cz~t0rkWu2;ST?bTM;H~kUZwNub{%lPcM`V45g@QM1x9J zd^xc7ztX|3^6c_~fH=*no^~F>{8jog&YHrp=CpF8=fY_SRDL-=X$(#!4^tUqA-$AS6upG)KgtjthN$4+qSEn$WwEyvHp&l}6UxQx z!I*#h7j;a8`R6=O#1MBYlvG7^`Jgip9~J7)JRwXS!@Q0eesXeb0b(W3K`ihfc0SLG z%M}pohun9=(-C!ZK2yL$l*sesVVnu>YU+MI5@rUCshun-@#NwH@3W%^2g>|<-%m$r zw&=}|-As3sq3iJve5RuJ&D*B8zGFs{{|Zb;6diPbmP`5NRAUcWvtZCT32CgfFo~y{ z;jzr#MYT|1&GYuJlpIun?OJ$@SW|!3d`7g)zv11AC-1`KF2+ZDV@8gtIbT7N?9J}c z^4x*iGdRPjp;$Wi94WgL&f_Ef;JhpsQCu{y9)#slVfT|M+A)Tk!7dl>S&HnGUTsFY z2DFs9>Q0)5wk;+yS>G7sV4{)NIK9JIMqVj7bH0#NW1MNGpTY?Mwk+A-c8GjMCjJn|4c8Js7F*!0a`#ma0kmyG9c z&nE)vz-U=tF0ISZwAw?*sQM`}juuD>JzhC->p?6mz>nA&I(O46#2};EX+mm~a!rK# z4|yGrk@!YqZ529PI$&c0!Uht=FPls*SplXR0f&m9pB5zZS?+^TgQ-J=Dc(>m0#X!s zLz(WA&=E%e+T>_eK*`#*>Ift{!^lplUDEY4p< zJ8KV*_tGAp7B>Ty+bkmxp6Q#6w>kC3T7yi$*^G&2$e5AwjLIuKCnYp8jhV(mk(1`? zO&KfNIJsy(>_*bCGKeWI-ng_-gKtdey(zjo@+iJMZHdCn95Eo@SaVpwKW@*E>L(3t zgeU)G;f_kLC*dWg$t}dW-axwJ-LI61d04dAKSCc-LzT0F=&DcR zP_x*7CcY*;?vgm2*My$0#>K1M0ezp;IubL;-=6+*`u*N^Kzq>mPNKZ1nfQI*~Lc2 z$>8G2r(XZm15e%m)aj>A%|0Edk%l#6w?&8dcc>HqEhIijs4tkq<3~=i1I0-gQ+yhc zo>)y8YZ5-B#rTp1omhma=mH5AXK(~Ay#1S^;BT-hL?QUVURvWKS%nQQs1-}Fwer|L z?C)-oxukQT#5N`*8RdVO_J8#SQi@K#K>g7NvGAP~Z9@*>$(5oiJ;xO?Km3x|qTWaU zpnX{wHc=GmbM-7ZemHWI1$cnELR- zYP%>gVx%z9%~?F3#*uk&K%&J&+!mU@%4IOckbWz8m28osJ=0id;#-m?N20_7$Ta2- zHQFMA1N+LQERG>6LGpk-m4#DFt}S~ec&aUVq69A~J($})qOgj!s(>FAA3LV7zf;gQ zmKlrPQ@Y|;O$RhX#=%BcI!}UoU=h5WU9O#&k!En$V7|h-;6wD0jbxMYqHUo|cSfmE zR6#!J;W)fM3isBnjl)o)YL5xp0S^Z36j`h@OTIABsKOCW9k$QuK>_EntW2IhFr#3F z#s_Z?Me`ufaMc-}o(a%(E_KXDFxN(>x&u1m7|X_0@91qRetL`v7Rd3TzgTimj(GS> z!z|-W(axZG-kyg_y@XB$>MA>Eahtp+z|*sbF)7Eakz_S&cEYOA)b!}k43qHnucys` z_>lH0;@FVOLHk5%nub%ZduX$|&BwPbOXYFi`r|zM+_s}Tq&`gpASc+(u~g=ngyjhD z^X3VJ&Py~1EMgcmuw)?eu#kk=JkGS?b!#%JP*8gKw4ca>5))f-X*j^4oog}bxjhYI ztM_@`3=}%udpJka?u-3080%Prs@qbM4@w-e64%^D)KYOV`f>bjj~G?047`D0Hcz82 z`+3JOB^IPkoT&3*|2wT4y5j-|3J%f|Bdjg@In_15gHwDexd=w=%o55R1p7s>3>wdq z^#(Ov3)zFv0^dxfsFid;hjS}Ab7!{_Q#&LMiSjnBXz(=$OF{d8MYW&CLzE6ynfA|P z<6x0&wN~kV2LGHT%g&p4CF{qVkBZn&Afg!}Jzq+9iuyrXP@nmXvns8{!-Jv{;NEml zXDJ8Ww!>(rcT%y09qcK>ovhX=jD1u~JLEffhsebR0?G|t1V$6i_<+Ae1@GiX9*E~C z+jvEEXXw!IxgH;mXfSBPK*WQc(XGe}F`psV@N7`@A^y%>0dsOP3oxisa-PSbK@t#; zj@EB-U%c-_`C|3#;1Y?0$aq7(rL-u;{?Igr!YYBL1<23rJMiR6oW2B`m{lfpxn!SW z@TJEu$(#Z!D{6=F$Pcqu+p84oZU54f8$~5YTYWv-d*ksdYTxwk;7s{n7Q>0ZEiY*g z4cE3i$ASKdttWZNBT|gsNK?%>j%SKuI8z0sEI^B-6GuEq!zyylhe*Sb8DV&asUjp! 
z9EQn7->V`!WX!u=mv&nqWagzZ7syalo?z*cPUU|Bws|dtm5tr77&kF$B?xzg)1_Lb z0Eaj6MDkB;_kE1I2C=1whTCyOa2Wkn8Q(lSlfNDCZq< zM?=M!6$Vu_a1R2cAl7vT=C7{5?i+*3hl6@#J1R+rOc|&T4xP;xXi0Lv%j=h?xLv1E z^LY+;r}10uqv0Z+q*CHZGe_(aEt!v>jyO=ABDeWQPwR5GpD2s(D3blJo(_+{Z~7 zq~(S&aj{EWdJH}XxPc4fa7?5{Ig}djQHYC+zXFRfQKY%0Ti(Y669z81-eLj>8!bHG zQw;sb$Qy?m9ev*%yRHtp!Kj={i&znrz7eB}+A*%Kj!Yz*;$b-GWLZWztVH>cQ|%6HnIf>$*y)Gp*CWWa4|KBR8_5V<-?Gmw1A{Ykfj>l&whVv#olMg zL6ZBvJhn(qo^hYOV_2~wB9O@L&LN9se^sps+u!UX^=>%a?=k{MQDsWohV=DAqvVf+ zYd#@T+}glcDy67^R*{PtT_}$5fp2@nc{4IJpjCUjN)7i6_@@3d|VK?(iS|^AEXRy1d$|X8M#9C9qIeNSvJZ zxuWcjFy?91&}jgDUH)8yKMnn4MYK>qk)9Y&Q?*Z!XrOS$qVMwXdQ8=!yvNC`s3 zvZGgEO)HY3Nft=JIMuzS1MXGW=LxARv{fJ7VP*zfhNMX+ukk4ZY8o;XW=hv$fTYmI z4)+i+YDOLdodd6P%hl>J$=PY`7AS;3HyHH)p|q(+|KUqWSkl#1^>d810kaC8_x)0y zE3|V4M*{QzsFXS)7W05IIP$kz^`bCx*UgR6)HxIo2+G1NlPfRY7Sx37(XwmeJc+_r z@ts^HidWage*ZyxR0JsO*plrU;VNATpz!jX8+a_ zh)+)4yIJP&gT0l57a^H3XnG>+H|~>WO4IIh_hCK@huje9eA74!D(r61Nt$k3>oZr` zjj9VIgT@O*<`NX<5K2rX=+nH8;QYu{ztD_H;f6D%9MGbLNYVB+NvP- z458^Mq=TtW^$+l+5Hd`l8FJ&aLmMZ)2$Gr)0CyeVA+g4)9LAh3%o&qE8QzsSN;y-} zqM2J;{hFF*R5v^Rx=49PCC^XD`4$19v{`IS;9g#3_Dc2anfd0jV##L{04-4su@5lR zMr8I`apt7j(B5iceX0Ia zOf4C;6PR~~sj-XW^;;Z+fhz;3N(j}&C}u9^uyJNyEX`~$&2%ax_^ayEO!bMX@4Ed~ zWVX?Ch7`60du0yW$L$#uzUvm$!E0#cQW3C@-MavOu6@|~IED8w7lQqZxe zp$_r!<2S2=3v^d&sNBoLT5od6uo!H?{~>eJt{S`eYEyhn()lFfs+d7z7GyU!y~Xga zYvRjCG1ZC3^3p%Xb7L|-E!TwJ7;F;!Pg$T3m&a7)xJ06#6rr9{yGE+B9=>NIwI_;O zNWS057bxf62#r*XMN=t$h_1^Vi2_`vMvFw25XK&AC00=09W03XLlgvC-;^(S^F|Q0 zNRrUc!RTSZ3o|b*86LbeC;pvN>F-?cPws0#L(8f7CoiDftdUMWa}JK{-|(_DQN8;4 zJnw$h*kB?m7jM72MwiaAZ{&D&%dg6}uM`M<*1y@NZz0Y!RX1B-uMI8^`#ir?SU@-{L&7!J4LE1?(&>a5dh)q~5S9unjT6TznR0lJ z#Sf42V6f65VYP9Ugf30Oe|X_#Rqz#4Mv5 zci5n;r7OkWnD=@rl+Zm{Iaiy(n>QD$;ysY_4>sL6f7-jn_){0ePOv9Gw&x!85d>w!5yTyA>=~52 z9idn^L%{pwimOtSmI1eS8g>?E#gZ`O)yn7NWT43P z_PWwKh*Nh}fe_}wia;z~JChdEe2$PUw}DJQBM0*Gq|+moe8ZnmptDFwdLZT#JO8wa zG~YCs*3A`qGPH&Y6gh`l9^^DwI+{Ip1%sm5Zo!r9lc~BXP)!psA?#j+uxAUi!y-g({(%)@! z=Q$;yG-+Vl%F}@9v9h(f(kZ^7RU(&1j7mcjY8Uchh6PGNM4=SeU0PaY`Q1Pjy7q*= zTO@vu4HBC*Bd9?xdK6ckNK~A^;BBXtOm2_bGL8Oyx8;jjpJS$sV`idNL~{xkw36+j zfz_{tZ)~*BUB4NAvp_54A1g*9g|1u|ve#Ky_r3#yG^YHISHPp7D9-sF8ux|oB5@_PBf z{l)taf7V0zC(#kjE4{jM%&ej#hQGUbzkIk9A0Lt&p~BlhAa;cp^&a~-N2}covoQlE z)NB_;Zq0aILYyuG*-m_jP4ycBQq zMsXM0g_1&Q;jF4UaxzqwyF2n{vo=6uF9}6<4GQ98Sa7l{tvtXjmW!7=o1j_i+tff^ zUmK|>{zcxcssmHbil}Ma01_P(JmgBw5;S_E8w;jbw5h?Z3LGN@agDj$2gU(WA+@jG z<{3cV=tppvwHk++Slm3wMOoUX+(IU!L=`yFt*V^gMwRmlU(rqY{iaeaJOk?nkIH)p zuR(7SeT$;*HH#-;d%-y zW+vkdJK~RABY~-eFE{YkmDCv=M?CPtki4Rx)~ru_=oX>jo^ ziWlhE(&OS~aooQ?zP(T>$>Q~&4zE@BVCiS(oC%VMbx|#&#OILM3n4nEqN@~41BPLM zB*+^z-`{HTBjk=UwWuxzZc-8#3&Z$d~^ti{H3{u77`woNJl2hyIf=bOIj zOPo>ko-PPHN0r)UCN%=){PqI*18&nqL(v_ANHP<)dwhCk9d;vzOeyIgyM^@$_NPxz zD8wC^q3GZSCu~?!=KtsW%%|i_w~(oDU&c6j(EH@4yefvjTHrJXZ}#Z+be>FOl%V9v z;B@u?B0c3rb}xIP2)f4cJ%u51`0s2$DV^&c*wOW|Q!shKf!~TnL)nEBe7Dn6Wt5Y$ zQ`ak5aiyP)L1aVHCkn&8N6ZBwjH^WaxOe^LkT%9NmnRT|d}AiyP}n3VRhNqVmLN9+ z@_exnThhi&g|$ww6(|0rwtSq@66kCFtNp(&j~9;VnHrXLhz5lYK2bH?QVZFpJK|26 ztd$(onIBKT4Tw^eWNaN%hExP-YskdvLA7{AzQPLUCp}F@tLbk+ohYG(>As5EMW|C( z=gbSYY=Uz9?B(+^;dK^g%Z09`lPLZ|nuQu5=qItss;%&I&p(g~|fm-@iw2$C3RJFF%nV_#v1@sz=kgx^GsWgsY&{U>lSv>?hwlB%Z2do?e{Srvx z#3xy@nb{d(j>I4GK@+DD=8V2*1G_P9!Qvz;a1ap-GU` z2EVRqQK~{z<;oj;Lh+cYit!4O7QPh1t2ll~LxIE`sM%B0eLC1&?e1=Dbf{25w?3T! 
zwZ0#TrhIC!sq~!zuc$2r6Kdrzs%o+of9sQ*`T2_?{4v)oQ)f;ga}@tp#4UQ7 zSYT*}FPb{Em05ZXWx$efekw@8n9h2adX$t?{&EXNJQb<=$ND+iCCrPMmF$)%ZQj3Ze zOU+Ug(xtVLvg+|474`F82(fjXH&m2>iBFVWT3-C#Vl(|Sts|kUHdPjm8U0)|jxcg5NHXXV$2{)+6rL@M$KS41KM37cf^Tl^$WK$&bVU@-`O z={WqC2B%1D9E=m8mU)N!iEf1Tb^>vi06RyDqvCk}$j(6?TsVSTNK$`&uB^KBbOP*c&mHtnR8tye$&2;X zKQ8#+?#9Z&%9bs7AVkIV#3)OZu9GF#&MXY(t6~O^QRTcH-U3mqvtxKh?rt!GH{d3@ zHC1!1Xl#r*zD7xT1sHXC{ovdCFVHflk zdS)}7k?F~R7TKT2VGp3dn3DLY*P?P1Cp{Vj;;lh@HjYrCNxpTw?M(b=6l`LE@Scn= zRro{gH5-J;myO3Uojw5xtJt98gSkfK5vE1jC46Jml+kTD7OPZ8N`o*>!NT4R6y?A_ zmk=~7Q?GU8tuUyuEb%{-=r|PD;Vr65O4h;$SqJ%d;(oJZJNnQFa@=!ve_Il>RW-$QO7C&`2}aFYG5Z2;iE;cDRX_|d#&UD_8)(p|K{8On!We>=YRhDU&Z|Ezs}y{?|=U5Z~sxOtv-^A)E}G_ zd&S=iiI4~MF>Jc?*B_W3xYGz2bSxtIAZ@8`!LEHS2-mQ4kONCghc}fin7qndFg})j z;{QhejC`5c$_T%nxurNo14{6~m4jn==d}N9 z@E(?e&$GK{rI7URVfp3j`T6-T4Qbvd@P2md>{>XDvo_#>tGc4pG4n*U zbjIPCsy~{qksf6l3P2UA2U=$np7#JUSUc}bP{{iWO%Zukw1wl7Zc#*XTF&iHF8GzR z@=R+992t>v=f^sjB@2^=*ajF}tI|vfj9Ag<3<%h1C;iD5V?!K71F+nPjBp9$gp{F$ zNooY0>R9h@upI#I=rg|>ser!`%_-lG4szkx2Y55(8`Zxs3UQ7ijLuBa{3ObTGiG7pI4 zUnIxf`3?*fo%^Z~dlB>OujC)S9^QOlMlXMPGN6><32or93784X<(Ef{)R#YFS}zuV zTG`)OD%W_dNST=}{`O1NJu0;6SD`yuvk{r@4T+Yz>r5lA3ah+-cuD(g=*M_}5NqBl z5E<2p6$>ZM`cy6hvsH4$u$VV<%Vsj`amrsEF(F!V)cXKZq7*U(t@WxaN0>K@{Ho64 z>+v^9spFN0!_Q^hTq`*oWL7F%vF!ZHfqqW|&FuNlu0_XTx@huA*_V93`1^asXsU#6 zR}S~k*&lu$tP8v(=i3yj9p@0 zG`~nA-To%zr5>^Jd+!!s7C#rngHng{7&olIT}ldTl}oiI7unV-Zg9gPy<2o%J}cgQ zpy|?OL36o6Q$bU&ixx>uB<)zW3+!A$;%B@nE;cLh& zCwb<)avT(Fi&9=e)YO;rve-38v&qh9`ioVT@f-y(KUkR|Z`Rc%H0*ypq5Xt)wSW2Z zpH<>18W^#B`j=6#l-L(hxQKCSXT%5rrR%rYD>VSdAC5spNTXudwsuf>uhGXviQ+rh zAn$B1HQdveRi!fPKb3B9IgW_8N?vJsyj`Ts!j?E6ddYGm9xmea==Vc@+u8_sVpVWE zQ4k_wb@kR{>6$ifdu@Vcy^DK_X9fAY6{*RVrBs$ocl3g)r=F~0X0m1Sz9MF46OSGB zCssJ#xq9(VFc{HZwR14dK=yIfU{@Z&tL-%dmkqN$J?^$G+VA$c;lTx-7W(aZ&?^Vm zF9zqCA)O5QhGk4M;|U?U>STj-qO>2ec6mGAyMOI`gKi3d`=pc(+&v}eVLP6i7XW{F zbi_cG|HKq0fv_ka)WN*}A7MboW>WNy*E#v!V~LIh=J>nqY#j-04L{lb7NuZ3obtZT6=O{W zSZ^y!)OFPT;Qp2gBm6>nVFAQ@uF6GqCQzKYG z0_{?8gA^MFvheqK_{X|shA7^2asa>ZtdPr4IYc;X&{SkzYcutXw5G>u2_o3u@!ObyJPl zgTIW-zZ~q8g}0C@KLl&53ih>jNzne}FL8D7lpYtn+NSSJ&R)g7-8O3S0(pVEB5cp| z=0WiVT{-$P&9;>G#~2Ljhny`0a@AbP5PHnDLsM0Q*cl>2?4U#yXoOAi`k3+a{b5%^ zhdC17Y;PlZ&()iK56%qjB4GRS;10x!1Cq1v?c=*6`@03f<)lBXQr#R=5 z=dFx6z@f9rSt0f?<0QHjodcUFRHSE+#3oL46P*>j44_hA z(*Fo4=gC$Ys1PbZaZNc|WYW;ERk+vX$$U%ReTKr8hop5wfI*(#5WS6S91A%3vv2|t zy;6;-DB~E8u!$3uBVFNpraazP+fDy6d7&bCcHF0EBH>Hlr9=~_cpYi_5jT!{3hJAGPVQf)ziJa#CJlnNLK6|zU!aqLms*|y3mMm4#*!2Mp=t3L*joc`O>}^s7t@u&y#>*0suO!lhOAR*qtRZfL9BYILV-FE6aOy>@PrX4!xb~g{!_g4;7 zIFeVYeD?0aZucjC6NgY8QH<%z9hFbk5>kj*7XE17_{mXl4{ex95`&8PT=eWgAN^ST zPLZM0Qu)@o2~G)b6k$O3FEU=-IB=VGTx09WR1u`yuCe#)Tg`P)ZyITk6-4Vr_Xwz`~BEnU6k`r z=H&cAU%brt`mz0!~=b6O4Wyl8n zltlkmcsUgO(rNKWK%%h}QHrG5D;GY%xAf-xRw3Aa|6qnnOmJeE_(Tmk7_73{MaK6j zsK+?P9_Y=1?mT<}QkoMB*q~GfCL+z7op`mCc`u^|RmIAqhG0d;PMA@EBrztJVS;@I zR+Zc3#)5)0&Q!L^mZEULPWH|TBcFR>*|MtD}$Xw0ogNz#X9W{UYe&sdlyTaRj*=E=A4O^KApG5nbPC*nSH4SW&(^8#JkNzF>K#P$ zwPoGTuB_#`LbroRvjm6KXG`;@NB$WWs!>D;qk&Su1Yqn1cdQy+)WFvAJ_9t=RbR{D z_FcR|=}0Ge12D{aO zw&fCwnge1pGyv|V!^KL)KtyZ?1|kUoMgD*4q5-W7A<2+0?w15pKPCM;+z+5d)_;5* zPGI%Dd?9~pm!?AK@)!?-#lWlVMqO6J?!A-r5?uao(!Nmx(4dHdz(X%IuqI-{3@3GK zossW=tqJ9_Mdth|5#6uFFyM}hH2aDu#t60*0~AF&Gp;-P`|rPh^!+^|n|`@c5dq_8 zBopO+Y{yiJ`xr@2fG)iz)fF|@fx@=3#5^Jp>(6f9@ao`K9G|2lGyeR?ZCqj9lZ!tKSY-t`f2xSU}nsMDeFP6ZulsKv9N3=Z1<%4B*H|+ zg3tg5N#9xazw(VyR3w$0SsM;DupPKo^+m%3!SEpwsDHDA4W+%%qORPHkFpK*+M)kO z)ySVyNkgWUPNF4fEaq-ODWRW0S3a{&WGgd3n%g=Qlb5PZ0f5QXT-c5n)jpjiZ?BJg zm1Xj{*m=2Ctan!FIP_g{b$&aX(<>`BAe`8t&iQy*H4AQ9xl 
zgidf*WX4?fIT90PJq>S^p`{RrW=5>bTPD_{hfilWcMj+Uu)XqUfZBt>=Gr=F$n`y2 zke_DWgM9s>^l4$iOm3}Qu0{dP2n&SlG&;t6xl6?6J+$tNmIcsE#JH*wFA}DPE%~vs zo9Zre+U$?LSbPC!Irmfmc7-9{pOu1n_KfGoy+c@y&W&oeY0R4%4;9rcJ|OLe__6$U zpQKpGnEKY03KjubZSwf6ud8D|7^mTTR#;k3@jY<#PEeLPjq{%o|EmFv< z+zHYNN=Whln0Ynffmw^4leVHqK{?>rxf0vQ`$Y5BfCS<|Ikp?};u>}ukeElbH>hOBy`b?AXe);`N zfW=X?CSiu#y~81)IDnO?0Xe!`0(F9NaQs1$Y{YQ>Q%nr(L>cyMO@Bc($82hWaA^Sq zSg1V_Z8bE8oKHJ2arq;qb2PF9#&E85_a7}e$!)SOF-(x1ToY?G$mR_YF^9^ysF?Zu zTHW8;XGju;I1zvnm0q%EM1Le4nsX%8LGP2zOoGY%PpS>zX2}qP_xFo{WM1b_4UJMM47y#^o^iNC8WorCy$pYn^l+wu9}#s2P}y3Wz^4yW?h?W1X_DQsN) zSp3@=UXqz3B1{cvZht@vK0imPoFB+Y8!j!ul+x(>&)Iv*lWzX0voqsw*du?nsj+g? z)c?ha$ZzvqcjQ$NW5>SKk}toq;{K1~cR(GAa+DOi*UNlA|Aq#384i1*`*>P`P=|@m z6{LfDH|yc-@}rg$0X_p}!mh@9LXQAXE^5&sScc*H4-J*(XQBr+^a8SWJc}VAK=W7c z7EUYfWaKR*W(eyiIZPRm2!lb-mr#z^H3*aUX=e0eoLAj1{#oxz`ix6@VdoIF<=G{> zg)w=Qo2ZH>f#57#zKOJyucZ5s!t(u8U{G@P7!O*Cru&bah!zP6wLRM+6O*N}iXlFSQAnB)~>1A55k^poCJn zZPXc40~i5BfWbu1sg@2Z3wVc`z{?MQetfr}sAn#V7*!ETkC68T{Y94v?2t@U#G;Az zPCqM^t6AI6>np3z8}$>vq9cV7CW{bcM1A1wJ^SE`$={J;T8IqU=9Un%v?wZ$#qKx` zlm{7bd^UkVgyAuXLv#!x7ey0NvcdCt5uCKvkU@usM{<1T@a=v@QKZ75#hv))UjlC=KMB_N8F3k@ouMAM^v$%tT*P1 zP00g`9a**~#rnn7%?G3x6{v{1M;~2a1Jg)=Fxj;Dhx79j`2vR*$(1brhJC6ocQRVF z*jm*NqYUV%X_Gx^ai>H1VB17D~ot5rpSxzK+-6-6gL|5(IQHzPva zu?z8fN|G{=lt-TPl*ZieQGjD-V^?8!PVkXL4+yrfz$T)xcZf>Ykd?SqE>fAy4U; zAs27=>N)H})!JxnV0koe=u>Vo-}_yannC2Xpm=Wefj&@*=rI4@_3mk@pQtxf`EC_2 zMjgdPMtA&xVMQ))UN#6rDAk^}V)s$9US9~~7fR8X7S%EyK|tFuO)l7R;90sC!RFk3pc(YdKK`L zr-C>>uELNsZZ10bqJKiqh?1iDqcsP`tSu5?bbqwv$Scu^pM8EulI3D0tuw}6MqxYd z!)gqIj%ZR6bwQFUpF#%BVlns~Tr+4ZqhLis1zu)g`WfsuQ9-`j9}H@mOtFSh z-!Wld3h;nPtjH=%O=QQg6db^J z>|u%C3@&gB8YoVC;A_0Z5zxMh0Ky}%eKYyICg_USgY?iIO|3SrX2~~S2v;j2o6#ph zh=`Do355!bmT(d{5u_JS~x`9fENbI8)yS8xVFtf^RFz7Y9S zVpT(9I_6aYb*Y9whLhrR9FIId3&h3Xix5_2M`0J^je?SM&}l(F6vf_lx2c@Cw!T_d zQ`~(;1;wrPE*07L|I*bTw1WJfzv%AuU_5|68m$qKR?uQLx?-#!Y@!&F5nc-swxQ&5 zP#{X7Q+Ox&n~T3!*9F3*)MiWgBQmO7IXT(mAqZETU<1q)F;K*Kki3ncF#?BPG@NQF zCB~W=0eX`?PmG2XSz`93859*#c){3SV9^Xwt6<@t#W_K=@VUm4o0cUgC9V~j7jhb< z3@gp7+gAx+=Y8^^*RT%e0xLnZY8e#>+4M1g5v?Yc7c)6V=WqUu6hOQBKBv zI|Ok8l{unc;o^fCcmbgdb2rpd@40msQ6Y=ye@D6Mv9)i7(3;s&NlI`F=6yP>dSH3f8QLgd=t1^ZcbmW(7r;% zBKDiuu6N2gQoG&&1Ek;U(@<%d32OLHg`NqVDQs4xbhn&naU+vD@nz`R8}2oWWIEn8 ziq)hzhfgFEqcShOrP@2E^)2i55eWd3`E>T2$z4_P%jQv>#WJIjYG*tsWGF(4*lqHb z0*tA!GW4$QEC^U>cY+1=-n@~qovUIYnHJ5129`FwJ>{J#3!zTg3)YecLc_}g``#2w zl=*2qt06ZfHZ%V0q``!fWiTVyR9dwlNY#Mq9((Ej_p7HZym?A;9a7bdHVkUtkij!R zUB*&0&DqS;r2hj}6``aSQ0P(9L(zU4^emesNoZ9?)DT*~9uiD(7|-SS$fgxGHun_7 zPmz+Q3-=KfCT;6N>X?$P;+s~!wAqqwvJDon0Li0E{_}uArxr$-ZTh45N*+dc9x)$` zbTE&v=nFJIviX0}0XDY`9h&8Viakbs)%`FJC;Lf~R$Q($Gol;>bMKZ`?Fn$KhPb_5F=R`26OY6G>%BTorRch}-F0@9**NKlJPTCVut)0mgkJ z*o@%*S+w#{uP0E09}59ysl_X~R)`(|C6LX9nYkb4zZo(NGvBM?D+-h6Xb`LlHwlna zWjejby_m0yi2RpYoOkW|e+kJzrb1RFe97fJdTq*#Hz}@~ z__}V&&WLI}v`QbY(NQtn*_8t*)5T9ao9lnx+uc7XWmT!WbNBiBPM7%2DYnmP=~@rI zaUNNiIGQM?fX@e@kr1JAvt_}{6f4_hE+Yusv3>rySl{{YVrBJ@v@$z2H;!9;Lj-Kd zGXd9a7ClDsnD+*hf<_1E%l!aqU`vmaK&|_fv;Q-B@|}atwLjzWxrGPQ>UV+J0ngNg zkk_&JUw94uyi_(*$kqZ3EH}-uk#w$9mXa^4fd8_Zu#&=fwXBB06!I{hDXv7t`>3cG z0vTe!n*@=>9LVgD5sC#pmCW-nV-|y;WzHi+np}?V(UCEiSX?{!=-d=hg2;wT4L#Vz za0Df(v_-=p!H+R(xiWm03e*VoB*iU*4K}QDDc=zzC?;9M^3e1G?lB5nE5x7;9y~=j8b^t@BT0+w8b!`=uR)GnCYmqC%$RfvK zM0(jqWxZ*I7~8!RwN=1?zR}wZU?zR|I4)zWi#=_odkrLexw0`ol5M5}TGi|kbW(~F ztt{05Q7&n<=%Q}P;NurnFTaOXep*dB}Tp8!?CEe|p!0Rp9wm#uF-_W__9<>hpR|Q%MQI z#x@pITZcDiYPcH-7LTGGDLZ$os`b)PJmmW@k4UHj7qO-k<6-iSF8a8E$pn!^e))zA zvh1GD#-kEZgGbOxbQ8qmvm&X5_LsAGn72Hh3w6e-G5kXmMn-ikbZ{U9TB{spf+SvU 
zsTPuu9;8%{_M;y@iXg}@4zP!cF=VLZ{YTH9Y`d2oH%8~|@!{q0@T`TkBv=Xsn)}Xf zUfwhs!j7Idk!lXE`>oTYhlw&e6COb0at!~oO+QJAa8cK0jmMASZyujcj07Q-Bvl5* z%1M80k%Q7ZX_t}28=rJgRBTs^2{pDb(JMGT%~2c!n0`5iGqeG4Vwi~wpY{Ve8rzd) zJq3zV4xETLSh#Y0TM1$f7Eiiq5JH>B%|S>n6DNS1=?qS9KuT(Gs#!Y6wo2G|$I6H^ zFl0c8Lx3bOp9CWrVo4{tsq@6-mmSk#C~v{$@$QTlMwJVOp6~R2_5I~qmxA;wA4m#{ zE*iMvQHQtcs6pRjOn#NnG8&|28m8AozSzpzTB@G2ez3Ps0@<^_L{*)fPWGodn+zqV zgy;%Lj%@qJ-^@^j2!BnjtSqxXK=wav-qO)ih|Yvx17YHt(BeU-PNv0In^ z12UhS^bM`C`keJaSmE#w!34ap9v8CU5IGJheF2QPfk)qnND7z=4Zi#(zZaMI=i!$z z9Cvn8=Ow8g8#KN5B;nm?`b;gOoksG`u*Y2>g)u2VlwU4=IX~Tu@F5_F@H|ud+nDje z98iR$+TOqX)M-HAZZC=VG!50HcRp5D1`ENMfIRn?mq3rmyA#x~R;Ha;>E!B58~td#s&+NW@yXq0wg%7!*iof!jZ!@tjs*G#Ga11A=s^&%kLNBRIsb<+QxmJU{yU)xLBGkBO212&McL>@zMDC@Up{$~f(atpv6c2L@8@Pto;GP7GbaVvKZ$Ik zZY2Uc&=RUlq(VBpmlB7YJ}I^ZmzIK=>hnc$p7Qkg4Tye%@rrMjiszl};s|r=lzSo@ zT}ncgZ`9tfdrf|4quMZY_vo5Hit1dI3|wmdlkttLvWX$U01yqO_;$H?`jj4T=d=(# zCE%){-r%}MK>qcw0y~|xbrHBD4-m9bJE<&);Tuvn=}!M$xBJ$%cDGP$S|Up-=poCF z|C*Z?6UW8*Hv#Giy7HV`sHbGu{4vd7RT376f^rNU5dV8M3y?Qvfs^W^`V=zDccB2+ zK5Mf#gd~%9)6E&oV*_>Hd#WFm=CttOHN4)>B+vX3lq9>)>N&ExTVV^ievVALg*uG- z7+8rS+$sf#4&-)t2qH~$>U3LdH^Gn$z02K%jwS0%A-$wrcE)b*LQ|c4o zf*Agi1%oM*g&{KCZ75rfHjV^ABBr4$l!!eA(6|O;?i7>AH1x4QwVM0k(N8P=8mo=ztTguMSWtMSrdL zex+`3)F3CCtzzNM{CYKt@^>s`^yLVig8D}b{zw0b)|LVJ1SIgW-BW}%U}_kT2rvp1-~LeCLIFC=4M|&xKO0gu#hL=S zjR?l<5`Ms}R4t^)&A62{0Roo2!;xe$BaJxt#k zNAf5DL&mB6m+uQl4wz{!r-^Z^U^}r9Yg61yLo?T`ErQa_Sh6^IOr|?BN`_svrFTLu z1c$4FQ_@RT%pnk_-?c$IOue$Z2gUDgcGJ@<|Bkoh3SolhG~8g zCS4`$#F(}wmccL%9pwT-lnTbr4$veR@Ifw0Y?Xv42r_y>-~*)Fqt@oKyxLxs-j14Z z&F=#lQs%G_Fw%?SQ-tuYH&AR<*Mm0r#XH)I3CfvqVk9XGjOC3(rukOanoz;WQ-JQs)g2%AFzp+B$WN7(Yg~X~* zX3*oyVi~9R{QMH5OcIr1A%4z~4BWSpe@fEcUE}V-{P*(@%2~>{4j1;vQkl zzh7`T;$Csm4Y*6l6Vg0*F`fVtnlZIY)cEYZua^iiU1mt zMQ-B(JZ5_HJiD%+fihZ}V(jw}sfcnqns-(om+%MO^Z$nJ(HcmoTIAGFSb|YIGa_h4 zaiQwRBZ>Ol!|%U)I(yr>xdul}0qT1=50OX}fL6++0vV2y$U#%IeO7+;6>dgrJI74e z3h>;@wldLBezv@`cDhwY*U)if!(RU9f2!4tiqkD0_h}tymcJy82A>xfKw?E^ z;t3D?x7oRW-K!eRFn8aG)73k_DqpYeZtrbwt?#P`-_M%Bcc|#ZOSn~l#C)DGN_i8T3=HA5}?8=u7AJ`8RJHY%OH{N?CaQDg;*`Ty!X{h=%^S`P~D z(G{CdG0pZ$H-4u_vz&PP<#fK_xN*bs!t~GLefZb|Mke*X#d>7K+rTv-%3Ko;A5Mq zjtg%N*Il=Xd5bqPQm)7+t7{e!A#YuH?p(d!mUmumx39`yX>NSv9sAf%;-#E>`N2}T zf~w9=L0xQV?5eE|cXaIHQVA}6qAbsJ{HI>Jj(WIMt|L4tSf(+mF8w)&K2oMw<&0dA5S6iO|qRBM@v z6(jY@~Oe>RQ|^j~bVh{<9O=sH$$pTmnP&J=U|_+SI-3A0zFn8UcMdAb1j8JNCAx z7BWH$=GDvDnm}ZI(6KqDdGA`}u}fHE{RU#}A~$|UJ-*Oky(#i&q6AecHU=r=1Hotmg;1x~UzAm~}8#a)sei}Ih;-eHFa?XFbB zu*k+K1pzpT+7m+v>;tjVQn{z95lCPUEs0T^%Iu8Q4NhzrdO=d2zp<2G6=YaLBvfT^ zqTCn7Wicqe{bVVlNAJ-XzjOFN$%s2AGH|MN0U}B&1g=ERZ=1NR@y=AWNUNLy;1|QI z&@?HLfN9kqtro%rq)^+CCuJg3?!K9O4^XkKBvWeDCF58AoNEC8L;~S~47nQ&q7HWM z@tfJGF5kO(k_s)AUtuuFVOm;>EkrP7h~|x(2h+zOT%w=Gayci$pB(I?Xg)8{_ied|ovuYSAF342 z(Fi)ynFj54AO5g1wT9UL70ekg)aUs^CV+87&C}`nH^`NrzW~#Ddlh2sqp(PUApC9cXunBC5eWnvmqG95*U~3jh{d%0sWXy zQZr%7XqwaqM~-kLO135sS8cN8T#sIxrkPlz6lG>zdlsfSij?#lFK07O>ySKuP;r8& zsvZ=5OQeyc31bDfL8-weStOW3R)MxH}sLJyD*oiQ5$_2;1>s&)b2aXB68Byx6@WHp>#Fv{K`sJLHpvK zq-nMI4H9lJRlr0=FB75%0Y$Lt)SBoO3G(FR;@Frjf#9H8PXc5|l2LW)k3+jnE*s(FFmQ4Wo9<GJk^(@q{v3{tX*IPtg==ch;@RRt*b_hJ?cfkjcx9pu8kgUvKV3GscL*^ zu}S0KD&XI4rjXHjMpSbE66sqU@ThM88@P$vFa>(olNpL*;C$~oRYCjc9BbCyUmG05 z`)(B8uO?vCno(2N%Ak%wG#QE;5|lBbI|Jb6t3G-|g*mI=ISWe4SID27lLDcQ!+=h| zP{MeyB*L7=E(0SKYp)GulaD3X`Wb3veFen2(}kpXu(`dy`;rP_s%lR*s>q4O-wK;o zEJ?NElbn}HA!zfTbmDto%woT*hF1q)gvIWMOC^Uo$1BQ~r_9C2_Ca6}*7?+4l4;kM zpV})*hu!;Z%v!|l3bdAOW2*@pCV)N$l!u2?@LK6DsUa)2nd|vnlBNw|~+hpHM zA@lKCsEndoy_3h8z0rfALSH~Cc1C%FnTlmu?kWnPvI2s6qSBS(`*qdu%wd3Qg&l_V 
z)+ent7cd+0%8&83uP#p>R9s99Re8NEB-@iU9dnXUdmf37om{tb5~~r^6AB8Z|%?exH^aJ#MdYheVuQveNYgWI;|4O8Yqh^r;UyU`rGIx>ufk)!^Yi}Iun`}ddGPRLPXCnpXQ}!JOhZ|Y{9AMv ziwBDjAAR@z@Bj0MKO7z%^InA!Rvr8lGNh+G9;HH|6d!n*`fNrpqAVWV7D$x{13>EA z3v#Mnvx>CvzDaZ%Vo{uhQh$T)5z><}F<2o2SxQnecy;gx6$>hrWvUG2dH1h(X0lZ2 zXoH4S3mIb*?6M8lhXFJdCk+KlXC51ix3_$c&>*x z4Q$)r$xB0DWaJyZv~t7~u9dRFJp+NxS2g*A12;m3CMQJ^Xod^R_4o!e=-2BTRg#li zF<&HdG^zP~DZ}eLYLd-4#^99cC?lBmcn>1ZEHVe$7;k1gKU77RB7GhFXJ&!9&iqs4 z&-||I*fqBQZsuE5pc>!s#9b%PkQz5uiVzWh!;SJh#cJf_<*I+>%oSt}J))A>c%{)w zj(4K*L1ug3c{kbI)Cbp^sMoNE4LN+zT5?q{AN1tY|#Q>Vf>8!t?pR;25hjCuf8 zVmhhGg-G1ssVC7Yl@<`jS}B4^d_sGt?dK=yA;kH1IRCPzwxkL-P_thBOw~!sr7-nT zkaepIMcluhFofzGIz6YG1p^R;++&2)f<2~16w>wCg$x(u}R!^sbmW!DK^#B&MyL zyuc8=ug=}38@1s4^*_timm{>agw>>%(!k3%E^g@gh+(shyaRUuF}}H1WUDnaGTxZDjTmXH$3wvB7WP&_O9|abLCx(5 zt96h+z4gpI)r-b=Lz0GgtG*aAP${9l2J*(xdcFNXZN1Z7mFGmdg@Pvmnh895H!|`X zjWzVla+Gx=s)DC!(~dd`5Fkdqu>#KnNIqFnb8}%o5REuhr9&pG%M;?kDUOW>*9O85 zOITMVDhIt*!>E*EwSG=NOBzMY(AdZgV`9ix%|QVp0?COh=PU$}$Oe9c*--TzP#I8v zACEtrN)+J;GP$fg6%CZ^<+`!*^n78uHZX9u5iyoM%dYan9WRjznr9w20pyy^TgZsD z$^>kd(??iL&k`~nK6k9<@vaoLkDM!3*snDl5M5OAkg>LLdlvSq1uv@K( z1SkUgMeg{{V3WYDiYLh>dVzBqSjE;9d{rdcgPg-Xx=&8a_DkWEK^q?3E8)31%R>#+ z*rfo)-Y~A)0{DVl>fOd6zz*Og)$nEYv{29nyEb!WeTD=~PGUhfzn=`cW&}1;$Jdm- z=z{_@CE-T6_{Q3^{@G?=MU8J8pzBLgdi>3lVF5B|wG!>ndy^?X8O~03Dq80-zm*2P zw_&b!nFsH?hb(5nP~g3&HjEJq&t#(Th5LKr%wl_8b&o!{z)?Y(mTH?9XDrX77NS|F zTR(k**@wYbhN=owGlJb~{ll|MYAW#M%&;J-Xc6k;IT8@WFQabYBIvE(k{UhU9w479 zpgX)oNg*O!W^iVbpUI3EP@mh1QxZ?A9M`F7{F@`Y8_a&zW%Bl1z*!ute->d*x)Xcz zxB#1|<(^bQ`&W$yI9CO;CP6L;%SdE9J{9CVNMG8bn{ zF~=JokZULFFm=FuzVoun-B4&0fhLm)7X~p@<+Wp7G*a8~K2K<)a8sqR6UGPQUgbCd z{-+qk*xblE8k{J%I>H`g!`}?VJe2A#i|uG>3};@WfX!!cC3Dbo2F}tFuRtvNm2-{S06Lw0!86e~?CILQ=vx!@ea`Jl-U z)*-z7&pzn-0_&$NW{#tHn>cj%eTjM#g=sOgrTj9{`naS`s#y*xK^o3r#H?JC|4HrE z{teUDiBqH-hHi^GlT?VowZAyQD>AJZ|7Px~WX(7!5otL65FIoR67DHnb^yl#ZC zTVKl$dt40rs#3@l!PtI9c11(*HA%P3lxAf!ff=d?<-FNO1-FJFP2huw=nNb?pWeAB zJ75zD2naf5u+*Nyb-B6fQ^@@ue5W54|M0L_f&HT*XU2+=6+`^mqoY9W#V?iv!6bZF z-Y+|cr-nhWc;qvK-Kc^r+ZH>XKr#ds>9kwTc@pARY_IP>UoRih*g%8A@-DQp1TCoJt2+kP0xQy*3CO~D_)Vwqq;&Y@ZMX$16s)7T~!2>)^(I}1Lb(uYIN@|x%))^Ax zH>PVP3YZWWj0e2r`EDf<8t?}r;J6FF(=tvZGdB^gcl2$|+4-i;0yAatzJERB*Kqg< zTWR7Cd^r`RbA%T7!LS4eqs7^suxPYeh*^mr>7x4s5j&Gr33+jF62dcfO*7bsv(HHH zKQM*rK(r@s4%oz^*ShpnAzN7zK3quv$5etN5gT?k_oJ%|InftHR>6VdEAK4~8zo1Z zjdMz&5!k;FnQr=X^Q9JEwYO*D^xY$^mgxFN3Im#778Wx^EEftRsjv-}gu^B1Xyxkd zHdvu%`!FLS_URKM^0wEzVy$X{1R|n0B|)}ae8EYP*u!dx$2bItQ@s7|dlYrZhG1wR zp$It`<=^PR{9XM0>;v^keEa?4c`85W*`JUFbeU#Q00(H~2#^5OH#VpGH!Qkw_dU7S znIy%X94ZE9r?*WFsw-Pa#;mKe&n7liRqCw0cZ1XNhx2u+<|cyRP)bbbA46Qp0rf(%etL??u>e0 zFL(a9v->Ak;!P!S==(%ti5oXk+_bHM(;?f%jS0|#px{YZN;+!xxm25+CoBZ)6-}S1 zXv(?##teX9pdv>k$Ru*X&FGa)PI_`_B@%ab58(KSDwjh<+`QOa-I2EkUVGWg<>f|p zNz?WPt_@SlRMT}i1DcqjDdf@#fIzk1e5e}%2t%II2*7F@Oa5bB0C*^R%4E}Cpl<*d zRv%OTfk`pps>@QTa5^-TA%vj=58KCQz#{FDZ8z>O(0FY)%pk>-(^MeE$4`>I`Iwv2 zT0$m>qVnMr^sw}Vb@WsR)$d+2TU+plnF_(`mSJg9N7y83M%@3m=7eU@IsH3GD@|uT zWYe1*u#SBLW4NDhwu0NG!mQ-&#gE?JYkw6_hWUhtoxzkUL;6%Mc8WtqRh2bW$Ewey z`hxAk^)^dbL8m0N!N`WxcR@fDr-H>)vou!RnIy286xpoJ-}1?S6k++N)o8p$CxFEP zw?o<+nBnr}5XHbUA{c<~!iLXn@Yqu@N{JAn&J5{BEGm}5-J%7q{GZ%T($LB~2ep@$ zy4xynB@PqZLAs=Tdz4U@aQnL{TwQ@JFZQ~7he~n(1DMII2kc()*UPzvWqH8dR*K3cGKHJe90^*M-6#38gVTAf4lRa{CGg{%HMwdj>sNwtjzT@{m7~Az z>{I1wb8jVHn_9)Fw@zLzQ;y61Lx@b_Y<|OBQL+;H?!3n<;P|B_s*GxK%;xU`GE$7w z7J51|x<__z=($%FXVaRfO0!YllbQ^VO~Z)y&P>85F#4lN6d+I$pxNvU2m`b}m9KIo zsS4U5%J0cdovK29{p`$bwWn4TfpL}LHu1;b#Wm)A)kI5?_TZwoVgsm-0sE`;zuz!- zJ1h=J(Z{#?fOp!vARmmJGA$1LR5?s>sJ;rAOSjzvPN9<`9Z@MiTx=!}BUmPIZB$0c 
zz@Ath9A77wN`C89Tx56|W%K<#@7~wc+6~C()b7cT8l}gL8-I`AQ$5DWA>%vWXYfco zY2HCC2~yQVslX2Rj7~Nc&Kug5y05@?jcdV{i#@ zBUpAb$LnTQ9;#EA(?l~E#|Bp%EQZ=EZLLcRvKR}&a*c|DBG%;eQDu`h{-%mktKeYR zgpf0@wPaIM&rG%%W8c&hTb9b@Sv9rWq#O;P<}>>JoG|?+t`_y&46K&3cv{!XOmAhD zOyb)t7vFySZMosMw7$E#CW6Y*?di9WeatdR=Mmb*9hB#%Z{o$cC|_cw0QDsLQ8G%?oY-RzAaU z@kEjf>2q_kw4{%qMO<>|%}>jxMIX0T`xVDnOAu%N z@`}VVn!vBQk26hf#q5`WRl=##ZP}3t>=8ko2p+#94jE0vW%>@Ve2cxnpGZ!)KHQMP z7y26N@QIKU_>d^Y(1+c_+xGzk1=zW3Ec|iNIrww2*;y=95LE3zl~i%Rq|X8M5BP9& zL`4I|<*Q1AuDFI7gYL6v*<5Qvm{nG91J;~?QA*~%RdOs;9Qocdtg64Sk4Y{dD1Qr9g5{?ps?b7Kx#p%>ajMvoxga5}`8e!rY`6G!Ydg^$R$? zSIp_h%?)#1vmi}G{r%b1nnt(-3&ul!4}+JVXrRbZx;!F{L2IJ9R;`GQ1%K%Fuj{00 zy1UAfPywO~V$F#Angm+GTO-dNn!9$B@f7g@(^h=aRZeI-W*9!?%I4-K&f>(g$i1k# zH6N*4KvB)#qBiY{TjmHLK42W>wNsEOx+jEl!txSjEXe#umxzY!9UW9%RC>R!OJxuxWpebFRpeP@1Fs}ve9(u2` zu39~$!t8Uh=2W&M%G!mEnF_(W`l}*7h$37eYdd9di3DSxAkkmCV#amP5C8F@Lcpju zWx3ehIb>$+uc|I2NB%No1l{aXehUxn1J)AKiwELx6_N*KntM#ziMU$`T&K?qCGn^_ zHMqcG6Vz&tfgmN+dt^8n;?*`hV)zU0#_505!)jVv%#F?Ut+nWj`P0g)m9B$GRVz$d zC2ST{%sZWMU&xx$lTebx(9-VpDpTX8x25I)Ykl-%9c##+KA{xUi&X8tJ;shD-7k%w z&CC=Vbyoz7OalzIE#RUuGMMuqR02iJJM=r_CRK1Q!x1A8Rc8NeVwZ^*E#4T}8UP%6 zEx0_=W#IjoUW&NDGjv6b%kqg(5i^gTW}7JIssa7gseWz9xCv}kpDTn5PNqz(&m5pI z={!Q>{eDAND@Uw7C<=JwqI;V|7A#8)#=~J%T_eWkjru`dAHNa#TvRI5(p*LFaTc2L zCzt<_dU@ViVT4GN(2%gIs0hyR5E#{-C3#I)aD2n8&{z&x&ZJ-5l(g_9&^4aq;4fHM7O6q6I**NmceJvw0I!T zwCW5jnB+9IIzO7BcO-q1@A#h{eC$rD3|BXp>x}zL$My*5B_BM=vPQekG*b7f=Sb1^ z>U3#`5uj2-Zz;lf)t65Dg5Lyj@afoB0ql*Qoc$T~(e9VkUcL|_IB|-D{fuT)x`d4G zQW)tumFCDlp@4>bR%yOGQK^}(8a=#iJj5jQd8d{KW60p@a}}VLmUt2mSSLLsxKBli z1RJ`bL;;?dgcI) ziY#i{&CEL@7A$51Rvu{3pUt|)nqqz?T{Vvavw-T+B33Q82vjNnRvZ&;5aePW6-B>< z?)aZ5@Fj#N0XlEal?;`jnq2pbG9N$i24&07HM+Yl_{DPvm?e<8#!{OMj?`Q!G%XD= zj6+$>1ct_(N)QS8FAwGeQQ1sr9YiFktPE7Dl9w#dNqr$CWizJH+3rwf>}08kDhdft z8O2hLB(SC-DUE7jGs}fb_%%`=A~QT*cB3al@{2yzjNysI^?$i9hCcn6{C|7?9@0n) zSOR_^y($WAqo=@G@0CyKg9%q9r-F@qom}vP4_Yrz%ID{Uqr+%=G_<{=84t(zoN2G* zoaUceBrFEq&;fY}O@P3)la;|A{u?~3km%5VC)Z{wItq>|ZWg}mxE~u(hV0v_FO^EHtgaI0x z*lKfD_L3umYQ9pLUV7{R?T|9k03qC&L3VhtL@-uw2;-DSSByLmM>>f}6TF zVB1RagGV6_y_;8bySOpZu*ntEjU;|jk6?r;N>VHpClRl;ET62c?X1%EDQer(If zBAaTq6#$!0(6cS)12i~uLo#oOY@YNERkqnbOQkl7>a8JO)n((TnA&ORee@|W+&QCy z4f4@4x%7o20sf+*)`aU&9$vjQeaB>@$N~_Hg=8cn1Cd<7Sn>D zhhir$aiukNBk_0;_@5YXJa(U9`bqt#cn^(!6MJyqL{AE%^jAZ}# zR{zS)ez;+4T=kBgvAf40kcKEYMdhC>Fi5_Bx5z(#zBu3GPn)Qd&zfMEXK2p1n+TIp z#!$NDz;Zw~?0B>aE2TYN?ri=!&3Sf9pZ0syiB2VpYFx4!ri4s1=tzSz2<2xS{r-cX za}Pv-Jz*R0OzEIM!T-!dH3kPIbYyjZi#r$&E`@xyp^>Textg_;R#_X!^6E~eyb=^e zE73^i8`sVGU+tHl=)^q8cGZMoGA0}qrz=R<@~2z5r>;!zEF&pvV<}`BNp}fW>4Lki zexN${3*U#t>E(@@N}?2q_ORILB^(au5X%fTd;S5!rF0Lg>b!7EN$jy?;A@*qcImM2 zlVI}v0`i0l0}}J*TDZ-sltP7)P`YU=Qk+(IcUDPrb^)i|{R!|#hCx32H!%joh9d>z z2~$uLX@ozMre|#pO#Z$K74$COb(M(9v;Tme{$>F*sjk|OMI&TKs%0*Ozf0j{nVNs! 
zQAVa+{`lHzn<%h+R1g6yhuDtf#ZJDS#+FQ{gH~fb<71ZhUaWN1RX-?hq879H;`z60 zv^2hMirG(Ju|9BFTEtz#;)<-hIhZ_Z@G)n}moHe}!LTY<_V%z%{I$)UPhV>Egv;r0 z)59j0NRe7r5MKlOx^`DZMVBoy_tXeaA_OC=>k27>JlOZi{&)aqM67B|Bsog`f8ej; zm{vY*a+NKFwR_aes=ltUfHiDK;$|%%06PzFfHR~FrC`qpdzgY802y&?GU&h;1N{0&ViG!r28%wra;J&B6jmq(M}_jiwp>ezroJs*#OcEnvqEIt>0_E zM~g-;{cpMdQ~wmJYuhWKl;tW*#ppG&K9!q5OE5Q>?7hk9Q!Gohki|gvyns}~^JrGY zjzBGW&+aK^x18(=oC}m@j{u2hu(=oDM8}7>&>4!z3!zNST=kK9Bu&;uqEmc|cHy9F zp%Q`DL}sq9_xj3yREZ`Mi}>V3Ae;+C!;i&1;`e`iQ<~RG`4lGW6qh=D{DPF6sPybO zUW2rcfa~B}3cB#B+V=F;Ci(dZcZjQoAD!S*1JaaBWNNJru08~{Huf3VRW~A=GrBG={pf`=aRlWv5u*hBEP}@JUdR$}J)h1lvq57JUHNdya7PZt<7m zm*Tx)5cFlbJ5u!)E4w(^EFrIC1E-b2G$J(aBjuNEU!A{x@Uvv-*DGK1-gd^aq~F3} z5yJ(r9k=B|;QZRR7DC@nl&%-%%z69}?v$2epNr!0V zp=o;SwHiAM2-txrmwA>bv&1k>>9fthWr|g9)wXzY2nlU?j%aIQDM*R)q0#>ol=!mRc~m0F`UfEZ zw&+7w>%FJS)6uB}D#ZA=4_)E^OBF$RGy<4lqQ50wHb4>zGx;NciDq#IO#C?kCe;n_ zVdQQ#KykO1+{aB=K!r@om{&>Q@+w&mxtulktXB1mh&kP^16CP&0OdjyN)yUp%l7-s zMTiE0AJ`FCaeNk=VY&G>(w!lKFoTbJNFUG+ROxuN5id<(4y?@iySC1w*gwDh~R<_@p=#oNV2ptpyQ-WOmJ z$hD75Kp6#8!TMAsuaS|0i{cfq(*qAd`F>9(Xg1=81J??NgF0t+FA9>O6U%O z6!}dx&xtbXv;3Qo!Gi4Nd9B=FB)6~)}z@G%uyo?5&sEvtxrK{=* zqln4F@Ae(UZr)mhL;}w8PGtd`U#D~6q&U+l=YC!lezTqTrRQmHUTui zbFQh911;BoFv6sE`6G}I=flyci_E0jGmXs-jJB9Hs|J7qu#t5%IF-YfD^%AmBPdTb zREC5a;Ez0-At`{#$7-|Ru_UKl1~aEC)Dw{Q0jw@v9Qh5UysU&{OEk3h>GxAULN{nRJ=65hR>7; z>i>R>eUYsE{v|wF)Sn#*WEFvfP-iIHg!BaAp;ov46rd6#4beoLP_r~NazKe8S_4K9se*v<2NSGy1MO^p4X z)<;vXC=U@*cBK5!o%B_yuedkY7n-trm9f@3rtm?CnRh)sdONQV@l}me;?9nZ)9~i@ z=+mPTM~`2j9vf^`-ad;02c5}U{oKjh4R1QEBUKHlA^eHDw-#pqTL7$#B3UrlCQ8%w zXOf~4NY$(f)P(jLOq8e#eDKH8?B|<9ojR=`9O%ger>lXbS6no8gT%i^HQ`XZm#J(5 z9Fz*ci-l)+YF-;b4Nr=Ti->^D zn!n8#XcpkA?loh6uCrgCIW`-5L&AkAT4l0c`II&DkfTwk5l>_&7ySIC_3AfqIef_a}b+qNv z_nzY3JqjLbiZ-Cd zsWp@}y$t_N-2;5gJ9w#dV|>|=u)hRh){O}5W-B$*8LsJdSWnuVNY6xjr2wf^9ghH9 zw*qUhdJuuejux{fXh>%X$eb~O{`R;i(w%L1OJBG4s`|yk!L}eF{}~e%T{(#vrArf zd|7NnKADY_6uAaNgSm%mHK&|{-Y`f%bYBKwn-z*G;Sv!)%N2;YDl2gHVfT~|1>aWQ z-x*fXa9$A6tRoy^$s*h5!SC-0W*XlYMma8+QnwUyK;bJ{0iWK@vW`e>rsNcT&n49^ z)e`t-F8}QW2*4o~hq0RmGI7A9Zid{Ql;vek#9e6KsIgT=_4G)*+0P&YB_k4Ud2XHh zONOU|`qv*&>WTNmv-8d#W5E$XxtWAWd-S~3I(t-L8*VHB5@8LE__ zCf>1`SZ0GOkwhY)hzEZ645#*pmQ-U&7e+edGxcvO%uPcQi>2DsDDf*Y_b{V~yrf43 zs2w-Y1V|xwiC*D_yd!0sP^ktE4-i`NuaaLJSdin=O8g85JIvfecjab`$4dc?9Hlp? zW5%+IM)s@Dj?j&tCnntkyY<(4?`u#8SGPGMEDApUZE&nGOw??J4lC$+kXNr=;>P4> zI9Z9xK`B+N1T(}0=-?H~i0+_sN zv%xCxN=sXBP0v2ZVo{+fy$fxZyq>01T(oC&XB-xL1OPND%*$t;svNrWTF4li7+hCi zqpyhun~MnI+(~~Zh<5rVJ!61hQpgdP-~yY|MpR1^20I9e$qvpnAbJ3W2uHK0`6t*_ za}eezqlpLs&Bcy zDAxw)!_ibCBUYP(1CtTS=lO0M+0s1NJAf6v-wJp_}qQmgWAZy{X`eqEL5K%Czbr=gPnK9X5Bo_uAT zr&VG|60Lt6g@%kJ9SoSNc;*>F3d)ZwFcxqV2Iud3DZVT}q&OG9=e!SJ080wBV#2>J z!}9={>C+ZPj`-MlHK<#<>5q((bx1;z@eiD%VnWTrL91JvJDb!Rda=Ix$8Klu?f3CR^tPt7@?#=>>+g~q1?JA{9b)|E}!CFAUfPGDvvnPR{~t?YKl2E)$64`>;zDa6tte?#5Fq))L6$Tqjz66E@$ z4CP1Z6;Gvy6(a^txTUM@nTn@W=c+Wa65cjBAXlX?DJMrQ%J_3bILH_e(1Rpd8htSt zjr*o4vyh!R+-mR8l(uII?^1Th@;TFK;uyN@y|Fn^gK8+mu3?r*5!4dFZy7D_q{wv+ z&&ras3CqTS$aYdZLbwzvC!?toGXgT_d!?1Y=oECgHFLwf1xZ~}B2>WaIYU^wsVo(Z z7UY6p5XXt2dwFej?C5gMyY#JsHaa-z6`Q5ra*2bhVLwx)npfZ3et+NPG? 
zZln8kF~Jkvw{c{G39`gOlPDKcVhPor8{(gGhLSNiFWRqRHnKC6WlK(rMcBml+rEL$ z6?uhAcc`A4bg;DOyq*PlJ)6?xdqvBpfBDjtvrlDaNaVMR5kDw(E3T7>n(7NxRU6K6fgwlY3-ykE>J!V$9KetA3iCT*OaaSm_$Kb=T>rx>*fRZ}oEfJGT^ z7>0eTkULgphnomkHbg+e66H$$!4E;DnU`40GHdjDTh8U&UQ^_e=&o%r8k7LkI#CUu zjIjgcQ!E|nH4IdS$x|%I*mE0Vk1+P+F{!bT;Lci1Y&Mn!^roE^P>?-QyDkOL&T8f* z=_L!C@VCeV6|nF27M2%U^&rbovUXlnWXPp2X%ZD(A;Gz#Xq&o1zdMGycu8`!I{iUq zg%A@`Ln;uW3Hct7!z=iim|U@Sb$)l6m^WZWdxUu?CRbXw>5=a2y#9gMHm3~32htBq zYnjrzigj28Ow_DhEhDHw4n=zhdSrtyJ;3y0oTb}rMPU^k1*#+rft>N4H85KYD8nCc zB0Fkq#ThnrEC7*{NHw-1RFh>`1}s7S8Y&d1^QC2rsTE&r2+K(hn!&Nzh8ZJdL!=zR zv-#?>q=@casi$Q?a2<(3X`pK>&Api;9g(-PROc$QNdltP&4PIt3p8TC$ojG*C(Im9 zHX7Gftd%RNo%mNkd+A)Ew#wk33ZlIz!6 zfr`E5q$m+Oqt4O=yn&RWI2W9{H9clnU!zQc_@I4FK#;3N3`-I<@_U0VrtDFZTO>;8 zuC<6YML$!HEax#ZgE7d$E1F*Gl(~RpDT-gpVPJb2`B}VA588wv6ebZh&qU*Su2_zl zA!ACW`+Vu26s8*psuQh-FI5R&=}NLT4En9MlY|Nga57R1qu6wJrBp^XM{15t3=W$^ z8V$Ng)c$r)BuM2eSX#Y6P?9N(f!5S2k}2{vn?x5~8DdcHI{aApq75v2@R(8y;$yw2 z4knNJ?iNWVa&|Y2x{&1Nm`7;tXhFVBx#J*-EqP8_WdWcH?DPWZAUp3EwQ>`y5LqD0 z)bC@v8W_IeW+GZbiDE4C4#859nFxzSZr`gzE(`506cjpNkcwGssMKWE2Vy0ai{ULj zZ4k_ENBLSLdeN^d^+Z6vw=BZqF5(>wMQeyPAe|kR9F_P;CyO1OiJe)|X%=k(B@HbJ z=_x}Bm#$rotvD77C1;C@*LQIW?R8pG`2b|&`UctE@MOsA(2`SgBBvquE|blUY0Z*e zN4lsd-NmNkP@rn!L@rWtKhzWZO2bxZmyVX@+P@sOt=({mMdWo@=z&xyXOgEjY-)-! zKBB3-A6XOWxsu0m+2J3`6<(fK@91t7qp8KPQ z+becHefk))rw&{t$PUAXO{mCv-|DG)30&pOD_+j`DH8$}vG%V`tCb@)+}Y0(;Py7a8n=iQfSOq zXeC)b#JDFXlK&IGST$gy!yNU-O$TnI*v7!N4r#i@Vez3hgxo?TDa772%2mK)|Q zEngDDB5aYCOTD0TfRm7Dnd6m#Wlw7(jBLqAxonfAFou-w7Hm=ehhb%JSYN)cjJ)n>`%Y%UGc!){aMj2d!c5DUe=MK|pXHCeZ1g&UMjE>;|*q{P=xwF0AujOo*k z6Kk-1mY`jtn+KKE;_HQG^Ta|DrxI$3%5J}O4#~(EhYwmq1+&7^Ma1y1j6^$Z8U4Z- z$XJAxAF}tu(|lU*un?$3t4praf51(0GUk>{)``NTkj&}LByyH)l=|T2vE@iGcIa#g z&YYMj5=w=6T_>4KXxTn{xBLRDt`YEJ3e^N}AW5O7c>S@sRpCOW*ig39+j1T!HzDy% zB7-ilo*(0e>Q-}x7zpFrtx|bWxe`J%yQ)0#hN1L#AEEceNNjJ@ zCjuzK;$@1lonI)~tyq;2{R`NyXerfODH)j%yuXUM&T6}}LyeZCtEe(s2Bb*UpvS%& z1)&?dw$WfM4?7~J!${Q&i>!Er-S6j^jmI_hillAHCgqbjt_|kg7*5vu3q{IwtNR?I z?{Z(+7ZGLXgqudmDLYup6_M?GweW%l!dGru+uSxFI`(wQqvcE=Z_5_T;K)&VAf>wz zAP~DyysfFdOQUnpTetE;!GCtafqzA;lsT|EIH->`QWZ?Ch8c2~wcorMP^l3WN0x<=KSAXvcm#JKcKCyU`o{>72iBVh;Yp}g@h2D87C&rLq+u@2Q zw=*ZwhDgZk0M8;_;DoRAz!|&Cy2u$_6x{YKiIFMa31bk@Y6 z$aXwVlW&8H+`YJYp>t(@;unSyu(4i#X@H2*C25O*LEo1$jsgvh$SGHK~H5~V9CICOC&VJ%hsUk8@wvL z#iVWcKItY;)^R1DR4rmmmRYNLXXHFxrB1BUEHsEPdB~4E0yCRa8ey&EjL=@4EYu1c z*dDv8^dY8%9H^QbK|z(H$+flB<%#OL^3a5W0S3I*AtZuhFMPJ0&xlLiy7|^bWHY%U-<$ zeQqm1)+egsZ4p`EJfztzkGxnG!I7@)VjtGevdAMIZf2P?I0o{34J)uSZoR-tPU{Z3;`d1`qgi>;AX`)C30N-v}py%E&Klsy8W zBxm2D)m7|T_}}JRp+#;9);)pp-sH?VXpQL3A|!nwZCI59+en(=cBX7=BX^^StRjX5 zL}?^Yg0NR2>vcg)iPBN=n$`1+Y?T^mPcD*e~ZyT$EAq*E_Sz#da;9?3F8f=Jfke^@V!N&_W|aqo6T-E)Y@6|^6+x&u zxo>EPC>rnr_7%#9c2$)#AjXVUJm8BVENp*E%3~_hpdt)13n1fw!P7t*!HFy^Od6|@ zAjGng&8o~HO!+2E>vmh>S?;ajyE)pK0lY1*b$%$nAWH4;Br3)^J`NOU%7Wq+?&xTx%gvT+rV&o^H$S6k zNsD!=&@Uo+XBw=CNE{xmF?}1Q~RtQg*0R zF(be3&H1_V4)rGcWOv{{UxokSYvjnL1TzkI|6r7#!$!qD<{_O}QMHJ$Y z`(WTDPb=tx4vo^NnPdx!kS!P{4_BZf&cP2YicS#JQ{?br7z!W7aY~gjrC)(tp55{i z<3OvWMHFbFGkzK^8CAVfMyI2m>5X!z=P7P2-_EwWY4-8U*HzY(Hde2#Ye3Hc0!==P zb{g@h=pIYMo3d%^X)1T-X?p`*RD=l@mdk92va*#dC@rh5_SyY$u&nqIO91j<(XtJE zQ5hf%P?hEqd8#DFq?{cBmdG~F9{8CE#BwP_j*%;vSFZGPv)9yP24#*}u76@^q&Usc z2%ju4D58F6bc9(nh*!TeG@@G;x)&>fFl49$Dq5ckeLpEME26)zlvvbAXI?p9$5$BMNUJDFkFKg*+TH0ZId zNRq6u5&7cJCJ`&A7BYrxe~Ac5N&WN1)+|gB;X>=mLEq!nH+dJmRB03Z{?A^pOak8r zt_{pZf8Ui)3T*3LYARWUbT9K8En|ZGXKyJf6DLwY?@C#@-RY527-LLH=XHEI>xH}xP^ek5i_;#&sW3PElo%~nXN zTnDy9tg?T8%AAtEg0l4!qda^o={yCv1v+yltWA>cUUrn)-W--_r;Y^@5RUFv=8Xs~ 
zl+!V}Sw$~rt;s?<+sxG!mM4&&$xR^Z8&vpGo5)i8P9#MSlW9U^@AYz+qT)aY7CK(PQ|4^z1F;R3MOu+dXC${{VMHW9_|-CdMpIp@ zFwospMIFk6Kn*g*$yRF=rKq@$-ag#3?6bEX&C1o-)MuqetN{>=pNcdL@(gu>9-tTC z@e?cQ>xIu$RBT%&)MgpRh=@I_6yCcxB;v%whHJQV&WLaXGyK5nR)#&Zwp@9_E<$od z*S(O^aaLf3YhZh2XaQF5nQ~vm^_x)o$6nU$`7Hb#-e;#uQAd!k&-cNur`=~`0g-Kc z>YY6)ax9)N!#c%c&_j@Ml7Jq9bA{Nb+NkVm6`LR&`Ek*XlVdgOu_azrT~}G5j2bvD zRf+Po>+4{-1l*h2%37E(NY!EzWJ9U4Vz^5hT3;nq0!9rtQVXM|rnI3kB7kB^YcGS6 zgY0UOBXHSgxJz&nx-@aQG}5Zr0MXSequp56z9f8;`LT$^2-`z3C?sM-YPBqo=R{3s=>i39XUnpa0r#4>?hJV)PIN+hvP@vodxGbQU>8H5!%lgdE26jcWU#LbZO1l?Y9Uww6JyA{{#I*|BN9u! z(cG#Cm(n6kYtodusj{uw+AUiR2uLZ&*S4oGInc_<@5qerx}PZ10K#Lgr3QLf?2y@O z;N&4}E&VB#{4^^pU|E8;$`V`cm7ltMW_#Tyq!e_H@^uRvCM?#9*laW_V;f-y*!t*p zkgGqAQHGh_8CouqoS7&3SI%C5RTH6-ZO&hK#R@&^; z<`y~QT);LyI$`2h8`yStZz*h-)k~@A+2}}~V||rZAXVvZ-j1-{oDl>0X_#cu8HHm` zJ*{-#y#!e!Oz9`0w0LYbJ8%}NR_#U`gnWUcrj&C@i3VjZkn13In=66zvqY;m6<-`t zoSG%gj%10plSr7Ps86{>4AM*Y5yp$~T}n#UBF9-GqH1C(70o#(jwL#f4OY-cXLAs=Jm9a3<-xgo2MT!JZt!R|9K>M42kS)N{#@eX8V>$^vO=@Qqn1t!ng<+Mx zBLcoyn!=OaY{3$C8c!;*$~UpI7S)AR)LzRL$m-xD0@j;6RT+=c`DJQp8xU4ie$0|< z?ZVw~B!shiA`VmzPN-UEf)CEHV(R<$1sQIAzfP7Sf1714|%P6f!) zp4rYh5f7k5XCI&#*)SZ*`e7=90|F>4qm;rkgqQPFRrFxgd{I zxHTntwIz96_#{_nm!ud7x4d10w5X6{@uH<^oKUMYm56Xc8L`Nb%i7`Iqe>ki^^24> zI?r+y*_W;w`}gYBzO_8dRz&0yG`B1yFtt|3Fn=E2+8LeBAaP6; zyclTnk=Rec9Jjp>XQnx>WnY}c;gIFLm}&CIfg4VP_Uyq#?C#2g^|O8T2) zN+PI8UxYgzTNKLWIC(r#(TdFT$dY)Ife4v1rMaz9Nl9rdlx|QIFjJxFT19tSdXDb? zm^1xWCPg!i?KJ~NX6G1loGdb-(U<6&5hY5Q{i1?BLbcG1LyfuCrgqPi9Y~h;8{K}( zzCe%moanmTPSC`r6|<2r;!?&^Nl6oys(1fJvhd4TBQkP126kU((7~vle4G^CyJuoJ zlG!ddRh)!d#tlOuMLA5Zse0D!V8b@{aiQN=@Ag$_3`^Z)i(Dkj1w+F0iGW2C%@FBt z3s@gHdRtN8rp}!O!v9puGUQUN1YG6z$(2}oLW5wH&z+sk>2n09{rWZJypeaJ(2DJv zK6)WR{d4b>C={=`f|yO4Y;n95w0yc7Cxj!E1MlPjZ zVWgu7e~<-p1-n+33|c{{AsC}vP+h-X%q3aP*T_~V6Qi*jrK(Ma?5S2`#T=L-C&GoAm0E|1#tEA*-5g4tiqEq(mWkVmM0r-A%Js-j!yRETbM;!OCuMuNpTsJY z!iHQ=7S-&4HHY}VL#Mb{$AR=Q_+NMe3!^&Rmqt%>vZpNa#YaT zz1b#dn`CQc+um@2rMaUyT_X&m{-#e1!vcjsYwB0h)4pJk)LGBg7GaRqA5-42jDrU? z`)rxHgR(m7&>~GG{VQAg@rw$V$yMe;Ax1K~-xS^xR*!HXo8ZnG?hE7RBX5U(UnXh7 z2BfQ)3#2S2pYbNJ*vvrHLl@<1VoiB_!Y@>gwV8TH7J|oYq-9OT`i-nBR^%mWA6a5$ zU`8(;?_h7iaXHpuL|g!~HmW9RrON8p)|`MEUi}&b+0+;^49)~521E``Zq7=qt2}mn z^}0&S25nqZ-Jlef&XaN|EcNX(hnrYJBz<%xhs((ZppUt`S8vbNEdH9BEv+4T!Hu(E z5{QN=q2dlo0=3Qkg5vhoUqUaFZ;@LnxI9 z2gbMTpltIUi-s@ExjWA~M$@2-V)fM#YW(=HIn&>}&>v4(J^}NpiYn=+ki%@N9Ro{x zW^A|i7JB+^Wt>!Fk&er10LP}UAL@WvI`wX36>h?Oi-;g}LXY*zRZCmAw{Jyr_m257 zHK(*?z*H?z-=Enc)};o|ix%&gumefdHWN3L@+Ygw#B7F%dP%LJD0ElnZR7aCB$ z^I3@)NNsOSMM!PCA+}ssp|@Swwk0}0gY}PO4J=TGMWBC_!c~(U60goG9(8mm9<@8q z!2n5Zy16YfJ8?dfBB{k9q?8W)Tzgb5N&^fa&r}fksQJ>ZEdzs6+uD0MA996B+u8?U zN$Xh=93L4uz0n-Pc?pw6WJ;{TShK1e&1{QWUzMOW=Hl56j9k4UdI6^H5p8tBTQSQl zs9cMQmdLpdG7Z8rf@Kd6V#rNYR6^*Bj%Z{eR<}5~Aycw5Hj125Rb%4`-%T`0r$8iQY&Wb2f&D1YTZC* z7JAjwne`FbC`;B?sDxA|^3X8|3S5&pGQEP?CgaGGUSFHvHDo?Wf$xE- zYE6|vC)*W~j$sCILOh*KJLvc+e1>!Vw8;!Qh@w1E%^i01su}! 
zW9?0p2dq7aayAX^+*C9}5B2E8fk+aIKog_%Ju%6mYo%{79i{5f?|)6PNiFR=t&#WULw`V z@;Zp|E`+9AtzFQJZ_P|-Wh&F<3@w#VySl1lDmXC@u6`L*5%*B4VNx5)T6y8zF-D&aWI(}Q7tO(-HGuw#w@%Xj(MXc zMslTy)A~LWNf-T1YFJsW`WsqEHrlQJnCjyzcOXqKr?2{B8MotCA?<9(fJoOP}R>V zQkd6b=uf0BF=`+)id@1d8-!?sJG#dn|H^{s<${Ip?Py%c1%z+J?4p8&w48W3c7eBk>^fsI7@rN%y-tjSGm> z_BOk%luLT;uCGv(%9ETNb9Nx7NS;{5^tJNFU?XM|$f@B90O>-R^^fctsl+Eoh-=%* zxCgH_w7e@1a$dy`nr%c?lg3c@1CQ7&Uozln5VW@Rs^ljS{upiWA9Abz21 zWZ1;A?ZFzpTx=}DRO$4r@(lR?PM}+FKne2DDYU1_*At0g@^SWIxl1J_>dLtX12p8Z zcv%M9f0CTRxSk8$!q3}jX;Uc*0gbRa_OR88XUJKDaI2-OI8`9oT-f~@4-S%0D0iY* zrJHp{n=iGLp6*PLM)$kAd!Zhg&5%%4mRzMVYOP^bE5=aTSjoZlW(Y6rMsYlgWP?PZ zjuDAGsjEsl70eCat@UOV_PcCIYPnWrU2?PYF~FdsgZoxsNGks)G{(fKUN$8{NNdre z64^riFfzOtD=c3wd-ta74WhC;alYXsPgcRM{pvA(S}s-Q9d_c}n~uSqqh4s05(Xn_=tN zLfh#nzuleKLo#P|)$25!VGe_JAZ_Bz@}Lb+QC^gGz?!H{SdIc4lkQfdRZ63xg{Ubj zld;V7BjcGPW%eU!yEN^gBH|${PCzxkxyyqMHsVlB8k!{{1c29rpVL=3b-9z)PAu#m zWZzZ?86VxdwYv+MB~IwTisTNq2PdUP++eXQY5Z9Pz) zLg)^usvUCk_K@e_@*S<+nMA&f9I<|$)}{Ex%uVdC z39_r}>bCvQv@d0E7F(eA8teIburUjik3>sXu00kbbhdjjrOBtYn+$zkPSHiEgt#)|M3b$M7J-O5fW zRymyrnOS7c=SLtI?2Vo@^=w71neEkD)lmv8y2^Ru!>UkVi&ecxU|9w5m^wg>m%|hmKzeBl=~zb?F_caf-07ckX6(&)N*eI%N@#i z;c{YUZqC_L^%o6`2w;Z~h~E~=lwye?_9sf{S~-sF%f!k{gEa)B24TuhNQeMg>QyX=dCJ)_CktYT zncWDxbtpqS&%&?9q0_*i6R|f)m!?%K?R(u!K|5unyxj zb*teV9iNB<%a13>1tYOl9Dyi_y$az)#DBGoh-dqt+rn7DEV0L<_N)EoDX;e`EtEeOd6b#ub5UZ_H;4Row5;?bYq{7CT&d?- zx}UWslHZJMAEQ|ac57lqWt%(`AgsK!cS!y_9)UG=XU}f~E`%ozENuWY9ur_TKi?|I zrTa-yYf%LnWkg}okA0$Lz0d-dW$B5*D7*c>MB70BS+Pk_7(J0a7I!Hx*!{>#Q-|P0I#u@q1?QDaJvpWqA*=LLW z2Wbs^aycVrd4cWy&?z&j``n`d8V@_&lqCvC<=YBY+d;g+9`Y zWwhT(9}*%;WAwSc{0w3e_E*_nsgJ-A;>?2&-OSqiVfZ8*{eJsCZmyy zH8f+d)$!I_ttnSwYN=0oj_OlM6*-VuV4o5$mwP_E`4)Z`E3WdwEcZ+8+gSMwr)<); zXwd1Xa$D(39Kt(39L)^L6eYv`9Sp7AoFd4Ys@8m(+!jh!SPE#(H=Ov(&VZlW42A6aBjbLZVbT!}*mh}}Zf8JG^NK-l=jFp!e>!dUbcPTG{!);1f%nyg~;V>3_c zvX}E%m4yU{W6TgNJIO?1_U!HK_7H39Xm9G9J=4JQsn*L@0}b<#ryvXpWhT<{$~4v! 
zrbbzKBf<$v6MnG08Ze~oi&xyGW+O(%7@?iKObo-!f0e3DN zuZFoD8F5e7r`%TyFHV`SPSG-w ztzdgU+YrfYo1#kkY`Gr3h854kfl3wC*KWoR#G4n>9hw^Irtrcn9HWqo9#VB_U4Xsw2XN(_t^xl9QPBrIRw{VZDKsJG2@lB4u`jSFkhk+uxY*Um0l)_TEKV zr(|(c(MUa2qE80Ka>H!`<0OrJ1*eLow^@a3xY|ZcAX}jfIH?QlSvPjp;9JED5jV1X zW4rpGtpcr$?5N$bv*AE0N))lxcrw#0+x!-;AmnHEEw4Zg2)e7YooiZ9(iRKLBYm_% z)iJ_{ckS|*NYgW-#kh>azkiN!EmA5k;)ElpeG{w_bQV)zJIu3T_3hJP_0w8mB-Pg< z&2B{${i36!iwHHeiAaLCmNaT2np(sNbI&JgxRjT~Gho%I;a?^$YtHD}nnYac9C z+AEsbtzTYrp(d;DM$j8uTRTN`r=qpfS`W@p+6G934AJnL951k5ICIe)?ngqu$bOq0 z5N{~&?%BDvOBpLEt3ujLN?BrFFGE8hQYkDtr^fA!P)6VCGOQCx44O#fx}wRJr6|0~ zV#k{toLZM;lhF&en~&0l0O{Zy_Gam5yyMvhsx7jn*{iHL2Gpqo9%qmmeKP0^o6*7F zEM2C1b8QpXkSJT!YORzz)gkFYo`vd|qkUpxc};(RPv81=HRS8EoCke_}sHzct zsDL*dt!`Bf0hhUwu#95AsXzqA^q%2SEEjr6ve%>IC zWh93(DYc9G$0HA_a2SM^uzNX$HSKLiIgwLMVqoCeNJIg+?+;8UxK0?a7FW zMs$Eg~U@W!FE|V(3So2`N~PScL{bArYY~xkuL*?YIPb2?S=8 zV2S1^TUEa}qG?b-vZ6v>#p(z8wlrf|fE8uqhg%8EKF{vSQkdiRtoHXn4PTy-s>x<# zgT7CONVIn2gDa&X>C!!XMY25VDw6eSeQh}+7Sg4y9%ssHtN6re$+afhjo@V!rtpj@j^X%|(*CS%u)N)V1>RIDle7>FeLX?-*1RvYN_w5$&_|L@mL8Jw4Cm3_ELXnFI6*{TGxq z=yZS%-XZGD9Ml;V>4DAMU~4GNj34QgOoqEFo0#2sh<&wO@nsLo)TDQ1Q~73_1{YaR zRLT=Q9Rq#(k6E^J2ln-4{q9I-3!lukxxF=iW8s2jo93O=95u=<3S_d+4%molo13Hc z=58jh41eQGWFDk5%k1vNs37)X(MpT3+97=Ad9|q)b0X2o;)5vgOia-sh|Lz};0k4| zT2U6uYK5hsQW$VkS>)v~(%u@*AQVccQ`4EdVLDi@u6`N(a7vtIGO^Ih#HiXr%xR^<`99L*}9BI=)gYW_ekxU%1r+pdz`EpSIWTE*my468t{k3PGux+b{# zynY>0;Z<@2x?#|^ghG6f0!S44Np(xBy>Q)X&oQsVhQ%nHnECf&xG3RS8%DDO6F!Ip z(YGOH+qr>6Hnpan4Mog%2fxKpelYfKsVYW%_+1gUhLOLs&&w~Jy5YCi-7@CK%Qa%v zC95ue{tF*_>AqW6YS{ew9_v0`?!sqTSN@-!kpI8GX1wXSZoGNp+VSQ|a1Hnxc!s#X zkB&DJKQ`XH@bU5HbNKU#uUn+6UA&M(GGrucVUkn^k&A*zR(%*DnNpa!HD!D7RIzGG ztZ8Sef~&nVe5hE=1BeCR!V(aR_B=0)6kU`mh`)Ti_<8#cPaOdMn_E7lFT=9!5;b(5njz5u>4gVr78}__qZ<7s=e_?Nv z4HLe&x5ruT27EdN`?-+9$OCI`&;uyw`%pTw<^_uu@veaxe`yg7{f{66L+a4TrL zbszIL&~V#6=8hro0BMuHypP#%|6ttj;=6DU|H?k5Z0baFEI0}51a}`X(L4Zt3|;`u z3n!Y>z%FnVIJ0b`c^|k8+yZ`f(nM2ma)|RbO*9L@qu|JniRSXo5YG|*09a03*E=Vg z`*wvm2>-prb>oh{V4^wk-63uvd^{LKT=Xv?Qdmv`MARgp;iAb1?8xQBRu`UqOOqNS zXx7bz=5y)2-Abw3%&5^oJtC zds#1wrB6cZu#N+WIMjwa1sIG7dRNYd^Uqe%M!=glGP2I9YVsqDk zW-LP3vt=vBD7l_%XJt{PR%LPYl|o!r5C2R&Q7g7C5+lhTt$942DDC3vRqQh)dx!o7 zHO+;U9#r%#mlCH<-dZVfthaQ^`=2Yb$%WHy6(unFXYGr`568C(O71Rnykz(VjI za0plrrhwA_Cz%r5 zzY@NJw7u}3kG~23h1ZW9_mg*|SaHK7la4XOrQ`TdO)@h+J;{6x_XY4C(0%tL^BgF< zXOg)YTn_dGhl2-*-+{aD*HiI%;x}c8PtP}!@Wqt%IOThEe1rbu@Eoqk8{eH|JZ5}r zl6mssN#;9`SY-42^tZ!(iTD5hFO$q|V2{6ASNt!zIPvdG{7Y4n0~D^FY%0Ls5lDEX zcn5g}!{x6d&tYdxHV=a)a5ebOc_YWz^Cz2;VkddiaTIas*zdy0=G)*{a3M%sG}$Z$ zm7p4|0LOy$pcZTbrQihc?n@?{Q^5e(2~Gp&fb+o_V6WRIo3DUMa2D|6bq~huSgUBtpE1Uy$iN=M$d^HxZu+o`bxkqFt}k zsHA->7UsSs{G=$?{5LU?`u{iaRo?&a!jZ@1nm3E5mgO3c^(DE+;~gt=O+F|Gr-2K= zhe7l`Qx@f#;SkO5{Wnq0VMpbftALLiiTkr7bB$oqs$6qEcm>FFrUFO&gow+G^>07yGBxrkou9dQ5PZ0XVXJx|Luf@oYe|DOHi1<~}X z9^M6_araT4AR3p=U$u?rLALa0`~?Hdfgl=}$$vpF`9P*LiMx;AAjp>fcEn%VK_AM1 z*{?D0{pL;JuRIgMA-~TxL*dN74R#IjZ*ZqaOX*ikZ(y@}?@bAEN zf8>2x@cJL9Zx*b4p7&?Llf-Ak$&{B3Xa0#c%Yqfp<(iS=kbm)QK=M~_FJAvU?E}vI z-SD{k#attJojU!y@Z?Lhvw*f44*#G%1lRrg--na=?Si{s$c!`pNxOhUNY8|}SDC+G z$u<9cJ=gRwE?4qf1iyGW*DNOPZpK(P970=qT>RTyGg8PJ{)GwqnSbsBChceaxDwvJ z-eSjw{mlO+T>ZbSKL4BACR1JfcZ|A{FB)Z7`MZcLbON4grS&`KFoT zzxBKQ&0nA2-&Fize>3;@`c5-#Jp>0lJ;(X^=lM$*iQ z{!BF2qthq~#7#}_sr?%J}Yyt@-6@uaX_%$hf*m~kV- zTg{v4`Al)y>ho6fW_mtTT=f0JrAPDbu3yR?36kU!>~8sQHUEvgQ}D;gyU#FF%?}An zz3&902%m{7koQUaTYc|;>iJ`->zl!|uTL>=2L3xnUHPriu=mT@l)rr+ooep<`Bd}T zBU8;)_`m-%8-9?m#1Dr8@~?O-HQ#og6}%?#xEFwHojZfLXTT=P-X9#W>Hza2upZ1U zJHT96YF+Vv8~^1lUc%WhDe_Eh#4q{ZARKEt!2B3AH(S5B5-xk!0jBc21I+zE{NhUZ 
z$d4UhPKn^$j~-yIzAiH)pQJrW9zTaJWYWbrmecfP8gvH?Q@f@akIzT^66eF;@1JI( z_)YgTQw`>QW|}z!ECrQdoeL*$I~;aK{66ji;w}d_-!#oU3hn|AfHS}kz#ZTl;0Evr zcmm`-FwJy>d+(oS+MFwX!N#<(kN57^$&>uvH1izT1N_Cg;unnmzIA;#-TnK6L#L$gNm0dYO@T)YpbyD`!iJ~GW54bA}K7gxfuUraM6 zJxW;-Nch>uZG6=4g$2a88dH>IEs zh~LXMk76zv4Wegd{`_p5Of-{?83jgzF~GzKgVA6N*aM6Odjk18Wz}?Z!{X`Y!X?%f zeE>44DPS!qT`}FhqOy@|3X<`BW@ab&Z7>q z@PC@${{h^qN&gY~K13PY@n1}y7F%Y=+Xm27ysNc!p?2S|LyVv&7W|O$2|vke#L>>Uk*O_AX5UWz&da;I2CLI=Yscx zkANG&?ci?k9q>c&7Slk3VK}l9Op{fP>wsoDB8?5Ep-D?h!AXLv3F{F=$FraP1tscuarG8U3hDn=*KcE{4Te_#ocrec04h=@xG z5tBh-!ZH%dMdH$0#p+EhzmU|%-kqtDQWlgJHh|JisIL{bs)_J^fJ zFD5$tSy(0w>RTi=N=Zuno$_ch4g8Iz@!D9R!Oj0R}d9FYX8lE}@_!7JkO zi&V}$rOg(vK@&CE8)hr4X>Zo5b_gZ-%5C()+Zl)8W^g3H#v*P7?#Z}IaOdIH zfywv}!o3dc4W0yZB_H?${xP`6g00|8!d>9}I~o69pr1#u4gU&I1fC;(H131AGjYF+ zI|=v0xSs>^+~0A}#(f<3Q@CqD2e=kAgKvD1aS0v*v%YkYnGWhQ;IE{=0UiYtNI!Me zJIu*o9jFFNfbai{s{Ln8?^wl>5`K}ef#Z@eXc%ZgnZ`j;q2$j#5VW{KBSDMHI20D^ z>s81;W0f-vhK2G>hS9J@;=|vVG90vCDQPL=!Tzl51H!hFRx>OlGGMfMhaM7XU!X6; zn9v_Kcu>T(&C^E4QrieREOhOXe~WKF*!=L0gU!=lI9UDSN_d|?9c=cF;Aejb!(;y# z`lEUB$&X;Xs*o_yRW%|_zF+MXi-w3-sbC+6&3Sj}@4`w67A;ka^S*~JuINrz$P9Y# zLdcz?Rnedk?QVvAH|={JEV zw;gImf;Ut4+wsip8GhsG#C4^`znT_Kugl&$Qrl~e3m@p*X7~IBxJ|^J3bujsz{GbR zYQD4U?ZMNOeV;3T0q-aQKOy`)I1K->U^S5Uf10>|oqj0#aW*Xeks#^v-+{Xk+~wXE zckbKu+oI)u)Yai@xc|xb`Z|2Zz5jN8^SNgnYQ765pLwYH8sU%N&Lv&^xx{}Rzl4uC z>rhh$c9H&9;$I~0Jn#w71~!rQS>o5=ufjc`8Y_6BXO?SPVu6J!I z|GEFZzVb%Ds1`<RFT;D;tcUs9orX@*zxVz59 zYL_ZnM%#Vz6En@!PtG(GfVhHPAGc^GEf+{w{E{Z&TIcuqr@80Em9!n;yAj+K@kism z|24{=LY`IN(g@Cr_@nXO|5?)a=h+#!Ya*B$@kisme+lW&|7@nY?9rKKGdLYIf(L#* z(+vD(ra2lkgC*c%uwnUO=1DMN#bN5d4Zomd^+j5x=k*o{j6Jrzl;`L}2I zJ&UkvBKHi*PB^WkMlqF!BQDbk^WouEw7D14_z z7i(^!ky6l7Z{|7dBk^i$6)-j5YWO}WG0TqbgDy^qT9O{IObP3hJn=$%9G)6kq-H+3 z{311ap$?A>RjkNR#rQ(nBvZV6%+^8mC%c%L8Wg`wulr7|M`3l_;7Tk!ZVPsm2FDjx z=xwN}B`m_Cp1%_$HKo`Nn!)81yYDb)m@Re*8Af1{bn-z(Efv9tl(#OcZb^@dF9}A& z;Iu_j0waa%eNsDYyttH0KYKAA+L@ZLtfr|snflVW40^e=*jB}i9yiX69y?}?c=+3E z&pm=aH(@UG6J}%BJr~vYV|v<-8%aiAJQ4QdO4j)B@9@JzzxcfU0~rk&%p(Np4RSNF z*sV-h`w0(UakzOr1D?nb_at$@N{jzJ;Xi{H9A3p8`@zGF#0y@Ggij^D7n}~x1@8lw z0g3;JbH#sJ#4lmNC%|Vj#QXG-xVQ8C(Vsiq+yZU{SAc85mx088!@1)BdBiVa!B4;g z8RC8VNZdZ2|Lo@vH{S+NgG<0GVCqeWn>eTeo55M2pLl`f5%)vH-2z?%-vLj7XN9ua6Y&cTn%mnw}HFBcfgOq6W}@U zH}Em zosWuS5V|%R7P>bY9-kH#z0+vA=%bDz!WTIqExb=!Sgb!q^G_lyQP$Dp{FLy%HX%sQ z^%+h-%!S@r0KZI5GG3hd>-#g8S;#{66fP3ZYYo(yw~%Vd_8MM}CNt}1( z9TFB-;(WZs4F!_@aMNcbEM>@_)K&g`f0i=kFI&1z4gaU-OAkx?$e(|Y5Bq%H_33Zb z^$A*p_slZy z1($2M)9t_!4z;Pm<1MqVsIoV1J$63yj#Hl zs3&|D?s2#$gCytxJHUD1Qt$<^>l?F70d5o63I@Pg;Jx5-a4q;0xDEV|gvtMQRj zuK)>)|K0~|c+~f(!?Ux@E8wADTEDmw#)vw+RDY}`GqwF2-hOAB-fRgA3=`#p%zMJG z8QOkmdCl6o$_iE-m~Yf!*mc~mzcE+8Rn9%zx&N;K8e)je(4&{nHb-4C+dPl^6c~+P z!sDGQ{wRJ!o)^I@U@~b-o||nB1P6l{I1J1JbHIF10E)mOho!hjM#4V+Kkcrh4)WL2 zH^+QrV2-(B`y8{)x#AbxPnw4hr>9By1N}C?Pm8+pyiXT@dfNUw=a`qyn1lSr95egO zIp%}-CH(W#ttz~th&X3e6x zrU+O3;!6128|RvrPo8T=Z<=d<3y%1}TvG~)!Su`Lntxs~*IabvT=T0TP)A&iTQ(v_WHAH&_KzQK14t{k!wEgca{8wVbVmC5Jiw>yc5tKY^ zUQZ#U{FeA)t|{a0jRrQ}m2GX%pim5pzcSD4_tkkO_CFR&o$JHmRz=dEBEQ7-5w{cf zt1kb%yTURgy(29waqaF|@q64FDf{_|-=}ACZ^%%N2=s|5xH9Joz5$UPD|y{yT`T!TqUAmpl_ln@f0`%j?UDx@*Yue$v16 zjd|vA@K5~NaMCvir=3CEZQx1p$}rDA@%6#aXL{FnzcsRaxsiN_W~kdJ>XYy4)Qx*N znC$BCJDxonf7cLos&?fcc<;#Slj*nYZrr;fziFsvKTf?L<2OrNEdkosPM z+m8FE`{tSV-9OLV4jus|56&|i!F~_SGne4j;+_ru^j*fv_vV@F@!wDU82pce!@;=k z&ohq_pTz%h{BwY~9_u6it6UzRHi>uE14EhDQJ2@j6!IVSoq47g+zkEz@_7GY&&)H= zf-T@vU`PFY^G9$!xC;ccMrCRFYJX-IPK=8ry1u(zxMwNHknv(kv1ZIu6XKGvE}15I zuux)^&6b*5i{W}q<|uczh)uyK=bPpTF2`T;RBGIJBI(h*!}*u;oc!&4b-r2p+I(|A 
z?k~W#pq}t)U=MHw$je(`P6JmTwg7vs3(TQl6XEm0Rp3~{H{mYDmD{#ElKI^1Bv2SmH3Z<2Wby; zw8bRa$NP83T}xZ1oC5|xJJoDX(_PS6C_gKDq>#6cdI4)y_~z~AX}zXMNzAAxU!uYjAt$H51|d%+pt zG>`-*P@nU`S>ReAzvYAA)tGIjMP6SiIAz(Hr0?R=S zI0doOqhJOZ8s&w;;!SHWcR91fO(8gL3Y4V()e2bY7Z!42Rh z@C9%WxF7rk{0)q&Aj_VQ>*R8}x%M;3QBFD!@`OAH=}^ zU>tb0KHvNq{0ck@z6ZVz?gXC(*MLjGd0+?V0H=UPum&s#h2U^74NL$AyttNj0KWh~ z1owh3gU^GHfh)j!z%I}OTEX$42CM?bU^aLMm<;v+|K#`n5j+ik3cd^O;$6>yXXU;8 z9{DZ*1bg5w1e3uuFcZuL(gu}a9rzSTfX(1EuoIjEE(9M09|gC7d%zFCQ{V;g2AH&t z=fI)h2oML$Kq)u|tOp6u2i^^?2DgB3fuDk3gOv^W<{!APgK_vLgK1zUm=6|%4PYxc z8+;Ia7Tg1V23`Ok+Q3)?7lQM_SzssV1D#+qXaXC-daxE$gL1F}ECF#a59EPE!E~@6 z*az$dMuAt>Gd{r!;CJAc;0bUFXxehVH3UkCSrhrsmX$%Fe#@CWcBcn$1D{Jvl& zm9~p2o3}D!2!H?U$8eA3*KmC z&INx1&x7BBXTam&VekWRKlm2-8u${p4SWvV06q$?0+)k}!MnjZ;GJLqbc1%#0ycr4 z(+<)u4}tH3Z-TqP7s0LIv*3Dg9r!S~3|s^*0B3{KK|kmMTR<~732XrM;Al_*R)VFV z2+RkwKn#2myvld@Czy63-vW#W`+}1|49o)gU&<#$c4>W>0um+TY2m*0Ne}i244oZgU^E-!NANSvIpWDrPWXhi@PJTht+F4?E!j=B@|XZ-LXm9OsIE z5h#s>B`y2@{^q$csA#Oj*0Wu-NVL|q~tUt}eekhtec zvM#lJpt<5a%bmoPav?B`A&nDj4^rNs%;-~~9MaMs{K%iqEij9~mEaoi3GiufGq@9c z6_kKz+7ojM%u3=az|o){Yyc;LDb)q$l{Ezz;{ONh;Q0BcLadd%s z85EJ{gU496u!cH-c5nuG&xr-*7_bh!3v@YG`~%>GNLb=B;gm@H=7?YN?gCQAl#Kxj7yn{UfmzjFU^ano zg8EZwb6mk54!K(k%+7ZfnEW#fOcGoJegbmNDlnyB2Y4^|AM#v``xuyUc7eI?bjsUR zU_Jz%1bJr^m=16U==pSk`8&w@Oo2HR6oYE86`T#Cf6x7^z>Imhz#Ii8I#>Kr{F6M< z@O09ryjEb!MirVRuNRmraNmpDH>S|s1b#yJW!&!)w|b94^Ei16-=H6jEi^^=W1|br z<+vr}xtzG0_9`?_;hwN(q4^j7zlC^Tle#evg8zmNOB#eIf0$%fY^ z6q;-}Ya+jy-!1Ne`&+-b#ZwB+_rTSYtvi%o(*FN~Khw9e;jq06O*TZwgc&vZjj=?G z8Z~;9xO1$*sm7;K|T)g3#k zbhL!`9aT#DDDv$YxKhErsh@Y{MG{VT$MXC>qsELLsXL1Ndy#*nt}&|&e^h^DuB~s* zXjrAxmv-7scekHIfOgVY-sjx0yXo%kbE0pM!~0Z58P|tL>hA7ylaWN8vd3IZ2d2C9 zvq-#rBpD}@&15rGQc=+PM_>YEVz)0A;KEHq7Kj+=_ zd_L^+4|26Xh2z4HkIelnTfV{Zqi5LhmD$Z1ZR5SmypoiellEjDGS(fN=BCFRbGVtZ*Iar_ zp_z)Co_}}4>En5%;mPJabHKQ{drUT$nrXPQnob#?L%qh0nu^P3ADxq<>){?bd*_T+ zx~`~L6{|%TMU-o>Pq?-x$(8166F>;RqQx7UdP{pZ_YIL{SzY(KWJ|IgasTYO;>$9o z@p0W>YjTH7sli|2!bPTV;S#VEEOT_|qK1BI*VNk@(3^Gf`04FETpr)Dg&HhAuD2b@ z1<2Oa=ylGvUp%g4tqq!ZQEjpl(}4CN6E74J*9R6lOZ6^%|M`XHYhWk1)Zv+k-^XQg zqtD#NGY*%kFBY2dU@|xW90KNpJn-Rf7Mj)HE;OsYRcIF6TWIbh{OC^#&4~{e znos|<&@9AVk9#rh)sI3$JyvMCK`B`G^Fnhn?%ALgtgdZ@|7+GamNU=+Oq;w zgX2IYr~(b(7%&^$2<~;b?HTIxY@wM2)`D(u7dZIWg{J+th2{g`YS8C!L&WdnGP!3& z@_qBULi29Id%RI-21dtI@Q*#?rhHs#TzVcK{w?_~9B=b}bg#H6$Qjvl{`o(Uf75<( zGktp8d~9mmtU18?2XN1Ien}IIn`qr-)2!QmP~0p$*!q8ry9aq1_l=ufgvH?ljW(<@a1e_`TF;G0$8~_?3C7xPNA9I6Xa6_{GEQGw(V)ZgTVD=HsAnmW_La z@{%q+`s_WV%lqq&2;cV@e*gY0vu(QMnXxEtep1M9C{D$gcxpI3-G}cX-@A+A=GOAK zIiWIcKD0V+E~&7t_&x4+X+G@TSXG!u;$Gz0&m@M&8uI)*FsgOvat;_KJyRV%j?6szw4$Qw%l5NkEGRZ%lw|oos3D^ zVHJ7Ov9^0~Tza0N!q;|=?DxM=!N{z}^2r%SmnQPybs^E)U{P}~vv zecapap1d`D=UU!>D!7#K`koWtIu53fy_s^B@?01A2z4+c#LpRrl6MzlTHLja(`?U< zXKYGZQ--oXN7~lYZTeB~jGIaF{ZAMgSJF@32_)YqnD?^fuV${hm3d=6`J%Y3Cp8>R z8_NGV#)wb9FXH!cncPmsR;KvxWvRn&h@W>6{rz1dL#4~N?0ohd?~9uwE{vN7{D-`o zwtTNGqmj6k;1B27@E)X%CC`rt*YQmBU0VpBOI{zh75|sWcPRNUeSh5i?ef96I7@t{ zyl*9LsQ15df85;ko4EPuL+nq!7dM~%kqtNh*t+lkzI7$fB|l)k`yuUkM6YW2Rc#q?swEmo*hH*Y^IQRBv!e>gKrJwT6z9R1jolk^*ANO{CjtSkO>Tu+{R_u9aCiP1LuJozzN{Dpe{{fVR1X$pZ0;yL~F7WgMb~$ zc)Xz}+0u?Gi8anLT>fIGi!xlkqIfyBTQ>BI^^4tm1EwoGlg{YNaBqnh)+PJ;ldU7H zd{Hrm3Ae-hYD{+abTsv2?xPJ=>8=(rOEc`dip3z#<|Ov`;Ey$RjNp-_W!>E!=sYSn zEmd@6@P}Be=4@Tz)zq)Lt#SnKh!1|Q^-VsP9MN-&Tr-q6arMdwe_LSF-y8cGuCbR^ zb$7Imu=Jv$;O>U{rk1Td)r6H(y(nh5*LZWSBYac5u)a6h*1lte*|9`9;wJh(+`R>y z9LKdcdQNv&Pj^rE^wiAk%#tLpJff9JQVeop#~e$F9mRGMhpuCmY>^>!lk13?8CJ|- zF*7qWGc&Wa?_aaBqm`{h_PyVI?|t)|k!q`p>(r^kor+p{kL{LIAy^^4Oil70cK~mc 
zseu>1+-naNeh%-l>WD{huSe^N7u4HC?+u5PS20R&jXxzV|7Vn^f4gCi-Z~&()w`Dy zQnBC-6yJ49@Oys0<~tCD-{TTLkqXE4`XArxV zsudvJdz?VUf>^{U#Bd8K+!GS6psu&u!xH~HfUKAb5Ot5kD*reNLM!q9d$+Dc1uUOe z|NM`)-fQ*mIxAORU8>*Vq?LQ**>|eH@%D$$e*XDAP5;qf)T)Iu{GPgg@9{8~d-Qp1 zC!)uGhdSlU6@;Qgt@jWR_&or_J@!`dr?nBU_Z|zEtN1;axkBL!pE&{FjITR~uK%=W0*yv7aZtL)mhVR;~V*_iGdi zrTA~r%p>2actIHVdmMYYinm6-1yWV3xM9V`w_u8L=Xis4$?BNIZShXB(vy?af zy_QF4^;>MgJ=Q4y$gTVG>w{TP5eiX!{!pc6;f9 zl`8yi{;f};d6%z{>h3*YWf=3#kb0z z>0PRdZ_zpepT?#2UHc`Dj(2$ zR2APU@6r2I72hiF(}z?Q-zp!{3x7m;{Hoxe7ceIEtAc-Cz-ZL33jQgyF6FS@sc^El zuSnq%fSC7dL8Nducu+g}S@l~lqq}RNatM!B^IJ;2{&~d$bPqV7*FP^;;hneNd+YX` ziYfyE`)j|8$K@YM{iWjKzE$h}KfhJu{aPP>UJVZ_e%&$tctRC=_wCO=r7qjF-o&L)irBieE2J6&?o$fjfuv1`ZU;r@iGYG^zS> zm|0CQT_oq2&=ug5pC=hU@~*Pu`|9)Uc~8OOyAA?s1$znp+jB%SxFfd^v_?aajGI~+ zBnRue&`M(=kZvIm$@um*^SRcaD>leZY7*)7EKl|GFF*#uti-1$*vYn58O|*E0Biz=L70tUCTwymX?b?x>_Hp zx#Z`iv0XLThWWQYx_vNMk1DT4hTAliBZF*O&OayWsXe*WrZIYKfK3Z@SdDZw(#F{| z%lPSbo6d;fPMfZaw!mU7Z5MUA+AazXu;(f~hxY!18X?t4OKcC(Nj);sr@BUMqEkk6 zfTUsKcY`GLHlzDPG$Qc+kr4Hq9gzbTWlH(MJ&)gOwY@k;OWl{%zloYGHH+IT{P*W zhRMv)<2Ov&tq0GW)Z7SOGij#rAIMo@x<^ggVh&906zc!*ML8%)E!gi8r0$$`)1iYb zFFG_-%i8XsMY-Og0eaS2hqmi-se@I@TI|p`11+Z3VHCfb7$2wY?DvV&6y0BvMxzaX zNE-DPev>rnZ2Bi-bk%e=#He>5yf8*hoW!a)^>ThkbkzxOk5lttWKWzn1QUznbUgSU zL`&W9!8mPoBPZh2&+|9MX^aPJpC%#eSe)8~B5f12G32j^(}57)$2G>c&Z1{Oaz0K6 z!`8w$osYURV^lZhPK;5@m^(5?U1RRh7!8Q6!pdUKCFu{i|B7`H)K>d(A3>Y6P%lB3 zbSF=NvFL<+@d~D=$`%LWq-w>c3mVP-c>Jq1-g{wv*ua!y9i)6 zk1ZsJb8MDHdpS14qRv_uo@3GMKyZOY%L3h0rPMg^O0n0u65edkK<%ZC z26oF`WzbS%-q-D8OVze|y8^UQFV@_m3wmTnfbxt=hXT|<#E^5$e0y7fmIsdAY9D($ z(xn6Jo($3r&D|fQ`i8qTNUe>YC^1~PtC3??nCJpUTt@3(I#$CVO>mkbox&61F)=1( zD6AF(AB?#o;dLBu6}xWINcI6-D_NX0X@Dk91KhM9Hw<8me?fYlVVyN;uPKh3bjZAt znue;MNto%+iUEpG^B>pfG+P&d>jkSiQy^v@X1DVrti+SJ{WgKgSpB?sEn#2$})^!3*7y?mdRSUGuNx?(w3eLQk5_|(zP zI_=X8`>|PJfU|Sbr*=+v9v!AtK?_-XgJqEJ<^CM$Ng;XMr^bG%%|3nUm&7Lf0uML) zU%^0v8l$tu9BHJ}T#hu*X+1AhtA+lZ$A8|Gfj?6r5H^^CXMBc0VbX2 zSU(fe^~3^`+8XbzH0h?%ftQ=q&II1?Y`Q4Z&-|-eTI^>AyN?9_i+?Kq$iKE~7RLXT z_^G7WYx3h_tI3tcT9beFXQG8HmYTebhXdVQ)Wuwrn;BxN$$5qtYjR)X8U~tYTEooS z8q3cs!D(fS_tE&3ad=!;3+~hO7TO=MYqwtyx5k9K=)T~Sw^dj3mMz(zrqOOr4%Vol z24szoYBzyA!cR0dc$qG4>bzMO7xmk#quirhsssCNHSWj1CAz_GYmJ*}6^N#5Vva`h zHL*tHB^vxh(Ad`>^jj%))}8AC>dF4$0FB^UMBO!iLjdS-A`0))#kv6P*E?f9t_tsJ zfSQ;JR;C496kil@QF2}2`Ba-~|6cKyG5w}tsvC93iY#YRq4R`2aqe^KYMi?hg;M3d z8c=@TQ~WSt)D?n*nV=*o5nWi)}OM4AgQq23mB@ zSXO{rD&gX43>a-JoJB765#BME76_-i%d12S6xnKd{aiX^oj`iM>+N&-hU>KPxQTbi zwiS$34SJj=a0i|+a)6aui|ts8qo&F^Wr7w+jVG;GxTl5}!1UJWCI@?H)J&_P9;r6U z&<@nVDxMN_fnz5G9{z@CpVpOI1Ir2bn4qoVNWluDyrjP9v#%EJuhST<<*od@ia=SD z@yBHLyBoZS6}iyNaFOn0;LQ$W3^CE5DTY{R@Jyr7RPs}G6&k**$Ib<~rLhg^Ljf1* za{t%`l77en*IE#l{h3n>X_;3C&jaG3Iuz60=HwEo|&#`=Kd*!&qOWr)@*Kq@k zWf!TLjJFl6W3icO8;irtyA@a#tVL>F^U82$Y4I{Y*Z`TMSxjK8j99=tqj153J$MRj zsrF=V0gC1`!o?hJD(H~frFFV{*1!P-owvkrQDB4N;@KWUWuGuo*R|Tmk8m%5R$zCc zM;%zvNyBy7&%@1^Z0*q?0}Hvv2sZZUuwgg!Xta=kye4LHlS>T)&IXr`1>`Chlv;9y zOCxN#&ZP!Ua*azT9J$P;`N8B8mktLHrzVO15Nre6)I^ul0<=SyeFHS!NHz<=EmZan z&=Dbf257dKYyz}lzH`1pdD^Xe^toGL>NUc+IocpVm$)7`4^UIxsTZIgdM?rn^d4wrw;}UQnu3*5JtToL z#+!q_ntv(WS2&{l&=rH~YZ%{dOEcL&Vo!@8+?9XCJk{zNFQ>4+CP&PdMbUui*3xHp`oAl&7`Kj%IW9F zCuYzAzv%c38X1nQ%%F4OpY}!zGF4YP3p419EIL1fCPor-GpJp(QqxR29Ssf6r1r7I z>^fS8m2wiIFlBoJ%IdOad$)pU602GWYD@qEDv?lOEt)#9qEIxIch)V0zne} zBD^t1)y*|AYQw?hF>0V$UDBwHW_3!VUfO@t*+e(J`-RuNaLF#U{t7zW>ab z7-$al^rY{Nj?p#0D9GYTVL1YXvV3X>`jVk5F*+{guow-GWDSYYl1OqJo<<)+{>bPe zoFp|5=Z#7u8TX?yuUCJsWEuhh7{Xc2eb5W{_wwlotYC`>;2t*FkHw!YRHoz8Y;4mhiH+wzpGD21+qJt5**ZeGiz^%&YJgk 
z_NkM?gtG&3Yl!j#S(`)D(<<4~r+F5#Pg-(yh{oGbwMUmWGV&a`I7GXgAA!;85k$tq zpqw3|mhMxnLDsp*IPc2IA)4ep)6%C)-W&zgnA87lbilXV4G)5zE=N=@)ROtfzz%sV z`Z5h*Meh&&Hr$M$Nq~Skn%rPgPhdL`pL%kz31&|YG^weO{Y|*QC;OVzz(jkgbsWyO znB??HQSjS|B@vpWRh${6^P0IVLc>6}M!+thcrU|T5uyD?VqOF^!n;H}g}F5XGB&m- zLIX_@`j^a7haz+;@T1vLT4_bbM5uvnBHh{^%oC#2B^a3=rS5KcM1*F!9v)2aVuMjP zlxQ8H)_(3ONi%%+sH7!+Y_Ft#;lyr9EoAOaNfYE!Obz?KiUGC?bkkQSaBZYdOQ(5y z=H>())SsH04sI+mEkT!!$LFWhIE*g=^qyFrpt>f|-cHlpo}j!yVrhcL1wab5w#+>V zT5cs)CcsyKAew5II+&nQj*jZnoo+lOou;^v*=X1ECMM{V=i$NikUKsBMCFc6&}cs~ zEJ3})iGj$Gx&0H=B9iEvp!1R3-U;d-b-QB((L|>Nt&Qb&NYJ{pM7sn{kLR|*h~twA z=9HnFWL}7#(CAm8!y0|^4~U9Rv!ad)_9${e*IR-^HFYqM-OOF6^?&6VxH~PJ_npr| z+ACfwjcNx$^tNZ`IOFtyDJ@vH2t7{pxDD3lO$XAOcOVl=t!Er`I4~Fct2GD(oG}_0 zTTnQuXW^7=Zz>Pese{(AVBcc&B+-)=cI(~O&ucW1w-n^Z=?D@>G{3V!qx58b1C-HM z=k529@ac-SIK}CUELy8MczQ*PH@9h!ZXrEVAAwf4 z3OU)L{$_H31w|;@VOLbl_)^q%PWL7bHv%a>i7+unSJG2;W(s!so7Mo^g zyr$li9gBep`t4x6M&267c(BNcqg0<@99dU_EmhV|?^$F`4Lysj9)1>bI)-0D&Sd!# za&F45SgX!a*}fP}iT$E$FOm(p<@gxfMN`DfCcBD^MEp^YVh^!5n6_GVK%bS6= zvmE5N$bM{5F*;nNB5L<7T7+n3Q5OSSS~P}-JBzw#`e;#ss;7zyR6SqRM}C81DyL~N zmD8%2K-Koe6nO*O5XTk}e*R_+bWU=wJ)wp}jsZ4jv5+Bf{qgN&INHhKv~psJ#*a-$ zp|hN|Mx$2RJGU27bNmKs8wR6f*-`5C}uPZs@{2C^8-JnWly3izF}%2TgO zXgYHj7UP-o82s)k7FQS;O6*~90pbkvk#F#}-zvi%ukj#lDaUrC(@q{61@)Vjxj369 zr#X|esdvH|oz3eL&X8>Gm@W>c)ADo|B{!!luJ&f8=#$M2vKoV0YgSZjD$99A#j3JA zs;F30mgg1~v&!;@qGD26K3r6cD$6H|u0XA+Ibv{GUXdeum*w3#qDxsmkkbt7d$PD_ zQQ^9i$PF`g$x_ZV*! zqJ4~y2+=&o*M+Db<2I(a8s(1W0j%B?OPq~zOIsX`a&KGgjq(6nNyTP3Vr!IF2F2Pa z?+J>fQ9d6ObEDkE6;q?!-4$b_{G}^~MR~R>`bBx2E4oMdpex!(`HCx=qfJlLN1L9w z8sV{?I2+*wo;Vue&7RmB;bWfI8sX+4u{OdzLt<%!M~B4R2+s?NsS(~75@RELBqWAK z_7=pNxiVbMOq$HNQnSOTm;CH`Rw>4z5y`UyB(y3TTz!1=-vp08Qs1k@fM z6HU^^1i{nw;vg3E&~@3$Ck`X=>6m%{wb-vDTE&3$S7h z__UyX;+;Yjv4{83---VIaC-s4^GvMn5nZ+wG{Hz-H)xCTy22&D=AS}`qx3kkjYZ8h zr-{YAw8)77P1T$O0dA!`-7W5-PePfMMp&VLa(&Y$2r*(;OPB)S_4b12bHMY9UpLS#= zJ)OhO5g)=A4?@?OW}sIXEK%Z`PYtc*Xmqt5>l3E^wi39Ub`m$hbGR#bewfaNoO3?c z^POWpkM`q-@W6Ku__Wq{_xZHbhaeT|0jG`R3t^{;pbckOwRJCwJS$^&{ib#Ji6)5L^rAVCfc#= zsY99KL!>)IOF{&M zTc)Y2q;uRG66fu4=Wv`?C%mf(K9KP0r}NoFp_Rkl|AEx4GM!(<9oA8U5RdBW&HxwU zcL5>ZR^7u5cUl*??XK!dthdA1tK)GOTi{CUYkRmhN819|=2Y85&P1mSuFbJQfh%+p z=!F9Kd07H+HDq}mFvaq!MtwASS%d7YPU?C5mi*4Y33-Q>JgU*6uW*>UXP|BRUCs)f z`f#wL!SyuRTc;J;!vl1>u7PMetp{5gd`-_ObgTfE1DHE0hNwpr#EzI8Me$Wwxn)8u zh|+5DHbi=+m=UFkrW)tmfORxVM+0e_qBPu|spF5ao-;bi^F41!lRfij=+6dY>v>n2xJG;DdtQEj*WTqV>~`q zXa{c1|8De2-m_f(F}c#9zx@n~rKS8lbgY^`)qpI>?`zO-P3Zzx>%r3ow>4Z8$uqE8 z$ouC9d;BDgLe?lQ4N2B+b|5wCs>wAb1mkeLKdBYnY|>mky3V8x`e`iC0@KCAHRje- zJE;Ey>SSORM#dG;JVwSP7dp1TUvsIYxdQoH9C-@)LAl$d=5BJiOCwzf{qiyP3YGlp zHorL^rSgP?YNvrIr`fd5tgy_cUI97Dh7MScwP}x4VTuji4B+AR!RO}41?28Ac0H4swQ5!fW8uXtOlZC(Q`bC#_!C zI%??vJs)(OeIdT=c-unUD(I~XalfFqEX1RN5Qbj}Cgwq<>m~+xJpP{;3D4JoHn;xz6;b zul2$|cunpT)Rny@0wmlB6kVhFxZt|$NnB`abU8*qckho9bj(P8DX5Kj>lPf|MA@4x zhv~FROXAF)yLArpq0U$gk^f>bL7VSVV{13hb?KlDDtfFVXJg}oa)e78gGrqGtHJjQ z@Tji>I=sG&_iGT4-O4LTxHH}eb_Bb`ueVo7wiWt3r@2tq=W8xdJqjta>EhuTQy#Q%g5`b-yn7O&*|8QtNwpy%*+3g2 z)X1X~Vr=Ri0xCb7>H*Y&Ogb3gspJ(M_gnG+^msb7EcuXq7olf~o{bJ>7*XtJ{u_?U zYz}T`sMK-_?F;s?82yCkC#4oKgtozk8uwt7-p#@5%*P6sR&A^DSh$~7fdYCo2M1`- z3!yx;ZFw5E&YOov2;zE!BZsd8opa`sfFy@3k7t57FPi*sI8-N#s_nDRHn_r3O%Y&3s>cg@^ZFSrgdJ0ZdIspNaYOkbGB5bm41aT zRj6KA5f~F?&Zf$=RhI2kg&IXv&eUkmhRU=vTDC(KY8F#DGt+X`q4%`1?W!O;gUXqk z$XQdF4kpUBsY312RnEeUoK==&MNC zX6UP^BJ5iiOU$cGdy3`EsZ5RTE8e6E4Zct1kIqR$~52yD*wwz6B{Z~-N$lPSE2!rl{i4l zYDRsZ%$Zq<7Cc$v6jt>~wVthilryCgjr~!H6P0PxkJch*=+imlE76Lliyx~@r=C8A zoSn}+c?9Esb_>#Ho=fzoM5~_9ZCHt}JP$2JmlqP(U#7+{CN8~9`(Mo6@-hv6DY52d 
z>h+Vv@|WrQPjZ*OOoLv|168TcB7&MDwN;lZp{#{XQGhNO$vy!ZBV;e7rj>b6&6>&X z&~tpPhxu3hZYA85FhybOtOE_}OIIE6`$aE1An0fFbqA2&fX_QLQN-{^v%tf!n8~wZ zryV+K1vfc#!HUd;g@WB3_@Q8Yj}h?WTXMgkI?pNfSqyr{Nty&gT&z_lYOlwZ3Tki2 zDFRe?a-g94qCN(Yn%iGhkef;o7z({-LO|Y)<;oBZW3+Re{T(5Y@>$zJt7`U^5G~Q# zX^^dNHEKe`B+N}Am?6~%CqsP$?a|&i%iIv68`eLrhUmK80Xc1g=DHB=2>t_d`Ry)v zZkiEt(f*Q9N_3^x_J3L)9!i;&WiMuG$tsMG>LUa*LaLQLpCQ5kc2DJ1Na6XuJX`=} ziJ<}%ThR^zsv!sI$+{RU=$u}(9Wa&QAidE5C8yf@zV%8GF4<4}K{E?m5;5JtVoK0S zP4%LK4BDeR$T_B?i0UJLi{J4#YIKd0(DU~G`kwpyYdLK_=peItdbETS2V82cc}P#w z(m?$T(&ZsYi1Z@+AOq4Pdt7Q~;C7=6ri3NVoIoj1$cF>30{>VNgmZnn6bR?pb{d*F zY)g>XGacDV-ImEKL0TS6Lj!w)>5E;+)?hS&yWzk169_#)z^cu&gZ?s$plooiPf1g;sv&M)8#=<;tc4uU~H{Y zedbPcsl7WC9Zd6_6&`r&aN)b`eSVYH;PD!ECIz{nW=;sI{^tc%|KozH|2aYGYU(?K zG}P?NyMwebU~Lc5ao9%%sj-tjD@X?%Z+MW;I)4H&6m+g487$O|+5?x$5S4L0q6)9w z+LJXAbQ_8Fi_w^9xfU_n8wCTa*5oOEs2w|-1Rd3iZV})>tZf1WU&-Bq`U+>20F2=} zy(FAwCf73y_mpb8G^`qMjd5hNpn00JQP5Idt`{&alCxDyN$|DJ1Yj8B!V4OJizexE zLI5gI*&_fBTxozu3JJOyxCq)-;3BA5FPS*mYJ4^Mtv~8}ZJN*CaT@~S^rJS2sJ|Sx zVJw-)`)#T#f~Re|AfoLY(9F(Bm^B5QgEnmnG~}~3wXvP8HutifwKfm2p>OTxICC8y z;y6_aH|$Sj{)2c3aFbq!|q_i0N| z_J#J%ePS@|m^`<)52a$fC!YGs*bH<28|0tLEYz>(gNIF=3eh^_03J;?V~0Yt+{8KW z7w|5HXt5=MvbHFV$Y?v3AEIk^1P!%vykOYHcVqSf?gJSVK7{yNGU{@UqNpRn(=Xcsty-r(EbF23B zY>)DEd!|R*^dQm)b%^Gvi}@l>>Mk<`K4nP2L%OjY1BbHHE(Jq@=}rtL0^NeKDK4D} zs`J{_9eQgV@n1-&Sq8DcQrTf7r)sc#EwH)C`vt!QlO0e69NcB|HtvR@oi`G&K5HR< zjJrle=GnAcSPN`AEEKoZB_Mm*5P!%vHchdT*DTs$Db5_qoAr25eXsZY)Xe2Zk{Zbp<>8~y&Wfz%eOfBdZWgdm$ z8HUat;JCf!0;b!gj0WcFCM+MC8*jIQ8G$eX@k_)zEdn&$taqz!-rM}zYay^{7ulI% z!-ByC`?gVcz|dVWLV*9aB0R^YQ6h|`Xm1vqWz!DR8D%r9GP=Tg&nnf>rk$4JwU62k z_`>t{H_mDvjHHJ08+w1(;Bk?Wl6Gj31z|d+L1?~I&uSs*OT(WR29zSBi-;T#(|J+! zXc*fa8xp2-reY@QTJdFJnrjywCh3HI-v|i+jVuY%B`0T2m<9&lSOk;6ATFvEt{fJo zo}SYq%&R@ObC|Y!ZpSbk_T2UmV1;6B!_?W&h3(*e--YeqX@4P_pD3Nhl4r|129LD` zuh0(#`f*|)tXdiJ{nlF9NpU)>l^Gkyt#RL%aiGyA#)deQp^^S^s%NE7$7N*2`^Lee zs`Ol2_Kwp!8@J&iXQqyvMj=IiZSq~TdC`|WAwmkvW^w8vlNZuxzDz^iLsFhgquCL8 zCXHG~lc$g$bz`VdNY)Rq17c5QwO2fpidA9V`LD*m6N>4X|9) zA8olPINfqlaGNDTTaCBnNi^mpw>mV}QTZz!7Y%H6un`!~U!Q&sMx5(7)E}mt9EK_9 z4YnEsooe!&1^)1PEW9o+S>XNU8Sr<;bLs%bE?Bfq#87j9DNk8+!<4%%n1v>nTR407 zLycg$c$jCoXm*I@qN(wgi~6%u`OB@pesy;KlW-N_C-;YGjd&lunHhB@Oy(jDB{dJE z)t7*)`;jxo@(%*utcL-6gKhtCm^RoC0RkI1%dxVn-1NO+#8a1>!nDkj8^Y8jlmygG z3*~~454lQ6>dP5nY86gS3)7hJf89E}_t7(mD(b@^>JNW!3EL=kP8$%&SxDz;AS~4y z!O9JA@wpER>*nn4iPIK6-X$G^Ingbh`WqP?(&@DD+N4AF>a|O!H34xUj_VmhkI`1- zY8>Q90usn>_6KOq{$P3n7qub!(kObjd%1JE9gc2|rH-f3$h49$J4$oW!0t5F!11)TSi965@3c{g!*o)aPx|XM$W)TEH5enn zLM~M{4(cAVYWA-v0YegsiMd}t`y{11Ps3gN4{Nhc<5_{n0){}MtL7}RK^TT%WDHrz z2^f0oxPL|)ksdbe(>%b)6(a~3IUu~PHk}nf$IDC?)gf#^X$TvRS+p>qL=K&-t^`EjUlo7ELynpkH1fsB4`Ck`CzM{*p!-KA@|i2=$c^ zOZqp%P~*ZH4##Y~q$6gyv4j-czZ#~dRtNyN%5F7RkD6Cd_7FYV(kNhFSu_ucdF(9HXa$2onX0)+F8s~5`Y&_kND-hlz2JOBh?#ID;k*#J?|T4YXeV2&SY)22cxyL z+$pGcazW&(|p38GtP~l*})AWp9 z*|c8I=$1_<^rPmOY}(`$8I?^fgYg?#v`NOgWK-QpEI*4nMC_drsmXlL7IEWPBqrBh4 zbXQr`U*pJClfKjdDb=n?oI#93Mo{hwYA(`J%Z2ujK?;oy8gj5rDHu6E50Md97+DI2 zU^wq*(O|bypb*jc5~C}(Dn)w)8t-bBLG~OI?9-?~8RTvb0K4A|?B^DxXs?ZQQ>T~K zr4$WvkzVfZ=Vqm7bO`B-}M$1g>zN(zj3eMy0lZYe1+#+}gYm!y3q1x9eVq(I*% zN(yv!7P|hDbg3keUw+;v{9aqm1hG8caE4m6)fCfgK5mL}HeWNvaGM(ioc=a13n;E) zN5Jc0^Vxvc!RGwHQ&8DiqJ_;XEz!W{9hSIe@gd7OXYp#=tMBju+q;V5_8oX550dPS ze2$%^88kw(>Sn;JAOf4&lftPF8(0xpoPkTFSVLII2Ax*vFvJrxGkB>dMrQCXPxQ** zBc9VbgSUiY?GrT9m&fDOI$UaVoMwcbE(tmr5fd}GK~xON;I2{8IfMH}oyHk_0ouw; zt{-#iWpc~dO8~4`Y+W3fQLX_|5HAIEd)cFHIMR(|LWmvpb#FW>{A+s0`-}29;4*83S1U2<}BG7bBR0pJ0$ci?lE-#7}DN z7!sU(UESDQggX>IR^l0gonmYZlvw5~xUmCi;0?zH8Z&IhCtJ!|dZ+E;=`}kx*rk1r 
[base85-encoded binary patch data omitted]