From 1ba3584631f4db56d0f273de3e3bd7c1b29a29ef Mon Sep 17 00:00:00 2001 From: openeuler_bot Date: Fri, 15 Aug 2025 08:24:28 +0000 Subject: [PATCH] 24.03-lts-sp1 update elasticsearch to 9.1.2 --- .../9.1.2/24.03-lts-sp1/Dockerfile | 69 +++++++ .../24.03-lts-sp1/config/elasticsearch.yml | 2 + .../24.03-lts-sp1/config/log4j2.properties | 193 ++++++++++++++++++ .../9.1.2/24.03-lts-sp1/entrypoint.sh | 84 ++++++++ Bigdata/elasticsearch/meta.yml | 4 +- 5 files changed, 351 insertions(+), 1 deletion(-) create mode 100644 Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/Dockerfile create mode 100644 Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/elasticsearch.yml create mode 100644 Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/log4j2.properties create mode 100644 Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/entrypoint.sh diff --git a/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/Dockerfile b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/Dockerfile new file mode 100644 index 00000000..046a052f --- /dev/null +++ b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/Dockerfile @@ -0,0 +1,69 @@ +ARG BASE=openeuler/openeuler:24.03-lts-sp1 +FROM ${BASE} AS builder + +ARG TARGETARCH +ARG BUILDARCH +ARG VERSION=9.1.2 +ARG TINI_VERSION=0.19.0 + +RUN curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-${TARGETARCH} ; \ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-${TARGETARCH}.sha256sum ; \ + sha256sum -c tini-${TARGETARCH}.sha256sum ; \ + rm tini-${TARGETARCH}.sha256sum ; \ + mv tini-${TARGETARCH} /bin/tini ; \ + chmod 0555 /bin/tini + +RUN mkdir /usr/share/elasticsearch +WORKDIR /usr/share/elasticsearch + +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + BUILDARCH="x86_64"; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + BUILDARCH="aarch64"; \ + fi; \ + curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz 
https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-${VERSION}-linux-${BUILDARCH}.tar.gz + +RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1 + +COPY config/elasticsearch.yml config/ +COPY config/log4j2.properties config/log4j2.docker.properties + +RUN yum install -y findutils +RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elasticsearch-env && \ + mkdir data && \ + mv config/log4j2.properties config/log4j2.file.properties && \ + mv config/log4j2.docker.properties config/log4j2.properties && \ + find . -type d -exec chmod 0555 {} + && \ + find . -type f -exec chmod 0444 {} + && \ + chmod 0555 bin/* jdk/bin/* jdk/lib/jspawnhelper modules/x-pack-ml/platform/linux-*/bin/* && \ + chmod 0775 bin config config/jvm.options.d data logs plugins && \ + find config -type f -exec chmod 0664 {} + + +FROM ${BASE} + +RUN yum install -y shadow findutils nc p11-kit unzip zip && yum clean all +RUN groupadd -g 1000 elasticsearch && \ + useradd -u 1000 -g 1000 -d /usr/share/elasticsearch -m elasticsearch && \ + usermod -aG root elasticsearch && \ + chown -R 0:0 /usr/share/elasticsearch + +ENV ELASTIC_CONTAINER=true +WORKDIR /usr/share/elasticsearch +COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch +COPY --from=builder --chown=0:0 /bin/tini /bin/tini +ENV PATH=/usr/share/elasticsearch/bin:$PATH +ENV SHELL=/bin/bash +COPY entrypoint.sh /usr/local/bin/entrypoint.sh + +RUN chmod g=u /etc/passwd && \ + chmod 0555 /usr/local/bin/entrypoint.sh && \ + find / -xdev -perm -4000 -exec chmod ug-s {} + && \ + chmod 0775 /usr/share/elasticsearch && \ + chown elasticsearch bin config config/jvm.options.d data logs plugins + +EXPOSE 9200 9300 + +ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/entrypoint.sh"] +CMD ["eswrapper"] +USER 1000:0 \ No newline at end of file diff --git a/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/elasticsearch.yml 
b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/elasticsearch.yml new file mode 100644 index 00000000..43e858ee --- /dev/null +++ b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/elasticsearch.yml @@ -0,0 +1,2 @@ +cluster.name: "docker-cluster" +network.host: 0.0.0.0 \ No newline at end of file diff --git a/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/log4j2.properties b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/log4j2.properties new file mode 100644 index 00000000..c0d67c8e --- /dev/null +++ b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/config/log4j2.properties @@ -0,0 +1,193 @@ +status = error + +######## Server JSON ############################ +appender.rolling.type = Console +appender.rolling.name = rolling +appender.rolling.layout.type = ECSJsonLayout +appender.rolling.layout.dataset = elasticsearch.server + +################################################ + +################################################ + +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling + +######## Deprecation JSON ####################### +appender.deprecation_rolling.type = Console +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.layout.type = ECSJsonLayout +# Intentionally follows a different pattern to above +appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch +appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter + +appender.header_warning.type = HeaderWarningAppender +appender.header_warning.name = header_warning +################################################# + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.header_warning.ref = header_warning +logger.deprecation.additivity = false + +######## Search slowlog JSON #################### +appender.index_search_slowlog_rolling.type = Console 
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog + +################################################# + +################################################# +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +######## Indexing slowlog JSON ################## +appender.index_indexing_slowlog_rolling.type = Console +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout +appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog + +################################################# + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false + +logger.org_apache_pdfbox.name = org.apache.pdfbox +logger.org_apache_pdfbox.level = off + +logger.org_apache_poi.name = org.apache.poi +logger.org_apache_poi.level = off + +logger.org_apache_fontbox.name = org.apache.fontbox +logger.org_apache_fontbox.level = off + +logger.org_apache_xmlbeans.name = org.apache.xmlbeans +logger.org_apache_xmlbeans.level = off + +logger.com_amazonaws.name = com.amazonaws +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = com.amazonaws.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = 
com.amazonaws.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = com.amazonaws.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error + +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.name = com.amazonaws.services.s3.internal.UseArnRegionResolver +logger.com_amazonaws_services_s3_internal_UseArnRegionResolver.level = error + +appender.audit_rolling.type = Console +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = {\ + "type":"audit", \ + "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\ + %varsNotEmpty{, "cluster.name":"%enc{%map{cluster.name}}{JSON}"}\ + %varsNotEmpty{, "cluster.uuid":"%enc{%map{cluster.uuid}}{JSON}"}\ + %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\ + %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\ + %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\ + %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\ + %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\ + %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\ + %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\ + %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\ + %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\ + %varsNotEmpty{, "user.realm_domain":"%enc{%map{user.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\ + %varsNotEmpty{, "user.run_by.realm_domain":"%enc{%map{user.run_by.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\ + %varsNotEmpty{, 
"user.run_as.realm_domain":"%enc{%map{user.run_as.realm_domain}}{JSON}"}\ + %varsNotEmpty{, "user.roles":%map{user.roles}}\ + %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\ + %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\ + %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\ + %varsNotEmpty{, "cross_cluster_access":%map{cross_cluster_access}}\ + %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\ + %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\ + %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\ + %varsNotEmpty{, "realm_domain":"%enc{%map{realm_domain}}{JSON}"}\ + %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\ + %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\ + %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\ + %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\ + %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\ + %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\ + %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\ + %varsNotEmpty{, "indices":%map{indices}}\ + %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\ + %varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\ + %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\ + %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\ + %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\ + %varsNotEmpty{, "put":%map{put}}\ + %varsNotEmpty{, "delete":%map{delete}}\ + %varsNotEmpty{, "change":%map{change}}\ + %varsNotEmpty{, "create":%map{create}}\ + %varsNotEmpty{, "invalidate":%map{invalidate}}\ + }%n +# "node.name" node name from the `elasticsearch.yml` settings +# "node.id" node id which should not change between cluster restarts +# "host.name" unresolved hostname of the local node +# 
"host.ip" the local bound ip (i.e. the ip listening for connections) +# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal) +# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc. +# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal" +# "user.name" the subject name as authenticated by a realm +# "user.run_by.name" the original authenticated subject name that is impersonating another one. +# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as. +# "user.realm" the name of the realm that authenticated "user.name" +# "user.realm_domain" if "user.realm" is under a domain, this is the name of the domain +# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name") +# "user.run_by.realm_domain" if "user.run_by.realm" is under a domain, this is the name of the domain +# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from +# "user.run_as.realm_domain" if "user.run_as.realm" is under a domain, this is the name of the domain +# "user.roles" the roles array of the user; these are the roles that are granting privileges +# "apikey.id" this field is present if and only if the "authentication.type" is "api_key" +# "apikey.name" this field is present if and only if the "authentication.type" is "api_key" +# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token +# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token +# "cross_cluster_access" this field is present if and only if the associated authentication occurred cross cluster +# "event.type" informs about what internal 
system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change" +# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node +# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated +# "realm_domain" if "realm" is under a domain, this is the name of the domain +# "url.path" the URI component between the port and the query string; it is percent (URL) encoded +# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded +# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT +# "request.body" the content of the request body entity, JSON escaped +# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request +# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal) +# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal) +# "indices" the array of indices that the "action" is acting upon +# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header +# "trace_id" an identifier conveyed by the part of "traceparent" request header +# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array) +# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event +# "rule" name of the applied rule if the "origin.type" is "ip_filter" +# the "put", "delete", "change", "create", "invalidate" fields are only present +# when the "event.type" is "security_config_change" and contain 
the security config change (as an object) taking effect + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + +logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal \ No newline at end of file diff --git a/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/entrypoint.sh b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/entrypoint.sh new file mode 100644 index 00000000..83eb0525 --- /dev/null +++ b/Bigdata/elasticsearch/9.1.2/24.03-lts-sp1/entrypoint.sh @@ -0,0 +1,84 @@ +#!/bin/bash +set -e + +# Files created by Elasticsearch should always be group writable too +umask 0002 + +# Allow user specify custom CMD, maybe bin/elasticsearch itself +# for example to directly specify `-E` style parameters for elasticsearch on k8s +# or simply to run /bin/bash to check the image +if [[ "$1" == "eswrapper" || $(basename "$1") == "elasticsearch" ]]; then + # Rewrite CMD args to remove the explicit command, + # so that we are backwards compatible with the docs + # from the previous Elasticsearch versions < 6 + # and configuration option: + # https://www.elastic.co/guide/en/elasticsearch/reference/5.6/docker.html#_d_override_the_image_8217_s_default_ulink_url_https_docs_docker_com_engine_reference_run_cmd_default_command_or_options_cmd_ulink + # Without this, user could specify `elasticsearch -E x.y=z` but + # `bin/elasticsearch -E x.y=z` would not work. In any case, + # we want to continue through this script, and not exec early. 
+ set -- "${@:2}" +else + # Run whatever command the user wanted + exec "$@" +fi + +# Allow environment variables to be set by creating a file with the +# contents, and setting an environment variable with the suffix _FILE to +# point to it. This can be used to provide secrets to a container, without +# the values being specified explicitly when running the container. +# +# This is also sourced in elasticsearch-env, and is only needed here +# as well because we use ELASTIC_PASSWORD below. Sourcing this script +# is idempotent. +source /usr/share/elasticsearch/bin/elasticsearch-env-from-file + +if [[ -f bin/elasticsearch-users ]]; then + # Check for the ELASTIC_PASSWORD environment variable to set the + # bootstrap password for Security. + # + # This is only required for the first node in a cluster with Security + # enabled, but we have no way of knowing which node we are yet. We'll just + # honor the variable if it's present. + if [[ -n "$ELASTIC_PASSWORD" ]]; then + [[ -f /usr/share/elasticsearch/config/elasticsearch.keystore ]] || (elasticsearch-keystore create) + if ! (elasticsearch-keystore has-passwd --silent) ; then + # keystore is unencrypted + if ! (elasticsearch-keystore list | grep -q '^bootstrap.password$'); then + (echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x 'bootstrap.password') + fi + else + # keystore requires password + if ! (echo "$KEYSTORE_PASSWORD" \ + | elasticsearch-keystore list | grep -q '^bootstrap.password$') ; then + COMMANDS="$(printf "%s\n%s" "$KEYSTORE_PASSWORD" "$ELASTIC_PASSWORD")" + (echo "$COMMANDS" | elasticsearch-keystore add -x 'bootstrap.password') + fi + fi + fi +fi + +if [[ -n "$ES_LOG_STYLE" ]]; then + case "$ES_LOG_STYLE" in + console) + # This is the default. Nothing to do. + ;; + file) + # Overwrite the default config with the stack config. Do this as a + # copy, not a move, in case the container is restarted. 
+ cp -f /usr/share/elasticsearch/config/log4j2.file.properties /usr/share/elasticsearch/config/log4j2.properties + ;; + *) + echo "ERROR: ES_LOG_STYLE set to [$ES_LOG_STYLE]. Expected [console] or [file]" >&2 + exit 1 ;; + esac +fi + +if [[ -n "$ENROLLMENT_TOKEN" ]]; then + POSITIONAL_PARAMETERS="--enrollment-token $ENROLLMENT_TOKEN" +else + POSITIONAL_PARAMETERS="" +fi + +# Signal forwarding and child reaping is handled by `tini`, which is the +# actual entrypoint of the container +exec /usr/share/elasticsearch/bin/elasticsearch "$@" $POSITIONAL_PARAMETERS <<<"$KEYSTORE_PASSWORD" \ No newline at end of file diff --git a/Bigdata/elasticsearch/meta.yml b/Bigdata/elasticsearch/meta.yml index 86503555..89ac3762 100644 --- a/Bigdata/elasticsearch/meta.yml +++ b/Bigdata/elasticsearch/meta.yml @@ -1,2 +1,4 @@ 8.17.3-oe2403sp1: - path: 8.17.3/24.03-lts-sp1/Dockerfile \ No newline at end of file + path: 8.17.3/24.03-lts-sp1/Dockerfile +9.1.2-oe2403sp1: + path: 9.1.2/24.03-lts-sp1/Dockerfile \ No newline at end of file -- Gitee